/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ARM_NEON_H
#define __ARM_NEON_H

#if !defined(__arm__) && !defined(__aarch64__) && !defined(__arm64ec__)
#error "<arm_neon.h> is intended only for ARM and AArch64 targets"
#elif !defined(__ARM_FP)
#error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard"
#else

#include <stdint.h>

#include <arm_bf16.h>
#include <arm_vector_types.h>
#if defined(__aarch64__) || defined(__arm64ec__)
typedef uint8_t poly8_t;
typedef uint16_t poly16_t;
typedef uint64_t poly64_t;
typedef __uint128_t poly128_t;
#else
typedef int8_t poly8_t;
typedef int16_t poly16_t;
typedef int64_t poly64_t;
#endif
typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;

typedef struct poly8x8x2_t {
  poly8x8_t val[2];
} poly8x8x2_t;

typedef struct poly8x16x2_t {
  poly8x16_t val[2];
} poly8x16x2_t;

typedef struct poly16x4x2_t {
  poly16x4_t val[2];
} poly16x4x2_t;

typedef struct poly16x8x2_t {
  poly16x8_t val[2];
} poly16x8x2_t;

typedef struct poly64x1x2_t {
  poly64x1_t val[2];
} poly64x1x2_t;

typedef struct poly64x2x2_t {
  poly64x2_t val[2];
} poly64x2x2_t;

typedef struct poly8x8x3_t {
  poly8x8_t val[3];
} poly8x8x3_t;

typedef struct poly8x16x3_t {
  poly8x16_t val[3];
} poly8x16x3_t;

typedef struct poly16x4x3_t {
  poly16x4_t val[3];
} poly16x4x3_t;

typedef struct poly16x8x3_t {
  poly16x8_t val[3];
} poly16x8x3_t;

typedef struct poly64x1x3_t {
  poly64x1_t val[3];
} poly64x1x3_t;

typedef struct poly64x2x3_t {
  poly64x2_t val[3];
} poly64x2x3_t;

typedef struct poly8x8x4_t {
  poly8x8_t val[4];
} poly8x8x4_t;

typedef struct poly8x16x4_t {
  poly8x16_t val[4];
} poly8x16x4_t;

typedef struct poly16x4x4_t {
  poly16x4_t val[4];
} poly16x4x4_t;

typedef struct poly16x8x4_t {
  poly16x8_t val[4];
} poly16x8x4_t;

typedef struct poly64x1x4_t {
  poly64x1_t val[4];
} poly64x1x4_t;

typedef struct poly64x2x4_t {
  poly64x2_t val[4];
} poly64x2x4_t;

#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))

#if !defined(__LITTLE_ENDIAN__)
#if defined(__aarch64__) || defined(__arm64ec__)
#define __lane_reverse_64_32 1,0
#define __lane_reverse_64_16 3,2,1,0
#define __lane_reverse_64_8 7,6,5,4,3,2,1,0
#define __lane_reverse_128_64 1,0
#define __lane_reverse_128_32 3,2,1,0
#define __lane_reverse_128_16 7,6,5,4,3,2,1,0
#define __lane_reverse_128_8 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
#else
#define __lane_reverse_64_32 1,0
#define __lane_reverse_64_16 3,2,1,0
#define __lane_reverse_64_8 7,6,5,4,3,2,1,0
#define __lane_reverse_128_64 0,1
#define __lane_reverse_128_32 1,0,3,2
#define __lane_reverse_128_16 3,2,1,0,7,6,5,4
#define __lane_reverse_128_8 7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8
#endif
#endif
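
/* Illustrative sketch, not part of the upstream header: on big-endian targets
 * the __lane_reverse_* index lists above are handed to __builtin_shufflevector
 * to translate between the architectural lane order and the element order the
 * compiler keeps in registers, which is what the #else branches below do with
 * their inputs and results. The helper name __example_reverse_s32x4 is
 * hypothetical. */
#if !defined(__LITTLE_ENDIAN__)
__ai __attribute__((target("neon"))) int32x4_t __example_reverse_s32x4(int32x4_t __v) {
  /* Permute the 32-bit lanes of a 128-bit vector with the same index list the
     generated intrinsics use on this target (a full reverse on AArch64, a
     per-64-bit-half reverse on 32-bit ARM). */
  return __builtin_shufflevector(__v, __v, __lane_reverse_128_32);
}
#endif
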
#ifdef __LITTLE_ENDIAN__
#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x8_t __ret; \
  bfloat16x4_t __s0 = __p0; \
  __ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_splatq_lane_bf16(__builtin_bit_cast(int8x8_t, __s0), __p1, 11)); \
  __ret; \
})
#else
#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x8_t __ret; \
  bfloat16x4_t __s0 = __p0; \
  bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
  __ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_splatq_lane_bf16(__builtin_bit_cast(int8x8_t, __rev0), __p1, 11)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
  __ret; \
})
#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x8_t __ret; \
  bfloat16x4_t __s0 = __p0; \
  __ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_splatq_lane_bf16(__builtin_bit_cast(int8x8_t, __s0), __p1, 11)); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x4_t __ret; \
  bfloat16x4_t __s0 = __p0; \
  __ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_splat_lane_bf16(__builtin_bit_cast(int8x8_t, __s0), __p1, 11)); \
  __ret; \
})
#else
#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x4_t __ret; \
  bfloat16x4_t __s0 = __p0; \
  bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
  __ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_splat_lane_bf16(__builtin_bit_cast(int8x8_t, __rev0), __p1, 11)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
  __ret; \
})
#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x4_t __ret; \
  bfloat16x4_t __s0 = __p0; \
  __ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_splat_lane_bf16(__builtin_bit_cast(int8x8_t, __s0), __p1, 11)); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x8_t __ret; \
  bfloat16x8_t __s0 = __p0; \
  __ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_splatq_laneq_bf16(__builtin_bit_cast(int8x16_t, __s0), __p1, 43)); \
  __ret; \
})
#else
#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x8_t __ret; \
  bfloat16x8_t __s0 = __p0; \
  bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
  __ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_splatq_laneq_bf16(__builtin_bit_cast(int8x16_t, __rev0), __p1, 43)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
  __ret; \
})
#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x8_t __ret; \
  bfloat16x8_t __s0 = __p0; \
  __ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_splatq_laneq_bf16(__builtin_bit_cast(int8x16_t, __s0), __p1, 43)); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x4_t __ret; \
  bfloat16x8_t __s0 = __p0; \
  __ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_splat_laneq_bf16(__builtin_bit_cast(int8x16_t, __s0), __p1, 43)); \
  __ret; \
})
#else
#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x4_t __ret; \
  bfloat16x8_t __s0 = __p0; \
  bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
  __ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_splat_laneq_bf16(__builtin_bit_cast(int8x16_t, __rev0), __p1, 43)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
  __ret; \
})
#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x4_t __ret; \
  bfloat16x8_t __s0 = __p0; \
  __ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_splat_laneq_bf16(__builtin_bit_cast(int8x16_t, __s0), __p1, 43)); \
  __ret; \
})
#endif

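/* Illustrative sketch, not part of the upstream header: the splat*_lane*
 * helpers above broadcast a single lane of a bf16 vector; the public
 * vdup*_lane*_bf16 macros defined further down expand to them. The helper
 * name __example_broadcast_lane2 is hypothetical. */
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __example_broadcast_lane2(bfloat16x4_t __v) {
  /* Fill all eight lanes of the result with lane 2 of __v. */
  return splatq_lane_bf16(__v, 2);
}
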
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("bf16,neon"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfdotq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#else
__ai __attribute__((target("bf16,neon"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfdotq_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfdotq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("bf16,neon"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vbfdot_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#else
__ai __attribute__((target("bf16,neon"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  bfloat16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vbfdot_f32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
__ai __attribute__((target("bf16,neon"))) float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vbfdot_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfmlalbq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#else
__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfmlalbq_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfmlalbq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfmlaltq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#else
__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfmlaltq_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfmlaltq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfmmlaq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#else
__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbfmmlaq_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

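/* Illustrative usage sketch, not part of the upstream header: vbfdotq_f32
 * accumulates a two-element bf16 dot product into each float32 lane of the
 * accumulator, so calling it repeatedly keeps a running sum. The helper name
 * __example_bfdot_accumulate is hypothetical. */
__ai __attribute__((target("bf16,neon"))) float32x4_t __example_bfdot_accumulate(float32x4_t __acc, bfloat16x8_t __a, bfloat16x8_t __b) {
  /* __acc[i] += __a[2*i] * __b[2*i] + __a[2*i+1] * __b[2*i+1] for i = 0..3 */
  return vbfdotq_f32(__acc, __a, __b);
}
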
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
  bfloat16x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
  return __ret;
}
#else
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
  bfloat16x8_t __ret;
  bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
  bfloat16x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
  return __ret;
}
#endif

#define vcreate_bf16(__p0) __extension__ ({ \
  bfloat16x4_t __ret; \
  uint64_t __promote = __p0; \
  __ret = __builtin_bit_cast(bfloat16x4_t, __promote); \
  __ret; \
})
__ai __attribute__((target("bf16,neon"))) float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
  float32_t __ret;
  __ret = __builtin_bit_cast(float32_t, (uint32_t)(__builtin_bit_cast(uint16_t, __p0)) << 16);
  return __ret;
}
__ai __attribute__((target("bf16,neon"))) bfloat16_t vcvth_bf16_f32(float32_t __p0) {
  bfloat16_t __ret;
  __ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vcvth_bf16_f32(__p0));
  return __ret;
}
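
/* Illustrative sketch, not part of the upstream header: vcvtah_f32_bf16 widens
 * a bf16 value by placing its 16 bits in the high half of a float, as the
 * bit_cast-and-shift above shows, and vcvth_bf16_f32 narrows a float back to
 * bf16. The helper name __example_bf16_round_trip is hypothetical. */
__ai __attribute__((target("bf16,neon"))) float32_t __example_bf16_round_trip(float32_t __x) {
  /* Narrow to bf16 (losing low-order precision), then widen back to float. */
  bfloat16_t __b = vcvth_bf16_f32(__x);
  return vcvtah_f32_bf16(__b);
}
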
#ifdef __LITTLE_ENDIAN__
|
|
#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16_t __ret; \
|
|
bfloat16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vduph_lane_bf16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16_t __ret; \
|
|
bfloat16x4_t __s0 = __p0; \
|
|
bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vduph_lane_bf16(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_bf16(__p0_0, __p1_0) __extension__ ({ \
|
|
bfloat16x8_t __ret_0; \
|
|
bfloat16x4_t __s0_0 = __p0_0; \
|
|
__ret_0 = splatq_lane_bf16(__s0_0, __p1_0); \
|
|
__ret_0; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_bf16(__p0_1, __p1_1) __extension__ ({ \
|
|
bfloat16x8_t __ret_1; \
|
|
bfloat16x4_t __s0_1 = __p0_1; \
|
|
bfloat16x4_t __rev0_1; __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, __lane_reverse_64_16); \
|
|
__ret_1 = __noswap_splatq_lane_bf16(__rev0_1, __p1_1); \
|
|
__ret_1 = __builtin_shufflevector(__ret_1, __ret_1, __lane_reverse_128_16); \
|
|
__ret_1; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_bf16(__p0_2, __p1_2) __extension__ ({ \
|
|
bfloat16x4_t __ret_2; \
|
|
bfloat16x4_t __s0_2 = __p0_2; \
|
|
__ret_2 = splat_lane_bf16(__s0_2, __p1_2); \
|
|
__ret_2; \
|
|
})
|
|
#else
|
|
#define vdup_lane_bf16(__p0_3, __p1_3) __extension__ ({ \
|
|
bfloat16x4_t __ret_3; \
|
|
bfloat16x4_t __s0_3 = __p0_3; \
|
|
bfloat16x4_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, __lane_reverse_64_16); \
|
|
__ret_3 = __noswap_splat_lane_bf16(__rev0_3, __p1_3); \
|
|
__ret_3 = __builtin_shufflevector(__ret_3, __ret_3, __lane_reverse_64_16); \
|
|
__ret_3; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16_t __ret; \
|
|
bfloat16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vduph_laneq_bf16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16_t __ret; \
|
|
bfloat16x8_t __s0 = __p0; \
|
|
bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vduph_laneq_bf16(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_bf16(__p0_4, __p1_4) __extension__ ({ \
|
|
bfloat16x8_t __ret_4; \
|
|
bfloat16x8_t __s0_4 = __p0_4; \
|
|
__ret_4 = splatq_laneq_bf16(__s0_4, __p1_4); \
|
|
__ret_4; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_bf16(__p0_5, __p1_5) __extension__ ({ \
|
|
bfloat16x8_t __ret_5; \
|
|
bfloat16x8_t __s0_5 = __p0_5; \
|
|
bfloat16x8_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, __lane_reverse_128_16); \
|
|
__ret_5 = __noswap_splatq_laneq_bf16(__rev0_5, __p1_5); \
|
|
__ret_5 = __builtin_shufflevector(__ret_5, __ret_5, __lane_reverse_128_16); \
|
|
__ret_5; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_bf16(__p0_6, __p1_6) __extension__ ({ \
|
|
bfloat16x4_t __ret_6; \
|
|
bfloat16x8_t __s0_6 = __p0_6; \
|
|
__ret_6 = splat_laneq_bf16(__s0_6, __p1_6); \
|
|
__ret_6; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_bf16(__p0_7, __p1_7) __extension__ ({ \
|
|
bfloat16x4_t __ret_7; \
|
|
bfloat16x8_t __s0_7 = __p0_7; \
|
|
bfloat16x8_t __rev0_7; __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, __lane_reverse_128_16); \
|
|
__ret_7 = __noswap_splat_laneq_bf16(__rev0_7, __p1_7); \
|
|
__ret_7 = __builtin_shufflevector(__ret_7, __ret_7, __lane_reverse_64_16); \
|
|
__ret_7; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16_t __ret; \
|
|
bfloat16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vgetq_lane_bf16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16_t __ret; \
|
|
bfloat16x8_t __s0 = __p0; \
|
|
bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vgetq_lane_bf16(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16_t __ret; \
|
|
bfloat16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vgetq_lane_bf16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16_t __ret; \
|
|
bfloat16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vget_lane_bf16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16_t __ret; \
|
|
bfloat16x4_t __s0 = __p0; \
|
|
bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vget_lane_bf16(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16_t __ret; \
|
|
bfloat16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(bfloat16_t, __builtin_neon_vget_lane_bf16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vld1q_bf16(__p0, 43)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vld1q_bf16(__p0, 43)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vld1_bf16(__p0, 11)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vld1_bf16(__p0, 11)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vld1q_dup_bf16(__p0, 43)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vld1q_dup_bf16(__p0, 43)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vld1_dup_bf16(__p0, 11)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vld1_dup_bf16(__p0, 11)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vld1q_lane_bf16(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 43)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x8_t __s1 = __p1; \
|
|
bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vld1q_lane_bf16(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 43)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4_t __ret; \
|
|
bfloat16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vld1_lane_bf16(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 11)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4_t __ret; \
|
|
bfloat16x4_t __s1 = __p1; \
|
|
bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vld1_lane_bf16(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 11)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_bf16_x2(__p0) __extension__ ({ \
|
|
bfloat16x8x2_t __ret; \
|
|
__builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_bf16_x2(__p0) __extension__ ({ \
|
|
bfloat16x8x2_t __ret; \
|
|
__builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_bf16_x2(__p0) __extension__ ({ \
|
|
bfloat16x4x2_t __ret; \
|
|
__builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_bf16_x2(__p0) __extension__ ({ \
|
|
bfloat16x4x2_t __ret; \
|
|
__builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_bf16_x3(__p0) __extension__ ({ \
|
|
bfloat16x8x3_t __ret; \
|
|
__builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_bf16_x3(__p0) __extension__ ({ \
|
|
bfloat16x8x3_t __ret; \
|
|
__builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_bf16_x3(__p0) __extension__ ({ \
|
|
bfloat16x4x3_t __ret; \
|
|
__builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_bf16_x3(__p0) __extension__ ({ \
|
|
bfloat16x4x3_t __ret; \
|
|
__builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_bf16_x4(__p0) __extension__ ({ \
|
|
bfloat16x8x4_t __ret; \
|
|
__builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_bf16_x4(__p0) __extension__ ({ \
|
|
bfloat16x8x4_t __ret; \
|
|
__builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_bf16_x4(__p0) __extension__ ({ \
|
|
bfloat16x4x4_t __ret; \
|
|
__builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_bf16_x4(__p0) __extension__ ({ \
|
|
bfloat16x4x4_t __ret; \
|
|
__builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_bf16(&__ret, __p0, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_bf16(&__ret, __p0, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x2_t __ret; \
|
|
__builtin_neon_vld2_bf16(&__ret, __p0, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x2_t __ret; \
|
|
__builtin_neon_vld2_bf16(&__ret, __p0, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x2_t __ret; \
|
|
__builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x2_t __ret; \
|
|
__builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8x2_t __ret; \
|
|
bfloat16x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8x2_t __ret; \
|
|
bfloat16x8x2_t __s1 = __p1; \
|
|
bfloat16x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__builtin_neon_vld2q_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4x2_t __ret; \
|
|
bfloat16x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4x2_t __ret; \
|
|
bfloat16x4x2_t __s1 = __p1; \
|
|
bfloat16x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__builtin_neon_vld2_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_bf16(&__ret, __p0, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_bf16(&__ret, __p0, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x3_t __ret; \
|
|
__builtin_neon_vld3_bf16(&__ret, __p0, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x3_t __ret; \
|
|
__builtin_neon_vld3_bf16(&__ret, __p0, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x3_t __ret; \
|
|
__builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x3_t __ret; \
|
|
__builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8x3_t __ret; \
|
|
bfloat16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8x3_t __ret; \
|
|
bfloat16x8x3_t __s1 = __p1; \
|
|
bfloat16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vld3q_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4x3_t __ret; \
|
|
bfloat16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4x3_t __ret; \
|
|
bfloat16x4x3_t __s1 = __p1; \
|
|
bfloat16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vld3_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_bf16(&__ret, __p0, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_bf16(&__ret, __p0, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x4_t __ret; \
|
|
__builtin_neon_vld4_bf16(&__ret, __p0, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x4_t __ret; \
|
|
__builtin_neon_vld4_bf16(&__ret, __p0, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x4_t __ret; \
|
|
__builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_dup_bf16(__p0) __extension__ ({ \
|
|
bfloat16x4x4_t __ret; \
|
|
__builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8x4_t __ret; \
|
|
bfloat16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 43); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8x4_t __ret; \
|
|
bfloat16x8x4_t __s1 = __p1; \
|
|
bfloat16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vld4q_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 43); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4x4_t __ret; \
|
|
bfloat16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 11); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4x4_t __ret; \
|
|
bfloat16x4x4_t __s1 = __p1; \
|
|
bfloat16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vld4_lane_bf16(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 11); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16_t __s0 = __p0; \
|
|
bfloat16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vsetq_lane_bf16(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16_t __s0 = __p0; \
|
|
bfloat16x8_t __s1 = __p1; \
|
|
bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vsetq_lane_bf16(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16_t __s0 = __p0; \
|
|
bfloat16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vsetq_lane_bf16(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4_t __ret; \
|
|
bfloat16_t __s0 = __p0; \
|
|
bfloat16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vset_lane_bf16(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4_t __ret; \
|
|
bfloat16_t __s0 = __p0; \
|
|
bfloat16x4_t __s1 = __p1; \
|
|
bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vset_lane_bf16(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4_t __ret; \
|
|
bfloat16_t __s0 = __p0; \
|
|
bfloat16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vset_lane_bf16(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_bf16(__p0, __builtin_bit_cast(int8x16_t, __s1), 43); \
|
|
})
|
|
#else
|
|
#define vst1q_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x8_t __s1 = __p1; \
|
|
bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_bf16(__p0, __builtin_bit_cast(int8x16_t, __rev1), 43); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_bf16(__p0, __builtin_bit_cast(int8x8_t, __s1), 11); \
|
|
})
|
|
#else
|
|
#define vst1_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x4_t __s1 = __p1; \
|
|
bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_bf16(__p0, __builtin_bit_cast(int8x8_t, __rev1), 11); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_bf16(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 43); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __s1 = __p1; \
|
|
bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_lane_bf16(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 43); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_bf16(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 11); \
|
|
})
|
|
#else
|
|
#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4_t __s1 = __p1; \
|
|
bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_lane_bf16(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 11); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
|
|
bfloat16x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_bf16_x2(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 43); \
|
|
})
|
|
#else
|
|
#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
|
|
bfloat16x8x2_t __s1 = __p1; \
|
|
bfloat16x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_bf16_x2(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 43); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
|
|
bfloat16x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_bf16_x2(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 11); \
|
|
})
|
|
#else
|
|
#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
|
|
bfloat16x4x2_t __s1 = __p1; \
|
|
bfloat16x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_bf16_x2(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 11); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
|
|
bfloat16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_bf16_x3(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 43); \
|
|
})
|
|
#else
|
|
#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
|
|
bfloat16x8x3_t __s1 = __p1; \
|
|
bfloat16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_bf16_x3(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 43); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
  bfloat16x4x3_t __s1 = __p1; \
  __builtin_neon_vst1_bf16_x3(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 11); \
})
#else
#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
  bfloat16x4x3_t __s1 = __p1; \
  bfloat16x4x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
  __builtin_neon_vst1_bf16_x3(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 11); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
  bfloat16x8x4_t __s1 = __p1; \
  __builtin_neon_vst1q_bf16_x4(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 43); \
})
#else
#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
  bfloat16x8x4_t __s1 = __p1; \
  bfloat16x8x4_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
  __builtin_neon_vst1q_bf16_x4(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 43); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
  bfloat16x4x4_t __s1 = __p1; \
  __builtin_neon_vst1_bf16_x4(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 11); \
})
#else
#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
  bfloat16x4x4_t __s1 = __p1; \
  bfloat16x4x4_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
  __builtin_neon_vst1_bf16_x4(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 11); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x8x2_t __s1 = __p1; \
  __builtin_neon_vst2q_bf16(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 43); \
})
#else
#define vst2q_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x8x2_t __s1 = __p1; \
  bfloat16x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __builtin_neon_vst2q_bf16(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 43); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x4x2_t __s1 = __p1; \
  __builtin_neon_vst2_bf16(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 11); \
})
#else
#define vst2_bf16(__p0, __p1) __extension__ ({ \
  bfloat16x4x2_t __s1 = __p1; \
  bfloat16x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __builtin_neon_vst2_bf16(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 11); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
  bfloat16x8x2_t __s1 = __p1; \
  __builtin_neon_vst2q_lane_bf16(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 43); \
})
#else
#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
  bfloat16x8x2_t __s1 = __p1; \
  bfloat16x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __builtin_neon_vst2q_lane_bf16(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 43); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
  bfloat16x4x2_t __s1 = __p1; \
  __builtin_neon_vst2_lane_bf16(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 11); \
})
#else
#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
  bfloat16x4x2_t __s1 = __p1; \
  bfloat16x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __builtin_neon_vst2_lane_bf16(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 11); \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_bf16(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 43); \
|
|
})
|
|
#else
|
|
#define vst3q_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x8x3_t __s1 = __p1; \
|
|
bfloat16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst3q_bf16(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 43); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_bf16(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 11); \
|
|
})
|
|
#else
|
|
#define vst3_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x4x3_t __s1 = __p1; \
|
|
bfloat16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst3_bf16(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 11); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_bf16(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 43); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8x3_t __s1 = __p1; \
|
|
bfloat16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst3q_lane_bf16(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 43); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_bf16(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 11); \
|
|
})
|
|
#else
|
|
#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4x3_t __s1 = __p1; \
|
|
bfloat16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst3_lane_bf16(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 11); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_bf16(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 43); \
|
|
})
|
|
#else
|
|
#define vst4q_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x8x4_t __s1 = __p1; \
|
|
bfloat16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst4q_bf16(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 43); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_bf16(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 11); \
|
|
})
|
|
#else
|
|
#define vst4_bf16(__p0, __p1) __extension__ ({ \
|
|
bfloat16x4x4_t __s1 = __p1; \
|
|
bfloat16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst4_bf16(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 11); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_bf16(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 43); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8x4_t __s1 = __p1; \
|
|
bfloat16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst4q_lane_bf16(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 43); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_bf16(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 11); \
|
|
})
|
|
#else
|
|
#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x4x4_t __s1 = __p1; \
|
|
bfloat16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst4_lane_bf16(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 11); \
|
|
})
|
|
#endif
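/* vdot[q]_u32 / vdot[q]_s32: 8-bit dot-product intrinsics, gated on the
 * "dotprod" target feature. The __noswap_* variants defined for big-endian
 * builds perform the same operation without any lane reversal. */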
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("dotprod,neon"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vdotq_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
  return __ret;
}
#else
__ai __attribute__((target("dotprod,neon"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vdotq_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("dotprod,neon"))) uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vdotq_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("dotprod,neon"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vdotq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("dotprod,neon"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vdotq_s32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("dotprod,neon"))) int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vdotq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("dotprod,neon"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vdot_u32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("dotprod,neon"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vdot_u32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("dotprod,neon"))) uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vdot_u32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 18));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("dotprod,neon"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vdot_s32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("dotprod,neon"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vdot_s32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("dotprod,neon"))) int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vdot_s32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 2));
|
|
return __ret;
|
|
}
|
|
#endif
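/* float16 arithmetic, comparison, and conversion intrinsics gated on the
 * "fullfp16" target feature; the same little-endian / big-endian duplication
 * pattern applies. */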
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vabdq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vabdq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vabd_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vabd_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabsq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vabsq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabsq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vabsq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabs_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vabs_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabs_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vabs_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcageq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcageq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcage_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcage_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcagtq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcagtq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcagt_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcagt_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcaleq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcaleq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcale_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcale_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcaltq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcaltq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcalt_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcalt_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqzq_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vceqzq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqzq_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vceqzq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceqz_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vceqz_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceqz_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vceqz_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgezq_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcgezq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgezq_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcgezq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgez_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcgez_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgez_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcgez_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcgtzq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcgtzq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgtz_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcgtz_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgtz_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcgtz_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vclezq_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vclezq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vclezq_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vclezq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclez_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vclez_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclez_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vclez_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltzq_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcltzq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltzq_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcltzq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcltz_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcltz_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcltz_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcltz_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvtq_f16_u16(__builtin_bit_cast(int8x16_t, __p0), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvtq_f16_u16(__builtin_bit_cast(int8x16_t, __rev0), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvtq_f16_s16(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvtq_f16_s16(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_f16_u16(__builtin_bit_cast(int8x8_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_f16_u16(__builtin_bit_cast(int8x8_t, __rev0), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_f16_s16(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_f16_s16(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvtq_n_f16_u16(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvtq_n_f16_u16(__builtin_bit_cast(int8x16_t, __rev0), __p1, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvtq_n_f16_s16(__builtin_bit_cast(int8x16_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvtq_n_f16_s16(__builtin_bit_cast(int8x16_t, __rev0), __p1, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_n_f16_u16(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_n_f16_u16(__builtin_bit_cast(int8x8_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_n_f16_s16(__builtin_bit_cast(int8x8_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_n_f16_s16(__builtin_bit_cast(int8x8_t, __rev0), __p1, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtq_n_s16_f16(__builtin_bit_cast(int8x16_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtq_n_s16_f16(__builtin_bit_cast(int8x16_t, __rev0), __p1, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvt_n_s16_f16(__builtin_bit_cast(int8x8_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvt_n_s16_f16(__builtin_bit_cast(int8x8_t, __rev0), __p1, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtq_n_u16_f16(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtq_n_u16_f16(__builtin_bit_cast(int8x16_t, __rev0), __p1, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvt_n_u16_f16(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvt_n_u16_f16(__builtin_bit_cast(int8x8_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
  int16x8_t __ret;
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtq_s16_f16(__builtin_bit_cast(int8x16_t, __p0), 33));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
  int16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtq_s16_f16(__builtin_bit_cast(int8x16_t, __rev0), 33));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) {
  int16x4_t __ret;
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvt_s16_f16(__builtin_bit_cast(int8x8_t, __p0), 1));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) {
  int16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvt_s16_f16(__builtin_bit_cast(int8x8_t, __rev0), 1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
  uint16x8_t __ret;
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtq_u16_f16(__builtin_bit_cast(int8x16_t, __p0), 49));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
  uint16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtq_u16_f16(__builtin_bit_cast(int8x16_t, __rev0), 49));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
  uint16x4_t __ret;
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvt_u16_f16(__builtin_bit_cast(int8x8_t, __p0), 17));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
  uint16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvt_u16_f16(__builtin_bit_cast(int8x8_t, __rev0), 17));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
  int16x8_t __ret;
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtaq_s16_f16(__builtin_bit_cast(int8x16_t, __p0), 33));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
  int16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtaq_s16_f16(__builtin_bit_cast(int8x16_t, __rev0), 33));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) {
  int16x4_t __ret;
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvta_s16_f16(__builtin_bit_cast(int8x8_t, __p0), 1));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) {
  int16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvta_s16_f16(__builtin_bit_cast(int8x8_t, __rev0), 1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
  uint16x8_t __ret;
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtaq_u16_f16(__builtin_bit_cast(int8x16_t, __p0), 49));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
  uint16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtaq_u16_f16(__builtin_bit_cast(int8x16_t, __rev0), 49));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
  uint16x4_t __ret;
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvta_u16_f16(__builtin_bit_cast(int8x8_t, __p0), 17));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
  uint16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvta_u16_f16(__builtin_bit_cast(int8x8_t, __rev0), 17));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtmq_s16_f16(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtmq_s16_f16(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvtm_s16_f16(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvtm_s16_f16(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtmq_u16_f16(__builtin_bit_cast(int8x16_t, __p0), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtmq_u16_f16(__builtin_bit_cast(int8x16_t, __rev0), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvtm_u16_f16(__builtin_bit_cast(int8x8_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvtm_u16_f16(__builtin_bit_cast(int8x8_t, __rev0), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtnq_s16_f16(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtnq_s16_f16(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvtn_s16_f16(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvtn_s16_f16(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtnq_u16_f16(__builtin_bit_cast(int8x16_t, __p0), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtnq_u16_f16(__builtin_bit_cast(int8x16_t, __rev0), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvtn_u16_f16(__builtin_bit_cast(int8x8_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvtn_u16_f16(__builtin_bit_cast(int8x8_t, __rev0), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtpq_s16_f16(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vcvtpq_s16_f16(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvtp_s16_f16(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcvtp_s16_f16(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtpq_u16_f16(__builtin_bit_cast(int8x16_t, __p0), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcvtpq_u16_f16(__builtin_bit_cast(int8x16_t, __rev0), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvtp_u16_f16(__builtin_bit_cast(int8x8_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcvtp_u16_f16(__builtin_bit_cast(int8x8_t, __rev0), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vfmaq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vfmaq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
__ai __attribute__((target("fullfp16,neon"))) float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vfmaq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vfma_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vfma_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
__ai __attribute__((target("fullfp16,neon"))) float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vfma_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
  return __ret;
}
#endif

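/* The __noswap_ helpers above are byte-order-neutral copies of vfmaq_f16 and
 * vfma_f16: they call the builtin without any lane reversal, so the
 * big-endian bodies of the vfms* intrinsics below can reuse them on operands
 * whose lanes have already been reversed. */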
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
__ret = vfmaq_f16(__p0, -__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
__ret = vfma_f16(__p0, -__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmaxq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmaxq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vmax_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vmax_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vminq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vminq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vmin_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vmin_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
__ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_n_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
__ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmul_n_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vnegq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vnegq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vneg_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vneg_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vpadd_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vpadd_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vpmax_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vpmax_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vpmin_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vpmin_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpeq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrecpeq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpeq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrecpeq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecpe_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrecpe_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecpe_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrecpe_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrecpsq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrecpsq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrecps_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrecps_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrsqrteq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrsqrteq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrte_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrsqrte_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrte_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrsqrte_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrsqrtsq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrsqrtsq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrsqrts_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrsqrts_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __p0 - __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 - __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __p0 - __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 - __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("i8mm,neon"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vmmlaq_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("i8mm,neon"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vmmlaq_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("i8mm,neon"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vmmlaq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("i8mm,neon"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vmmlaq_s32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("i8mm,neon"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vusdotq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("i8mm,neon"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vusdotq_s32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("i8mm,neon"))) int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vusdotq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("i8mm,neon"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vusdot_s32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("i8mm,neon"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vusdot_s32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("i8mm,neon"))) int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vusdot_s32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 2));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("i8mm,neon"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vusmmlaq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("i8mm,neon"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vusmmlaq_s32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 4)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define splat_lane_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
poly64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 6)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 5)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 5)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 5)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 4)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 6)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 6)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 6)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 5)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 5)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 5)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 10)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 10)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 10)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 9)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 9)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 9)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 8)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 8)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 8)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 3)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 3)); \
|
|
__ret; \
|
|
})
|
|
#endif

#ifdef __LITTLE_ENDIAN__
#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
  int16x8_t __ret; \
  int16x4_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 1)); \
  __ret; \
})
#else
#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
  int16x8_t __ret; \
  int16x4_t __s0 = __p0; \
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 1)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
  __ret; \
})
#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \
  int16x8_t __ret; \
  int16x4_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 1)); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define splat_lane_u8(__p0, __p1) __extension__ ({ \
  uint8x8_t __ret; \
  uint8x8_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 16)); \
  __ret; \
})
#else
#define splat_lane_u8(__p0, __p1) __extension__ ({ \
  uint8x8_t __ret; \
  uint8x8_t __s0 = __p0; \
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 16)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
  __ret; \
})
#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \
  uint8x8_t __ret; \
  uint8x8_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 16)); \
  __ret; \
})
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define splat_lane_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define splat_lane_f64(__p0, __p1) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 10)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 9)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 9)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 9)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_lane_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 8)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_lane_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 8)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 8)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define splat_lane_s64(__p0, __p1) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#endif
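
/* The splat_laneq_* and splatq_laneq_* macros are the same broadcast helpers
 * as above, except that the lane is taken from a 128-bit source vector
 * (hence the int8x16_t casts and the __lane_reverse_128_* masks on the
 * input side). */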
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 36)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 38)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 38)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 38)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 37)); \
|
|
__ret; \
|
|
})
|
|
#endif
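
/* The splatq_laneq_* variants below broadcast a lane from a 128-bit source
 * into a full 128-bit result. */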
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 36)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 38)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 38)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 38)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 37)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 48)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 32)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 42)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 42)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 42)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 41)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 41)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 41)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 40)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 48)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 32)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 42)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 42)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 42)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 41)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 41)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 41)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 40)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#endif
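
/* vabd and vabdq compute the lane-wise absolute difference of their two
 * operands.  As with the splat helpers above, each intrinsic has a
 * little-endian fast path and a big-endian wrapper that reverses lane order
 * around the builtin call, plus a __noswap_* form for internal use.
 * Illustrative use:
 *
 *   uint8x16_t a = vdupq_n_u8(10);
 *   uint8x16_t b = vdupq_n_u8(3);
 *   uint8x16_t d = vabdq_u8(a, b);   // every lane holds 7
 */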
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  uint8x16_t __ret;
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  uint8x16_t __ret;
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
__ai __attribute__((target("neon"))) uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  uint8x16_t __ret;
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
  return __ret;
}
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#endif
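
/* The vabd_* forms below are the 64-bit (d-register) counterparts of the
 * vabdq_* intrinsics above. */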
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#endif
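
/* vabs and vabsq take the lane-wise absolute value of a signed integer or
 * floating-point vector.  Illustrative use:
 *
 *   int32x4_t v = vdupq_n_s32(-5);
 *   int32x4_t a = vabsq_s32(v);      // every lane holds 5
 */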
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x16_t vabsq_s8(int8x16_t __p0) {
  int8x16_t __ret;
  __ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x16_t vabsq_s8(int8x16_t __p0) {
  int8x16_t __ret;
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vabsq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vabsq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vabsq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vabsq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vabsq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vabsq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vabs_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vabs_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vabs_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vabs_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vabs_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vabs_v(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vabs_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vabs_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vabs_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vabs_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vabs_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vabs_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vabs_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vabs_v(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vabs_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vabs_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
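/* vadd/vaddq on the ordinary integer and floating-point vector types are plain
 * lanewise additions written with the C vector + operator; integer lanes wrap
 * modulo 2^n (vqadd* are the saturating forms). For example, with two
 * uint8x16_t values a and b:
 *   uint8x16_t s = vaddq_u8(a, b);   // s[i] = (uint8_t)(a[i] + b[i])
 */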
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
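/* The 64-bit single-lane types (uint64x1_t, int64x1_t, poly64x1_t) hold only
 * one element, so lane reversal is a no-op and a single definition serves both
 * endiannesses; that is why the x1 intrinsics carry no #ifdef pair.
 */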
__ai __attribute__((target("neon"))) uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 + __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 + __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
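/* Additions on the polynomial types (poly8/poly16/poly64, plus the q-register
 * forms further below) go through __builtin_neon_vadd_v / __builtin_neon_vaddq_v
 * rather than the + operator: polynomial addition is carry-less addition over
 * GF(2), i.e. a lanewise exclusive OR. Illustrative use:
 *   poly8x8_t r = vadd_p8(a, b);   // r[i] = a[i] ^ b[i]
 */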
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_vadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 6));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 5));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 5));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 38));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 38));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 37));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 37));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
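/* vaddhn ("add and narrow, returning the high half") adds corresponding lanes
 * of its two wide operands and keeps only the most significant half of each
 * (truncated) sum, yielding a vector of half-width elements. For example:
 *   uint16x4_t hi = vaddhn_u32(a, b);   // hi[i] = (uint16_t)((a[i] + b[i]) >> 16)
 */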
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
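/* The __noswap_* helpers defined in the big-endian branches perform the same
 * operation without any lane reversal; other big-endian wrappers in this header
 * call them on operands whose lanes they have already reversed.
 */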
__ai __attribute__((target("neon"))) uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vaddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
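/* vand/vandq are the lanewise bitwise AND, written with the vector & operator:
 *   uint32x4_t r = vandq_u32(a, b);   // r[i] = a[i] & b[i]
 */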
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 & __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 & __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
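/* vbic/vbicq ("bit clear") compute __p0 & ~__p1: every bit that is set in the
 * second operand is cleared in the first. For example:
 *   uint16x8_t r = vbicq_u16(a, mask);   // r[i] = a[i] & ~mask[i]
 */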
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 & ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 & ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
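/* vbsl/vbslq ("bitwise select") build each result bit from the second operand
 * where the corresponding bit of the first (selector) operand is 1, and from
 * the third operand where it is 0, i.e. (sel & b) | (~sel & c); the selector is
 * always an unsigned vector of matching width. For example:
 *   uint32x4_t r = vbslq_u32(sel, x, y);   // r = (sel & x) | (~sel & y)
 */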
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 5));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
|
|
poly16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 5));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
|
|
poly8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 37));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
|
|
poly16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 37));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
|
|
int64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint8x8_t __ret;
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 16));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint8x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 16));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  uint32x2_t __ret;
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 18));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  uint32x2_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 18));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 19));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  uint16x4_t __ret;
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 17));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  uint16x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 17));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  int8x8_t __ret;
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  int8x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 0));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  float32x2_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int32x2_t __ret;
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int32x2_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int16x4_t __ret;
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 1));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int16x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float16x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

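/* vcage, vcagt, vcale, vcalt (and their q forms): floating-point absolute
 * comparisons. Each lane compares the magnitudes of the operands and yields an
 * all-ones mask when the comparison holds, all zeros otherwise. */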
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcageq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcageq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcage_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcage_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcagtq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcagtq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcagt_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcagt_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcaleq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcaleq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcale_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcale_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcaltq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcaltq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcalt_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcalt_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
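/* vceq*: lane-wise compare-equal. Each result lane is all ones when the
 * corresponding input lanes are equal, all zeros otherwise. */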
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
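/* vcge*: lane-wise compare greater-than-or-equal; the result lane is all ones
 * where __p0 >= __p1. */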
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
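/* vcgt*: lane-wise compare greater-than. */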
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
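/* vcle*: lane-wise compare less-than-or-equal. */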
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
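
/* vcls_* / vclsq_*: count leading sign bits; for each lane, the number of
   bits after the most significant bit that are equal to it. Note that the
   result element type is signed even for the unsigned-input variants. */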
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x16_t vclsq_u8(uint8x16_t __p0) {
  int8x16_t __ret;
  __ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x16_t vclsq_u8(uint8x16_t __p0) {
  int8x16_t __ret;
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vclsq_u32(uint32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vclsq_u32(uint32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vclsq_u16(uint16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vclsq_u16(uint16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vclsq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vclsq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vclsq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vclsq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vclsq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vclsq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vclsq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vcls_u8(uint8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vcls_u8(uint8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vcls_u32(uint32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vcls_u32(uint32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vcls_u16(uint16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vcls_u16(uint16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vcls_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vcls_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vcls_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vcls_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vcls_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vcls_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vcls_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
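/* vclt_* / vcltq_*: lane-wise "compare less than"; all ones in lanes where
   __p0 < __p1, all zeros elsewhere. */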
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
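/* vclz_* / vclzq_*: count leading zero bits in each lane, lowered through the
   __builtin_neon_vclz_v / __builtin_neon_vclzq_v builtins. */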
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vclzq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __p0), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vclzq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __rev0), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vclzq_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __p0), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vclzq_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vclzq_u16(uint16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __p0), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vclzq_u16(uint16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __rev0), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vclzq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vclzq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vclzq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vclzq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vclzq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vclzq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vclzq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vclz_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __p0), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vclz_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __rev0), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclz_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclz_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vclz_u16(uint16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vclz_u16(uint16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __rev0), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vclz_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vclz_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vclz_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vclz_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vclz_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vclz_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vclz_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
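/* vcnt_* / vcntq_*: per-byte population count (number of set bits). */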
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vcnt_p8(poly8x8_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vcnt_v(__builtin_bit_cast(int8x8_t, __p0), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vcnt_p8(poly8x8_t __p0) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vcnt_v(__builtin_bit_cast(int8x8_t, __rev0), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vcntq_p8(poly8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vcntq_v(__builtin_bit_cast(int8x16_t, __p0), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vcntq_p8(poly8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vcntq_v(__builtin_bit_cast(int8x16_t, __rev0), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcntq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vcntq_v(__builtin_bit_cast(int8x16_t, __p0), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcntq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vcntq_v(__builtin_bit_cast(int8x16_t, __rev0), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vcntq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vcntq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vcntq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vcntq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcnt_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vcnt_v(__builtin_bit_cast(int8x8_t, __p0), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcnt_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vcnt_v(__builtin_bit_cast(int8x8_t, __rev0), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vcnt_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vcnt_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vcnt_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vcnt_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
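/* vcombine_*: concatenate two 64-bit vectors into one 128-bit vector; the
   first operand supplies the low half and the second the high half, so e.g.
   vcombine_u8(lo, hi) yields lanes 0-7 from lo and lanes 8-15 from hi.
   On big-endian targets the operands are lane-reversed before the shuffle
   and the result is reversed back afterwards. */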
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x8_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
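/* The __noswap_* variants below skip the endian lane reversal; they appear to
   exist for use by other big-endian code paths whose operands are already in
   reversed order. */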
__ai __attribute__((target("neon"))) uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
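/* vcreate_*: reinterpret a 64-bit scalar as a 64-bit vector of the named
   element type. These are plain bitcasts and have no __LITTLE_ENDIAN__
   variants. */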
#define vcreate_p8(__p0) __extension__ ({ \
  poly8x8_t __ret; \
  uint64_t __promote = __p0; \
  __ret = __builtin_bit_cast(poly8x8_t, __promote); \
  __ret; \
})
#define vcreate_p16(__p0) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#define vcreate_u8(__p0) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#define vcreate_u32(__p0) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#define vcreate_u64(__p0) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#define vcreate_u16(__p0) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#define vcreate_s8(__p0) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#define vcreate_f32(__p0) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#define vcreate_f16(__p0) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#define vcreate_s32(__p0) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#define vcreate_s64(__p0) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#define vcreate_s16(__p0) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __promote); \
|
|
__ret; \
|
|
})
|
|
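/* vcvt conversions between 32-bit integer and float32 lanes follow: the plain
   forms convert lane-wise, the _n_ forms treat the integer as a fixed-point
   value, and the s32/u32_f32 forms convert float back to integer. */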
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvtq_f32_v(__builtin_bit_cast(int8x16_t, __p0), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvtq_f32_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvtq_f32_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvtq_f32_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_f32_v(__builtin_bit_cast(int8x8_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_f32_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_s32(int32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_f32_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_s32(int32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_f32_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
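/* vcvt{q}_n_*: fixed-point conversions. The second argument is the number of
   fractional bits; for 32-bit lanes the ARM architecture limits this
   immediate to the range 1..32. */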
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvtq_n_f32_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvtq_n_f32_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvtq_n_f32_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvtq_n_f32_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_n_f32_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_n_f32_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_n_f32_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_n_f32_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtq_n_s32_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtq_n_s32_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvt_n_s32_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvt_n_s32_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtq_n_u32_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtq_n_u32_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvt_n_u32_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvt_n_u32_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
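/* Float-to-integer vcvt forms truncate toward zero, matching a C cast. */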
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtq_s32_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtq_s32_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vcvt_s32_f32(float32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvt_s32_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vcvt_s32_f32(float32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvt_s32_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtq_u32_v(__builtin_bit_cast(int8x16_t, __p0), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtq_u32_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvt_u32_v(__builtin_bit_cast(int8x8_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvt_u32_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
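/* vdup_lane_* / vdupq_lane_*: broadcast lane __p1 of the 64-bit source vector
   across every lane of the result; defined in terms of the splat(q)_lane_*
   helpers declared earlier in this header. */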
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_p8(__p0_8, __p1_8) __extension__ ({ \
|
|
poly8x8_t __ret_8; \
|
|
poly8x8_t __s0_8 = __p0_8; \
|
|
__ret_8 = splat_lane_p8(__s0_8, __p1_8); \
|
|
__ret_8; \
|
|
})
|
|
#else
|
|
#define vdup_lane_p8(__p0_9, __p1_9) __extension__ ({ \
|
|
poly8x8_t __ret_9; \
|
|
poly8x8_t __s0_9 = __p0_9; \
|
|
poly8x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, __lane_reverse_64_8); \
|
|
__ret_9 = __noswap_splat_lane_p8(__rev0_9, __p1_9); \
|
|
__ret_9 = __builtin_shufflevector(__ret_9, __ret_9, __lane_reverse_64_8); \
|
|
__ret_9; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_p16(__p0_10, __p1_10) __extension__ ({ \
|
|
poly16x4_t __ret_10; \
|
|
poly16x4_t __s0_10 = __p0_10; \
|
|
__ret_10 = splat_lane_p16(__s0_10, __p1_10); \
|
|
__ret_10; \
|
|
})
|
|
#else
|
|
#define vdup_lane_p16(__p0_11, __p1_11) __extension__ ({ \
|
|
poly16x4_t __ret_11; \
|
|
poly16x4_t __s0_11 = __p0_11; \
|
|
poly16x4_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, __lane_reverse_64_16); \
|
|
__ret_11 = __noswap_splat_lane_p16(__rev0_11, __p1_11); \
|
|
__ret_11 = __builtin_shufflevector(__ret_11, __ret_11, __lane_reverse_64_16); \
|
|
__ret_11; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_p8(__p0_12, __p1_12) __extension__ ({ \
|
|
poly8x16_t __ret_12; \
|
|
poly8x8_t __s0_12 = __p0_12; \
|
|
__ret_12 = splatq_lane_p8(__s0_12, __p1_12); \
|
|
__ret_12; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_p8(__p0_13, __p1_13) __extension__ ({ \
|
|
poly8x16_t __ret_13; \
|
|
poly8x8_t __s0_13 = __p0_13; \
|
|
poly8x8_t __rev0_13; __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, __lane_reverse_64_8); \
|
|
__ret_13 = __noswap_splatq_lane_p8(__rev0_13, __p1_13); \
|
|
__ret_13 = __builtin_shufflevector(__ret_13, __ret_13, __lane_reverse_128_8); \
|
|
__ret_13; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_p16(__p0_14, __p1_14) __extension__ ({ \
|
|
poly16x8_t __ret_14; \
|
|
poly16x4_t __s0_14 = __p0_14; \
|
|
__ret_14 = splatq_lane_p16(__s0_14, __p1_14); \
|
|
__ret_14; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_p16(__p0_15, __p1_15) __extension__ ({ \
|
|
poly16x8_t __ret_15; \
|
|
poly16x4_t __s0_15 = __p0_15; \
|
|
poly16x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, __lane_reverse_64_16); \
|
|
__ret_15 = __noswap_splatq_lane_p16(__rev0_15, __p1_15); \
|
|
__ret_15 = __builtin_shufflevector(__ret_15, __ret_15, __lane_reverse_128_16); \
|
|
__ret_15; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_u8(__p0_16, __p1_16) __extension__ ({ \
|
|
uint8x16_t __ret_16; \
|
|
uint8x8_t __s0_16 = __p0_16; \
|
|
__ret_16 = splatq_lane_u8(__s0_16, __p1_16); \
|
|
__ret_16; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_u8(__p0_17, __p1_17) __extension__ ({ \
|
|
uint8x16_t __ret_17; \
|
|
uint8x8_t __s0_17 = __p0_17; \
|
|
uint8x8_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, __lane_reverse_64_8); \
|
|
__ret_17 = __noswap_splatq_lane_u8(__rev0_17, __p1_17); \
|
|
__ret_17 = __builtin_shufflevector(__ret_17, __ret_17, __lane_reverse_128_8); \
|
|
__ret_17; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_u32(__p0_18, __p1_18) __extension__ ({ \
|
|
uint32x4_t __ret_18; \
|
|
uint32x2_t __s0_18 = __p0_18; \
|
|
__ret_18 = splatq_lane_u32(__s0_18, __p1_18); \
|
|
__ret_18; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_u32(__p0_19, __p1_19) __extension__ ({ \
|
|
uint32x4_t __ret_19; \
|
|
uint32x2_t __s0_19 = __p0_19; \
|
|
uint32x2_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, __lane_reverse_64_32); \
|
|
__ret_19 = __noswap_splatq_lane_u32(__rev0_19, __p1_19); \
|
|
__ret_19 = __builtin_shufflevector(__ret_19, __ret_19, __lane_reverse_128_32); \
|
|
__ret_19; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_u64(__p0_20, __p1_20) __extension__ ({ \
|
|
uint64x2_t __ret_20; \
|
|
uint64x1_t __s0_20 = __p0_20; \
|
|
__ret_20 = splatq_lane_u64(__s0_20, __p1_20); \
|
|
__ret_20; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_u64(__p0_21, __p1_21) __extension__ ({ \
|
|
uint64x2_t __ret_21; \
|
|
uint64x1_t __s0_21 = __p0_21; \
|
|
__ret_21 = __noswap_splatq_lane_u64(__s0_21, __p1_21); \
|
|
__ret_21 = __builtin_shufflevector(__ret_21, __ret_21, __lane_reverse_128_64); \
|
|
__ret_21; \
|
|
})
|
|
#endif
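/* Editor's note (illustrative): the big-endian variant above reverses only the
 * 128-bit result.  The uint64x1_t source has a single lane, so there is no
 * lane order to restore on input; for the same reason the purely 64-bit
 * vdup_lane_u64 and vdup_lane_s64 further below are defined without any
 * endianness split at all.
 */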
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_u16(__p0_22, __p1_22) __extension__ ({ \
|
|
uint16x8_t __ret_22; \
|
|
uint16x4_t __s0_22 = __p0_22; \
|
|
__ret_22 = splatq_lane_u16(__s0_22, __p1_22); \
|
|
__ret_22; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_u16(__p0_23, __p1_23) __extension__ ({ \
|
|
uint16x8_t __ret_23; \
|
|
uint16x4_t __s0_23 = __p0_23; \
|
|
uint16x4_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, __lane_reverse_64_16); \
|
|
__ret_23 = __noswap_splatq_lane_u16(__rev0_23, __p1_23); \
|
|
__ret_23 = __builtin_shufflevector(__ret_23, __ret_23, __lane_reverse_128_16); \
|
|
__ret_23; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_s8(__p0_24, __p1_24) __extension__ ({ \
|
|
int8x16_t __ret_24; \
|
|
int8x8_t __s0_24 = __p0_24; \
|
|
__ret_24 = splatq_lane_s8(__s0_24, __p1_24); \
|
|
__ret_24; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_s8(__p0_25, __p1_25) __extension__ ({ \
|
|
int8x16_t __ret_25; \
|
|
int8x8_t __s0_25 = __p0_25; \
|
|
int8x8_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, __lane_reverse_64_8); \
|
|
__ret_25 = __noswap_splatq_lane_s8(__rev0_25, __p1_25); \
|
|
__ret_25 = __builtin_shufflevector(__ret_25, __ret_25, __lane_reverse_128_8); \
|
|
__ret_25; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_f32(__p0_26, __p1_26) __extension__ ({ \
|
|
float32x4_t __ret_26; \
|
|
float32x2_t __s0_26 = __p0_26; \
|
|
__ret_26 = splatq_lane_f32(__s0_26, __p1_26); \
|
|
__ret_26; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_f32(__p0_27, __p1_27) __extension__ ({ \
|
|
float32x4_t __ret_27; \
|
|
float32x2_t __s0_27 = __p0_27; \
|
|
float32x2_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, __lane_reverse_64_32); \
|
|
__ret_27 = __noswap_splatq_lane_f32(__rev0_27, __p1_27); \
|
|
__ret_27 = __builtin_shufflevector(__ret_27, __ret_27, __lane_reverse_128_32); \
|
|
__ret_27; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_f16(__p0_28, __p1_28) __extension__ ({ \
|
|
float16x8_t __ret_28; \
|
|
float16x4_t __s0_28 = __p0_28; \
|
|
__ret_28 = splatq_lane_f16(__s0_28, __p1_28); \
|
|
__ret_28; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_f16(__p0_29, __p1_29) __extension__ ({ \
|
|
float16x8_t __ret_29; \
|
|
float16x4_t __s0_29 = __p0_29; \
|
|
float16x4_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, __lane_reverse_64_16); \
|
|
__ret_29 = __noswap_splatq_lane_f16(__rev0_29, __p1_29); \
|
|
__ret_29 = __builtin_shufflevector(__ret_29, __ret_29, __lane_reverse_128_16); \
|
|
__ret_29; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_s32(__p0_30, __p1_30) __extension__ ({ \
|
|
int32x4_t __ret_30; \
|
|
int32x2_t __s0_30 = __p0_30; \
|
|
__ret_30 = splatq_lane_s32(__s0_30, __p1_30); \
|
|
__ret_30; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_s32(__p0_31, __p1_31) __extension__ ({ \
|
|
int32x4_t __ret_31; \
|
|
int32x2_t __s0_31 = __p0_31; \
|
|
int32x2_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, __lane_reverse_64_32); \
|
|
__ret_31 = __noswap_splatq_lane_s32(__rev0_31, __p1_31); \
|
|
__ret_31 = __builtin_shufflevector(__ret_31, __ret_31, __lane_reverse_128_32); \
|
|
__ret_31; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_s64(__p0_32, __p1_32) __extension__ ({ \
|
|
int64x2_t __ret_32; \
|
|
int64x1_t __s0_32 = __p0_32; \
|
|
__ret_32 = splatq_lane_s64(__s0_32, __p1_32); \
|
|
__ret_32; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_s64(__p0_33, __p1_33) __extension__ ({ \
|
|
int64x2_t __ret_33; \
|
|
int64x1_t __s0_33 = __p0_33; \
|
|
__ret_33 = __noswap_splatq_lane_s64(__s0_33, __p1_33); \
|
|
__ret_33 = __builtin_shufflevector(__ret_33, __ret_33, __lane_reverse_128_64); \
|
|
__ret_33; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_s16(__p0_34, __p1_34) __extension__ ({ \
|
|
int16x8_t __ret_34; \
|
|
int16x4_t __s0_34 = __p0_34; \
|
|
__ret_34 = splatq_lane_s16(__s0_34, __p1_34); \
|
|
__ret_34; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_s16(__p0_35, __p1_35) __extension__ ({ \
|
|
int16x8_t __ret_35; \
|
|
int16x4_t __s0_35 = __p0_35; \
|
|
int16x4_t __rev0_35; __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, __lane_reverse_64_16); \
|
|
__ret_35 = __noswap_splatq_lane_s16(__rev0_35, __p1_35); \
|
|
__ret_35 = __builtin_shufflevector(__ret_35, __ret_35, __lane_reverse_128_16); \
|
|
__ret_35; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_u8(__p0_36, __p1_36) __extension__ ({ \
|
|
uint8x8_t __ret_36; \
|
|
uint8x8_t __s0_36 = __p0_36; \
|
|
__ret_36 = splat_lane_u8(__s0_36, __p1_36); \
|
|
__ret_36; \
|
|
})
|
|
#else
|
|
#define vdup_lane_u8(__p0_37, __p1_37) __extension__ ({ \
|
|
uint8x8_t __ret_37; \
|
|
uint8x8_t __s0_37 = __p0_37; \
|
|
uint8x8_t __rev0_37; __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, __lane_reverse_64_8); \
|
|
__ret_37 = __noswap_splat_lane_u8(__rev0_37, __p1_37); \
|
|
__ret_37 = __builtin_shufflevector(__ret_37, __ret_37, __lane_reverse_64_8); \
|
|
__ret_37; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_u32(__p0_38, __p1_38) __extension__ ({ \
|
|
uint32x2_t __ret_38; \
|
|
uint32x2_t __s0_38 = __p0_38; \
|
|
__ret_38 = splat_lane_u32(__s0_38, __p1_38); \
|
|
__ret_38; \
|
|
})
|
|
#else
|
|
#define vdup_lane_u32(__p0_39, __p1_39) __extension__ ({ \
|
|
uint32x2_t __ret_39; \
|
|
uint32x2_t __s0_39 = __p0_39; \
|
|
uint32x2_t __rev0_39; __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, __lane_reverse_64_32); \
|
|
__ret_39 = __noswap_splat_lane_u32(__rev0_39, __p1_39); \
|
|
__ret_39 = __builtin_shufflevector(__ret_39, __ret_39, __lane_reverse_64_32); \
|
|
__ret_39; \
|
|
})
|
|
#endif
|
|
|
|
#define vdup_lane_u64(__p0_40, __p1_40) __extension__ ({ \
|
|
uint64x1_t __ret_40; \
|
|
uint64x1_t __s0_40 = __p0_40; \
|
|
__ret_40 = splat_lane_u64(__s0_40, __p1_40); \
|
|
__ret_40; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_u16(__p0_41, __p1_41) __extension__ ({ \
|
|
uint16x4_t __ret_41; \
|
|
uint16x4_t __s0_41 = __p0_41; \
|
|
__ret_41 = splat_lane_u16(__s0_41, __p1_41); \
|
|
__ret_41; \
|
|
})
|
|
#else
|
|
#define vdup_lane_u16(__p0_42, __p1_42) __extension__ ({ \
|
|
uint16x4_t __ret_42; \
|
|
uint16x4_t __s0_42 = __p0_42; \
|
|
uint16x4_t __rev0_42; __rev0_42 = __builtin_shufflevector(__s0_42, __s0_42, __lane_reverse_64_16); \
|
|
__ret_42 = __noswap_splat_lane_u16(__rev0_42, __p1_42); \
|
|
__ret_42 = __builtin_shufflevector(__ret_42, __ret_42, __lane_reverse_64_16); \
|
|
__ret_42; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_s8(__p0_43, __p1_43) __extension__ ({ \
|
|
int8x8_t __ret_43; \
|
|
int8x8_t __s0_43 = __p0_43; \
|
|
__ret_43 = splat_lane_s8(__s0_43, __p1_43); \
|
|
__ret_43; \
|
|
})
|
|
#else
|
|
#define vdup_lane_s8(__p0_44, __p1_44) __extension__ ({ \
|
|
int8x8_t __ret_44; \
|
|
int8x8_t __s0_44 = __p0_44; \
|
|
int8x8_t __rev0_44; __rev0_44 = __builtin_shufflevector(__s0_44, __s0_44, __lane_reverse_64_8); \
|
|
__ret_44 = __noswap_splat_lane_s8(__rev0_44, __p1_44); \
|
|
__ret_44 = __builtin_shufflevector(__ret_44, __ret_44, __lane_reverse_64_8); \
|
|
__ret_44; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_f32(__p0_45, __p1_45) __extension__ ({ \
|
|
float32x2_t __ret_45; \
|
|
float32x2_t __s0_45 = __p0_45; \
|
|
__ret_45 = splat_lane_f32(__s0_45, __p1_45); \
|
|
__ret_45; \
|
|
})
|
|
#else
|
|
#define vdup_lane_f32(__p0_46, __p1_46) __extension__ ({ \
|
|
float32x2_t __ret_46; \
|
|
float32x2_t __s0_46 = __p0_46; \
|
|
float32x2_t __rev0_46; __rev0_46 = __builtin_shufflevector(__s0_46, __s0_46, __lane_reverse_64_32); \
|
|
__ret_46 = __noswap_splat_lane_f32(__rev0_46, __p1_46); \
|
|
__ret_46 = __builtin_shufflevector(__ret_46, __ret_46, __lane_reverse_64_32); \
|
|
__ret_46; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_f16(__p0_47, __p1_47) __extension__ ({ \
|
|
float16x4_t __ret_47; \
|
|
float16x4_t __s0_47 = __p0_47; \
|
|
__ret_47 = splat_lane_f16(__s0_47, __p1_47); \
|
|
__ret_47; \
|
|
})
|
|
#else
|
|
#define vdup_lane_f16(__p0_48, __p1_48) __extension__ ({ \
|
|
float16x4_t __ret_48; \
|
|
float16x4_t __s0_48 = __p0_48; \
|
|
float16x4_t __rev0_48; __rev0_48 = __builtin_shufflevector(__s0_48, __s0_48, __lane_reverse_64_16); \
|
|
__ret_48 = __noswap_splat_lane_f16(__rev0_48, __p1_48); \
|
|
__ret_48 = __builtin_shufflevector(__ret_48, __ret_48, __lane_reverse_64_16); \
|
|
__ret_48; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_s32(__p0_49, __p1_49) __extension__ ({ \
|
|
int32x2_t __ret_49; \
|
|
int32x2_t __s0_49 = __p0_49; \
|
|
__ret_49 = splat_lane_s32(__s0_49, __p1_49); \
|
|
__ret_49; \
|
|
})
|
|
#else
|
|
#define vdup_lane_s32(__p0_50, __p1_50) __extension__ ({ \
|
|
int32x2_t __ret_50; \
|
|
int32x2_t __s0_50 = __p0_50; \
|
|
int32x2_t __rev0_50; __rev0_50 = __builtin_shufflevector(__s0_50, __s0_50, __lane_reverse_64_32); \
|
|
__ret_50 = __noswap_splat_lane_s32(__rev0_50, __p1_50); \
|
|
__ret_50 = __builtin_shufflevector(__ret_50, __ret_50, __lane_reverse_64_32); \
|
|
__ret_50; \
|
|
})
|
|
#endif
|
|
|
|
#define vdup_lane_s64(__p0_51, __p1_51) __extension__ ({ \
|
|
int64x1_t __ret_51; \
|
|
int64x1_t __s0_51 = __p0_51; \
|
|
__ret_51 = splat_lane_s64(__s0_51, __p1_51); \
|
|
__ret_51; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_s16(__p0_52, __p1_52) __extension__ ({ \
|
|
int16x4_t __ret_52; \
|
|
int16x4_t __s0_52 = __p0_52; \
|
|
__ret_52 = splat_lane_s16(__s0_52, __p1_52); \
|
|
__ret_52; \
|
|
})
|
|
#else
|
|
#define vdup_lane_s16(__p0_53, __p1_53) __extension__ ({ \
|
|
int16x4_t __ret_53; \
|
|
int16x4_t __s0_53 = __p0_53; \
|
|
int16x4_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, __lane_reverse_64_16); \
|
|
__ret_53 = __noswap_splat_lane_s16(__rev0_53, __p1_53); \
|
|
__ret_53 = __builtin_shufflevector(__ret_53, __ret_53, __lane_reverse_64_16); \
|
|
__ret_53; \
|
|
})
|
|
#endif
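/* Illustrative sketch (editor's addition; the variable names are
 * hypothetical): vdup_lane_* broadcasts one lane of a 64-bit vector into every
 * lane of a 64-bit result, while vdupq_lane_* widens the broadcast to a
 * 128-bit result.  The lane index must be a constant expression in range for
 * the source vector, which is one reason these are macros rather than
 * functions.
 *
 *   float32x2_t pair = vdup_n_f32(1.5f);
 *   float32x4_t all  = vdupq_lane_f32(pair, 1);   // every lane == pair[1]
 */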
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vdup_n_p8(poly8_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vdup_n_p8(poly8_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4_t vdup_n_p16(poly16_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4_t vdup_n_p16(poly16_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vdupq_n_p8(poly8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vdupq_n_p8(poly8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vdupq_n_p16(poly16_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vdupq_n_p16(poly16_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vdupq_n_u8(uint8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vdupq_n_u8(uint8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vdupq_n_u32(uint32_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vdupq_n_u32(uint32_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vdupq_n_u64(uint64_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = (uint64x2_t) {__p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vdupq_n_u64(uint64_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = (uint64x2_t) {__p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vdupq_n_u16(uint16_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vdupq_n_u16(uint16_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vdupq_n_s8(int8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vdupq_n_s8(int8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vdupq_n_f32(float32_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = (float32x4_t) {__p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vdupq_n_f32(float32_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = (float32x4_t) {__p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_n_f16(__p0) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
__ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupq_n_f16(__p0) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
__ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vdupq_n_s32(int32_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = (int32x4_t) {__p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vdupq_n_s32(int32_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = (int32x4_t) {__p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vdupq_n_s64(int64_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = (int64x2_t) {__p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vdupq_n_s64(int64_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = (int64x2_t) {__p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vdupq_n_s16(int16_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vdupq_n_s16(int16_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vdup_n_u8(uint8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vdup_n_u8(uint8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vdup_n_u32(uint32_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = (uint32x2_t) {__p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vdup_n_u32(uint32_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = (uint32x2_t) {__p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vdup_n_u64(uint64_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = (uint64x1_t) {__p0};
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vdup_n_u16(uint16_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vdup_n_u16(uint16_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vdup_n_s8(int8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vdup_n_s8(int8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vdup_n_f32(float32_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = (float32x2_t) {__p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vdup_n_f32(float32_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = (float32x2_t) {__p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_n_f16(__p0) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
__ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdup_n_f16(__p0) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
__ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vdup_n_s32(int32_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = (int32x2_t) {__p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vdup_n_s32(int32_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = (int32x2_t) {__p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vdup_n_s64(int64_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = (int64x1_t) {__p0};
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vdup_n_s16(int16_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = (int16x4_t) {__p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vdup_n_s16(int16_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = (int16x4_t) {__p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
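/* Illustrative sketch (editor's addition; scale() is a hypothetical helper):
 * vdup_n_* and vdupq_n_* build a vector with every lane set to the same
 * scalar, e.g. to multiply a whole vector by a runtime constant:
 *
 *   static inline float32x4_t scale(float32x4_t v, float k) {
 *     // vmulq_f32 is defined elsewhere in this header.
 *     return vmulq_f32(v, vdupq_n_f32(k));
 *   }
 */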
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 ^ __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 ^ __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
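/* Illustrative sketch (editor's addition; xor_mask() is a hypothetical
 * helper): veor_* and veorq_* are plain lane-wise exclusive OR, implemented
 * directly with the ^ operator on the vector types:
 *
 *   static inline uint8x16_t xor_mask(uint8x16_t data, uint8x16_t mask) {
 *     return veorq_u8(data, mask);
 *   }
 */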
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
poly8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
poly8x8_t __s1 = __p1; \
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
poly16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 5)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
poly16x4_t __s1 = __p1; \
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 5)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
poly8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
poly8x16_t __s1 = __p1; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
poly16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
poly16x8_t __s1 = __p1; \
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 41)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 41)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vext_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
uint64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 9)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __s1 = __p1; \
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 9)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vext_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
int64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 8)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 8)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
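/* Illustrative sketch (editor's addition; a, b and window are hypothetical
 * variables): vext_* and vextq_* concatenate two vectors and extract a window
 * of lanes starting at a constant offset, which is the usual way to express a
 * shift by n lanes across a register pair:
 *
 *   // window = { a[3], ..., a[15], b[0], b[1], b[2] }
 *   uint8x16_t window = vextq_u8(a, b, 3);
 */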
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x8_t vget_high_p8(poly8x16_t __p0) {
poly8x8_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x8_t vget_high_p8(poly8x16_t __p0) {
poly8x8_t __ret;
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
__ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
poly8x8_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly16x4_t vget_high_p16(poly16x8_t __p0) {
poly16x4_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
return __ret;
}
#else
__ai __attribute__((target("neon"))) poly16x4_t vget_high_p16(poly16x8_t __p0) {
poly16x4_t __ret;
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
__ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vget_high_u8(uint8x16_t __p0) {
uint8x8_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vget_high_u8(uint8x16_t __p0) {
uint8x8_t __ret;
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
__ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
return __ret;
}
__ai __attribute__((target("neon"))) uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
uint8x8_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vget_high_u32(uint32x4_t __p0) {
uint32x2_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 2, 3);
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vget_high_u32(uint32x4_t __p0) {
uint32x2_t __ret;
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
__ai __attribute__((target("neon"))) uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
uint32x2_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 2, 3);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x1_t vget_high_u64(uint64x2_t __p0) {
uint64x1_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x1_t vget_high_u64(uint64x2_t __p0) {
uint64x1_t __ret;
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_shufflevector(__rev0, __rev0, 1);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vget_high_u16(uint16x8_t __p0) {
uint16x4_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vget_high_u16(uint16x8_t __p0) {
uint16x4_t __ret;
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
__ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
__ai __attribute__((target("neon"))) uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
uint16x4_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vget_high_s8(int8x16_t __p0) {
int8x8_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vget_high_s8(int8x16_t __p0) {
int8x8_t __ret;
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
__ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
return __ret;
}
__ai __attribute__((target("neon"))) int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
int8x8_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vget_high_f32(float32x4_t __p0) {
float32x2_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 2, 3);
return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vget_high_f32(float32x4_t __p0) {
float32x2_t __ret;
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
__ai __attribute__((target("neon"))) float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
float32x2_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 2, 3);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float16x4_t vget_high_f16(float16x8_t __p0) {
float16x4_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
return __ret;
}
#else
__ai __attribute__((target("neon"))) float16x4_t vget_high_f16(float16x8_t __p0) {
float16x4_t __ret;
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
__ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
__ai __attribute__((target("neon"))) float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
float16x4_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vget_high_s32(int32x4_t __p0) {
int32x2_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 2, 3);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vget_high_s32(int32x4_t __p0) {
int32x2_t __ret;
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
__ai __attribute__((target("neon"))) int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
int32x2_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 2, 3);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x1_t vget_high_s64(int64x2_t __p0) {
int64x1_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x1_t vget_high_s64(int64x2_t __p0) {
int64x1_t __ret;
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_shufflevector(__rev0, __rev0, 1);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vget_high_s16(int16x8_t __p0) {
int16x4_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vget_high_s16(int16x8_t __p0) {
int16x4_t __ret;
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
__ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
__ai __attribute__((target("neon"))) int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
int16x4_t __ret;
__ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vget_lane_p8(__p0, __p1) __extension__ ({ \
poly8_t __ret; \
poly8x8_t __s0 = __p0; \
__ret = __builtin_bit_cast(poly8_t, __builtin_neon_vget_lane_i8(__s0, __p1)); \
__ret; \
})
#else
#define vget_lane_p8(__p0, __p1) __extension__ ({ \
poly8_t __ret; \
poly8x8_t __s0 = __p0; \
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
__ret = __builtin_bit_cast(poly8_t, __builtin_neon_vget_lane_i8(__rev0, __p1)); \
__ret; \
})
#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
poly8_t __ret; \
poly8x8_t __s0 = __p0; \
__ret = __builtin_bit_cast(poly8_t, __builtin_neon_vget_lane_i8(__s0, __p1)); \
__ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vget_lane_p16(__p0, __p1) __extension__ ({ \
poly16_t __ret; \
poly16x4_t __s0 = __p0; \
__ret = __builtin_bit_cast(poly16_t, __builtin_neon_vget_lane_i16(__s0, __p1)); \
__ret; \
})
#else
#define vget_lane_p16(__p0, __p1) __extension__ ({ \
poly16_t __ret; \
poly16x4_t __s0 = __p0; \
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
__ret = __builtin_bit_cast(poly16_t, __builtin_neon_vget_lane_i16(__rev0, __p1)); \
__ret; \
})
#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
poly16_t __ret; \
poly16x4_t __s0 = __p0; \
__ret = __builtin_bit_cast(poly16_t, __builtin_neon_vget_lane_i16(__s0, __p1)); \
__ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8_t, __builtin_neon_vgetq_lane_i8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8_t, __builtin_neon_vgetq_lane_i8(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8_t, __builtin_neon_vgetq_lane_i8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16_t, __builtin_neon_vgetq_lane_i16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(poly16_t, __builtin_neon_vgetq_lane_i16(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16_t, __builtin_neon_vgetq_lane_i16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vgetq_lane_i8(__builtin_bit_cast(int8x16_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vgetq_lane_i8(__builtin_bit_cast(int8x16_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vgetq_lane_i8(__builtin_bit_cast(int8x16_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vgetq_lane_i32(__builtin_bit_cast(int32x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vgetq_lane_i32(__builtin_bit_cast(int32x4_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vgetq_lane_i32(__builtin_bit_cast(int32x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vgetq_lane_i64(__builtin_bit_cast(int64x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vgetq_lane_i64(__builtin_bit_cast(int64x2_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vgetq_lane_i64(__builtin_bit_cast(int64x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vgetq_lane_i16(__builtin_bit_cast(int16x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vgetq_lane_i16(__builtin_bit_cast(int16x8_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vgetq_lane_i16(__builtin_bit_cast(int16x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vgetq_lane_i8(__builtin_bit_cast(int8x16_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vgetq_lane_i8(__builtin_bit_cast(int8x16_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vgetq_lane_i8(__builtin_bit_cast(int8x16_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vgetq_lane_f32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vgetq_lane_f32(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vgetq_lane_f32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vgetq_lane_i32(__builtin_bit_cast(int32x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vgetq_lane_i32(__builtin_bit_cast(int32x4_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vgetq_lane_i32(__builtin_bit_cast(int32x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vgetq_lane_i64(__builtin_bit_cast(int64x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vgetq_lane_i64(__builtin_bit_cast(int64x2_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vgetq_lane_i64(__builtin_bit_cast(int64x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vgetq_lane_i16(__builtin_bit_cast(int16x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vgetq_lane_i16(__builtin_bit_cast(int16x8_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vgetq_lane_i16(__builtin_bit_cast(int16x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vget_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vget_lane_i8(__builtin_bit_cast(int8x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vget_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vget_lane_i8(__builtin_bit_cast(int8x8_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vget_lane_i8(__builtin_bit_cast(int8x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vget_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vget_lane_i32(__builtin_bit_cast(int32x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vget_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vget_lane_i32(__builtin_bit_cast(int32x2_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vget_lane_i32(__builtin_bit_cast(int32x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vget_lane_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vget_lane_i64(__builtin_bit_cast(int64x1_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vget_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vget_lane_i16(__builtin_bit_cast(int16x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vget_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vget_lane_i16(__builtin_bit_cast(int16x4_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vget_lane_i16(__builtin_bit_cast(int16x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vget_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vget_lane_i8(__builtin_bit_cast(int8x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vget_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vget_lane_i8(__builtin_bit_cast(int8x8_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vget_lane_i8(__builtin_bit_cast(int8x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vget_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vget_lane_f32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vget_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vget_lane_f32(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vget_lane_f32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vget_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vget_lane_i32(__builtin_bit_cast(int32x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vget_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vget_lane_i32(__builtin_bit_cast(int32x2_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vget_lane_i32(__builtin_bit_cast(int32x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vget_lane_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vget_lane_i64(__builtin_bit_cast(int64x1_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vget_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vget_lane_i16(__builtin_bit_cast(int16x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vget_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vget_lane_i16(__builtin_bit_cast(int16x4_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vget_lane_i16(__builtin_bit_cast(int16x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vget_low_p8(poly8x16_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vget_low_p8(poly8x16_t __p0) {
|
|
poly8x8_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4_t vget_low_p16(poly16x8_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4_t vget_low_p16(poly16x8_t __p0) {
|
|
poly16x4_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vget_low_u8(uint8x16_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vget_low_u8(uint8x16_t __p0) {
|
|
uint8x8_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vget_low_u32(uint32x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vget_low_u32(uint32x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x1_t vget_low_u64(uint64x2_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x1_t vget_low_u64(uint64x2_t __p0) {
|
|
uint64x1_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vget_low_u16(uint16x8_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vget_low_u16(uint16x8_t __p0) {
|
|
uint16x4_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vget_low_s8(int8x16_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vget_low_s8(int8x16_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vget_low_f32(float32x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vget_low_f32(float32x4_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x4_t vget_low_f16(float16x8_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x4_t vget_low_f16(float16x8_t __p0) {
|
|
float16x4_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vget_low_s32(int32x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vget_low_s32(int32x4_t __p0) {
|
|
int32x2_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x1_t vget_low_s64(int64x2_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x1_t vget_low_s64(int64x2_t __p0) {
|
|
int64x1_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vget_low_s16(int16x8_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vget_low_s16(int16x8_t __p0) {
|
|
int16x4_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vhsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vhsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_p8(__p0) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vld1_v(__p0, 4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_p8(__p0) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vld1_v(__p0, 4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_p16(__p0) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vld1_v(__p0, 5)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_p16(__p0) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vld1_v(__p0, 5)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p8(__p0) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vld1q_v(__p0, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p8(__p0) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vld1q_v(__p0, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p16(__p0) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vld1q_v(__p0, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p16(__p0) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vld1q_v(__p0, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u8(__p0) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vld1q_v(__p0, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u8(__p0) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vld1q_v(__p0, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u32(__p0) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vld1q_v(__p0, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u32(__p0) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vld1q_v(__p0, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u64(__p0) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vld1q_v(__p0, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u64(__p0) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vld1q_v(__p0, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u16(__p0) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vld1q_v(__p0, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u16(__p0) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vld1q_v(__p0, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s8(__p0) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vld1q_v(__p0, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s8(__p0) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vld1q_v(__p0, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_f32(__p0) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vld1q_v(__p0, 41)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_f32(__p0) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vld1q_v(__p0, 41)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s32(__p0) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vld1q_v(__p0, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s32(__p0) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vld1q_v(__p0, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s64(__p0) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vld1q_v(__p0, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s64(__p0) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vld1q_v(__p0, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s16(__p0) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vld1q_v(__p0, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s16(__p0) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vld1q_v(__p0, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u8(__p0) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vld1_v(__p0, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u8(__p0) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vld1_v(__p0, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u32(__p0) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vld1_v(__p0, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u32(__p0) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vld1_v(__p0, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_u64(__p0) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vld1_v(__p0, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u16(__p0) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vld1_v(__p0, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u16(__p0) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vld1_v(__p0, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s8(__p0) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vld1_v(__p0, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s8(__p0) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vld1_v(__p0, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_f32(__p0) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vld1_v(__p0, 9)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_f32(__p0) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vld1_v(__p0, 9)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s32(__p0) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vld1_v(__p0, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s32(__p0) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vld1_v(__p0, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_s64(__p0) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vld1_v(__p0, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s16(__p0) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vld1_v(__p0, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s16(__p0) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vld1_v(__p0, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_p8(__p0) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vld1_dup_v(__p0, 4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_p8(__p0) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vld1_dup_v(__p0, 4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_p16(__p0) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vld1_dup_v(__p0, 5)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_p16(__p0) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vld1_dup_v(__p0, 5)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_p8(__p0) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vld1q_dup_v(__p0, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_p8(__p0) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vld1q_dup_v(__p0, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_p16(__p0) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vld1q_dup_v(__p0, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_p16(__p0) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vld1q_dup_v(__p0, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_u8(__p0) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vld1q_dup_v(__p0, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_u8(__p0) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vld1q_dup_v(__p0, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_u32(__p0) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vld1q_dup_v(__p0, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_u32(__p0) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vld1q_dup_v(__p0, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_u64(__p0) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vld1q_dup_v(__p0, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_u64(__p0) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vld1q_dup_v(__p0, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_u16(__p0) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vld1q_dup_v(__p0, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_u16(__p0) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vld1q_dup_v(__p0, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_s8(__p0) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vld1q_dup_v(__p0, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_s8(__p0) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vld1q_dup_v(__p0, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_f32(__p0) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vld1q_dup_v(__p0, 41)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_f32(__p0) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vld1q_dup_v(__p0, 41)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_s32(__p0) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vld1q_dup_v(__p0, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_s32(__p0) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vld1q_dup_v(__p0, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_s64(__p0) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vld1q_dup_v(__p0, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_s64(__p0) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vld1q_dup_v(__p0, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_s16(__p0) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vld1q_dup_v(__p0, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_s16(__p0) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vld1q_dup_v(__p0, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_u8(__p0) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vld1_dup_v(__p0, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_u8(__p0) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vld1_dup_v(__p0, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_u32(__p0) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vld1_dup_v(__p0, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_u32(__p0) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vld1_dup_v(__p0, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_dup_u64(__p0) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vld1_dup_v(__p0, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_u16(__p0) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vld1_dup_v(__p0, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_u16(__p0) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vld1_dup_v(__p0, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_s8(__p0) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vld1_dup_v(__p0, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_s8(__p0) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vld1_dup_v(__p0, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_f32(__p0) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vld1_dup_v(__p0, 9)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_f32(__p0) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vld1_dup_v(__p0, 9)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_s32(__p0) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vld1_dup_v(__p0, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_s32(__p0) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vld1_dup_v(__p0, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_dup_s64(__p0) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vld1_dup_v(__p0, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_s16(__p0) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vld1_dup_v(__p0, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_s16(__p0) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vld1_dup_v(__p0, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s1 = __p1; \
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 5)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s1 = __p1; \
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 5)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s1 = __p1; \
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s1 = __p1; \
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s1 = __p1; \
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s1 = __p1; \
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s1 = __p1; \
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 41)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 41)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s1 = __p1; \
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s1 = __p1; \
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s1 = __p1; \
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 9)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s1 = __p1; \
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 9)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_p8_x2(__p0) __extension__ ({ \
|
|
poly8x8x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_p8_x2(__p0) __extension__ ({ \
|
|
poly8x8x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_p16_x2(__p0) __extension__ ({ \
|
|
poly16x4x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_p16_x2(__p0) __extension__ ({ \
|
|
poly16x4x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p8_x2(__p0) __extension__ ({ \
|
|
poly8x16x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p8_x2(__p0) __extension__ ({ \
|
|
poly8x16x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p16_x2(__p0) __extension__ ({ \
|
|
poly16x8x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p16_x2(__p0) __extension__ ({ \
|
|
poly16x8x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u8_x2(__p0) __extension__ ({ \
|
|
uint8x16x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u8_x2(__p0) __extension__ ({ \
|
|
uint8x16x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u32_x2(__p0) __extension__ ({ \
|
|
uint32x4x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u32_x2(__p0) __extension__ ({ \
|
|
uint32x4x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u64_x2(__p0) __extension__ ({ \
|
|
uint64x2x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u64_x2(__p0) __extension__ ({ \
|
|
uint64x2x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u16_x2(__p0) __extension__ ({ \
|
|
uint16x8x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u16_x2(__p0) __extension__ ({ \
|
|
uint16x8x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s8_x2(__p0) __extension__ ({ \
|
|
int8x16x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s8_x2(__p0) __extension__ ({ \
|
|
int8x16x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_f32_x2(__p0) __extension__ ({ \
|
|
float32x4x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_f32_x2(__p0) __extension__ ({ \
|
|
float32x4x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s32_x2(__p0) __extension__ ({ \
|
|
int32x4x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s32_x2(__p0) __extension__ ({ \
|
|
int32x4x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s64_x2(__p0) __extension__ ({ \
|
|
int64x2x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s64_x2(__p0) __extension__ ({ \
|
|
int64x2x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s16_x2(__p0) __extension__ ({ \
|
|
int16x8x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s16_x2(__p0) __extension__ ({ \
|
|
int16x8x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u8_x2(__p0) __extension__ ({ \
|
|
uint8x8x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u8_x2(__p0) __extension__ ({ \
|
|
uint8x8x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u32_x2(__p0) __extension__ ({ \
|
|
uint32x2x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u32_x2(__p0) __extension__ ({ \
|
|
uint32x2x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_u64_x2(__p0) __extension__ ({ \
|
|
uint64x1x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u16_x2(__p0) __extension__ ({ \
|
|
uint16x4x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u16_x2(__p0) __extension__ ({ \
|
|
uint16x4x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s8_x2(__p0) __extension__ ({ \
|
|
int8x8x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s8_x2(__p0) __extension__ ({ \
|
|
int8x8x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_f32_x2(__p0) __extension__ ({ \
|
|
float32x2x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_f32_x2(__p0) __extension__ ({ \
|
|
float32x2x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s32_x2(__p0) __extension__ ({ \
|
|
int32x2x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s32_x2(__p0) __extension__ ({ \
|
|
int32x2x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_s64_x2(__p0) __extension__ ({ \
|
|
int64x1x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s16_x2(__p0) __extension__ ({ \
|
|
int16x4x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s16_x2(__p0) __extension__ ({ \
|
|
int16x4x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_p8_x3(__p0) __extension__ ({ \
|
|
poly8x8x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_p8_x3(__p0) __extension__ ({ \
|
|
poly8x8x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_p16_x3(__p0) __extension__ ({ \
|
|
poly16x4x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_p16_x3(__p0) __extension__ ({ \
|
|
poly16x4x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p8_x3(__p0) __extension__ ({ \
|
|
poly8x16x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p8_x3(__p0) __extension__ ({ \
|
|
poly8x16x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p16_x3(__p0) __extension__ ({ \
|
|
poly16x8x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p16_x3(__p0) __extension__ ({ \
|
|
poly16x8x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u8_x3(__p0) __extension__ ({ \
|
|
uint8x16x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u8_x3(__p0) __extension__ ({ \
|
|
uint8x16x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u32_x3(__p0) __extension__ ({ \
|
|
uint32x4x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u32_x3(__p0) __extension__ ({ \
|
|
uint32x4x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u64_x3(__p0) __extension__ ({ \
|
|
uint64x2x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u64_x3(__p0) __extension__ ({ \
|
|
uint64x2x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u16_x3(__p0) __extension__ ({ \
|
|
uint16x8x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u16_x3(__p0) __extension__ ({ \
|
|
uint16x8x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s8_x3(__p0) __extension__ ({ \
|
|
int8x16x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s8_x3(__p0) __extension__ ({ \
|
|
int8x16x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_f32_x3(__p0) __extension__ ({ \
|
|
float32x4x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_f32_x3(__p0) __extension__ ({ \
|
|
float32x4x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s32_x3(__p0) __extension__ ({ \
|
|
int32x4x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s32_x3(__p0) __extension__ ({ \
|
|
int32x4x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s64_x3(__p0) __extension__ ({ \
|
|
int64x2x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s64_x3(__p0) __extension__ ({ \
|
|
int64x2x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s16_x3(__p0) __extension__ ({ \
|
|
int16x8x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s16_x3(__p0) __extension__ ({ \
|
|
int16x8x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u8_x3(__p0) __extension__ ({ \
|
|
uint8x8x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u8_x3(__p0) __extension__ ({ \
|
|
uint8x8x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u32_x3(__p0) __extension__ ({ \
|
|
uint32x2x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u32_x3(__p0) __extension__ ({ \
|
|
uint32x2x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_u64_x3(__p0) __extension__ ({ \
|
|
uint64x1x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u16_x3(__p0) __extension__ ({ \
|
|
uint16x4x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u16_x3(__p0) __extension__ ({ \
|
|
uint16x4x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s8_x3(__p0) __extension__ ({ \
|
|
int8x8x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s8_x3(__p0) __extension__ ({ \
|
|
int8x8x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_f32_x3(__p0) __extension__ ({ \
|
|
float32x2x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_f32_x3(__p0) __extension__ ({ \
|
|
float32x2x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s32_x3(__p0) __extension__ ({ \
|
|
int32x2x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s32_x3(__p0) __extension__ ({ \
|
|
int32x2x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_s64_x3(__p0) __extension__ ({ \
|
|
int64x1x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s16_x3(__p0) __extension__ ({ \
|
|
int16x4x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s16_x3(__p0) __extension__ ({ \
|
|
int16x4x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_p8_x4(__p0) __extension__ ({ \
|
|
poly8x8x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_p8_x4(__p0) __extension__ ({ \
|
|
poly8x8x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_p16_x4(__p0) __extension__ ({ \
|
|
poly16x4x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_p16_x4(__p0) __extension__ ({ \
|
|
poly16x4x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p8_x4(__p0) __extension__ ({ \
|
|
poly8x16x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p8_x4(__p0) __extension__ ({ \
|
|
poly8x16x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p16_x4(__p0) __extension__ ({ \
|
|
poly16x8x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p16_x4(__p0) __extension__ ({ \
|
|
poly16x8x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u8_x4(__p0) __extension__ ({ \
|
|
uint8x16x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u8_x4(__p0) __extension__ ({ \
|
|
uint8x16x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u32_x4(__p0) __extension__ ({ \
|
|
uint32x4x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u32_x4(__p0) __extension__ ({ \
|
|
uint32x4x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u64_x4(__p0) __extension__ ({ \
|
|
uint64x2x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u64_x4(__p0) __extension__ ({ \
|
|
uint64x2x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_u16_x4(__p0) __extension__ ({ \
|
|
uint16x8x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_u16_x4(__p0) __extension__ ({ \
|
|
uint16x8x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s8_x4(__p0) __extension__ ({ \
|
|
int8x16x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s8_x4(__p0) __extension__ ({ \
|
|
int8x16x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_f32_x4(__p0) __extension__ ({ \
|
|
float32x4x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_f32_x4(__p0) __extension__ ({ \
|
|
float32x4x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s32_x4(__p0) __extension__ ({ \
|
|
int32x4x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s32_x4(__p0) __extension__ ({ \
|
|
int32x4x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s64_x4(__p0) __extension__ ({ \
|
|
int64x2x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s64_x4(__p0) __extension__ ({ \
|
|
int64x2x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_s16_x4(__p0) __extension__ ({ \
|
|
int16x8x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_s16_x4(__p0) __extension__ ({ \
|
|
int16x8x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u8_x4(__p0) __extension__ ({ \
|
|
uint8x8x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u8_x4(__p0) __extension__ ({ \
|
|
uint8x8x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u32_x4(__p0) __extension__ ({ \
|
|
uint32x2x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u32_x4(__p0) __extension__ ({ \
|
|
uint32x2x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_u64_x4(__p0) __extension__ ({ \
|
|
uint64x1x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_u16_x4(__p0) __extension__ ({ \
|
|
uint16x4x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_u16_x4(__p0) __extension__ ({ \
|
|
uint16x4x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s8_x4(__p0) __extension__ ({ \
|
|
int8x8x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s8_x4(__p0) __extension__ ({ \
|
|
int8x8x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_f32_x4(__p0) __extension__ ({ \
|
|
float32x2x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_f32_x4(__p0) __extension__ ({ \
|
|
float32x2x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s32_x4(__p0) __extension__ ({ \
|
|
int32x2x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s32_x4(__p0) __extension__ ({ \
|
|
int32x2x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_s64_x4(__p0) __extension__ ({ \
|
|
int64x1x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_s16_x4(__p0) __extension__ ({ \
|
|
int16x4x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_s16_x4(__p0) __extension__ ({ \
|
|
int16x4x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_p8(__p0) __extension__ ({ \
|
|
poly8x8x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 4); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_p8(__p0) __extension__ ({ \
|
|
poly8x8x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 4); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_p16(__p0) __extension__ ({ \
|
|
poly16x4x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 5); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_p16(__p0) __extension__ ({ \
|
|
poly16x4x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 5); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_p8(__p0) __extension__ ({ \
|
|
poly8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_p8(__p0) __extension__ ({ \
|
|
poly8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_p16(__p0) __extension__ ({ \
|
|
poly16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 37); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_p16(__p0) __extension__ ({ \
|
|
poly16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 37); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_u8(__p0) __extension__ ({ \
|
|
uint8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 48); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_u8(__p0) __extension__ ({ \
|
|
uint8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 48); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_u32(__p0) __extension__ ({ \
|
|
uint32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_u32(__p0) __extension__ ({ \
|
|
uint32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_u16(__p0) __extension__ ({ \
|
|
uint16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_u16(__p0) __extension__ ({ \
|
|
uint16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_s8(__p0) __extension__ ({ \
|
|
int8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_s8(__p0) __extension__ ({ \
|
|
int8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_f32(__p0) __extension__ ({ \
|
|
float32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_f32(__p0) __extension__ ({ \
|
|
float32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_s32(__p0) __extension__ ({ \
|
|
int32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_s32(__p0) __extension__ ({ \
|
|
int32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_s16(__p0) __extension__ ({ \
|
|
int16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_s16(__p0) __extension__ ({ \
|
|
int16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_u8(__p0) __extension__ ({ \
|
|
uint8x8x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_u8(__p0) __extension__ ({ \
|
|
uint8x8x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_u32(__p0) __extension__ ({ \
|
|
uint32x2x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_u32(__p0) __extension__ ({ \
|
|
uint32x2x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld2_u64(__p0) __extension__ ({ \
|
|
uint64x1x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 19); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_u16(__p0) __extension__ ({ \
|
|
uint16x4x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_u16(__p0) __extension__ ({ \
|
|
uint16x4x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_s8(__p0) __extension__ ({ \
|
|
int8x8x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_s8(__p0) __extension__ ({ \
|
|
int8x8x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_f32(__p0) __extension__ ({ \
|
|
float32x2x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_f32(__p0) __extension__ ({ \
|
|
float32x2x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_s32(__p0) __extension__ ({ \
|
|
int32x2x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_s32(__p0) __extension__ ({ \
|
|
int32x2x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld2_s64(__p0) __extension__ ({ \
|
|
int64x1x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 3); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_s16(__p0) __extension__ ({ \
|
|
int16x4x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_s16(__p0) __extension__ ({ \
|
|
int16x4x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_p8(__p0) __extension__ ({ \
|
|
poly8x8x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_p8(__p0) __extension__ ({ \
|
|
poly8x8x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_p16(__p0) __extension__ ({ \
|
|
poly16x4x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_p16(__p0) __extension__ ({ \
|
|
poly16x4x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_p8(__p0) __extension__ ({ \
|
|
poly8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_p8(__p0) __extension__ ({ \
|
|
poly8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_p16(__p0) __extension__ ({ \
|
|
poly16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_p16(__p0) __extension__ ({ \
|
|
poly16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_u8(__p0) __extension__ ({ \
|
|
uint8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_u8(__p0) __extension__ ({ \
|
|
uint8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_u32(__p0) __extension__ ({ \
|
|
uint32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_u32(__p0) __extension__ ({ \
|
|
uint32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_u64(__p0) __extension__ ({ \
|
|
uint64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_u64(__p0) __extension__ ({ \
|
|
uint64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_u16(__p0) __extension__ ({ \
|
|
uint16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_u16(__p0) __extension__ ({ \
|
|
uint16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_s8(__p0) __extension__ ({ \
|
|
int8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_s8(__p0) __extension__ ({ \
|
|
int8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_f32(__p0) __extension__ ({ \
|
|
float32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_f32(__p0) __extension__ ({ \
|
|
float32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_s32(__p0) __extension__ ({ \
|
|
int32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_s32(__p0) __extension__ ({ \
|
|
int32x4x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_s64(__p0) __extension__ ({ \
|
|
int64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_s64(__p0) __extension__ ({ \
|
|
int64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_s16(__p0) __extension__ ({ \
|
|
int16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_s16(__p0) __extension__ ({ \
|
|
int16x8x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_u8(__p0) __extension__ ({ \
|
|
uint8x8x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_u8(__p0) __extension__ ({ \
|
|
uint8x8x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_u32(__p0) __extension__ ({ \
|
|
uint32x2x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_u32(__p0) __extension__ ({ \
|
|
uint32x2x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld2_dup_u64(__p0) __extension__ ({ \
|
|
uint64x1x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_u16(__p0) __extension__ ({ \
|
|
uint16x4x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_u16(__p0) __extension__ ({ \
|
|
uint16x4x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_s8(__p0) __extension__ ({ \
|
|
int8x8x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_s8(__p0) __extension__ ({ \
|
|
int8x8x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_f32(__p0) __extension__ ({ \
|
|
float32x2x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_f32(__p0) __extension__ ({ \
|
|
float32x2x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_s32(__p0) __extension__ ({ \
|
|
int32x2x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_s32(__p0) __extension__ ({ \
|
|
int32x2x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld2_dup_s64(__p0) __extension__ ({ \
|
|
int64x1x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_s16(__p0) __extension__ ({ \
|
|
int16x4x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_s16(__p0) __extension__ ({ \
|
|
int16x4x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8x2_t __ret; \
|
|
poly8x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 4); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8x2_t __ret; \
|
|
poly8x8x2_t __s1 = __p1; \
|
|
poly8x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 4); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4x2_t __ret; \
|
|
poly16x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 5); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4x2_t __ret; \
|
|
poly16x4x2_t __s1 = __p1; \
|
|
poly16x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 5); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8x2_t __ret; \
|
|
poly16x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 37); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8x2_t __ret; \
|
|
poly16x8x2_t __s1 = __p1; \
|
|
poly16x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 37); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x2_t __ret; \
|
|
uint32x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x2_t __ret; \
|
|
uint32x4x2_t __s1 = __p1; \
|
|
uint32x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x2_t __ret; \
|
|
uint16x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x2_t __ret; \
|
|
uint16x8x2_t __s1 = __p1; \
|
|
uint16x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x2_t __ret; \
|
|
float32x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x2_t __ret; \
|
|
float32x4x2_t __s1 = __p1; \
|
|
float32x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x2_t __ret; \
|
|
int32x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x2_t __ret; \
|
|
int32x4x2_t __s1 = __p1; \
|
|
int32x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x2_t __ret; \
|
|
int16x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x2_t __ret; \
|
|
int16x8x2_t __s1 = __p1; \
|
|
int16x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x2_t __ret; \
|
|
uint8x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x2_t __ret; \
|
|
uint8x8x2_t __s1 = __p1; \
|
|
uint8x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x2_t __ret; \
|
|
uint32x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x2_t __ret; \
|
|
uint32x2x2_t __s1 = __p1; \
|
|
uint32x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x2_t __ret; \
|
|
uint16x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x2_t __ret; \
|
|
uint16x4x2_t __s1 = __p1; \
|
|
uint16x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x2_t __ret; \
|
|
int8x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x2_t __ret; \
|
|
int8x8x2_t __s1 = __p1; \
|
|
int8x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x2_t __ret; \
|
|
float32x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x2_t __ret; \
|
|
float32x2x2_t __s1 = __p1; \
|
|
float32x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x2_t __ret; \
|
|
int32x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x2_t __ret; \
|
|
int32x2x2_t __s1 = __p1; \
|
|
int32x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x2_t __ret; \
|
|
int16x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x2_t __ret; \
|
|
int16x4x2_t __s1 = __p1; \
|
|
int16x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_p8(__p0) __extension__ ({ \
|
|
poly8x8x3_t __ret; \
|
|
__builtin_neon_vld3_v(&__ret, __p0, 4); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_p8(__p0) __extension__ ({ \
|
|
poly8x8x3_t __ret; \
|
|
__builtin_neon_vld3_v(&__ret, __p0, 4); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_p16(__p0) __extension__ ({ \
|
|
poly16x4x3_t __ret; \
|
|
__builtin_neon_vld3_v(&__ret, __p0, 5); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_p16(__p0) __extension__ ({ \
|
|
poly16x4x3_t __ret; \
|
|
__builtin_neon_vld3_v(&__ret, __p0, 5); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_p8(__p0) __extension__ ({ \
|
|
poly8x16x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_p8(__p0) __extension__ ({ \
|
|
poly8x16x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
#define vld3q_p16(__p0) __extension__ ({ \
  poly16x8x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
  __ret; \
})
#else
#define vld3q_p16(__p0) __extension__ ({ \
  poly16x8x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3q_u8(__p0) __extension__ ({ \
  uint8x16x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
  __ret; \
})
#else
#define vld3q_u8(__p0) __extension__ ({ \
  uint8x16x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3q_u32(__p0) __extension__ ({ \
  uint32x4x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
  __ret; \
})
#else
#define vld3q_u32(__p0) __extension__ ({ \
  uint32x4x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3q_u16(__p0) __extension__ ({ \
  uint16x8x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
  __ret; \
})
#else
#define vld3q_u16(__p0) __extension__ ({ \
  uint16x8x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3q_s8(__p0) __extension__ ({ \
  int8x16x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
  __ret; \
})
#else
#define vld3q_s8(__p0) __extension__ ({ \
  int8x16x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3q_f32(__p0) __extension__ ({ \
  float32x4x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
  __ret; \
})
#else
#define vld3q_f32(__p0) __extension__ ({ \
  float32x4x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3q_s32(__p0) __extension__ ({ \
  int32x4x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
  __ret; \
})
#else
#define vld3q_s32(__p0) __extension__ ({ \
  int32x4x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3q_s16(__p0) __extension__ ({ \
  int16x8x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
  __ret; \
})
#else
#define vld3q_s16(__p0) __extension__ ({ \
  int16x8x3_t __ret; \
  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
  __ret; \
})
#endif

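/* 64-bit (D-register) vld3 de-interleaving loads follow. As with the 128-bit
   forms above, the big-endian variants call the same builtin and then reverse
   the lanes of each result vector with __builtin_shufflevector so that the
   element order matches what the little-endian path produces. */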
#ifdef __LITTLE_ENDIAN__
#define vld3_u8(__p0) __extension__ ({ \
  uint8x8x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 16); \
  __ret; \
})
#else
#define vld3_u8(__p0) __extension__ ({ \
  uint8x8x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 16); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3_u32(__p0) __extension__ ({ \
  uint32x2x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 18); \
  __ret; \
})
#else
#define vld3_u32(__p0) __extension__ ({ \
  uint32x2x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 18); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
  __ret; \
})
#endif

#define vld3_u64(__p0) __extension__ ({ \
  uint64x1x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 19); \
  __ret; \
})
#ifdef __LITTLE_ENDIAN__
#define vld3_u16(__p0) __extension__ ({ \
  uint16x4x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 17); \
  __ret; \
})
#else
#define vld3_u16(__p0) __extension__ ({ \
  uint16x4x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 17); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3_s8(__p0) __extension__ ({ \
  int8x8x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 0); \
  __ret; \
})
#else
#define vld3_s8(__p0) __extension__ ({ \
  int8x8x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 0); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3_f32(__p0) __extension__ ({ \
  float32x2x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 9); \
  __ret; \
})
#else
#define vld3_f32(__p0) __extension__ ({ \
  float32x2x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 9); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3_s32(__p0) __extension__ ({ \
  int32x2x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 2); \
  __ret; \
})
#else
#define vld3_s32(__p0) __extension__ ({ \
  int32x2x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 2); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
  __ret; \
})
#endif

#define vld3_s64(__p0) __extension__ ({ \
  int64x1x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 3); \
  __ret; \
})
#ifdef __LITTLE_ENDIAN__
#define vld3_s16(__p0) __extension__ ({ \
  int16x4x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 1); \
  __ret; \
})
#else
#define vld3_s16(__p0) __extension__ ({ \
  int16x4x3_t __ret; \
  __builtin_neon_vld3_v(&__ret, __p0, 1); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
  __ret; \
})
#endif

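/* vld3_dup: load a single 3-element structure from memory and replicate each
   of its elements across all lanes of the corresponding result vector. */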
#ifdef __LITTLE_ENDIAN__
#define vld3_dup_p8(__p0) __extension__ ({ \
  poly8x8x3_t __ret; \
  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
  __ret; \
})
#else
#define vld3_dup_p8(__p0) __extension__ ({ \
  poly8x8x3_t __ret; \
  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3_dup_p16(__p0) __extension__ ({ \
  poly16x4x3_t __ret; \
  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
  __ret; \
})
#else
#define vld3_dup_p16(__p0) __extension__ ({ \
  poly16x4x3_t __ret; \
  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3q_dup_p8(__p0) __extension__ ({ \
  poly8x16x3_t __ret; \
  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
  __ret; \
})
#else
#define vld3q_dup_p8(__p0) __extension__ ({ \
  poly8x16x3_t __ret; \
  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3q_dup_p16(__p0) __extension__ ({ \
  poly16x8x3_t __ret; \
  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
  __ret; \
})
#else
#define vld3q_dup_p16(__p0) __extension__ ({ \
  poly16x8x3_t __ret; \
  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_u8(__p0) __extension__ ({ \
|
|
uint8x16x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_u8(__p0) __extension__ ({ \
|
|
uint8x16x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_u32(__p0) __extension__ ({ \
|
|
uint32x4x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_u32(__p0) __extension__ ({ \
|
|
uint32x4x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_u64(__p0) __extension__ ({ \
|
|
uint64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_u64(__p0) __extension__ ({ \
|
|
uint64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_u16(__p0) __extension__ ({ \
|
|
uint16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_u16(__p0) __extension__ ({ \
|
|
uint16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_s8(__p0) __extension__ ({ \
|
|
int8x16x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_s8(__p0) __extension__ ({ \
|
|
int8x16x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_f32(__p0) __extension__ ({ \
|
|
float32x4x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_f32(__p0) __extension__ ({ \
|
|
float32x4x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_s32(__p0) __extension__ ({ \
|
|
int32x4x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_s32(__p0) __extension__ ({ \
|
|
int32x4x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_s64(__p0) __extension__ ({ \
|
|
int64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_s64(__p0) __extension__ ({ \
|
|
int64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_s16(__p0) __extension__ ({ \
|
|
int16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_s16(__p0) __extension__ ({ \
|
|
int16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_dup_u8(__p0) __extension__ ({ \
|
|
uint8x8x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_dup_u8(__p0) __extension__ ({ \
|
|
uint8x8x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_dup_u32(__p0) __extension__ ({ \
|
|
uint32x2x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_dup_u32(__p0) __extension__ ({ \
|
|
uint32x2x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld3_dup_u64(__p0) __extension__ ({ \
|
|
uint64x1x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_dup_u16(__p0) __extension__ ({ \
|
|
uint16x4x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_dup_u16(__p0) __extension__ ({ \
|
|
uint16x4x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_dup_s8(__p0) __extension__ ({ \
|
|
int8x8x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_dup_s8(__p0) __extension__ ({ \
|
|
int8x8x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_dup_f32(__p0) __extension__ ({ \
|
|
float32x2x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_dup_f32(__p0) __extension__ ({ \
|
|
float32x2x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_dup_s32(__p0) __extension__ ({ \
|
|
int32x2x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_dup_s32(__p0) __extension__ ({ \
|
|
int32x2x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld3_dup_s64(__p0) __extension__ ({ \
|
|
int64x1x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_dup_s16(__p0) __extension__ ({ \
|
|
int16x4x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_dup_s16(__p0) __extension__ ({ \
|
|
int16x4x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
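/* vld3_lane: load one 3-element structure into the selected lane of three
   existing vectors. On big-endian targets the source vectors are lane-reversed
   before the builtin call and the results are reversed back afterwards. */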
#ifdef __LITTLE_ENDIAN__
#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  poly8x8x3_t __ret; \
  poly8x8x3_t __s1 = __p1; \
  __builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 4); \
  __ret; \
})
#else
#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  poly8x8x3_t __ret; \
  poly8x8x3_t __s1 = __p1; \
  poly8x8x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
  __builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 4); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  poly16x4x3_t __ret; \
  poly16x4x3_t __s1 = __p1; \
  __builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 5); \
  __ret; \
})
#else
#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  poly16x4x3_t __ret; \
  poly16x4x3_t __s1 = __p1; \
  poly16x4x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
  __builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 5); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8x3_t __ret; \
|
|
poly16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 37); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8x3_t __ret; \
|
|
poly16x8x3_t __s1 = __p1; \
|
|
poly16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 37); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x3_t __ret; \
|
|
uint32x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x3_t __ret; \
|
|
uint32x4x3_t __s1 = __p1; \
|
|
uint32x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x3_t __ret; \
|
|
uint16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x3_t __ret; \
|
|
uint16x8x3_t __s1 = __p1; \
|
|
uint16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x3_t __ret; \
|
|
float32x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x3_t __ret; \
|
|
float32x4x3_t __s1 = __p1; \
|
|
float32x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x3_t __ret; \
|
|
int32x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x3_t __ret; \
|
|
int32x4x3_t __s1 = __p1; \
|
|
int32x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x3_t __ret; \
|
|
int16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x3_t __ret; \
|
|
int16x8x3_t __s1 = __p1; \
|
|
int16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x3_t __ret; \
|
|
uint8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x3_t __ret; \
|
|
uint8x8x3_t __s1 = __p1; \
|
|
uint8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x3_t __ret; \
|
|
uint32x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x3_t __ret; \
|
|
uint32x2x3_t __s1 = __p1; \
|
|
uint32x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x3_t __ret; \
|
|
uint16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x3_t __ret; \
|
|
uint16x4x3_t __s1 = __p1; \
|
|
uint16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x3_t __ret; \
|
|
int8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x3_t __ret; \
|
|
int8x8x3_t __s1 = __p1; \
|
|
int8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x3_t __ret; \
|
|
float32x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x3_t __ret; \
|
|
float32x2x3_t __s1 = __p1; \
|
|
float32x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x3_t __ret; \
|
|
int32x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x3_t __ret; \
|
|
int32x2x3_t __s1 = __p1; \
|
|
int32x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x3_t __ret; \
|
|
int16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x3_t __ret; \
|
|
int16x4x3_t __s1 = __p1; \
|
|
int16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
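/* vld4: de-interleaving loads of 4-element structures; each macro fills a
   four-vector struct (val[0]..val[3]) from consecutive structure elements. */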
#ifdef __LITTLE_ENDIAN__
#define vld4_p8(__p0) __extension__ ({ \
  poly8x8x4_t __ret; \
  __builtin_neon_vld4_v(&__ret, __p0, 4); \
  __ret; \
})
#else
#define vld4_p8(__p0) __extension__ ({ \
  poly8x8x4_t __ret; \
  __builtin_neon_vld4_v(&__ret, __p0, 4); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld4_p16(__p0) __extension__ ({ \
  poly16x4x4_t __ret; \
  __builtin_neon_vld4_v(&__ret, __p0, 5); \
  __ret; \
})
#else
#define vld4_p16(__p0) __extension__ ({ \
  poly16x4x4_t __ret; \
  __builtin_neon_vld4_v(&__ret, __p0, 5); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_p8(__p0) __extension__ ({ \
|
|
poly8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_p8(__p0) __extension__ ({ \
|
|
poly8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_p16(__p0) __extension__ ({ \
|
|
poly16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 37); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_p16(__p0) __extension__ ({ \
|
|
poly16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 37); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_u8(__p0) __extension__ ({ \
|
|
uint8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 48); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_u8(__p0) __extension__ ({ \
|
|
uint8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 48); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_u32(__p0) __extension__ ({ \
|
|
uint32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_u32(__p0) __extension__ ({ \
|
|
uint32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_u16(__p0) __extension__ ({ \
|
|
uint16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_u16(__p0) __extension__ ({ \
|
|
uint16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_s8(__p0) __extension__ ({ \
|
|
int8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_s8(__p0) __extension__ ({ \
|
|
int8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_f32(__p0) __extension__ ({ \
|
|
float32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_f32(__p0) __extension__ ({ \
|
|
float32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_s32(__p0) __extension__ ({ \
|
|
int32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_s32(__p0) __extension__ ({ \
|
|
int32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_s16(__p0) __extension__ ({ \
|
|
int16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_s16(__p0) __extension__ ({ \
|
|
int16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_u8(__p0) __extension__ ({ \
|
|
uint8x8x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_u8(__p0) __extension__ ({ \
|
|
uint8x8x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_u32(__p0) __extension__ ({ \
|
|
uint32x2x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_u32(__p0) __extension__ ({ \
|
|
uint32x2x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld4_u64(__p0) __extension__ ({ \
|
|
uint64x1x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 19); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_u16(__p0) __extension__ ({ \
|
|
uint16x4x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_u16(__p0) __extension__ ({ \
|
|
uint16x4x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_s8(__p0) __extension__ ({ \
|
|
int8x8x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_s8(__p0) __extension__ ({ \
|
|
int8x8x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_f32(__p0) __extension__ ({ \
|
|
float32x2x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_f32(__p0) __extension__ ({ \
|
|
float32x2x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_s32(__p0) __extension__ ({ \
|
|
int32x2x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_s32(__p0) __extension__ ({ \
|
|
int32x2x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld4_s64(__p0) __extension__ ({ \
|
|
int64x1x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 3); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_s16(__p0) __extension__ ({ \
|
|
int16x4x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_s16(__p0) __extension__ ({ \
|
|
int16x4x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
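/* vld4_dup: load a single 4-element structure and replicate each element
   across all lanes of the corresponding result vector. */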
#ifdef __LITTLE_ENDIAN__
#define vld4_dup_p8(__p0) __extension__ ({ \
  poly8x8x4_t __ret; \
  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
  __ret; \
})
#else
#define vld4_dup_p8(__p0) __extension__ ({ \
  poly8x8x4_t __ret; \
  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld4_dup_p16(__p0) __extension__ ({ \
  poly16x4x4_t __ret; \
  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
  __ret; \
})
#else
#define vld4_dup_p16(__p0) __extension__ ({ \
  poly16x4x4_t __ret; \
  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
\
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_p8(__p0) __extension__ ({ \
|
|
poly8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_p8(__p0) __extension__ ({ \
|
|
poly8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_p16(__p0) __extension__ ({ \
|
|
poly16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_p16(__p0) __extension__ ({ \
|
|
poly16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_u8(__p0) __extension__ ({ \
|
|
uint8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_u8(__p0) __extension__ ({ \
|
|
uint8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_u32(__p0) __extension__ ({ \
|
|
uint32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_u32(__p0) __extension__ ({ \
|
|
uint32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_u64(__p0) __extension__ ({ \
|
|
uint64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_u64(__p0) __extension__ ({ \
|
|
uint64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_u16(__p0) __extension__ ({ \
|
|
uint16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_u16(__p0) __extension__ ({ \
|
|
uint16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_s8(__p0) __extension__ ({ \
|
|
int8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_s8(__p0) __extension__ ({ \
|
|
int8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_f32(__p0) __extension__ ({ \
|
|
float32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_f32(__p0) __extension__ ({ \
|
|
float32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_s32(__p0) __extension__ ({ \
|
|
int32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_s32(__p0) __extension__ ({ \
|
|
int32x4x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_s64(__p0) __extension__ ({ \
|
|
int64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_s64(__p0) __extension__ ({ \
|
|
int64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_s16(__p0) __extension__ ({ \
|
|
int16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_s16(__p0) __extension__ ({ \
|
|
int16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_dup_u8(__p0) __extension__ ({ \
|
|
uint8x8x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_dup_u8(__p0) __extension__ ({ \
|
|
uint8x8x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_dup_u32(__p0) __extension__ ({ \
|
|
uint32x2x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_dup_u32(__p0) __extension__ ({ \
|
|
uint32x2x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld4_dup_u64(__p0) __extension__ ({ \
|
|
uint64x1x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_dup_u16(__p0) __extension__ ({ \
|
|
uint16x4x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_dup_u16(__p0) __extension__ ({ \
|
|
uint16x4x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_dup_s8(__p0) __extension__ ({ \
|
|
int8x8x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_dup_s8(__p0) __extension__ ({ \
|
|
int8x8x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_dup_f32(__p0) __extension__ ({ \
|
|
float32x2x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_dup_f32(__p0) __extension__ ({ \
|
|
float32x2x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_dup_s32(__p0) __extension__ ({ \
|
|
int32x2x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_dup_s32(__p0) __extension__ ({ \
|
|
int32x2x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld4_dup_s64(__p0) __extension__ ({ \
|
|
int64x1x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_dup_s16(__p0) __extension__ ({ \
|
|
int16x4x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_dup_s16(__p0) __extension__ ({ \
|
|
int16x4x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
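/*
 * The vld4_lane/vld4q_lane definitions that follow load four consecutive
 * elements from memory into one lane of an existing four-vector tuple and
 * return the updated tuple.  On big-endian targets the source tuple is
 * lane-reversed before the builtin call and the result is reversed back.
 * A minimal usage sketch (illustrative only; `p` is a hypothetical
 * const uint16_t pointer):
 *
 *   uint16x4x4_t acc = vld4_dup_u16(p);      // broadcast-load p[0..3]
 *   acc = vld4_lane_u16(p + 4, acc, 2);      // refill lane 2 from p[4..7]
 */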
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8x4_t __ret; \
|
|
poly8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 4); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8x4_t __ret; \
|
|
poly8x8x4_t __s1 = __p1; \
|
|
poly8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 4); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4x4_t __ret; \
|
|
poly16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 5); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4x4_t __ret; \
|
|
poly16x4x4_t __s1 = __p1; \
|
|
poly16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 5); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8x4_t __ret; \
|
|
poly16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 37); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8x4_t __ret; \
|
|
poly16x8x4_t __s1 = __p1; \
|
|
poly16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 37); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x4_t __ret; \
|
|
uint32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 50); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x4_t __ret; \
|
|
uint32x4x4_t __s1 = __p1; \
|
|
uint32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 50); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x4_t __ret; \
|
|
uint16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 49); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x4_t __ret; \
|
|
uint16x8x4_t __s1 = __p1; \
|
|
uint16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 49); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x4_t __ret; \
|
|
float32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 41); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x4_t __ret; \
|
|
float32x4x4_t __s1 = __p1; \
|
|
float32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 41); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x4_t __ret; \
|
|
int32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 34); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x4_t __ret; \
|
|
int32x4x4_t __s1 = __p1; \
|
|
int32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 34); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x4_t __ret; \
|
|
int16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 33); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x4_t __ret; \
|
|
int16x8x4_t __s1 = __p1; \
|
|
int16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 33); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x4_t __ret; \
|
|
uint8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 16); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x4_t __ret; \
|
|
uint8x8x4_t __s1 = __p1; \
|
|
uint8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 16); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x4_t __ret; \
|
|
uint32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 18); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x4_t __ret; \
|
|
uint32x2x4_t __s1 = __p1; \
|
|
uint32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 18); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x4_t __ret; \
|
|
uint16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 17); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x4_t __ret; \
|
|
uint16x4x4_t __s1 = __p1; \
|
|
uint16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 17); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x4_t __ret; \
|
|
int8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 0); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x4_t __ret; \
|
|
int8x8x4_t __s1 = __p1; \
|
|
int8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 0); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x4_t __ret; \
|
|
float32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 9); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x4_t __ret; \
|
|
float32x2x4_t __s1 = __p1; \
|
|
float32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 9); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x4_t __ret; \
|
|
int32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 2); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x4_t __ret; \
|
|
int32x2x4_t __s1 = __p1; \
|
|
int32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 2); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_32); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x4_t __ret; \
|
|
int16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 1); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x4_t __ret; \
|
|
int16x4x4_t __s1 = __p1; \
|
|
int16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 1); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
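/*
 * The vmax/vmin families below compute the lane-wise maximum or minimum of
 * two vectors through the underlying __builtin_neon_* calls; the big-endian
 * variants lane-reverse the operands and the result, following the same
 * pattern as the loads above.  Illustrative use (hypothetical values):
 *
 *   int16x4_t a = vdup_n_s16(3), b = vdup_n_s16(7);
 *   int16x4_t m = vmax_s16(a, b);            // every lane is 7
 */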
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif
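/* vmlaq_* and vmla_*: vector multiply-accumulate, computed element-wise as
   ret = a + b * c (the "q" forms operate on 128-bit vectors, the non-"q"
   forms on 64-bit vectors). On big-endian targets the operands are reversed
   into little-endian lane order, combined, and the result is reversed back,
   following the same pattern as the other intrinsics in this file. */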
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint8x16_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint8x16_t __ret;
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  uint16x8_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  int8x16_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  int8x16_t __ret;
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  float32x4_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int32x4_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int16x8_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint8x8_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint8x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  uint32x2_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  uint32x2_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  uint16x4_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  uint16x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  int8x8_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  int8x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  float32x2_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int32x2_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int32x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int16x4_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif
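/* vmlaq_lane_* and vmla_lane_*: multiply-accumulate against a single lane of
   the third vector, ret = a + b * c[lane]. These are macros because the lane
   index must be a compile-time constant; the big-endian forms use the
   __noswap_ splat helpers on operands that are already lane-reversed. */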
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaq_lane_u32(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
|
|
uint32x4_t __ret_54; \
|
|
uint32x4_t __s0_54 = __p0_54; \
|
|
uint32x4_t __s1_54 = __p1_54; \
|
|
uint32x2_t __s2_54 = __p2_54; \
|
|
__ret_54 = __s0_54 + __s1_54 * splatq_lane_u32(__s2_54, __p3_54); \
|
|
__ret_54; \
|
|
})
|
|
#else
|
|
#define vmlaq_lane_u32(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
|
|
uint32x4_t __ret_55; \
|
|
uint32x4_t __s0_55 = __p0_55; \
|
|
uint32x4_t __s1_55 = __p1_55; \
|
|
uint32x2_t __s2_55 = __p2_55; \
|
|
uint32x4_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, __lane_reverse_128_32); \
|
|
uint32x2_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, __lane_reverse_64_32); \
|
|
__ret_55 = __rev0_55 + __rev1_55 * __noswap_splatq_lane_u32(__rev2_55, __p3_55); \
|
|
__ret_55 = __builtin_shufflevector(__ret_55, __ret_55, __lane_reverse_128_32); \
|
|
__ret_55; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaq_lane_u16(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
|
|
uint16x8_t __ret_56; \
|
|
uint16x8_t __s0_56 = __p0_56; \
|
|
uint16x8_t __s1_56 = __p1_56; \
|
|
uint16x4_t __s2_56 = __p2_56; \
|
|
__ret_56 = __s0_56 + __s1_56 * splatq_lane_u16(__s2_56, __p3_56); \
|
|
__ret_56; \
|
|
})
|
|
#else
|
|
#define vmlaq_lane_u16(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
|
|
uint16x8_t __ret_57; \
|
|
uint16x8_t __s0_57 = __p0_57; \
|
|
uint16x8_t __s1_57 = __p1_57; \
|
|
uint16x4_t __s2_57 = __p2_57; \
|
|
uint16x8_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, __lane_reverse_128_16); \
|
|
uint16x4_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, __lane_reverse_64_16); \
|
|
__ret_57 = __rev0_57 + __rev1_57 * __noswap_splatq_lane_u16(__rev2_57, __p3_57); \
|
|
__ret_57 = __builtin_shufflevector(__ret_57, __ret_57, __lane_reverse_128_16); \
|
|
__ret_57; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaq_lane_f32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
|
|
float32x4_t __ret_58; \
|
|
float32x4_t __s0_58 = __p0_58; \
|
|
float32x4_t __s1_58 = __p1_58; \
|
|
float32x2_t __s2_58 = __p2_58; \
|
|
__ret_58 = __s0_58 + __s1_58 * splatq_lane_f32(__s2_58, __p3_58); \
|
|
__ret_58; \
|
|
})
|
|
#else
|
|
#define vmlaq_lane_f32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
|
|
float32x4_t __ret_59; \
|
|
float32x4_t __s0_59 = __p0_59; \
|
|
float32x4_t __s1_59 = __p1_59; \
|
|
float32x2_t __s2_59 = __p2_59; \
|
|
float32x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, __lane_reverse_128_32); \
|
|
float32x2_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, __lane_reverse_64_32); \
|
|
__ret_59 = __rev0_59 + __rev1_59 * __noswap_splatq_lane_f32(__rev2_59, __p3_59); \
|
|
__ret_59 = __builtin_shufflevector(__ret_59, __ret_59, __lane_reverse_128_32); \
|
|
__ret_59; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaq_lane_s32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
|
|
int32x4_t __ret_60; \
|
|
int32x4_t __s0_60 = __p0_60; \
|
|
int32x4_t __s1_60 = __p1_60; \
|
|
int32x2_t __s2_60 = __p2_60; \
|
|
__ret_60 = __s0_60 + __s1_60 * splatq_lane_s32(__s2_60, __p3_60); \
|
|
__ret_60; \
|
|
})
|
|
#else
|
|
#define vmlaq_lane_s32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
|
|
int32x4_t __ret_61; \
|
|
int32x4_t __s0_61 = __p0_61; \
|
|
int32x4_t __s1_61 = __p1_61; \
|
|
int32x2_t __s2_61 = __p2_61; \
|
|
int32x4_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, __lane_reverse_128_32); \
|
|
int32x4_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, __lane_reverse_128_32); \
|
|
int32x2_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, __lane_reverse_64_32); \
|
|
__ret_61 = __rev0_61 + __rev1_61 * __noswap_splatq_lane_s32(__rev2_61, __p3_61); \
|
|
__ret_61 = __builtin_shufflevector(__ret_61, __ret_61, __lane_reverse_128_32); \
|
|
__ret_61; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaq_lane_s16(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
|
|
int16x8_t __ret_62; \
|
|
int16x8_t __s0_62 = __p0_62; \
|
|
int16x8_t __s1_62 = __p1_62; \
|
|
int16x4_t __s2_62 = __p2_62; \
|
|
__ret_62 = __s0_62 + __s1_62 * splatq_lane_s16(__s2_62, __p3_62); \
|
|
__ret_62; \
|
|
})
|
|
#else
|
|
#define vmlaq_lane_s16(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
|
|
int16x8_t __ret_63; \
|
|
int16x8_t __s0_63 = __p0_63; \
|
|
int16x8_t __s1_63 = __p1_63; \
|
|
int16x4_t __s2_63 = __p2_63; \
|
|
int16x8_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, __lane_reverse_128_16); \
|
|
int16x8_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, __lane_reverse_128_16); \
|
|
int16x4_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, __lane_reverse_64_16); \
|
|
__ret_63 = __rev0_63 + __rev1_63 * __noswap_splatq_lane_s16(__rev2_63, __p3_63); \
|
|
__ret_63 = __builtin_shufflevector(__ret_63, __ret_63, __lane_reverse_128_16); \
|
|
__ret_63; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmla_lane_u32(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
|
|
uint32x2_t __ret_64; \
|
|
uint32x2_t __s0_64 = __p0_64; \
|
|
uint32x2_t __s1_64 = __p1_64; \
|
|
uint32x2_t __s2_64 = __p2_64; \
|
|
__ret_64 = __s0_64 + __s1_64 * splat_lane_u32(__s2_64, __p3_64); \
|
|
__ret_64; \
|
|
})
|
|
#else
|
|
#define vmla_lane_u32(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
|
|
uint32x2_t __ret_65; \
|
|
uint32x2_t __s0_65 = __p0_65; \
|
|
uint32x2_t __s1_65 = __p1_65; \
|
|
uint32x2_t __s2_65 = __p2_65; \
|
|
uint32x2_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, __lane_reverse_64_32); \
|
|
uint32x2_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, __lane_reverse_64_32); \
|
|
uint32x2_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, __lane_reverse_64_32); \
|
|
__ret_65 = __rev0_65 + __rev1_65 * __noswap_splat_lane_u32(__rev2_65, __p3_65); \
|
|
__ret_65 = __builtin_shufflevector(__ret_65, __ret_65, __lane_reverse_64_32); \
|
|
__ret_65; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmla_lane_u16(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
|
|
uint16x4_t __ret_66; \
|
|
uint16x4_t __s0_66 = __p0_66; \
|
|
uint16x4_t __s1_66 = __p1_66; \
|
|
uint16x4_t __s2_66 = __p2_66; \
|
|
__ret_66 = __s0_66 + __s1_66 * splat_lane_u16(__s2_66, __p3_66); \
|
|
__ret_66; \
|
|
})
|
|
#else
|
|
#define vmla_lane_u16(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
|
|
uint16x4_t __ret_67; \
|
|
uint16x4_t __s0_67 = __p0_67; \
|
|
uint16x4_t __s1_67 = __p1_67; \
|
|
uint16x4_t __s2_67 = __p2_67; \
|
|
uint16x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, __lane_reverse_64_16); \
|
|
uint16x4_t __rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, __lane_reverse_64_16); \
|
|
uint16x4_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, __lane_reverse_64_16); \
|
|
__ret_67 = __rev0_67 + __rev1_67 * __noswap_splat_lane_u16(__rev2_67, __p3_67); \
|
|
__ret_67 = __builtin_shufflevector(__ret_67, __ret_67, __lane_reverse_64_16); \
|
|
__ret_67; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmla_lane_f32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
|
|
float32x2_t __ret_68; \
|
|
float32x2_t __s0_68 = __p0_68; \
|
|
float32x2_t __s1_68 = __p1_68; \
|
|
float32x2_t __s2_68 = __p2_68; \
|
|
__ret_68 = __s0_68 + __s1_68 * splat_lane_f32(__s2_68, __p3_68); \
|
|
__ret_68; \
|
|
})
|
|
#else
|
|
#define vmla_lane_f32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
|
|
float32x2_t __ret_69; \
|
|
float32x2_t __s0_69 = __p0_69; \
|
|
float32x2_t __s1_69 = __p1_69; \
|
|
float32x2_t __s2_69 = __p2_69; \
|
|
float32x2_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, __lane_reverse_64_32); \
|
|
float32x2_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, __lane_reverse_64_32); \
|
|
__ret_69 = __rev0_69 + __rev1_69 * __noswap_splat_lane_f32(__rev2_69, __p3_69); \
|
|
__ret_69 = __builtin_shufflevector(__ret_69, __ret_69, __lane_reverse_64_32); \
|
|
__ret_69; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmla_lane_s32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
|
|
int32x2_t __ret_70; \
|
|
int32x2_t __s0_70 = __p0_70; \
|
|
int32x2_t __s1_70 = __p1_70; \
|
|
int32x2_t __s2_70 = __p2_70; \
|
|
__ret_70 = __s0_70 + __s1_70 * splat_lane_s32(__s2_70, __p3_70); \
|
|
__ret_70; \
|
|
})
|
|
#else
|
|
#define vmla_lane_s32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
|
|
int32x2_t __ret_71; \
|
|
int32x2_t __s0_71 = __p0_71; \
|
|
int32x2_t __s1_71 = __p1_71; \
|
|
int32x2_t __s2_71 = __p2_71; \
|
|
int32x2_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, __lane_reverse_64_32); \
|
|
int32x2_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, __lane_reverse_64_32); \
|
|
int32x2_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, __lane_reverse_64_32); \
|
|
__ret_71 = __rev0_71 + __rev1_71 * __noswap_splat_lane_s32(__rev2_71, __p3_71); \
|
|
__ret_71 = __builtin_shufflevector(__ret_71, __ret_71, __lane_reverse_64_32); \
|
|
__ret_71; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmla_lane_s16(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
|
|
int16x4_t __ret_72; \
|
|
int16x4_t __s0_72 = __p0_72; \
|
|
int16x4_t __s1_72 = __p1_72; \
|
|
int16x4_t __s2_72 = __p2_72; \
|
|
__ret_72 = __s0_72 + __s1_72 * splat_lane_s16(__s2_72, __p3_72); \
|
|
__ret_72; \
|
|
})
|
|
#else
|
|
#define vmla_lane_s16(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
|
|
int16x4_t __ret_73; \
|
|
int16x4_t __s0_73 = __p0_73; \
|
|
int16x4_t __s1_73 = __p1_73; \
|
|
int16x4_t __s2_73 = __p2_73; \
|
|
int16x4_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, __lane_reverse_64_16); \
|
|
int16x4_t __rev1_73; __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, __lane_reverse_64_16); \
|
|
int16x4_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, __lane_reverse_64_16); \
|
|
__ret_73 = __rev0_73 + __rev1_73 * __noswap_splat_lane_s16(__rev2_73, __p3_73); \
|
|
__ret_73 = __builtin_shufflevector(__ret_73, __ret_73, __lane_reverse_64_16); \
|
|
__ret_73; \
|
|
})
|
|
#endif
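/* vmlaq_n_* and vmla_n_*: multiply-accumulate by a scalar, ret = a + b * n,
   with the scalar broadcast across all lanes via a compound literal such as
   (uint32x4_t) {__p2, __p2, __p2, __p2}. */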
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
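/* vmlsq_* and vmls_*: vector multiply-subtract, ret = a - b * c, mirroring
   the vmlaq_* and vmla_* definitions above. */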
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
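/* vmlsq_lane_* and vmls_lane_*: multiply-subtract against a single lane of
   the third vector, ret = a - b * c[lane]; the lane index must be a
   compile-time constant, hence the macro form. */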
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsq_lane_u32(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
|
|
uint32x4_t __ret_74; \
|
|
uint32x4_t __s0_74 = __p0_74; \
|
|
uint32x4_t __s1_74 = __p1_74; \
|
|
uint32x2_t __s2_74 = __p2_74; \
|
|
__ret_74 = __s0_74 - __s1_74 * splatq_lane_u32(__s2_74, __p3_74); \
|
|
__ret_74; \
|
|
})
|
|
#else
|
|
#define vmlsq_lane_u32(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
|
|
uint32x4_t __ret_75; \
|
|
uint32x4_t __s0_75 = __p0_75; \
|
|
uint32x4_t __s1_75 = __p1_75; \
|
|
uint32x2_t __s2_75 = __p2_75; \
|
|
uint32x4_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, __lane_reverse_128_32); \
|
|
uint32x2_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, __lane_reverse_64_32); \
|
|
__ret_75 = __rev0_75 - __rev1_75 * __noswap_splatq_lane_u32(__rev2_75, __p3_75); \
|
|
__ret_75 = __builtin_shufflevector(__ret_75, __ret_75, __lane_reverse_128_32); \
|
|
__ret_75; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsq_lane_u16(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
|
|
uint16x8_t __ret_76; \
|
|
uint16x8_t __s0_76 = __p0_76; \
|
|
uint16x8_t __s1_76 = __p1_76; \
|
|
uint16x4_t __s2_76 = __p2_76; \
|
|
__ret_76 = __s0_76 - __s1_76 * splatq_lane_u16(__s2_76, __p3_76); \
|
|
__ret_76; \
|
|
})
|
|
#else
|
|
#define vmlsq_lane_u16(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
|
|
uint16x8_t __ret_77; \
|
|
uint16x8_t __s0_77 = __p0_77; \
|
|
uint16x8_t __s1_77 = __p1_77; \
|
|
uint16x4_t __s2_77 = __p2_77; \
|
|
uint16x8_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, __lane_reverse_128_16); \
|
|
uint16x4_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, __lane_reverse_64_16); \
|
|
__ret_77 = __rev0_77 - __rev1_77 * __noswap_splatq_lane_u16(__rev2_77, __p3_77); \
|
|
__ret_77 = __builtin_shufflevector(__ret_77, __ret_77, __lane_reverse_128_16); \
|
|
__ret_77; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsq_lane_f32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
|
|
float32x4_t __ret_78; \
|
|
float32x4_t __s0_78 = __p0_78; \
|
|
float32x4_t __s1_78 = __p1_78; \
|
|
float32x2_t __s2_78 = __p2_78; \
|
|
__ret_78 = __s0_78 - __s1_78 * splatq_lane_f32(__s2_78, __p3_78); \
|
|
__ret_78; \
|
|
})
|
|
#else
|
|
#define vmlsq_lane_f32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
|
|
float32x4_t __ret_79; \
|
|
float32x4_t __s0_79 = __p0_79; \
|
|
float32x4_t __s1_79 = __p1_79; \
|
|
float32x2_t __s2_79 = __p2_79; \
|
|
float32x4_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, __lane_reverse_128_32); \
|
|
float32x2_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, __lane_reverse_64_32); \
|
|
__ret_79 = __rev0_79 - __rev1_79 * __noswap_splatq_lane_f32(__rev2_79, __p3_79); \
|
|
__ret_79 = __builtin_shufflevector(__ret_79, __ret_79, __lane_reverse_128_32); \
|
|
__ret_79; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsq_lane_s32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
|
|
int32x4_t __ret_80; \
|
|
int32x4_t __s0_80 = __p0_80; \
|
|
int32x4_t __s1_80 = __p1_80; \
|
|
int32x2_t __s2_80 = __p2_80; \
|
|
__ret_80 = __s0_80 - __s1_80 * splatq_lane_s32(__s2_80, __p3_80); \
|
|
__ret_80; \
|
|
})
|
|
#else
|
|
#define vmlsq_lane_s32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
|
|
int32x4_t __ret_81; \
|
|
int32x4_t __s0_81 = __p0_81; \
|
|
int32x4_t __s1_81 = __p1_81; \
|
|
int32x2_t __s2_81 = __p2_81; \
|
|
int32x4_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, __lane_reverse_128_32); \
|
|
int32x4_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, __lane_reverse_128_32); \
|
|
int32x2_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, __lane_reverse_64_32); \
|
|
__ret_81 = __rev0_81 - __rev1_81 * __noswap_splatq_lane_s32(__rev2_81, __p3_81); \
|
|
__ret_81 = __builtin_shufflevector(__ret_81, __ret_81, __lane_reverse_128_32); \
|
|
__ret_81; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsq_lane_s16(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
|
|
int16x8_t __ret_82; \
|
|
int16x8_t __s0_82 = __p0_82; \
|
|
int16x8_t __s1_82 = __p1_82; \
|
|
int16x4_t __s2_82 = __p2_82; \
|
|
__ret_82 = __s0_82 - __s1_82 * splatq_lane_s16(__s2_82, __p3_82); \
|
|
__ret_82; \
|
|
})
|
|
#else
|
|
#define vmlsq_lane_s16(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
|
|
int16x8_t __ret_83; \
|
|
int16x8_t __s0_83 = __p0_83; \
|
|
int16x8_t __s1_83 = __p1_83; \
|
|
int16x4_t __s2_83 = __p2_83; \
|
|
int16x8_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, __lane_reverse_128_16); \
|
|
int16x8_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, __lane_reverse_128_16); \
|
|
int16x4_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, __lane_reverse_64_16); \
|
|
__ret_83 = __rev0_83 - __rev1_83 * __noswap_splatq_lane_s16(__rev2_83, __p3_83); \
|
|
__ret_83 = __builtin_shufflevector(__ret_83, __ret_83, __lane_reverse_128_16); \
|
|
__ret_83; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmls_lane_u32(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
|
|
uint32x2_t __ret_84; \
|
|
uint32x2_t __s0_84 = __p0_84; \
|
|
uint32x2_t __s1_84 = __p1_84; \
|
|
uint32x2_t __s2_84 = __p2_84; \
|
|
__ret_84 = __s0_84 - __s1_84 * splat_lane_u32(__s2_84, __p3_84); \
|
|
__ret_84; \
|
|
})
|
|
#else
|
|
#define vmls_lane_u32(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
|
|
uint32x2_t __ret_85; \
|
|
uint32x2_t __s0_85 = __p0_85; \
|
|
uint32x2_t __s1_85 = __p1_85; \
|
|
uint32x2_t __s2_85 = __p2_85; \
|
|
uint32x2_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, __lane_reverse_64_32); \
|
|
uint32x2_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, __lane_reverse_64_32); \
|
|
uint32x2_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, __lane_reverse_64_32); \
|
|
__ret_85 = __rev0_85 - __rev1_85 * __noswap_splat_lane_u32(__rev2_85, __p3_85); \
|
|
__ret_85 = __builtin_shufflevector(__ret_85, __ret_85, __lane_reverse_64_32); \
|
|
__ret_85; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmls_lane_u16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \
|
|
uint16x4_t __ret_86; \
|
|
uint16x4_t __s0_86 = __p0_86; \
|
|
uint16x4_t __s1_86 = __p1_86; \
|
|
uint16x4_t __s2_86 = __p2_86; \
|
|
__ret_86 = __s0_86 - __s1_86 * splat_lane_u16(__s2_86, __p3_86); \
|
|
__ret_86; \
|
|
})
|
|
#else
|
|
#define vmls_lane_u16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \
|
|
uint16x4_t __ret_87; \
|
|
uint16x4_t __s0_87 = __p0_87; \
|
|
uint16x4_t __s1_87 = __p1_87; \
|
|
uint16x4_t __s2_87 = __p2_87; \
|
|
uint16x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, __lane_reverse_64_16); \
|
|
uint16x4_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, __lane_reverse_64_16); \
|
|
uint16x4_t __rev2_87; __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, __lane_reverse_64_16); \
|
|
__ret_87 = __rev0_87 - __rev1_87 * __noswap_splat_lane_u16(__rev2_87, __p3_87); \
|
|
__ret_87 = __builtin_shufflevector(__ret_87, __ret_87, __lane_reverse_64_16); \
|
|
__ret_87; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmls_lane_f32(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \
|
|
float32x2_t __ret_88; \
|
|
float32x2_t __s0_88 = __p0_88; \
|
|
float32x2_t __s1_88 = __p1_88; \
|
|
float32x2_t __s2_88 = __p2_88; \
|
|
__ret_88 = __s0_88 - __s1_88 * splat_lane_f32(__s2_88, __p3_88); \
|
|
__ret_88; \
|
|
})
|
|
#else
|
|
#define vmls_lane_f32(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \
|
|
float32x2_t __ret_89; \
|
|
float32x2_t __s0_89 = __p0_89; \
|
|
float32x2_t __s1_89 = __p1_89; \
|
|
float32x2_t __s2_89 = __p2_89; \
|
|
float32x2_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, __lane_reverse_64_32); \
|
|
float32x2_t __rev2_89; __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, __lane_reverse_64_32); \
|
|
__ret_89 = __rev0_89 - __rev1_89 * __noswap_splat_lane_f32(__rev2_89, __p3_89); \
|
|
__ret_89 = __builtin_shufflevector(__ret_89, __ret_89, __lane_reverse_64_32); \
|
|
__ret_89; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmls_lane_s32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \
|
|
int32x2_t __ret_90; \
|
|
int32x2_t __s0_90 = __p0_90; \
|
|
int32x2_t __s1_90 = __p1_90; \
|
|
int32x2_t __s2_90 = __p2_90; \
|
|
__ret_90 = __s0_90 - __s1_90 * splat_lane_s32(__s2_90, __p3_90); \
|
|
__ret_90; \
|
|
})
|
|
#else
|
|
#define vmls_lane_s32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \
|
|
int32x2_t __ret_91; \
|
|
int32x2_t __s0_91 = __p0_91; \
|
|
int32x2_t __s1_91 = __p1_91; \
|
|
int32x2_t __s2_91 = __p2_91; \
|
|
int32x2_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, __lane_reverse_64_32); \
|
|
int32x2_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, __lane_reverse_64_32); \
|
|
int32x2_t __rev2_91; __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, __lane_reverse_64_32); \
|
|
__ret_91 = __rev0_91 - __rev1_91 * __noswap_splat_lane_s32(__rev2_91, __p3_91); \
|
|
__ret_91 = __builtin_shufflevector(__ret_91, __ret_91, __lane_reverse_64_32); \
|
|
__ret_91; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmls_lane_s16(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \
|
|
int16x4_t __ret_92; \
|
|
int16x4_t __s0_92 = __p0_92; \
|
|
int16x4_t __s1_92 = __p1_92; \
|
|
int16x4_t __s2_92 = __p2_92; \
|
|
__ret_92 = __s0_92 - __s1_92 * splat_lane_s16(__s2_92, __p3_92); \
|
|
__ret_92; \
|
|
})
|
|
#else
|
|
#define vmls_lane_s16(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \
|
|
int16x4_t __ret_93; \
|
|
int16x4_t __s0_93 = __p0_93; \
|
|
int16x4_t __s1_93 = __p1_93; \
|
|
int16x4_t __s2_93 = __p2_93; \
|
|
int16x4_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, __lane_reverse_64_16); \
|
|
int16x4_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, __lane_reverse_64_16); \
|
|
int16x4_t __rev2_93; __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, __lane_reverse_64_16); \
|
|
__ret_93 = __rev0_93 - __rev1_93 * __noswap_splat_lane_s16(__rev2_93, __p3_93); \
|
|
__ret_93 = __builtin_shufflevector(__ret_93, __ret_93, __lane_reverse_64_16); \
|
|
__ret_93; \
|
|
})
|
|
#endif
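/* vmlsq_n_* and vmls_n_*: multiply-subtract by a scalar broadcast across all
   lanes, ret = a - b * n. */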
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
  uint32x4_t __ret;
  __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
  uint16x8_t __ret;
  __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  float32x4_t __ret;
  __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
  int32x4_t __ret;
  __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
  int16x8_t __ret;
  __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  uint32x2_t __ret;
  __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  uint32x2_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  uint16x4_t __ret;
  __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  uint16x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  float32x2_t __ret;
  __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
  int32x2_t __ret;
  __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
  int32x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
  int16x4_t __ret;
  __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
  int16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x8_t vmov_n_p8(poly8_t __p0) {
  poly8x8_t __ret;
  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x8_t vmov_n_p8(poly8_t __p0) {
  poly8x8_t __ret;
  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly16x4_t vmov_n_p16(poly16_t __p0) {
  poly16x4_t __ret;
  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly16x4_t vmov_n_p16(poly16_t __p0) {
  poly16x4_t __ret;
  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x16_t vmovq_n_p8(poly8_t __p0) {
  poly8x16_t __ret;
  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x16_t vmovq_n_p8(poly8_t __p0) {
  poly8x16_t __ret;
  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly16x8_t vmovq_n_p16(poly16_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly16x8_t vmovq_n_p16(poly16_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vmovq_n_u8(uint8_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vmovq_n_u8(uint8_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vmovq_n_u32(uint32_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vmovq_n_u32(uint32_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vmovq_n_u64(uint64_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t) {__p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vmovq_n_u64(uint64_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t) {__p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vmovq_n_u16(uint16_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vmovq_n_u16(uint16_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x16_t vmovq_n_s8(int8_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x16_t vmovq_n_s8(int8_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vmovq_n_f32(float32_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vmovq_n_f32(float32_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vmovq_n_f16(__p0) __extension__ ({ \
  float16x8_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
  __ret; \
})
#else
#define vmovq_n_f16(__p0) __extension__ ({ \
  float16x8_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmovq_n_s32(int32_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmovq_n_s32(int32_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vmovq_n_s64(int64_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t) {__p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vmovq_n_s64(int64_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t) {__p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vmovq_n_s16(int16_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vmovq_n_s16(int16_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vmov_n_u8(uint8_t __p0) {
  uint8x8_t __ret;
  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vmov_n_u8(uint8_t __p0) {
  uint8x8_t __ret;
  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vmov_n_u32(uint32_t __p0) {
  uint32x2_t __ret;
  __ret = (uint32x2_t) {__p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vmov_n_u32(uint32_t __p0) {
  uint32x2_t __ret;
  __ret = (uint32x2_t) {__p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) uint64x1_t vmov_n_u64(uint64_t __p0) {
  uint64x1_t __ret;
  __ret = (uint64x1_t) {__p0};
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vmov_n_u16(uint16_t __p0) {
  uint16x4_t __ret;
  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vmov_n_u16(uint16_t __p0) {
  uint16x4_t __ret;
  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vmov_n_s8(int8_t __p0) {
  int8x8_t __ret;
  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vmov_n_s8(int8_t __p0) {
  int8x8_t __ret;
  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vmov_n_f32(float32_t __p0) {
  float32x2_t __ret;
  __ret = (float32x2_t) {__p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vmov_n_f32(float32_t __p0) {
  float32x2_t __ret;
  __ret = (float32x2_t) {__p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vmov_n_f16(__p0) __extension__ ({ \
  float16x4_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
  __ret; \
})
#else
#define vmov_n_f16(__p0) __extension__ ({ \
  float16x4_t __ret; \
  float16_t __s0 = __p0; \
  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vmov_n_s32(int32_t __p0) {
  int32x2_t __ret;
  __ret = (int32x2_t) {__p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vmov_n_s32(int32_t __p0) {
  int32x2_t __ret;
  __ret = (int32x2_t) {__p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) int64x1_t vmov_n_s64(int64_t __p0) {
  int64x1_t __ret;
  __ret = (int64x1_t) {__p0};
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vmov_n_s16(int16_t __p0) {
  int16x4_t __ret;
  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vmov_n_s16(int16_t __p0) {
  int16x4_t __ret;
  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmovl_u8(uint8x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmovl_u8(uint8x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __rev0), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 49));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmovl_u32(uint32x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmovl_u32(uint32x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __rev0), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 51));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmovl_u16(uint16x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmovl_u16(uint16x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __rev0), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 50));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmovl_s8(int8x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmovl_s8(int8x8_t __p0) {
|
|
int16x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vmovl_s32(int32x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vmovl_s32(int32x2_t __p0) {
|
|
int64x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmovl_s16(int16x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmovl_s16(int16x4_t __p0) {
|
|
int32x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vmovl_v(__builtin_bit_cast(int8x8_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmovn_u32(uint32x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmovn_u32(uint32x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmovn_u64(uint64x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmovn_u64(uint64x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmovn_u16(uint16x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmovn_u16(uint16x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 16));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vmovn_s32(int32x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vmovn_s32(int32x4_t __p0) {
|
|
int16x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vmovn_s64(int64x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vmovn_s64(int64x2_t __p0) {
|
|
int32x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vmovn_s16(int16x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vmovn_s16(int16x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vmovn_v(__builtin_bit_cast(int8x16_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vmul_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vmul_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vmulq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vmulq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_lane_u32(__p0_94, __p1_94, __p2_94) __extension__ ({ \
|
|
uint32x4_t __ret_94; \
|
|
uint32x4_t __s0_94 = __p0_94; \
|
|
uint32x2_t __s1_94 = __p1_94; \
|
|
__ret_94 = __s0_94 * splatq_lane_u32(__s1_94, __p2_94); \
|
|
__ret_94; \
|
|
})
|
|
#else
|
|
#define vmulq_lane_u32(__p0_95, __p1_95, __p2_95) __extension__ ({ \
|
|
uint32x4_t __ret_95; \
|
|
uint32x4_t __s0_95 = __p0_95; \
|
|
uint32x2_t __s1_95 = __p1_95; \
|
|
uint32x4_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, __lane_reverse_128_32); \
|
|
uint32x2_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, __lane_reverse_64_32); \
|
|
__ret_95 = __rev0_95 * __noswap_splatq_lane_u32(__rev1_95, __p2_95); \
|
|
__ret_95 = __builtin_shufflevector(__ret_95, __ret_95, __lane_reverse_128_32); \
|
|
__ret_95; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_lane_u16(__p0_96, __p1_96, __p2_96) __extension__ ({ \
|
|
uint16x8_t __ret_96; \
|
|
uint16x8_t __s0_96 = __p0_96; \
|
|
uint16x4_t __s1_96 = __p1_96; \
|
|
__ret_96 = __s0_96 * splatq_lane_u16(__s1_96, __p2_96); \
|
|
__ret_96; \
|
|
})
|
|
#else
|
|
#define vmulq_lane_u16(__p0_97, __p1_97, __p2_97) __extension__ ({ \
|
|
uint16x8_t __ret_97; \
|
|
uint16x8_t __s0_97 = __p0_97; \
|
|
uint16x4_t __s1_97 = __p1_97; \
|
|
uint16x8_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, __lane_reverse_128_16); \
|
|
uint16x4_t __rev1_97; __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, __lane_reverse_64_16); \
|
|
__ret_97 = __rev0_97 * __noswap_splatq_lane_u16(__rev1_97, __p2_97); \
|
|
__ret_97 = __builtin_shufflevector(__ret_97, __ret_97, __lane_reverse_128_16); \
|
|
__ret_97; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_lane_f32(__p0_98, __p1_98, __p2_98) __extension__ ({ \
|
|
float32x4_t __ret_98; \
|
|
float32x4_t __s0_98 = __p0_98; \
|
|
float32x2_t __s1_98 = __p1_98; \
|
|
__ret_98 = __s0_98 * splatq_lane_f32(__s1_98, __p2_98); \
|
|
__ret_98; \
|
|
})
|
|
#else
|
|
#define vmulq_lane_f32(__p0_99, __p1_99, __p2_99) __extension__ ({ \
|
|
float32x4_t __ret_99; \
|
|
float32x4_t __s0_99 = __p0_99; \
|
|
float32x2_t __s1_99 = __p1_99; \
|
|
float32x4_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, __lane_reverse_128_32); \
|
|
float32x2_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, __lane_reverse_64_32); \
|
|
__ret_99 = __rev0_99 * __noswap_splatq_lane_f32(__rev1_99, __p2_99); \
|
|
__ret_99 = __builtin_shufflevector(__ret_99, __ret_99, __lane_reverse_128_32); \
|
|
__ret_99; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_lane_s32(__p0_100, __p1_100, __p2_100) __extension__ ({ \
|
|
int32x4_t __ret_100; \
|
|
int32x4_t __s0_100 = __p0_100; \
|
|
int32x2_t __s1_100 = __p1_100; \
|
|
__ret_100 = __s0_100 * splatq_lane_s32(__s1_100, __p2_100); \
|
|
__ret_100; \
|
|
})
|
|
#else
|
|
#define vmulq_lane_s32(__p0_101, __p1_101, __p2_101) __extension__ ({ \
|
|
int32x4_t __ret_101; \
|
|
int32x4_t __s0_101 = __p0_101; \
|
|
int32x2_t __s1_101 = __p1_101; \
|
|
int32x4_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, __lane_reverse_128_32); \
|
|
int32x2_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, __lane_reverse_64_32); \
|
|
__ret_101 = __rev0_101 * __noswap_splatq_lane_s32(__rev1_101, __p2_101); \
|
|
__ret_101 = __builtin_shufflevector(__ret_101, __ret_101, __lane_reverse_128_32); \
|
|
__ret_101; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_lane_s16(__p0_102, __p1_102, __p2_102) __extension__ ({ \
|
|
int16x8_t __ret_102; \
|
|
int16x8_t __s0_102 = __p0_102; \
|
|
int16x4_t __s1_102 = __p1_102; \
|
|
__ret_102 = __s0_102 * splatq_lane_s16(__s1_102, __p2_102); \
|
|
__ret_102; \
|
|
})
|
|
#else
|
|
#define vmulq_lane_s16(__p0_103, __p1_103, __p2_103) __extension__ ({ \
|
|
int16x8_t __ret_103; \
|
|
int16x8_t __s0_103 = __p0_103; \
|
|
int16x4_t __s1_103 = __p1_103; \
|
|
int16x8_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, __lane_reverse_128_16); \
|
|
int16x4_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, __lane_reverse_64_16); \
|
|
__ret_103 = __rev0_103 * __noswap_splatq_lane_s16(__rev1_103, __p2_103); \
|
|
__ret_103 = __builtin_shufflevector(__ret_103, __ret_103, __lane_reverse_128_16); \
|
|
__ret_103; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_lane_u32(__p0_104, __p1_104, __p2_104) __extension__ ({ \
|
|
uint32x2_t __ret_104; \
|
|
uint32x2_t __s0_104 = __p0_104; \
|
|
uint32x2_t __s1_104 = __p1_104; \
|
|
__ret_104 = __s0_104 * splat_lane_u32(__s1_104, __p2_104); \
|
|
__ret_104; \
|
|
})
|
|
#else
|
|
#define vmul_lane_u32(__p0_105, __p1_105, __p2_105) __extension__ ({ \
|
|
uint32x2_t __ret_105; \
|
|
uint32x2_t __s0_105 = __p0_105; \
|
|
uint32x2_t __s1_105 = __p1_105; \
|
|
uint32x2_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, __lane_reverse_64_32); \
|
|
uint32x2_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, __lane_reverse_64_32); \
|
|
__ret_105 = __rev0_105 * __noswap_splat_lane_u32(__rev1_105, __p2_105); \
|
|
__ret_105 = __builtin_shufflevector(__ret_105, __ret_105, __lane_reverse_64_32); \
|
|
__ret_105; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_lane_u16(__p0_106, __p1_106, __p2_106) __extension__ ({ \
|
|
uint16x4_t __ret_106; \
|
|
uint16x4_t __s0_106 = __p0_106; \
|
|
uint16x4_t __s1_106 = __p1_106; \
|
|
__ret_106 = __s0_106 * splat_lane_u16(__s1_106, __p2_106); \
|
|
__ret_106; \
|
|
})
|
|
#else
|
|
#define vmul_lane_u16(__p0_107, __p1_107, __p2_107) __extension__ ({ \
|
|
uint16x4_t __ret_107; \
|
|
uint16x4_t __s0_107 = __p0_107; \
|
|
uint16x4_t __s1_107 = __p1_107; \
|
|
uint16x4_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, __lane_reverse_64_16); \
|
|
uint16x4_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, __lane_reverse_64_16); \
|
|
__ret_107 = __rev0_107 * __noswap_splat_lane_u16(__rev1_107, __p2_107); \
|
|
__ret_107 = __builtin_shufflevector(__ret_107, __ret_107, __lane_reverse_64_16); \
|
|
__ret_107; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_lane_f32(__p0_108, __p1_108, __p2_108) __extension__ ({ \
|
|
float32x2_t __ret_108; \
|
|
float32x2_t __s0_108 = __p0_108; \
|
|
float32x2_t __s1_108 = __p1_108; \
|
|
__ret_108 = __s0_108 * splat_lane_f32(__s1_108, __p2_108); \
|
|
__ret_108; \
|
|
})
|
|
#else
|
|
#define vmul_lane_f32(__p0_109, __p1_109, __p2_109) __extension__ ({ \
|
|
float32x2_t __ret_109; \
|
|
float32x2_t __s0_109 = __p0_109; \
|
|
float32x2_t __s1_109 = __p1_109; \
|
|
float32x2_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, __lane_reverse_64_32); \
|
|
__ret_109 = __rev0_109 * __noswap_splat_lane_f32(__rev1_109, __p2_109); \
|
|
__ret_109 = __builtin_shufflevector(__ret_109, __ret_109, __lane_reverse_64_32); \
|
|
__ret_109; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_lane_s32(__p0_110, __p1_110, __p2_110) __extension__ ({ \
|
|
int32x2_t __ret_110; \
|
|
int32x2_t __s0_110 = __p0_110; \
|
|
int32x2_t __s1_110 = __p1_110; \
|
|
__ret_110 = __s0_110 * splat_lane_s32(__s1_110, __p2_110); \
|
|
__ret_110; \
|
|
})
|
|
#else
|
|
#define vmul_lane_s32(__p0_111, __p1_111, __p2_111) __extension__ ({ \
|
|
int32x2_t __ret_111; \
|
|
int32x2_t __s0_111 = __p0_111; \
|
|
int32x2_t __s1_111 = __p1_111; \
|
|
int32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, __lane_reverse_64_32); \
|
|
int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, __lane_reverse_64_32); \
|
|
__ret_111 = __rev0_111 * __noswap_splat_lane_s32(__rev1_111, __p2_111); \
|
|
__ret_111 = __builtin_shufflevector(__ret_111, __ret_111, __lane_reverse_64_32); \
|
|
__ret_111; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_lane_s16(__p0_112, __p1_112, __p2_112) __extension__ ({ \
|
|
int16x4_t __ret_112; \
|
|
int16x4_t __s0_112 = __p0_112; \
|
|
int16x4_t __s1_112 = __p1_112; \
|
|
__ret_112 = __s0_112 * splat_lane_s16(__s1_112, __p2_112); \
|
|
__ret_112; \
|
|
})
|
|
#else
|
|
#define vmul_lane_s16(__p0_113, __p1_113, __p2_113) __extension__ ({ \
|
|
int16x4_t __ret_113; \
|
|
int16x4_t __s0_113 = __p0_113; \
|
|
int16x4_t __s1_113 = __p1_113; \
|
|
int16x4_t __rev0_113; __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, __lane_reverse_64_16); \
|
|
int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, __lane_reverse_64_16); \
|
|
__ret_113 = __rev0_113 * __noswap_splat_lane_s16(__rev1_113, __p2_113); \
|
|
__ret_113 = __builtin_shufflevector(__ret_113, __ret_113, __lane_reverse_64_16); \
|
|
__ret_113; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 * (uint32x2_t) {__p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __rev0 * (uint32x2_t) {__p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __p0 * (float32x2_t) {__p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __rev0 * (float32x2_t) {__p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 * (int32x2_t) {__p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __rev0 * (int32x2_t) {__p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 37));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 37));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 37));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 50));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint32x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 50));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
  int16x8_t __ret;
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 33));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
  int16x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 33));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
__ai __attribute__((target("neon"))) int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
  int16x8_t __ret;
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 33));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 35));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 35));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
__ai __attribute__((target("neon"))) int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 35));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 34));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 34));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("neon"))) int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 34));
  return __ret;
}
#endif

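/*
 * Note on the vmull_* wrappers above: each multiplies two 64-bit vectors
 * lane-by-lane and returns a 128-bit vector with lanes twice as wide, so the
 * products cannot wrap. On big-endian targets the #else variants reverse lane
 * order with __builtin_shufflevector before and after the builtin call, and
 * the __noswap_* helpers skip that reversal for inputs that are already in
 * builtin lane order.
 *
 * Illustrative usage (a sketch; example values chosen arbitrarily):
 *   uint8x8_t a = vdup_n_u8(200), b = vdup_n_u8(3);
 *   uint16x8_t wide = vmull_u8(a, b);   // every u16 lane holds 600
 */
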
#ifdef __LITTLE_ENDIAN__
#define vmull_lane_u32(__p0_114, __p1_114, __p2_114) __extension__ ({ \
  uint64x2_t __ret_114; \
  uint32x2_t __s0_114 = __p0_114; \
  uint32x2_t __s1_114 = __p1_114; \
  __ret_114 = vmull_u32(__s0_114, splat_lane_u32(__s1_114, __p2_114)); \
  __ret_114; \
})
#else
#define vmull_lane_u32(__p0_115, __p1_115, __p2_115) __extension__ ({ \
  uint64x2_t __ret_115; \
  uint32x2_t __s0_115 = __p0_115; \
  uint32x2_t __s1_115 = __p1_115; \
  uint32x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, __lane_reverse_64_32); \
  uint32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, __lane_reverse_64_32); \
  __ret_115 = __noswap_vmull_u32(__rev0_115, __noswap_splat_lane_u32(__rev1_115, __p2_115)); \
  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, __lane_reverse_128_64); \
  __ret_115; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmull_lane_u16(__p0_116, __p1_116, __p2_116) __extension__ ({ \
  uint32x4_t __ret_116; \
  uint16x4_t __s0_116 = __p0_116; \
  uint16x4_t __s1_116 = __p1_116; \
  __ret_116 = vmull_u16(__s0_116, splat_lane_u16(__s1_116, __p2_116)); \
  __ret_116; \
})
#else
#define vmull_lane_u16(__p0_117, __p1_117, __p2_117) __extension__ ({ \
  uint32x4_t __ret_117; \
  uint16x4_t __s0_117 = __p0_117; \
  uint16x4_t __s1_117 = __p1_117; \
  uint16x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, __lane_reverse_64_16); \
  uint16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, __lane_reverse_64_16); \
  __ret_117 = __noswap_vmull_u16(__rev0_117, __noswap_splat_lane_u16(__rev1_117, __p2_117)); \
  __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, __lane_reverse_128_32); \
  __ret_117; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \
  int64x2_t __ret_118; \
  int32x2_t __s0_118 = __p0_118; \
  int32x2_t __s1_118 = __p1_118; \
  __ret_118 = vmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \
  __ret_118; \
})
#else
#define vmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \
  int64x2_t __ret_119; \
  int32x2_t __s0_119 = __p0_119; \
  int32x2_t __s1_119 = __p1_119; \
  int32x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, __lane_reverse_64_32); \
  int32x2_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, __lane_reverse_64_32); \
  __ret_119 = __noswap_vmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \
  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, __lane_reverse_128_64); \
  __ret_119; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \
  int32x4_t __ret_120; \
  int16x4_t __s0_120 = __p0_120; \
  int16x4_t __s1_120 = __p1_120; \
  __ret_120 = vmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \
  __ret_120; \
})
#else
#define vmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \
  int32x4_t __ret_121; \
  int16x4_t __s0_121 = __p0_121; \
  int16x4_t __s1_121 = __p1_121; \
  int16x4_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, __lane_reverse_64_16); \
  int16x4_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, __lane_reverse_64_16); \
  __ret_121 = __noswap_vmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \
  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, __lane_reverse_128_32); \
  __ret_121; \
})
#endif

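/*
 * Illustrative usage of the vmull_lane_* macros above (a sketch; the lane
 * index must be a compile-time constant):
 *   int16x4_t a = vdup_n_s16(3);
 *   int16x4_t v = vdup_n_s16(7);
 *   int32x4_t r = vmull_lane_s16(a, v, 2);   // every s32 lane holds 21
 */
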
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
  uint64x2_t __ret;
  __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
  uint64x2_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1});
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
  uint64x2_t __ret;
  __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
  uint32x4_t __ret;
  __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
  uint32x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1});
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
  uint32x4_t __ret;
  __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int64x2_t __ret;
  __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1});
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int64x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1});
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
__ai __attribute__((target("neon"))) int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int64x2_t __ret;
  __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1});
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int32x4_t __ret;
  __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int32x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("neon"))) int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int32x4_t __ret;
  __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
  return __ret;
}
#endif

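/*
 * Illustrative usage of the vmull_n_* helpers above (a sketch): the scalar
 * operand is broadcast to every lane before the widening multiply.
 *   int32x4_t r = vmull_n_s16(vdup_n_s16(-300), 1000);   // every s32 lane: -300000
 */
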
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vmvn_p8(poly8x8_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vmvn_p8(poly8x8_t __p0) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vmvnq_p8(poly8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vmvnq_p8(poly8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vmvnq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vmvnq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmvnq_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmvnq_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmvnq_u16(uint16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmvnq_u16(uint16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vmvnq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vmvnq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmvnq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmvnq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmvnq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmvnq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmvn_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vmvn_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmvn_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vmvn_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmvn_u16(uint16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vmvn_u16(uint16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vmvn_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vmvn_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vmvn_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vmvn_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vmvn_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = ~__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vmvn_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = ~__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
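/*
 * Illustrative usage of the vmvn_* / vmvnq_* bitwise-NOT wrappers above
 * (a sketch):
 *   uint8x8_t m = vmvn_u8(vdup_n_u8(0x0F));   // every lane becomes 0xF0
 */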
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vnegq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vnegq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vnegq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vnegq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vnegq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vnegq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vnegq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vnegq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vneg_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vneg_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vneg_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vneg_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vneg_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vneg_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vneg_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vneg_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
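/*
 * Illustrative usage of the vneg_* / vnegq_* negation wrappers above (a sketch):
 *   float32x4_t n = vnegq_f32(vdupq_n_f32(1.5f));   // every lane becomes -1.5f
 */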
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 | ~__p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 | ~__rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
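/*
 * Illustrative usage of the vorn_* / vornq_* (OR-NOT, a | ~b) wrappers above
 * (a sketch):
 *   uint8x8_t r = vorn_u8(vdup_n_u8(0x0F), vdup_n_u8(0x33));   // 0x0F | ~0x33 == 0xCF
 */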
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 | __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 | __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
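/*
 * Illustrative usage of the vorr_* / vorrq_* bitwise-OR wrappers above (a sketch):
 *   uint8x8_t r = vorr_u8(vdup_n_u8(0x0F), vdup_n_u8(0x30));   // every lane 0x3F
 */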
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vpadalq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
|
|
uint64x1_t __ret;
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __rev1), 19));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 3));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
|
|
int64x1_t __ret;
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __rev1), 3));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vpadal_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
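/*
 * Illustrative usage of the vpadal_* / vpadalq_* (pairwise add and accumulate
 * long) wrappers above (a sketch): adjacent element pairs of the second
 * operand are summed into wider lanes and added to the accumulator.
 *   uint16x4_t acc = vpadal_u8(vdup_n_u16(100), vdup_n_u8(3));   // every lane: 100 + 3 + 3 == 106
 */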
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __ret;
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
uint16x4_t __ret;
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __ret;
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
int8x8_t __ret;
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
float32x2_t __ret;
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
float32x2_t __ret;
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __ret;
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
int32x2_t __ret;
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __ret;
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
int16x4_t __ret;
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vpadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
#endif

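/* vpaddl/vpaddlq: pairwise add long. Adjacent lanes of the single source
 * vector are summed into lanes of twice the element width, e.g.
 *   vpaddl_u8({a0,...,a7}) -> {a0+a1, a2+a3, a4+a5, a6+a7} as uint16x4_t. */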
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
uint16x8_t __ret;
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __p0), 49));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
uint16x8_t __ret;
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __rev0), 49));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
uint64x2_t __ret;
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __p0), 51));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
uint64x2_t __ret;
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __rev0), 51));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
uint32x4_t __ret;
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __p0), 50));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
uint32x4_t __ret;
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vpaddlq_s8(int8x16_t __p0) {
int16x8_t __ret;
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vpaddlq_s8(int8x16_t __p0) {
int16x8_t __ret;
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vpaddlq_s32(int32x4_t __p0) {
int64x2_t __ret;
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __p0), 35));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vpaddlq_s32(int32x4_t __p0) {
int64x2_t __ret;
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vpaddlq_s16(int16x8_t __p0) {
int32x4_t __ret;
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vpaddlq_s16(int16x8_t __p0) {
int32x4_t __ret;
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vpaddlq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vpaddl_u8(uint8x8_t __p0) {
uint16x4_t __ret;
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __p0), 17));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vpaddl_u8(uint8x8_t __p0) {
uint16x4_t __ret;
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __rev0), 17));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x1_t vpaddl_u32(uint32x2_t __p0) {
uint64x1_t __ret;
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __p0), 19));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x1_t vpaddl_u32(uint32x2_t __p0) {
uint64x1_t __ret;
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __rev0), 19));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vpaddl_u16(uint16x4_t __p0) {
uint32x2_t __ret;
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __p0), 18));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vpaddl_u16(uint16x4_t __p0) {
uint32x2_t __ret;
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vpaddl_s8(int8x8_t __p0) {
int16x4_t __ret;
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __p0), 1));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vpaddl_s8(int8x8_t __p0) {
int16x4_t __ret;
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x1_t vpaddl_s32(int32x2_t __p0) {
int64x1_t __ret;
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __p0), 3));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x1_t vpaddl_s32(int32x2_t __p0) {
int64x1_t __ret;
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __rev0), 3));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vpaddl_s16(int16x4_t __p0) {
int32x2_t __ret;
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __p0), 2));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vpaddl_s16(int16x4_t __p0) {
int32x2_t __ret;
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vpaddl_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
#endif

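/* vpmax: pairwise maximum. Adjacent lane pairs of __p0, then of __p1, are
 * reduced with max and packed into one result, e.g.
 *   vpmax_u8(a, b) -> {max(a0,a1), ..., max(a6,a7), max(b0,b1), ..., max(b6,b7)}. */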
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vpmax_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
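/* vpmin: pairwise minimum, the min counterpart of vpmax above. */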
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vpmin_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
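/* vqabs/vqabsq: saturating absolute value. Unlike a plain abs, the most
 * negative representable value saturates to the most positive one instead of
 * wrapping (e.g. vqabs_s8 maps -128 to 127). */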
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqabsq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqabsq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqabsq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqabsq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqabsq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqabsq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqabsq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqabsq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqabsq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqabsq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqabsq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqabsq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqabs_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqabs_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqabs_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqabs_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqabs_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqabs_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqabs_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqabs_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqabs_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqabs_v(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqabs_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqabs_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
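/* vqadd/vqaddq: saturating addition. Sums that would overflow the element
 * type are clamped to the type's minimum or maximum value. */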
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
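/* vqdmlal: saturating doubling multiply-accumulate long.
 * Per lane: result = __p0 + saturate(2 * __p1 * __p2), with the product
 * widened to twice the element width (s16 -> s32, s32 -> s64). */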
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqdmlal_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqdmlal_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqdmlal_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 35));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmlal_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmlal_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmlal_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
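/* The _lane_ and _n_ forms reuse the full-vector intrinsics above: _lane_
 * multiplies by one selected lane of the last vector operand (broadcast via
 * splat_lane), and _n_ multiplies by a scalar broadcast to every lane. */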
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlal_lane_s32(__p0_122, __p1_122, __p2_122, __p3_122) __extension__ ({ \
|
|
int64x2_t __ret_122; \
|
|
int64x2_t __s0_122 = __p0_122; \
|
|
int32x2_t __s1_122 = __p1_122; \
|
|
int32x2_t __s2_122 = __p2_122; \
|
|
__ret_122 = vqdmlal_s32(__s0_122, __s1_122, splat_lane_s32(__s2_122, __p3_122)); \
|
|
__ret_122; \
|
|
})
|
|
#else
|
|
#define vqdmlal_lane_s32(__p0_123, __p1_123, __p2_123, __p3_123) __extension__ ({ \
|
|
int64x2_t __ret_123; \
|
|
int64x2_t __s0_123 = __p0_123; \
|
|
int32x2_t __s1_123 = __p1_123; \
|
|
int32x2_t __s2_123 = __p2_123; \
|
|
int64x2_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, __lane_reverse_128_64); \
|
|
int32x2_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, __lane_reverse_64_32); \
|
|
int32x2_t __rev2_123; __rev2_123 = __builtin_shufflevector(__s2_123, __s2_123, __lane_reverse_64_32); \
|
|
__ret_123 = __noswap_vqdmlal_s32(__rev0_123, __rev1_123, __noswap_splat_lane_s32(__rev2_123, __p3_123)); \
|
|
__ret_123 = __builtin_shufflevector(__ret_123, __ret_123, __lane_reverse_128_64); \
|
|
__ret_123; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlal_lane_s16(__p0_124, __p1_124, __p2_124, __p3_124) __extension__ ({ \
|
|
int32x4_t __ret_124; \
|
|
int32x4_t __s0_124 = __p0_124; \
|
|
int16x4_t __s1_124 = __p1_124; \
|
|
int16x4_t __s2_124 = __p2_124; \
|
|
__ret_124 = vqdmlal_s16(__s0_124, __s1_124, splat_lane_s16(__s2_124, __p3_124)); \
|
|
__ret_124; \
|
|
})
|
|
#else
|
|
#define vqdmlal_lane_s16(__p0_125, __p1_125, __p2_125, __p3_125) __extension__ ({ \
|
|
int32x4_t __ret_125; \
|
|
int32x4_t __s0_125 = __p0_125; \
|
|
int16x4_t __s1_125 = __p1_125; \
|
|
int16x4_t __s2_125 = __p2_125; \
|
|
int32x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, __lane_reverse_128_32); \
|
|
int16x4_t __rev1_125; __rev1_125 = __builtin_shufflevector(__s1_125, __s1_125, __lane_reverse_64_16); \
|
|
int16x4_t __rev2_125; __rev2_125 = __builtin_shufflevector(__s2_125, __s2_125, __lane_reverse_64_16); \
|
|
__ret_125 = __noswap_vqdmlal_s16(__rev0_125, __rev1_125, __noswap_splat_lane_s16(__rev2_125, __p3_125)); \
|
|
__ret_125 = __builtin_shufflevector(__ret_125, __ret_125, __lane_reverse_128_32); \
|
|
__ret_125; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
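/* vqdmlsl: saturating doubling multiply-subtract long; identical to vqdmlal
 * above except that the widened, doubled product is subtracted from __p0. */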
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqdmlsl_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqdmlsl_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqdmlsl_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 35));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmlsl_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmlsl_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmlsl_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlsl_lane_s32(__p0_126, __p1_126, __p2_126, __p3_126) __extension__ ({ \
|
|
int64x2_t __ret_126; \
|
|
int64x2_t __s0_126 = __p0_126; \
|
|
int32x2_t __s1_126 = __p1_126; \
|
|
int32x2_t __s2_126 = __p2_126; \
|
|
__ret_126 = vqdmlsl_s32(__s0_126, __s1_126, splat_lane_s32(__s2_126, __p3_126)); \
|
|
__ret_126; \
|
|
})
|
|
#else
|
|
#define vqdmlsl_lane_s32(__p0_127, __p1_127, __p2_127, __p3_127) __extension__ ({ \
|
|
int64x2_t __ret_127; \
|
|
int64x2_t __s0_127 = __p0_127; \
|
|
int32x2_t __s1_127 = __p1_127; \
|
|
int32x2_t __s2_127 = __p2_127; \
|
|
int64x2_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, __lane_reverse_128_64); \
|
|
int32x2_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, __lane_reverse_64_32); \
|
|
int32x2_t __rev2_127; __rev2_127 = __builtin_shufflevector(__s2_127, __s2_127, __lane_reverse_64_32); \
|
|
__ret_127 = __noswap_vqdmlsl_s32(__rev0_127, __rev1_127, __noswap_splat_lane_s32(__rev2_127, __p3_127)); \
|
|
__ret_127 = __builtin_shufflevector(__ret_127, __ret_127, __lane_reverse_128_64); \
|
|
__ret_127; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlsl_lane_s16(__p0_128, __p1_128, __p2_128, __p3_128) __extension__ ({ \
|
|
int32x4_t __ret_128; \
|
|
int32x4_t __s0_128 = __p0_128; \
|
|
int16x4_t __s1_128 = __p1_128; \
|
|
int16x4_t __s2_128 = __p2_128; \
|
|
__ret_128 = vqdmlsl_s16(__s0_128, __s1_128, splat_lane_s16(__s2_128, __p3_128)); \
|
|
__ret_128; \
|
|
})
|
|
#else
|
|
#define vqdmlsl_lane_s16(__p0_129, __p1_129, __p2_129, __p3_129) __extension__ ({ \
|
|
int32x4_t __ret_129; \
|
|
int32x4_t __s0_129 = __p0_129; \
|
|
int16x4_t __s1_129 = __p1_129; \
|
|
int16x4_t __s2_129 = __p2_129; \
|
|
int32x4_t __rev0_129; __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, __lane_reverse_128_32); \
|
|
int16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, __lane_reverse_64_16); \
|
|
int16x4_t __rev2_129; __rev2_129 = __builtin_shufflevector(__s2_129, __s2_129, __lane_reverse_64_16); \
|
|
__ret_129 = __noswap_vqdmlsl_s16(__rev0_129, __rev1_129, __noswap_splat_lane_s16(__rev2_129, __p3_129)); \
|
|
__ret_129 = __builtin_shufflevector(__ret_129, __ret_129, __lane_reverse_128_32); \
|
|
__ret_129; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
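/* vqdmulh/vqdmulhq: saturating doubling multiply returning the high half.
 * Per lane: result = saturate((2 * __p0 * __p1) >> element_bits), the classic
 * Q15/Q31 fixed-point multiply. */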
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmulhq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmulhq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmulhq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqdmulhq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqdmulhq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqdmulhq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqdmulh_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqdmulh_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqdmulh_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqdmulh_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqdmulh_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqdmulh_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
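/* vqdmulh / vqdmulhq: signed saturating doubling multiply returning the high
 * half, i.e. roughly sat((2 * a[i] * b[i]) >> lane_bits) per lane; the only
 * case that saturates is when both inputs are the most negative representable
 * value. The _n forms broadcast a scalar second operand to every lane.
 *
 * Minimal usage sketch (illustrative only; the helper name q15_mul is not part
 * of this header), assuming <arm_neon.h> is included:
 *
 *   // Q15 fixed-point multiply of eight samples: (a * b) >> 15, saturated.
 *   static inline int16x8_t q15_mul(int16x8_t a, int16x8_t b) {
 *     return vqdmulhq_s16(a, b);
 *   }
 */
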
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqdmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 35));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqdmull_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 35));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqdmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 35));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 34));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmull_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 34));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmull_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 34));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vqdmull_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \
  int64x2_t __ret_130; \
  int32x2_t __s0_130 = __p0_130; \
  int32x2_t __s1_130 = __p1_130; \
  __ret_130 = vqdmull_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \
  __ret_130; \
})
#else
#define vqdmull_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \
  int64x2_t __ret_131; \
  int32x2_t __s0_131 = __p0_131; \
  int32x2_t __s1_131 = __p1_131; \
  int32x2_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, __lane_reverse_64_32); \
  int32x2_t __rev1_131; __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, __lane_reverse_64_32); \
  __ret_131 = __noswap_vqdmull_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \
  __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, __lane_reverse_128_64); \
  __ret_131; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vqdmull_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \
  int32x4_t __ret_132; \
  int16x4_t __s0_132 = __p0_132; \
  int16x4_t __s1_132 = __p1_132; \
  __ret_132 = vqdmull_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \
  __ret_132; \
})
#else
#define vqdmull_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \
  int32x4_t __ret_133; \
  int16x4_t __s0_133 = __p0_133; \
  int16x4_t __s1_133 = __p1_133; \
  int16x4_t __rev0_133; __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, __lane_reverse_64_16); \
  int16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, __lane_reverse_64_16); \
  __ret_133 = __noswap_vqdmull_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \
  __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, __lane_reverse_128_32); \
  __ret_133; \
})
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int64x2_t __ret;
  __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int64x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1});
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int64x2_t __ret;
  __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int32x4_t __ret;
  __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int32x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int32x4_t __ret;
  __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
  return __ret;
}
#endif

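/* vqdmull: signed saturating doubling multiply long. Each product
 * 2 * a[i] * b[i] is written to a double-width lane, so no precision is lost;
 * saturation can only occur when both inputs are the most negative value.
 * The _lane forms select one lane of the second operand, the _n forms
 * broadcast a scalar.
 *
 * Minimal usage sketch (illustrative only; widen_q15 is a hypothetical name):
 *
 *   // Four Q15 * Q15 products widened to Q31 lanes.
 *   static inline int32x4_t widen_q15(int16x4_t a, int16x4_t b) {
 *     return vqdmull_s16(a, b);
 *   }
 */
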
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqmovn_u32(uint32x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqmovn_u32(uint32x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqmovn_u64(uint64x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqmovn_u64(uint64x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqmovn_u16(uint16x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqmovn_u16(uint16x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 16));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqmovn_s32(int32x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqmovn_s32(int32x4_t __p0) {
|
|
int16x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqmovn_s64(int64x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqmovn_s64(int64x2_t __p0) {
|
|
int32x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqmovn_s16(int16x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqmovn_s16(int16x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqmovn_v(__builtin_bit_cast(int8x16_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#endif
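/* vqmovn: saturating narrow. Each double-width source lane is clamped to the
 * range of the half-width result type (signed to signed, unsigned to
 * unsigned); the lane count stays the same, so int32x4_t narrows to
 * int16x4_t.
 *
 * Minimal usage sketch (illustrative only; narrow_acc is a hypothetical name):
 *
 *   // Clamp four 32-bit accumulators back to 16-bit samples.
 *   static inline int16x4_t narrow_acc(int32x4_t acc) {
 *     return vqmovn_s32(acc);
 *   }
 */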
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqmovun_s32(int32x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqmovun_v(__builtin_bit_cast(int8x16_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqmovun_s32(int32x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqmovun_v(__builtin_bit_cast(int8x16_t, __rev0), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqmovun_v(__builtin_bit_cast(int8x16_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqmovun_s64(int64x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqmovun_v(__builtin_bit_cast(int8x16_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqmovun_s64(int64x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqmovun_v(__builtin_bit_cast(int8x16_t, __rev0), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqmovun_v(__builtin_bit_cast(int8x16_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqmovun_s16(int16x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqmovun_v(__builtin_bit_cast(int8x16_t, __p0), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqmovun_s16(int16x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqmovun_v(__builtin_bit_cast(int8x16_t, __rev0), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqmovun_v(__builtin_bit_cast(int8x16_t, __p0), 16));
|
|
return __ret;
|
|
}
|
|
#endif
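/* vqmovun: saturating narrow from signed to unsigned. Negative lanes clamp to
 * 0 and lanes above the unsigned maximum clamp to that maximum, which makes
 * it convenient for packing signed intermediates into unsigned samples.
 *
 * Minimal usage sketch (illustrative only; pack_u8 is a hypothetical name):
 *
 *   // Pack eight signed 16-bit intermediates into 8-bit pixel values.
 *   static inline uint8x8_t pack_u8(int16x8_t v) {
 *     return vqmovun_s16(v);
 *   }
 */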
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqnegq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqnegq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqnegq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqnegq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqnegq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqnegq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqnegq_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqnegq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqnegq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqnegq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqnegq_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqnegq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqneg_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqneg_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqneg_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqneg_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqneg_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqneg_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqneg_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqneg_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqneg_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqneg_v(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqneg_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqneg_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
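/* vqneg / vqnegq: saturating negate. Identical to plain negation except that
 * the most negative representable value maps to the most positive one instead
 * of wrapping back onto itself.
 *
 * Minimal usage sketch (illustrative only; sat_neg is a hypothetical name):
 *
 *   // Negate eight 16-bit lanes; INT16_MIN becomes INT16_MAX, not INT16_MIN.
 *   static inline int16x8_t sat_neg(int16x8_t v) {
 *     return vqnegq_s16(v);
 *   }
 */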
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmulhq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmulhq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmulhq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmulhq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmulhq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmulhq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmulh_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmulh_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmulh_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmulh_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmulh_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmulh_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
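/* vqrdmulh / vqrdmulhq: like vqdmulh but rounding, i.e. roughly
 * sat((2 * a[i] * b[i] + (1 << (lane_bits - 1))) >> lane_bits) per lane. This
 * is the usual primitive for rounded Q15/Q31 fixed-point multiplication.
 *
 * Minimal usage sketch (illustrative only; q15_mul_round is a hypothetical
 * name):
 *
 *   // Rounded Q15 multiply: (a * b + 0x4000) >> 15, saturated.
 *   static inline int16x8_t q15_mul_round(int16x8_t a, int16x8_t b) {
 *     return vqrdmulhq_s16(a, b);
 *   }
 */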
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
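/* vqrshl / vqrshlq: saturating rounding shift left by a signed, per-lane
 * count taken from the second operand; negative counts perform a rounding
 * shift right. The 64x1 forms above have no little/big-endian variants
 * because a single-lane vector needs no lane reversal.
 *
 * Minimal usage sketch (illustrative only; scale_pow2 is a hypothetical name):
 *
 *   // Scale four 32-bit lanes by per-lane powers of two, rounding and
 *   // saturating; negative exponents shift right.
 *   static inline int32x4_t scale_pow2(int32x4_t v, int32x4_t exponents) {
 *     return vqrshlq_s32(v, exponents);
 *   }
 */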
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#endif
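/* vqrshrn_n: saturating rounding shift right and narrow by an immediate. The
 * shift amount must be a compile-time constant from 1 up to the width of the
 * result element, which is why these forms are macros rather than inline
 * functions.
 *
 * Minimal usage sketch (illustrative only; q15_from_q31 is a hypothetical
 * name):
 *
 *   // Convert four Q31 values to Q15 with rounding and saturation.
 *   #define q15_from_q31(v) vqrshrn_n_s32((v), 16)
 */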
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqrshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqrshrun_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqrshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqrshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqrshrun_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqrshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqrshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqrshrun_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqrshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#endif
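/* vqrshrun_n: as vqrshrn_n, but a signed source narrows to an unsigned
 * result, with negative values clamping to 0. Typically used at the end of a
 * signed fixed-point pipeline that produces unsigned samples.
 *
 * Minimal usage sketch (illustrative only; to_u8 is a hypothetical name):
 *
 *   // Eight signed 16-bit intermediates, scaled down by 6 bits, to uint8.
 *   #define to_u8(v) vqrshrun_n_s16((v), 6)
 */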
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vqshl_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vqshl_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqshluq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqshluq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqshluq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqshluq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqshluq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqshluq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqshluq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqshluq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshlu_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshlu_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshlu_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshlu_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vqshlu_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshlu_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshlu_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshrun_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshrun_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshrun_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqshrun_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqsubq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqsub_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vraddhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrecpeq_v(__builtin_bit_cast(int8x16_t, __p0), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrecpeq_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vrecpeq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrecpeq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vrecpeq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrecpeq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrecpe_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrecpe_v(__builtin_bit_cast(int8x8_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrecpe_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrecpe_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vrecpe_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrecpe_v(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vrecpe_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrecpe_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrecpsq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrecpsq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrecps_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrecps_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x8_t vrev16_p8(poly8x8_t __p0) {
  poly8x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x8_t vrev16_p8(poly8x8_t __p0) {
  poly8x8_t __ret;
  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x16_t vrev16q_p8(poly8x16_t __p0) {
  poly8x16_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x16_t vrev16q_p8(poly8x16_t __p0) {
  poly8x16_t __ret;
  poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vrev16q_u8(uint8x16_t __p0) {
  uint8x16_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vrev16q_u8(uint8x16_t __p0) {
  uint8x16_t __ret;
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x16_t vrev16q_s8(int8x16_t __p0) {
  int8x16_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x16_t vrev16q_s8(int8x16_t __p0) {
  int8x16_t __ret;
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vrev16_u8(uint8x8_t __p0) {
  uint8x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vrev16_u8(uint8x8_t __p0) {
  uint8x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vrev16_s8(int8x8_t __p0) {
  int8x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vrev16_s8(int8x8_t __p0) {
  int8x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif
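
/* vrev32 / vrev32q (VREV32): reverse the order of the 8-bit or 16-bit elements
 * within each 32-bit word of the vector. */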
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x8_t vrev32_p8(poly8x8_t __p0) {
  poly8x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x8_t vrev32_p8(poly8x8_t __p0) {
  poly8x8_t __ret;
  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly16x4_t vrev32_p16(poly16x4_t __p0) {
  poly16x4_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly16x4_t vrev32_p16(poly16x4_t __p0) {
  poly16x4_t __ret;
  poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x16_t vrev32q_p8(poly8x16_t __p0) {
  poly8x16_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x16_t vrev32q_p8(poly8x16_t __p0) {
  poly8x16_t __ret;
  poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly16x8_t vrev32q_p16(poly16x8_t __p0) {
  poly16x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly16x8_t vrev32q_p16(poly16x8_t __p0) {
  poly16x8_t __ret;
  poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vrev32q_u8(uint8x16_t __p0) {
  uint8x16_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vrev32q_u8(uint8x16_t __p0) {
  uint8x16_t __ret;
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vrev32q_u16(uint16x8_t __p0) {
  uint16x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vrev32q_u16(uint16x8_t __p0) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x16_t vrev32q_s8(int8x16_t __p0) {
  int8x16_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x16_t vrev32q_s8(int8x16_t __p0) {
  int8x16_t __ret;
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vrev32q_s16(int16x8_t __p0) {
  int16x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vrev32q_s16(int16x8_t __p0) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vrev32_u8(uint8x8_t __p0) {
  uint8x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vrev32_u8(uint8x8_t __p0) {
  uint8x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vrev32_u16(uint16x4_t __p0) {
  uint16x4_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vrev32_u16(uint16x4_t __p0) {
  uint16x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vrev32_s8(int8x8_t __p0) {
  int8x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vrev32_s8(int8x8_t __p0) {
  int8x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vrev32_s16(int16x4_t __p0) {
  int16x4_t __ret;
  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vrev32_s16(int16x4_t __p0) {
  int16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif
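
/* vrev64 / vrev64q (VREV64): reverse the order of the 8-, 16-, or 32-bit elements
 * within each 64-bit doubleword of the vector. */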
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vrev64_p8(poly8x8_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vrev64_p8(poly8x8_t __p0) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4_t vrev64_p16(poly16x4_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4_t vrev64_p16(poly16x4_t __p0) {
|
|
poly16x4_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vrev64q_p8(poly8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vrev64q_p8(poly8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vrev64q_p16(poly16x8_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vrev64q_p16(poly16x8_t __p0) {
|
|
poly16x8_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vrev64q_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vrev64q_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrev64q_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrev64q_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vrev64q_u16(uint16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vrev64q_u16(uint16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vrev64q_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vrev64q_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vrev64q_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vrev64q_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vrev64q_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vrev64q_s32(int32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vrev64q_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vrev64q_s16(int16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vrev64_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vrev64_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrev64_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrev64_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vrev64_u16(uint16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vrev64_u16(uint16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vrev64_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vrev64_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vrev64_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vrev64_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vrev64_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vrev64_s32(int32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vrev64_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vrev64_s16(int16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8_t vrev64q_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8_t vrev64q_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x4_t vrev64_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x4_t vrev64_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
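
/* vrhadd / vrhaddq (VRHADD): rounding halving add. Each result lane is
 * (a + b + 1) >> 1, computed without losing the intermediate carry. */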
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vrhaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrhadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
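
/* vrshl / vrshlq (VRSHL): rounding shift left by a signed, per-lane shift amount;
 * negative shift counts perform a rounding shift right instead. */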
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vrshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
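
/* vrshr_n / vrshrq_n (VRSHR): rounding shift right by an immediate. Each lane is
 * rounded to nearest by adding 1 << (n - 1) before shifting right by n bits. */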
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vrshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vrshr_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vrshr_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
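/* vrshrn_n_<type>(v, n): rounding shift right by the constant n, then narrow
 * each lane to half its width (e.g. uint32x4_t -> uint16x4_t).  The __noswap_
 * forms are internal helpers for other big-endian wrappers whose operands are
 * already in reversed order, so they skip the lane reversal. */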
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#endif
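/* vrsqrte(q): per-lane reciprocal square-root estimate.  The float forms
 * approximate 1.0f / sqrtf(lane) (FRSQRTE); the unsigned forms produce the
 * URSQRTE estimate, which treats each lane as a fixed-point fraction. */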
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrsqrteq_v(__builtin_bit_cast(int8x16_t, __p0), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrsqrteq_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vrsqrteq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrsqrteq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vrsqrteq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrsqrteq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrsqrte_v(__builtin_bit_cast(int8x8_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrsqrte_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vrsqrte_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrsqrte_v(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vrsqrte_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrsqrte_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
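/* vrsqrts(q)_f32(a, b): reciprocal square-root step.  Each lane evaluates
 * (3.0f - a*b) / 2.0f (FRSQRTS); multiplying a reciprocal square-root
 * estimate by this factor performs one Newton-Raphson refinement. */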
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrsqrtsq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrsqrtsq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrsqrts_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrsqrts_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
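/* vrsra(q)_n_<type>(acc, v, n): rounding shift right v by the constant n and
 * accumulate, i.e. each lane of the result is acc + ((v + (1 << (n - 1))) >> n). */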
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vrsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
uint64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
int64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
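/* vrsubhn_<type>(a, b): rounding subtract and narrow to the high half.  Each
 * lane is the most-significant half of (a - b), rounded by adding
 * 1 << (half-width - 1) before taking the upper half. */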
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 1));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 2));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#endif
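/* vset_lane / vsetq_lane: return a copy of the vector with lane __p2 replaced
 * by the scalar __p0; all other lanes are unchanged and __p2 must be a
 * constant lane index.  A minimal illustration:
 *   int32x4_t v = vdupq_n_s32(0);
 *   v = vsetq_lane_s32(42, v, 1);   // lane 1 becomes 42, lanes 0/2/3 stay 0
 */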
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8_t __s0 = __p0; \
|
|
poly8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vset_lane_i8(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8_t __s0 = __p0; \
|
|
poly8x8_t __s1 = __p1; \
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vset_lane_i8(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8_t __s0 = __p0; \
|
|
poly8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vset_lane_i8(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16_t __s0 = __p0; \
|
|
poly16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vset_lane_i16(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16_t __s0 = __p0; \
|
|
poly16x4_t __s1 = __p1; \
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vset_lane_i16(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16_t __s0 = __p0; \
|
|
poly16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vset_lane_i16(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8_t __s0 = __p0; \
|
|
poly8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vsetq_lane_i8(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8_t __s0 = __p0; \
|
|
poly8x16_t __s1 = __p1; \
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vsetq_lane_i8(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8_t __s0 = __p0; \
|
|
poly8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vsetq_lane_i8(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16_t __s0 = __p0; \
|
|
poly16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vsetq_lane_i16(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16_t __s0 = __p0; \
|
|
poly16x8_t __s1 = __p1; \
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vsetq_lane_i16(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16_t __s0 = __p0; \
|
|
poly16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vsetq_lane_i16(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsetq_lane_i8(__s0, __builtin_bit_cast(int8x16_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsetq_lane_i8(__s0, __builtin_bit_cast(int8x16_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsetq_lane_i8(__s0, __builtin_bit_cast(int8x16_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsetq_lane_i32(__s0, __builtin_bit_cast(int32x4_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsetq_lane_i32(__s0, __builtin_bit_cast(int32x4_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsetq_lane_i32(__s0, __builtin_bit_cast(int32x4_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsetq_lane_i64(__s0, __builtin_bit_cast(int64x2_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsetq_lane_i64(__s0, __builtin_bit_cast(int64x2_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsetq_lane_i64(__s0, __builtin_bit_cast(int64x2_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsetq_lane_i16(__s0, __builtin_bit_cast(int16x8_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsetq_lane_i16(__s0, __builtin_bit_cast(int16x8_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsetq_lane_i16(__s0, __builtin_bit_cast(int16x8_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vsetq_lane_i8(__s0, __builtin_bit_cast(int8x16_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vsetq_lane_i8(__s0, __builtin_bit_cast(int8x16_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vsetq_lane_i8(__s0, __builtin_bit_cast(int8x16_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vsetq_lane_f32(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vsetq_lane_f32(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vsetq_lane_f32(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vsetq_lane_i32(__s0, __builtin_bit_cast(int32x4_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vsetq_lane_i32(__s0, __builtin_bit_cast(int32x4_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vsetq_lane_i32(__s0, __builtin_bit_cast(int32x4_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vsetq_lane_i64(__s0, __builtin_bit_cast(int64x2_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vsetq_lane_i64(__s0, __builtin_bit_cast(int64x2_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vsetq_lane_i64(__s0, __builtin_bit_cast(int64x2_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vsetq_lane_i16(__s0, __builtin_bit_cast(int16x8_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vsetq_lane_i16(__s0, __builtin_bit_cast(int16x8_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vsetq_lane_i16(__s0, __builtin_bit_cast(int16x8_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vset_lane_i8(__s0, __builtin_bit_cast(int8x8_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vset_lane_i8(__s0, __builtin_bit_cast(int8x8_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vset_lane_i8(__s0, __builtin_bit_cast(int8x8_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vset_lane_i32(__s0, __builtin_bit_cast(int32x2_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vset_lane_i32(__s0, __builtin_bit_cast(int32x2_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vset_lane_i32(__s0, __builtin_bit_cast(int32x2_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
uint64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vset_lane_i64(__s0, __builtin_bit_cast(int64x1_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vset_lane_i16(__s0, __builtin_bit_cast(int16x4_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vset_lane_i16(__s0, __builtin_bit_cast(int16x4_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vset_lane_i16(__s0, __builtin_bit_cast(int16x4_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vset_lane_i8(__s0, __builtin_bit_cast(int8x8_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vset_lane_i8(__s0, __builtin_bit_cast(int8x8_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vset_lane_i8(__s0, __builtin_bit_cast(int8x8_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
float32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vset_lane_f32(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
float32x2_t __s1 = __p1; \
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vset_lane_f32(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
float32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vset_lane_f32(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vset_lane_i32(__s0, __builtin_bit_cast(int32x2_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vset_lane_i32(__s0, __builtin_bit_cast(int32x2_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vset_lane_i32(__s0, __builtin_bit_cast(int32x2_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vset_lane_i64(__s0, __builtin_bit_cast(int64x1_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vset_lane_i16(__s0, __builtin_bit_cast(int16x4_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vset_lane_i16(__s0, __builtin_bit_cast(int16x4_t, __rev1), __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vset_lane_i16(__s0, __builtin_bit_cast(int16x4_t, __s1), __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
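/* vshl(q)_<type>(v, counts): shift each lane of v by the signed count in the
 * corresponding lane of the second operand; positive counts shift left,
 * negative counts shift right (the VSHL register form). */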
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
  uint8x16_t __ret;
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
  uint8x16_t __ret;
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
  uint64x2_t __ret;
  __ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
  uint64x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
  uint16x8_t __ret;
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
  int8x16_t __ret;
  __ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
  int8x16_t __ret;
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
  int64x2_t __ret;
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 35));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 35));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vshlq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
  uint8x8_t __ret;
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
  uint8x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
  uint32x2_t __ret;
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
  uint32x2_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
  uint64x1_t __ret;
  __ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
  uint16x4_t __ret;
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
  uint16x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __ret;
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
  int64x1_t __ret;
  __ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 3));
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vshl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
  uint8x16_t __ret; \
  uint8x16_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 48)); \
  __ret; \
})
#else
#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
  uint8x16_t __ret; \
  uint8x16_t __s0 = __p0; \
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 48)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
  uint32x4_t __ret; \
  uint32x4_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
  __ret; \
})
#else
#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
  uint32x4_t __ret; \
  uint32x4_t __s0 = __p0; \
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 50)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
  uint64x2_t __ret; \
  uint64x2_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
  __ret; \
})
#else
#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
  uint64x2_t __ret; \
  uint64x2_t __s0 = __p0; \
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
  __ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 51)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
  uint16x8_t __ret; \
  uint16x8_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
  __ret; \
})
#else
#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
  uint16x8_t __ret; \
  uint16x8_t __s0 = __p0; \
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 49)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
  int8x16_t __ret; \
  int8x16_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 32)); \
  __ret; \
})
#else
#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
  int8x16_t __ret; \
  int8x16_t __s0 = __p0; \
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
  __ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 32)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
  int32x4_t __ret; \
  int32x4_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 34)); \
  __ret; \
})
#else
#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
  int32x4_t __ret; \
  int32x4_t __s0 = __p0; \
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 34)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
  int64x2_t __ret; \
  int64x2_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 35)); \
  __ret; \
})
#else
#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
  int64x2_t __ret; \
  int64x2_t __s0 = __p0; \
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 35)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
  int16x8_t __ret; \
  int16x8_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 33)); \
  __ret; \
})
#else
#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
  int16x8_t __ret; \
  int16x8_t __s0 = __p0; \
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vshlq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 33)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshl_n_u8(__p0, __p1) __extension__ ({ \
  uint8x8_t __ret; \
  uint8x8_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 16)); \
  __ret; \
})
#else
#define vshl_n_u8(__p0, __p1) __extension__ ({ \
  uint8x8_t __ret; \
  uint8x8_t __s0 = __p0; \
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 16)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshl_n_u32(__p0, __p1) __extension__ ({ \
  uint32x2_t __ret; \
  uint32x2_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
  __ret; \
})
#else
#define vshl_n_u32(__p0, __p1) __extension__ ({ \
  uint32x2_t __ret; \
  uint32x2_t __s0 = __p0; \
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 18)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
  __ret; \
})
#endif

#define vshl_n_u64(__p0, __p1) __extension__ ({ \
  uint64x1_t __ret; \
  uint64x1_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
  __ret; \
})
#ifdef __LITTLE_ENDIAN__
#define vshl_n_u16(__p0, __p1) __extension__ ({ \
  uint16x4_t __ret; \
  uint16x4_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
  __ret; \
})
#else
#define vshl_n_u16(__p0, __p1) __extension__ ({ \
  uint16x4_t __ret; \
  uint16x4_t __s0 = __p0; \
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 17)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshl_n_s8(__p0, __p1) __extension__ ({ \
  int8x8_t __ret; \
  int8x8_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 0)); \
  __ret; \
})
#else
#define vshl_n_s8(__p0, __p1) __extension__ ({ \
  int8x8_t __ret; \
  int8x8_t __s0 = __p0; \
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 0)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vshl_n_s32(__p0, __p1) __extension__ ({ \
  int32x2_t __ret; \
  int32x2_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 2)); \
  __ret; \
})
#else
#define vshl_n_s32(__p0, __p1) __extension__ ({ \
  int32x2_t __ret; \
  int32x2_t __s0 = __p0; \
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 2)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
  __ret; \
})
#endif

#define vshl_n_s64(__p0, __p1) __extension__ ({ \
  int64x1_t __ret; \
  int64x1_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 3)); \
  __ret; \
})
#ifdef __LITTLE_ENDIAN__
#define vshl_n_s16(__p0, __p1) __extension__ ({ \
  int16x4_t __ret; \
  int16x4_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 1)); \
  __ret; \
})
#else
#define vshl_n_s16(__p0, __p1) __extension__ ({ \
  int16x4_t __ret; \
  int16x4_t __s0 = __p0; \
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vshl_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 1)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshll_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshll_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshll_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_n_s8(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshll_n_s8(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_n_s32(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshll_n_s32(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_n_s16(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshll_n_s16(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vshll_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vshrq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshr_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshr_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshr_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshr_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vshr_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshr_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshr_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshr_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshr_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshr_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshr_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vshr_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshr_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshr_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vshr_n_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 17)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 18)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 16)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vshrn_n_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 0)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
poly8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
poly8x8_t __s1 = __p1; \
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
poly16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 5)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
poly16x4_t __s1 = __p1; \
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 5)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
poly8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
poly8x16_t __s1 = __p1; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
poly16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
poly16x8_t __s1 = __p1; \
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
uint64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
int64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vsraq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
uint64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
int64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vsra_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
poly8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
poly8x8_t __s1 = __p1; \
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
poly16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 5)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
poly16x4_t __s1 = __p1; \
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(poly16x4_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 5)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
poly8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
poly8x16_t __s1 = __p1; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
poly16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
poly16x8_t __s1 = __p1; \
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 50)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __s1 = __p1; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 50)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __s1 = __p1; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __s1 = __p1; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 16)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 16)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 18)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __s1 = __p1; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 18)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
uint64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 19)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 17)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __s1 = __p1; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 17)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 0)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __s1 = __p1; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 0)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
int64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 3)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 4); \
|
|
})
|
|
#else
|
|
#define vst1_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x8_t __s1 = __p1; \
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 4); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 5); \
|
|
})
|
|
#else
|
|
#define vst1_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x4_t __s1 = __p1; \
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 5); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x16_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 36); \
|
|
})
|
|
#else
|
|
#define vst1q_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x16_t __s1 = __p1; \
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 36); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 37); \
|
|
})
|
|
#else
|
|
#define vst1q_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x8_t __s1 = __p1; \
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 37); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 48); \
|
|
})
|
|
#else
|
|
#define vst1q_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 48); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 50); \
|
|
})
|
|
#else
|
|
#define vst1q_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4_t __s1 = __p1; \
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 50); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 51); \
|
|
})
|
|
#else
|
|
#define vst1q_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 51); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 49); \
|
|
})
|
|
#else
|
|
#define vst1q_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8_t __s1 = __p1; \
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 49); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 32); \
|
|
})
|
|
#else
|
|
#define vst1q_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16_t __s1 = __p1; \
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 32); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 41); \
|
|
})
|
|
#else
|
|
#define vst1q_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 41); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 34); \
|
|
})
|
|
#else
|
|
#define vst1q_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 34); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 35); \
|
|
})
|
|
#else
|
|
#define vst1q_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 35); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 33); \
|
|
})
|
|
#else
|
|
#define vst1q_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 33); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 16); \
|
|
})
|
|
#else
|
|
#define vst1_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 16); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 18); \
|
|
})
|
|
#else
|
|
#define vst1_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2_t __s1 = __p1; \
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 18); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x1_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 19); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 17); \
|
|
})
|
|
#else
|
|
#define vst1_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4_t __s1 = __p1; \
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 17); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 0); \
|
|
})
|
|
#else
|
|
#define vst1_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8_t __s1 = __p1; \
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 0); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 9); \
|
|
})
|
|
#else
|
|
#define vst1_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2_t __s1 = __p1; \
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 9); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 2); \
|
|
})
|
|
#else
|
|
#define vst1_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 2); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_s64(__p0, __p1) __extension__ ({ \
|
|
int64x1_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 3); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 1); \
|
|
})
|
|
#else
|
|
#define vst1_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 1); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 4); \
|
|
})
|
|
#else
|
|
#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8_t __s1 = __p1; \
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 4); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 5); \
|
|
})
|
|
#else
|
|
#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4_t __s1 = __p1; \
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 5); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 36); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __s1 = __p1; \
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 36); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 37); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __s1 = __p1; \
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 37); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 48); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 48); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 50); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4_t __s1 = __p1; \
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 50); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 51); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 51); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 49); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __s1 = __p1; \
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 49); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 32); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __s1 = __p1; \
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 32); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 41); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 41); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 34); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 34); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 35); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 35); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 33); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 33); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 16); \
|
|
})
|
|
#else
|
|
#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 16); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 18); \
|
|
})
|
|
#else
|
|
#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2_t __s1 = __p1; \
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 18); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 19); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 17); \
|
|
})
|
|
#else
|
|
#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4_t __s1 = __p1; \
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 17); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 0); \
|
|
})
|
|
#else
|
|
#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8_t __s1 = __p1; \
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 0); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 9); \
|
|
})
|
|
#else
|
|
#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2_t __s1 = __p1; \
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 9); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 2); \
|
|
})
|
|
#else
|
|
#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 2); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 3); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 1); \
|
|
})
|
|
#else
|
|
#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 1); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
|
|
poly8x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 4); \
|
|
})
|
|
#else
|
|
#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
|
|
poly8x8x2_t __s1 = __p1; \
|
|
poly8x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 4); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
  poly16x4x2_t __s1 = __p1; \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 5); \
})
#else
#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
  poly16x4x2_t __s1 = __p1; \
  poly16x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 5); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
  poly8x16x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 36); \
})
#else
#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
  poly8x16x2_t __s1 = __p1; \
  poly8x16x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 36); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
  poly16x8x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 37); \
})
#else
#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
  poly16x8x2_t __s1 = __p1; \
  poly16x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 37); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
  uint8x16x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 48); \
})
#else
#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
  uint8x16x2_t __s1 = __p1; \
  uint8x16x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 48); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
  uint32x4x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 50); \
})
#else
#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
  uint32x4x2_t __s1 = __p1; \
  uint32x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 50); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
  uint64x2x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 51); \
})
#else
#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
  uint64x2x2_t __s1 = __p1; \
  uint64x2x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 51); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
  uint16x8x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 49); \
})
#else
#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
  uint16x8x2_t __s1 = __p1; \
  uint16x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 49); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
  int8x16x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 32); \
})
#else
#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
  int8x16x2_t __s1 = __p1; \
  int8x16x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 32); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
  float32x4x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 41); \
})
#else
#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
  float32x4x2_t __s1 = __p1; \
  float32x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 41); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
  int32x4x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 34); \
})
#else
#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
  int32x4x2_t __s1 = __p1; \
  int32x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 34); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
  int64x2x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 35); \
})
#else
#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
  int64x2x2_t __s1 = __p1; \
  int64x2x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 35); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
  int16x8x2_t __s1 = __p1; \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 33); \
})
#else
#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
  int16x8x2_t __s1 = __p1; \
  int16x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 33); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
  uint8x8x2_t __s1 = __p1; \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 16); \
})
#else
#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
  uint8x8x2_t __s1 = __p1; \
  uint8x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 16); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
  uint32x2x2_t __s1 = __p1; \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 18); \
})
#else
#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
  uint32x2x2_t __s1 = __p1; \
  uint32x2x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 18); \
})
#endif
#define vst1_u64_x2(__p0, __p1) __extension__ ({ \
  uint64x1x2_t __s1 = __p1; \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 19); \
})
#ifdef __LITTLE_ENDIAN__
#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
  uint16x4x2_t __s1 = __p1; \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 17); \
})
#else
#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
  uint16x4x2_t __s1 = __p1; \
  uint16x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 17); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
  int8x8x2_t __s1 = __p1; \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 0); \
})
#else
#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
  int8x8x2_t __s1 = __p1; \
  int8x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 0); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
  float32x2x2_t __s1 = __p1; \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 9); \
})
#else
#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
  float32x2x2_t __s1 = __p1; \
  float32x2x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 9); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
  int32x2x2_t __s1 = __p1; \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 2); \
})
#else
#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
  int32x2x2_t __s1 = __p1; \
  int32x2x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 2); \
})
#endif
#define vst1_s64_x2(__p0, __p1) __extension__ ({ \
  int64x1x2_t __s1 = __p1; \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 3); \
})
#ifdef __LITTLE_ENDIAN__
#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
  int16x4x2_t __s1 = __p1; \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 1); \
})
#else
#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
  int16x4x2_t __s1 = __p1; \
  int16x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 1); \
})
#endif
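/* Usage sketch (editorial, illustrative only): the _x2 forms store two whole
 * vectors to consecutive memory, val[0] first and then val[1], without
 * interleaving elements.
 *
 *   uint32_t buf[4];
 *   uint32x2x2_t pair;
 *   pair.val[0] = vdup_n_u32(1);
 *   pair.val[1] = vdup_n_u32(2);
 *   vst1_u32_x2(buf, pair);      // buf == {1, 1, 2, 2}
 */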
#ifdef __LITTLE_ENDIAN__
#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
  poly8x8x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 4); \
})
#else
#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
  poly8x8x3_t __s1 = __p1; \
  poly8x8x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 4); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
  poly16x4x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 5); \
})
#else
#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
  poly16x4x3_t __s1 = __p1; \
  poly16x4x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 5); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
  poly8x16x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 36); \
})
#else
#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
  poly8x16x3_t __s1 = __p1; \
  poly8x16x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 36); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
  poly16x8x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 37); \
})
#else
#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
  poly16x8x3_t __s1 = __p1; \
  poly16x8x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 37); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
  uint8x16x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 48); \
})
#else
#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
  uint8x16x3_t __s1 = __p1; \
  uint8x16x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 48); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
  uint32x4x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 50); \
})
#else
#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
  uint32x4x3_t __s1 = __p1; \
  uint32x4x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 50); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
  uint64x2x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 51); \
})
#else
#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
  uint64x2x3_t __s1 = __p1; \
  uint64x2x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 51); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
  uint16x8x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 49); \
})
#else
#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
  uint16x8x3_t __s1 = __p1; \
  uint16x8x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 49); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
  int8x16x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 32); \
})
#else
#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
  int8x16x3_t __s1 = __p1; \
  int8x16x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 32); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
  float32x4x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 41); \
})
#else
#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
  float32x4x3_t __s1 = __p1; \
  float32x4x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 41); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
  int32x4x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 34); \
})
#else
#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
  int32x4x3_t __s1 = __p1; \
  int32x4x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 34); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
  int64x2x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 35); \
})
#else
#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
  int64x2x3_t __s1 = __p1; \
  int64x2x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 35); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
  int16x8x3_t __s1 = __p1; \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 33); \
})
#else
#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
  int16x8x3_t __s1 = __p1; \
  int16x8x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
  __builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 33); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
  uint8x8x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 16); \
})
#else
#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
  uint8x8x3_t __s1 = __p1; \
  uint8x8x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 16); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
  uint32x2x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 18); \
})
#else
#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
  uint32x2x3_t __s1 = __p1; \
  uint32x2x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 18); \
})
#endif
#define vst1_u64_x3(__p0, __p1) __extension__ ({ \
  uint64x1x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 19); \
})
#ifdef __LITTLE_ENDIAN__
#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
  uint16x4x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 17); \
})
#else
#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
  uint16x4x3_t __s1 = __p1; \
  uint16x4x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 17); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
  int8x8x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 0); \
})
#else
#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
  int8x8x3_t __s1 = __p1; \
  int8x8x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 0); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
  float32x2x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 9); \
})
#else
#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
  float32x2x3_t __s1 = __p1; \
  float32x2x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 9); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
  int32x2x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 2); \
})
#else
#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
  int32x2x3_t __s1 = __p1; \
  int32x2x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 2); \
})
#endif
#define vst1_s64_x3(__p0, __p1) __extension__ ({ \
  int64x1x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 3); \
})
#ifdef __LITTLE_ENDIAN__
#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
  int16x4x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 1); \
})
#else
#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
  int16x4x3_t __s1 = __p1; \
  int16x4x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 1); \
})
#endif
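/* Editorial note: the vst1_*_x2 / _x3 / _x4 forms above and below are plain
 * contiguous stores of two, three or four whole registers; they do not
 * interleave elements.  Element interleaving is performed by the vst2/vst3/
 * vst4 families that appear further down in this header. */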
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
|
|
poly8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 4); \
|
|
})
|
|
#else
|
|
#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
|
|
poly8x8x4_t __s1 = __p1; \
|
|
poly8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 4); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
|
|
poly16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 5); \
|
|
})
|
|
#else
|
|
#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
|
|
poly16x4x4_t __s1 = __p1; \
|
|
poly16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 5); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
|
|
poly8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 36); \
|
|
})
|
|
#else
|
|
#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
|
|
poly8x16x4_t __s1 = __p1; \
|
|
poly8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 36); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
|
|
poly16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 37); \
|
|
})
|
|
#else
|
|
#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
|
|
poly16x8x4_t __s1 = __p1; \
|
|
poly16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 37); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
|
|
uint8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 48); \
|
|
})
|
|
#else
|
|
#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
|
|
uint8x16x4_t __s1 = __p1; \
|
|
uint8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 48); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
|
|
uint32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 50); \
|
|
})
|
|
#else
|
|
#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
|
|
uint32x4x4_t __s1 = __p1; \
|
|
uint32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 50); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
|
|
uint64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 51); \
|
|
})
|
|
#else
|
|
#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
|
|
uint64x2x4_t __s1 = __p1; \
|
|
uint64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 51); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
|
|
uint16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 49); \
|
|
})
|
|
#else
|
|
#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
|
|
uint16x8x4_t __s1 = __p1; \
|
|
uint16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 49); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
|
|
int8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 32); \
|
|
})
|
|
#else
|
|
#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
|
|
int8x16x4_t __s1 = __p1; \
|
|
int8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 32); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
|
|
float32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 41); \
|
|
})
|
|
#else
|
|
#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
|
|
float32x4x4_t __s1 = __p1; \
|
|
float32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 41); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
|
|
int32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 34); \
|
|
})
|
|
#else
|
|
#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
|
|
int32x4x4_t __s1 = __p1; \
|
|
int32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 34); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
|
|
int64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 35); \
|
|
})
|
|
#else
|
|
#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
|
|
int64x2x4_t __s1 = __p1; \
|
|
int64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 35); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
|
|
int16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 33); \
|
|
})
|
|
#else
|
|
#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
|
|
int16x8x4_t __s1 = __p1; \
|
|
int16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 33); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
|
|
uint8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 16); \
|
|
})
|
|
#else
|
|
#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
|
|
uint8x8x4_t __s1 = __p1; \
|
|
uint8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 16); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
|
|
uint32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 18); \
|
|
})
|
|
#else
|
|
#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
|
|
uint32x2x4_t __s1 = __p1; \
|
|
uint32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 18); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_u64_x4(__p0, __p1) __extension__ ({ \
|
|
uint64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 19); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
|
|
uint16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 17); \
|
|
})
|
|
#else
|
|
#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
|
|
uint16x4x4_t __s1 = __p1; \
|
|
uint16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 17); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
|
|
int8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 0); \
|
|
})
|
|
#else
|
|
#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
|
|
int8x8x4_t __s1 = __p1; \
|
|
int8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 0); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
|
|
float32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 9); \
|
|
})
|
|
#else
|
|
#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
|
|
float32x2x4_t __s1 = __p1; \
|
|
float32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 9); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
|
|
int32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 2); \
|
|
})
|
|
#else
|
|
#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
|
|
int32x2x4_t __s1 = __p1; \
|
|
int32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 2); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_s64_x4(__p0, __p1) __extension__ ({ \
|
|
int64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 3); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
|
|
int16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 1); \
|
|
})
|
|
#else
|
|
#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
|
|
int16x4x4_t __s1 = __p1; \
|
|
int16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 1); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
#define vst2_p8(__p0, __p1) __extension__ ({ \
  poly8x8x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 4); \
})
#else
#define vst2_p8(__p0, __p1) __extension__ ({ \
  poly8x8x2_t __s1 = __p1; \
  poly8x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 4); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2_p16(__p0, __p1) __extension__ ({ \
  poly16x4x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 5); \
})
#else
#define vst2_p16(__p0, __p1) __extension__ ({ \
  poly16x4x2_t __s1 = __p1; \
  poly16x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 5); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_p8(__p0, __p1) __extension__ ({ \
  poly8x16x2_t __s1 = __p1; \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 36); \
})
#else
#define vst2q_p8(__p0, __p1) __extension__ ({ \
  poly8x16x2_t __s1 = __p1; \
  poly8x16x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 36); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_p16(__p0, __p1) __extension__ ({ \
  poly16x8x2_t __s1 = __p1; \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 37); \
})
#else
#define vst2q_p16(__p0, __p1) __extension__ ({ \
  poly16x8x2_t __s1 = __p1; \
  poly16x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 37); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_u8(__p0, __p1) __extension__ ({ \
  uint8x16x2_t __s1 = __p1; \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 48); \
})
#else
#define vst2q_u8(__p0, __p1) __extension__ ({ \
  uint8x16x2_t __s1 = __p1; \
  uint8x16x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 48); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_u32(__p0, __p1) __extension__ ({ \
  uint32x4x2_t __s1 = __p1; \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 50); \
})
#else
#define vst2q_u32(__p0, __p1) __extension__ ({ \
  uint32x4x2_t __s1 = __p1; \
  uint32x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 50); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_u16(__p0, __p1) __extension__ ({ \
  uint16x8x2_t __s1 = __p1; \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 49); \
})
#else
#define vst2q_u16(__p0, __p1) __extension__ ({ \
  uint16x8x2_t __s1 = __p1; \
  uint16x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 49); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_s8(__p0, __p1) __extension__ ({ \
  int8x16x2_t __s1 = __p1; \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 32); \
})
#else
#define vst2q_s8(__p0, __p1) __extension__ ({ \
  int8x16x2_t __s1 = __p1; \
  int8x16x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 32); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_f32(__p0, __p1) __extension__ ({ \
  float32x4x2_t __s1 = __p1; \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 41); \
})
#else
#define vst2q_f32(__p0, __p1) __extension__ ({ \
  float32x4x2_t __s1 = __p1; \
  float32x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 41); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_s32(__p0, __p1) __extension__ ({ \
  int32x4x2_t __s1 = __p1; \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 34); \
})
#else
#define vst2q_s32(__p0, __p1) __extension__ ({ \
  int32x4x2_t __s1 = __p1; \
  int32x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 34); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_s16(__p0, __p1) __extension__ ({ \
  int16x8x2_t __s1 = __p1; \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 33); \
})
#else
#define vst2q_s16(__p0, __p1) __extension__ ({ \
  int16x8x2_t __s1 = __p1; \
  int16x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 33); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2_u8(__p0, __p1) __extension__ ({ \
  uint8x8x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 16); \
})
#else
#define vst2_u8(__p0, __p1) __extension__ ({ \
  uint8x8x2_t __s1 = __p1; \
  uint8x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 16); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2_u32(__p0, __p1) __extension__ ({ \
  uint32x2x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 18); \
})
#else
#define vst2_u32(__p0, __p1) __extension__ ({ \
  uint32x2x2_t __s1 = __p1; \
  uint32x2x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 18); \
})
#endif

#define vst2_u64(__p0, __p1) __extension__ ({ \
  uint64x1x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 19); \
})
#ifdef __LITTLE_ENDIAN__
#define vst2_u16(__p0, __p1) __extension__ ({ \
  uint16x4x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 17); \
})
#else
#define vst2_u16(__p0, __p1) __extension__ ({ \
  uint16x4x2_t __s1 = __p1; \
  uint16x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 17); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2_s8(__p0, __p1) __extension__ ({ \
  int8x8x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 0); \
})
#else
#define vst2_s8(__p0, __p1) __extension__ ({ \
  int8x8x2_t __s1 = __p1; \
  int8x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 0); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2_f32(__p0, __p1) __extension__ ({ \
  float32x2x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 9); \
})
#else
#define vst2_f32(__p0, __p1) __extension__ ({ \
  float32x2x2_t __s1 = __p1; \
  float32x2x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 9); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2_s32(__p0, __p1) __extension__ ({ \
  int32x2x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 2); \
})
#else
#define vst2_s32(__p0, __p1) __extension__ ({ \
  int32x2x2_t __s1 = __p1; \
  int32x2x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 2); \
})
#endif

#define vst2_s64(__p0, __p1) __extension__ ({ \
  int64x1x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 3); \
})
#ifdef __LITTLE_ENDIAN__
#define vst2_s16(__p0, __p1) __extension__ ({ \
  int16x4x2_t __s1 = __p1; \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 1); \
})
#else
#define vst2_s16(__p0, __p1) __extension__ ({ \
  int16x4x2_t __s1 = __p1; \
  int16x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 1); \
})
#endif

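/*
 * The *_lane variants below store only lane __p2 of each source register
 * instead of the whole interleaved set.
 *
 * Illustrative sketch only, not part of the generated definitions; `dst`
 * is a hypothetical buffer of at least 2 bytes:
 *
 *   int8x8x2_t pair;
 *   pair.val[0] = vdup_n_s8(1);
 *   pair.val[1] = vdup_n_s8(2);
 *   vst2_lane_s8(dst, pair, 3);   // writes the two bytes 0x01 0x02 taken from lane 3
 */
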
#ifdef __LITTLE_ENDIAN__
#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  poly8x8x2_t __s1 = __p1; \
  __builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 4); \
})
#else
#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  poly8x8x2_t __s1 = __p1; \
  poly8x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 4); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  poly16x4x2_t __s1 = __p1; \
  __builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 5); \
})
#else
#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  poly16x4x2_t __s1 = __p1; \
  poly16x4x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 5); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  poly16x8x2_t __s1 = __p1; \
  __builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 37); \
})
#else
#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  poly16x8x2_t __s1 = __p1; \
  poly16x8x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 37); \
})
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 50); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x2_t __s1 = __p1; \
|
|
uint32x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 50); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 49); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x2_t __s1 = __p1; \
|
|
uint16x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 49); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 41); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x2_t __s1 = __p1; \
|
|
float32x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 41); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 34); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x2_t __s1 = __p1; \
|
|
int32x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 34); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 33); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x2_t __s1 = __p1; \
|
|
int16x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 33); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 16); \
|
|
})
|
|
#else
|
|
#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x2_t __s1 = __p1; \
|
|
uint8x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 16); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 18); \
|
|
})
|
|
#else
|
|
#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x2_t __s1 = __p1; \
|
|
uint32x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 18); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 17); \
|
|
})
|
|
#else
|
|
#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x2_t __s1 = __p1; \
|
|
uint16x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 17); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 0); \
|
|
})
|
|
#else
|
|
#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x2_t __s1 = __p1; \
|
|
int8x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 0); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 9); \
|
|
})
|
|
#else
|
|
#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x2_t __s1 = __p1; \
|
|
float32x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 9); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 2); \
|
|
})
|
|
#else
|
|
#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x2_t __s1 = __p1; \
|
|
int32x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 2); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 1); \
|
|
})
|
|
#else
|
|
#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x2_t __s1 = __p1; \
|
|
int16x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 1); \
|
|
})
|
|
#endif
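/*
 * vst3 variants: same structure as the vst2 definitions above, extended to
 * three source registers (their *_lane forms follow further below).
 */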
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 4); \
|
|
})
|
|
#else
|
|
#define vst3_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x8x3_t __s1 = __p1; \
|
|
poly8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 4); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 5); \
|
|
})
|
|
#else
|
|
#define vst3_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x4x3_t __s1 = __p1; \
|
|
poly16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 5); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 36); \
|
|
})
|
|
#else
|
|
#define vst3q_p8(__p0, __p1) __extension__ ({ \
|
|
poly8x16x3_t __s1 = __p1; \
|
|
poly8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 36); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 37); \
|
|
})
|
|
#else
|
|
#define vst3q_p16(__p0, __p1) __extension__ ({ \
|
|
poly16x8x3_t __s1 = __p1; \
|
|
poly16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 37); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 48); \
|
|
})
|
|
#else
|
|
#define vst3q_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16x3_t __s1 = __p1; \
|
|
uint8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 48); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 50); \
|
|
})
|
|
#else
|
|
#define vst3q_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4x3_t __s1 = __p1; \
|
|
uint32x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 50); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 49); \
|
|
})
|
|
#else
|
|
#define vst3q_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8x3_t __s1 = __p1; \
|
|
uint16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 49); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 32); \
|
|
})
|
|
#else
|
|
#define vst3q_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16x3_t __s1 = __p1; \
|
|
int8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 32); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 41); \
|
|
})
|
|
#else
|
|
#define vst3q_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4x3_t __s1 = __p1; \
|
|
float32x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 41); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 34); \
|
|
})
|
|
#else
|
|
#define vst3q_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4x3_t __s1 = __p1; \
|
|
int32x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 34); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 33); \
|
|
})
|
|
#else
|
|
#define vst3q_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8x3_t __s1 = __p1; \
|
|
int16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 33); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 16); \
|
|
})
|
|
#else
|
|
#define vst3_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8x3_t __s1 = __p1; \
|
|
uint8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 16); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 18); \
|
|
})
|
|
#else
|
|
#define vst3_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2x3_t __s1 = __p1; \
|
|
uint32x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 18); \
|
|
})
|
|
#endif
|
|
|
|
#define vst3_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 19); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 17); \
|
|
})
|
|
#else
|
|
#define vst3_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4x3_t __s1 = __p1; \
|
|
uint16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 17); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 0); \
|
|
})
|
|
#else
|
|
#define vst3_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8x3_t __s1 = __p1; \
|
|
int8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 0); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 9); \
|
|
})
|
|
#else
|
|
#define vst3_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2x3_t __s1 = __p1; \
|
|
float32x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 9); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 2); \
|
|
})
|
|
#else
|
|
#define vst3_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2x3_t __s1 = __p1; \
|
|
int32x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 2); \
|
|
})
|
|
#endif
|
|
|
|
#define vst3_s64(__p0, __p1) __extension__ ({ \
|
|
int64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 3); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 1); \
|
|
})
|
|
#else
|
|
#define vst3_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4x3_t __s1 = __p1; \
|
|
int16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 1); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 4); \
|
|
})
|
|
#else
|
|
#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8x3_t __s1 = __p1; \
|
|
poly8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 4); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 5); \
|
|
})
|
|
#else
|
|
#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4x3_t __s1 = __p1; \
|
|
poly16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 5); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 37); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8x3_t __s1 = __p1; \
|
|
poly16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 37); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 50); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x3_t __s1 = __p1; \
|
|
uint32x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 50); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 49); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x3_t __s1 = __p1; \
|
|
uint16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 49); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 41); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x3_t __s1 = __p1; \
|
|
float32x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 41); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 34); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x3_t __s1 = __p1; \
|
|
int32x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 34); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 33); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x3_t __s1 = __p1; \
|
|
int16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 33); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 16); \
|
|
})
|
|
#else
|
|
#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x3_t __s1 = __p1; \
|
|
uint8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 16); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 18); \
|
|
})
|
|
#else
|
|
#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x3_t __s1 = __p1; \
|
|
uint32x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 18); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 17); \
|
|
})
|
|
#else
|
|
#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x3_t __s1 = __p1; \
|
|
uint16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 17); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 0); \
|
|
})
|
|
#else
|
|
#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x3_t __s1 = __p1; \
|
|
int8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 0); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 9); \
|
|
})
|
|
#else
|
|
#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x3_t __s1 = __p1; \
|
|
float32x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 9); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 2); \
|
|
})
|
|
#else
|
|
#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x3_t __s1 = __p1; \
|
|
int32x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 2); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 1); \
|
|
})
|
|
#else
|
|
#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x3_t __s1 = __p1; \
|
|
int16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 1); \
|
|
})
|
|
#endif
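/*
 * vst4 variants: same structure again, extended to four source registers.
 */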

#ifdef __LITTLE_ENDIAN__
#define vst4_p8(__p0, __p1) __extension__ ({ \
  poly8x8x4_t __s1 = __p1; \
  __builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 4); \
})
#else
#define vst4_p8(__p0, __p1) __extension__ ({ \
  poly8x8x4_t __s1 = __p1; \
  poly8x8x4_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
  __builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 4); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst4_p16(__p0, __p1) __extension__ ({ \
  poly16x4x4_t __s1 = __p1; \
  __builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 5); \
})
#else
#define vst4_p16(__p0, __p1) __extension__ ({ \
  poly16x4x4_t __s1 = __p1; \
  poly16x4x4_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
  __builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 5); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst4q_p8(__p0, __p1) __extension__ ({ \
  poly8x16x4_t __s1 = __p1; \
  __builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 36); \
})
#else
#define vst4q_p8(__p0, __p1) __extension__ ({ \
  poly8x16x4_t __s1 = __p1; \
  poly8x16x4_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
  __builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 36); \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vst4q_p16(__p0, __p1) __extension__ ({ \
  poly16x8x4_t __s1 = __p1; \
  __builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 37); \
})
#else
#define vst4q_p16(__p0, __p1) __extension__ ({ \
  poly16x8x4_t __s1 = __p1; \
  poly16x8x4_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
  __builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 37); \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 48); \
|
|
})
|
|
#else
|
|
#define vst4q_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x16x4_t __s1 = __p1; \
|
|
uint8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 48); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 50); \
|
|
})
|
|
#else
|
|
#define vst4q_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x4x4_t __s1 = __p1; \
|
|
uint32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 50); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 49); \
|
|
})
|
|
#else
|
|
#define vst4q_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x8x4_t __s1 = __p1; \
|
|
uint16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 49); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 32); \
|
|
})
|
|
#else
|
|
#define vst4q_s8(__p0, __p1) __extension__ ({ \
|
|
int8x16x4_t __s1 = __p1; \
|
|
int8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 32); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 41); \
|
|
})
|
|
#else
|
|
#define vst4q_f32(__p0, __p1) __extension__ ({ \
|
|
float32x4x4_t __s1 = __p1; \
|
|
float32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 41); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 34); \
|
|
})
|
|
#else
|
|
#define vst4q_s32(__p0, __p1) __extension__ ({ \
|
|
int32x4x4_t __s1 = __p1; \
|
|
int32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 34); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 33); \
|
|
})
|
|
#else
|
|
#define vst4q_s16(__p0, __p1) __extension__ ({ \
|
|
int16x8x4_t __s1 = __p1; \
|
|
int16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 33); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 16); \
|
|
})
|
|
#else
|
|
#define vst4_u8(__p0, __p1) __extension__ ({ \
|
|
uint8x8x4_t __s1 = __p1; \
|
|
uint8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 16); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 18); \
|
|
})
|
|
#else
|
|
#define vst4_u32(__p0, __p1) __extension__ ({ \
|
|
uint32x2x4_t __s1 = __p1; \
|
|
uint32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 18); \
|
|
})
|
|
#endif
|
|
|
|
#define vst4_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 19); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 17); \
|
|
})
|
|
#else
|
|
#define vst4_u16(__p0, __p1) __extension__ ({ \
|
|
uint16x4x4_t __s1 = __p1; \
|
|
uint16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 17); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 0); \
|
|
})
|
|
#else
|
|
#define vst4_s8(__p0, __p1) __extension__ ({ \
|
|
int8x8x4_t __s1 = __p1; \
|
|
int8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 0); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 9); \
|
|
})
|
|
#else
|
|
#define vst4_f32(__p0, __p1) __extension__ ({ \
|
|
float32x2x4_t __s1 = __p1; \
|
|
float32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 9); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 2); \
|
|
})
|
|
#else
|
|
#define vst4_s32(__p0, __p1) __extension__ ({ \
|
|
int32x2x4_t __s1 = __p1; \
|
|
int32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 2); \
|
|
})
|
|
#endif
|
|
|
|
#define vst4_s64(__p0, __p1) __extension__ ({ \
|
|
int64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 3); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 1); \
|
|
})
|
|
#else
|
|
#define vst4_s16(__p0, __p1) __extension__ ({ \
|
|
int16x4x4_t __s1 = __p1; \
|
|
int16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 1); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 4); \
|
|
})
|
|
#else
|
|
#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x8x4_t __s1 = __p1; \
|
|
poly8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 4); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 5); \
|
|
})
|
|
#else
|
|
#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x4x4_t __s1 = __p1; \
|
|
poly16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 5); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 37); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8x4_t __s1 = __p1; \
|
|
poly16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 37); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 50); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x4x4_t __s1 = __p1; \
|
|
uint32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 50); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 49); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8x4_t __s1 = __p1; \
|
|
uint16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 49); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 41); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x4x4_t __s1 = __p1; \
|
|
float32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 41); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 34); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4x4_t __s1 = __p1; \
|
|
int32x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_32); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 34); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 33); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8x4_t __s1 = __p1; \
|
|
int16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 33); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 16); \
|
|
})
|
|
#else
|
|
#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x8x4_t __s1 = __p1; \
|
|
uint8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 16); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 18); \
|
|
})
|
|
#else
|
|
#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
|
|
uint32x2x4_t __s1 = __p1; \
|
|
uint32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 18); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 17); \
|
|
})
|
|
#else
|
|
#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x4x4_t __s1 = __p1; \
|
|
uint16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 17); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 0); \
|
|
})
|
|
#else
|
|
#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x8x4_t __s1 = __p1; \
|
|
int8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 0); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 9); \
|
|
})
|
|
#else
|
|
#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
|
|
float32x2x4_t __s1 = __p1; \
|
|
float32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 9); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 2); \
|
|
})
|
|
#else
|
|
#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2x4_t __s1 = __p1; \
|
|
int32x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_32); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_32); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_32); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_32); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 2); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 1); \
|
|
})
|
|
#else
|
|
#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4x4_t __s1 = __p1; \
|
|
int16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 1); \
|
|
})
|
|
#endif
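/* Illustrative sketch (not part of the generated header): the vst4/vst4q
 * family stores its four source vectors interleaved, so memory receives
 * val[0][0], val[1][0], val[2][0], val[3][0], val[0][1], ... The helper name
 * below is hypothetical and the example is kept in a comment so it does not
 * add symbols to the header.
 *
 *   static inline void store_rgba_interleaved(uint8_t *dst,
 *                                             uint8x16_t r, uint8x16_t g,
 *                                             uint8x16_t b, uint8x16_t a) {
 *     uint8x16x4_t planes = { { r, g, b, a } };
 *     vst4q_u8(dst, planes);  // writes 64 bytes: r0,g0,b0,a0,r1,g1,b1,a1,...
 *   }
 */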

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  uint8x16_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  uint8x16_t __ret;
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  uint64x2_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  uint64x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint16x8_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
  int8x16_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
  int8x16_t __ret;
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
  float32x4_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
  int64x2_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint8x8_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint8x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint32x2_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint32x2_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
  uint64x1_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint16x4_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint16x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
  int64x1_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif
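/* Illustrative sketch (not part of the generated header): vsub_*/vsubq_* do
 * plain lane-wise subtraction, so unsigned results wrap modulo 2^bits; there
 * is no saturation (the vqsub_* intrinsics saturate instead). The function
 * name below is hypothetical.
 *
 *   static inline uint8x8_t wraparound_example(void) {
 *     uint8x8_t a = vdup_n_u8(1);
 *     uint8x8_t b = vdup_n_u8(2);
 *     return vsub_u8(a, b);   // every lane is 0xFF: 1 - 2 wraps around
 *   }
 */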

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint16x4_t __ret;
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 17));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint16x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 17));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
__ai __attribute__((target("neon"))) uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint16x4_t __ret;
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 17));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  uint32x2_t __ret;
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 18));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  uint32x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 18));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
__ai __attribute__((target("neon"))) uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  uint32x2_t __ret;
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 18));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint8x8_t __ret;
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 16));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint8x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 16));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
__ai __attribute__((target("neon"))) uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint8x8_t __ret;
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 16));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
  int16x4_t __ret;
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 1));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
  int16x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
__ai __attribute__((target("neon"))) int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
  int16x4_t __ret;
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 1));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
  int32x2_t __ret;
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
  int32x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
__ai __attribute__((target("neon"))) int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
  int32x2_t __ret;
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 2));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
  int8x8_t __ret;
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
  int8x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 0));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
__ai __attribute__((target("neon"))) int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
  int8x8_t __ret;
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vsubhn_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 0));
  return __ret;
}
#endif
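/* Illustrative sketch (not part of the generated header): vsubhn_* subtracts
 * the wide operands and keeps only the upper half of each lane, i.e. for the
 * 32-bit variants each result lane is the difference shifted right by 16.
 * The function name below is hypothetical.
 *
 *   static inline int16x4_t high_diff_example(int32x4_t a, int32x4_t b) {
 *     return vsubhn_s32(a, b);  // each lane: (int16_t)((a - b) >> 16)
 *   }
 */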

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint64x2_t __ret;
  __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint64x2_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint32x4_t __ret;
  __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint32x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
  int16x8_t __ret;
  __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
  int16x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 - vmovl_u8(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 - __noswap_vmovl_u8(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 - vmovl_u32(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 - __noswap_vmovl_u32(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 - vmovl_u16(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 - __noswap_vmovl_u16(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 - vmovl_s8(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 - __noswap_vmovl_s8(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 - vmovl_s32(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 - __noswap_vmovl_s32(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 - vmovl_s16(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 - __noswap_vmovl_s16(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
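/* vtbl1_<type>: single-register table lookup.  Each index byte in the second
 * operand selects a byte from the 8-byte table in the first operand; indices
 * outside 0..7 produce 0 in that lane.  Illustrative use (hypothetical
 * values):
 *   uint8x8_t tbl = vcreate_u8(0x0706050403020100ULL); // lanes 0..7
 *   uint8x8_t idx = vcreate_u8(0x0800000007070707ULL); // lanes 7,7,7,7,0,0,0,8
 *   uint8x8_t r   = vtbl1_u8(tbl, idx);                // {7,7,7,7,0,0,0,0}
 */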
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbl1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbl1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbl1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbl1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbl1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbl1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
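/* vtbl2_<type>: two-register table lookup.  val[0] and val[1] of the first
 * operand form a 16-byte table; indices 0..15 select from it, larger indices
 * yield 0. */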
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbl2_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p1), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbl2_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbl2_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbl2_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbl2_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbl2_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
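/* vtbl3_<type>: three-register table lookup over a 24-byte table; indices
 * >= 24 yield 0. */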
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbl3_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p1), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbl3_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev1), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbl3_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbl3_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbl3_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbl3_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
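/* vtbl4_<type>: four-register table lookup over a 32-byte table; indices
 * >= 32 yield 0. */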
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbl4_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p0.val[3]), __builtin_bit_cast(int8x8_t, __p1), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_64_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbl4_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev0.val[3]), __builtin_bit_cast(int8x8_t, __rev1), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbl4_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p0.val[3]), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_64_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbl4_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev0.val[3]), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbl4_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p0.val[3]), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_64_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbl4_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev0.val[3]), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
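/* vtbx1_<type>: table lookup with extension.  Same lookup as vtbl1, except
 * that lanes whose index is out of range keep the corresponding byte of the
 * first operand instead of being zeroed. */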
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbx1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbx1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbx1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbx1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbx1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbx1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
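/* vtbx2_<type>: two-register vtbx -- 16-byte table; out-of-range lanes keep
 * the bytes of the first operand. */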
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbx2_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p2), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbx2_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev2), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbx2_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p2), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbx2_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev2), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbx2_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p2), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbx2_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev2), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
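/* vtbx3_<type>: three-register vtbx -- 24-byte table; out-of-range lanes keep
 * the bytes of the first operand. */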
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbx3_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p2), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8x3_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbx3_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev2), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbx3_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p2), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8x3_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbx3_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev2), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbx3_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p2), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8x3_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_64_8);
|
|
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbx3_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev2), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
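/* vtbx4_<type>: four-register vtbx -- 32-byte table; out-of-range lanes keep
 * the bytes of the first operand. */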
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbx4_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p1.val[3]), __builtin_bit_cast(int8x8_t, __p2), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_64_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vtbx4_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __builtin_bit_cast(int8x8_t, __rev2), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbx4_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p1.val[3]), __builtin_bit_cast(int8x8_t, __p2), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_64_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtbx4_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __builtin_bit_cast(int8x8_t, __rev2), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbx4_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p1.val[3]), __builtin_bit_cast(int8x8_t, __p2), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_64_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_64_8);
|
|
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vtbx4_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __builtin_bit_cast(int8x8_t, __rev2), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
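/* vtrn/vtrnq_<type>: transpose interleave.  The two-vector result pairs the
 * even-numbered lanes of both inputs in val[0] and the odd-numbered lanes in
 * val[1] (a 2x2 lane transpose across the operand pair); the builtin writes
 * the pair through the &__ret out-pointer.  E.g. for hypothetical int32x2_t
 * inputs a and b:
 *   int32x2x2_t t = vtrn_s32(a, b); // t.val[0] = {a[0], b[0]}, t.val[1] = {a[1], b[1]}
 */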
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 4);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8x2_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 4);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 5);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4x2_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 5);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 36);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16x2_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 36);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 37);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8x2_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 37);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16x2_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4x2_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8x2_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16x2_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4x2_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4x2_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8x2_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8x2_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4x2_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8x2_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4x2_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8x2_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4x2_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
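/* vtst/vtstq_<type>: test bits.  Each result lane is all ones if the bitwise
 * AND of the corresponding input lanes is non-zero and all zeros otherwise;
 * the result is an unsigned mask vector of the same lane width. */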
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint16x8_t __ret;
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
  uint8x16_t __ret;
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
  uint8x16_t __ret;
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
  uint32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
  uint16x8_t __ret;
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
  uint16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint8x8_t __ret;
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint8x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint32x2_t __ret;
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint32x2_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint16x4_t __ret;
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint16x4_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
  uint8x8_t __ret;
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
  uint8x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
  uint32x2_t __ret;
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
  uint32x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
  uint16x4_t __ret;
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
  uint16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

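/* vuzp/vuzpq: de-interleave two vectors. val[0] receives the even-numbered
   lanes of the concatenated inputs and val[1] the odd-numbered lanes; the
   builtin writes the pair through the pointer passed as its first argument. */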
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly8x8x2_t __ret;
  __builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 4);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly8x8x2_t __ret;
  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 4);

  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
  poly16x4x2_t __ret;
  __builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 5);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
  poly16x4x2_t __ret;
  poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 5);

  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 36);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16x2_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 36);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 37);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8x2_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 37);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16x2_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4x2_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8x2_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16x2_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4x2_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4x2_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8x2_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8x2_t __ret;
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8x2_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2x2_t __ret;
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4x2_t __ret;
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4x2_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8x2_t __ret;
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8x2_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2x2_t __ret;
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2x2_t __ret;
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4x2_t __ret;
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4x2_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8x2_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4x2_t __ret;
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4x2_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
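/* vzip/vzipq: interleave two vectors. val[0] holds the interleaved low halves
   of the inputs and val[1] the interleaved high halves. */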
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly8x8x2_t __ret;
  __builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 4);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly8x8x2_t __ret;
  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 4);

  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4x2_t __ret;
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 5);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4x2_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 5);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 36);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16x2_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 36);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 37);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8x2_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 37);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16x2_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4x2_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8x2_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16x2_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4x2_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4x2_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8x2_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8x2_t __ret;
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8x2_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2x2_t __ret;
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4x2_t __ret;
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4x2_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8x2_t __ret;
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8x2_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2x2_t __ret;
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2x2_t __ret;
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_32);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4x2_t __ret;
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4x2_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8x2_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4x2_t __ret;
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4x2_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
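/* vqrdmlah (ARMv8.1-A RDMA): signed saturating rounding doubling multiply
   accumulate returning high half. Each lane of __p1 is multiplied by the
   matching lane of __p2, the product is doubled, and its rounded high half is
   accumulated into __p0 with saturation. The __noswap_ helpers skip the
   endian fix-up and are used by the _lane macros further below. */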
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmlahq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
  return __ret;
}
#else
__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmlahq_s32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 34));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("v8.1a,neon"))) int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmlahq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmlahq_s16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmlahq_s16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.1a,neon"))) int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmlahq_s16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 33));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmlah_s32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmlah_s32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.1a,neon"))) int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmlah_s32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 2));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmlah_s16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmlah_s16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.1a,neon"))) int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmlah_s16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 1));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
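/* The _lane forms are macros rather than inline functions so that the lane
   index can be kept a compile-time constant for the splat; on big-endian
   targets they reverse the operands once and call the __noswap_ variants so
   the lanes are not reversed twice. */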
#ifdef __LITTLE_ENDIAN__
#define vqrdmlahq_lane_s32(__p0_134, __p1_134, __p2_134, __p3_134) __extension__ ({ \
  int32x4_t __ret_134; \
  int32x4_t __s0_134 = __p0_134; \
  int32x4_t __s1_134 = __p1_134; \
  int32x2_t __s2_134 = __p2_134; \
  __ret_134 = vqrdmlahq_s32(__s0_134, __s1_134, splatq_lane_s32(__s2_134, __p3_134)); \
  __ret_134; \
})
#else
#define vqrdmlahq_lane_s32(__p0_135, __p1_135, __p2_135, __p3_135) __extension__ ({ \
  int32x4_t __ret_135; \
  int32x4_t __s0_135 = __p0_135; \
  int32x4_t __s1_135 = __p1_135; \
  int32x2_t __s2_135 = __p2_135; \
  int32x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, __lane_reverse_128_32); \
  int32x4_t __rev1_135; __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, __lane_reverse_128_32); \
  int32x2_t __rev2_135; __rev2_135 = __builtin_shufflevector(__s2_135, __s2_135, __lane_reverse_64_32); \
  __ret_135 = __noswap_vqrdmlahq_s32(__rev0_135, __rev1_135, __noswap_splatq_lane_s32(__rev2_135, __p3_135)); \
  __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, __lane_reverse_128_32); \
  __ret_135; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlahq_lane_s16(__p0_136, __p1_136, __p2_136, __p3_136) __extension__ ({ \
|
|
int16x8_t __ret_136; \
|
|
int16x8_t __s0_136 = __p0_136; \
|
|
int16x8_t __s1_136 = __p1_136; \
|
|
int16x4_t __s2_136 = __p2_136; \
|
|
__ret_136 = vqrdmlahq_s16(__s0_136, __s1_136, splatq_lane_s16(__s2_136, __p3_136)); \
|
|
__ret_136; \
|
|
})
|
|
#else
|
|
#define vqrdmlahq_lane_s16(__p0_137, __p1_137, __p2_137, __p3_137) __extension__ ({ \
|
|
int16x8_t __ret_137; \
|
|
int16x8_t __s0_137 = __p0_137; \
|
|
int16x8_t __s1_137 = __p1_137; \
|
|
int16x4_t __s2_137 = __p2_137; \
|
|
int16x8_t __rev0_137; __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, __lane_reverse_128_16); \
|
|
int16x8_t __rev1_137; __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, __lane_reverse_128_16); \
|
|
int16x4_t __rev2_137; __rev2_137 = __builtin_shufflevector(__s2_137, __s2_137, __lane_reverse_64_16); \
|
|
__ret_137 = __noswap_vqrdmlahq_s16(__rev0_137, __rev1_137, __noswap_splatq_lane_s16(__rev2_137, __p3_137)); \
|
|
__ret_137 = __builtin_shufflevector(__ret_137, __ret_137, __lane_reverse_128_16); \
|
|
__ret_137; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlah_lane_s32(__p0_138, __p1_138, __p2_138, __p3_138) __extension__ ({ \
|
|
int32x2_t __ret_138; \
|
|
int32x2_t __s0_138 = __p0_138; \
|
|
int32x2_t __s1_138 = __p1_138; \
|
|
int32x2_t __s2_138 = __p2_138; \
|
|
__ret_138 = vqrdmlah_s32(__s0_138, __s1_138, splat_lane_s32(__s2_138, __p3_138)); \
|
|
__ret_138; \
|
|
})
|
|
#else
|
|
#define vqrdmlah_lane_s32(__p0_139, __p1_139, __p2_139, __p3_139) __extension__ ({ \
|
|
int32x2_t __ret_139; \
|
|
int32x2_t __s0_139 = __p0_139; \
|
|
int32x2_t __s1_139 = __p1_139; \
|
|
int32x2_t __s2_139 = __p2_139; \
|
|
int32x2_t __rev0_139; __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, __lane_reverse_64_32); \
|
|
int32x2_t __rev1_139; __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, __lane_reverse_64_32); \
|
|
int32x2_t __rev2_139; __rev2_139 = __builtin_shufflevector(__s2_139, __s2_139, __lane_reverse_64_32); \
|
|
__ret_139 = __noswap_vqrdmlah_s32(__rev0_139, __rev1_139, __noswap_splat_lane_s32(__rev2_139, __p3_139)); \
|
|
__ret_139 = __builtin_shufflevector(__ret_139, __ret_139, __lane_reverse_64_32); \
|
|
__ret_139; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
#define vqrdmlah_lane_s16(__p0_140, __p1_140, __p2_140, __p3_140) __extension__ ({ \
  int16x4_t __ret_140; \
  int16x4_t __s0_140 = __p0_140; \
  int16x4_t __s1_140 = __p1_140; \
  int16x4_t __s2_140 = __p2_140; \
  __ret_140 = vqrdmlah_s16(__s0_140, __s1_140, splat_lane_s16(__s2_140, __p3_140)); \
  __ret_140; \
})
#else
#define vqrdmlah_lane_s16(__p0_141, __p1_141, __p2_141, __p3_141) __extension__ ({ \
  int16x4_t __ret_141; \
  int16x4_t __s0_141 = __p0_141; \
  int16x4_t __s1_141 = __p1_141; \
  int16x4_t __s2_141 = __p2_141; \
  int16x4_t __rev0_141; __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, __lane_reverse_64_16); \
  int16x4_t __rev1_141; __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, __lane_reverse_64_16); \
  int16x4_t __rev2_141; __rev2_141 = __builtin_shufflevector(__s2_141, __s2_141, __lane_reverse_64_16); \
  __ret_141 = __noswap_vqrdmlah_s16(__rev0_141, __rev1_141, __noswap_splat_lane_s16(__rev2_141, __p3_141)); \
  __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, __lane_reverse_64_16); \
  __ret_141; \
})
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmlshq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
  return __ret;
}
#else
__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmlshq_s32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 34));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("v8.1a,neon"))) int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmlshq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int16x8_t __ret;
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmlshq_s16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 33));
  return __ret;
}
#else
__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmlshq_s16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 33));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
__ai __attribute__((target("v8.1a,neon"))) int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int16x8_t __ret;
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmlshq_s16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 33));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int32x2_t __ret;
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmlsh_s32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 2));
  return __ret;
}
#else
__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int32x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmlsh_s32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
__ai __attribute__((target("v8.1a,neon"))) int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int32x2_t __ret;
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmlsh_s32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 2));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int16x4_t __ret;
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmlsh_s16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 1));
  return __ret;
}
#else
__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmlsh_s16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
__ai __attribute__((target("v8.1a,neon"))) int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int16x4_t __ret;
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmlsh_s16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 1));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vqrdmlshq_lane_s32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
  int32x4_t __ret_142; \
  int32x4_t __s0_142 = __p0_142; \
  int32x4_t __s1_142 = __p1_142; \
  int32x2_t __s2_142 = __p2_142; \
  __ret_142 = vqrdmlshq_s32(__s0_142, __s1_142, splatq_lane_s32(__s2_142, __p3_142)); \
  __ret_142; \
})
#else
#define vqrdmlshq_lane_s32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
  int32x4_t __ret_143; \
  int32x4_t __s0_143 = __p0_143; \
  int32x4_t __s1_143 = __p1_143; \
  int32x2_t __s2_143 = __p2_143; \
  int32x4_t __rev0_143; __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, __lane_reverse_128_32); \
  int32x4_t __rev1_143; __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, __lane_reverse_128_32); \
  int32x2_t __rev2_143; __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, __lane_reverse_64_32); \
  __ret_143 = __noswap_vqrdmlshq_s32(__rev0_143, __rev1_143, __noswap_splatq_lane_s32(__rev2_143, __p3_143)); \
  __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, __lane_reverse_128_32); \
  __ret_143; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vqrdmlshq_lane_s16(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
  int16x8_t __ret_144; \
  int16x8_t __s0_144 = __p0_144; \
  int16x8_t __s1_144 = __p1_144; \
  int16x4_t __s2_144 = __p2_144; \
  __ret_144 = vqrdmlshq_s16(__s0_144, __s1_144, splatq_lane_s16(__s2_144, __p3_144)); \
  __ret_144; \
})
#else
#define vqrdmlshq_lane_s16(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
  int16x8_t __ret_145; \
  int16x8_t __s0_145 = __p0_145; \
  int16x8_t __s1_145 = __p1_145; \
  int16x4_t __s2_145 = __p2_145; \
  int16x8_t __rev0_145; __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, __lane_reverse_128_16); \
  int16x8_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, __lane_reverse_128_16); \
  int16x4_t __rev2_145; __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, __lane_reverse_64_16); \
  __ret_145 = __noswap_vqrdmlshq_s16(__rev0_145, __rev1_145, __noswap_splatq_lane_s16(__rev2_145, __p3_145)); \
  __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, __lane_reverse_128_16); \
  __ret_145; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vqrdmlsh_lane_s32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
  int32x2_t __ret_146; \
  int32x2_t __s0_146 = __p0_146; \
  int32x2_t __s1_146 = __p1_146; \
  int32x2_t __s2_146 = __p2_146; \
  __ret_146 = vqrdmlsh_s32(__s0_146, __s1_146, splat_lane_s32(__s2_146, __p3_146)); \
  __ret_146; \
})
#else
#define vqrdmlsh_lane_s32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
  int32x2_t __ret_147; \
  int32x2_t __s0_147 = __p0_147; \
  int32x2_t __s1_147 = __p1_147; \
  int32x2_t __s2_147 = __p2_147; \
  int32x2_t __rev0_147; __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, __lane_reverse_64_32); \
  int32x2_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, __lane_reverse_64_32); \
  int32x2_t __rev2_147; __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, __lane_reverse_64_32); \
  __ret_147 = __noswap_vqrdmlsh_s32(__rev0_147, __rev1_147, __noswap_splat_lane_s32(__rev2_147, __p3_147)); \
  __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, __lane_reverse_64_32); \
  __ret_147; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vqrdmlsh_lane_s16(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
  int16x4_t __ret_148; \
  int16x4_t __s0_148 = __p0_148; \
  int16x4_t __s1_148 = __p1_148; \
  int16x4_t __s2_148 = __p2_148; \
  __ret_148 = vqrdmlsh_s16(__s0_148, __s1_148, splat_lane_s16(__s2_148, __p3_148)); \
  __ret_148; \
})
#else
#define vqrdmlsh_lane_s16(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
  int16x4_t __ret_149; \
  int16x4_t __s0_149 = __p0_149; \
  int16x4_t __s1_149 = __p1_149; \
  int16x4_t __s2_149 = __p2_149; \
  int16x4_t __rev0_149; __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, __lane_reverse_64_16); \
  int16x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, __lane_reverse_64_16); \
  int16x4_t __rev2_149; __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, __lane_reverse_64_16); \
  __ret_149 = __noswap_vqrdmlsh_s16(__rev0_149, __rev1_149, __noswap_splat_lane_s16(__rev2_149, __p3_149)); \
  __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, __lane_reverse_64_16); \
  __ret_149; \
})
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcadd_rot270_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
  return __ret;
}
#else
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcadd_rot270_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcadd_rot90_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
  return __ret;
}
#else
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcadd_rot90_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcaddq_rot270_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
  return __ret;
}
#else
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcaddq_rot270_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcaddq_rot90_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
  return __ret;
}
#else
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcaddq_rot90_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_lane_f16(__p0_150, __p1_150, __p2_150, __p3_150) __extension__ ({ \
|
|
float16x4_t __ret_150; \
|
|
float16x4_t __s0_150 = __p0_150; \
|
|
float16x4_t __s1_150 = __p1_150; \
|
|
float16x4_t __s2_150 = __p2_150; \
|
|
__ret_150 = vcmla_f16(__s0_150, __s1_150, __builtin_bit_cast(float16x4_t, (uint32x2_t) {vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_150), __p3_150), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_150), __p3_150)})); \
|
|
__ret_150; \
|
|
})
|
|
#else
|
|
#define vcmla_lane_f16(__p0_151, __p1_151, __p2_151, __p3_151) __extension__ ({ \
|
|
float16x4_t __ret_151; \
|
|
float16x4_t __s0_151 = __p0_151; \
|
|
float16x4_t __s1_151 = __p1_151; \
|
|
float16x4_t __s2_151 = __p2_151; \
|
|
float16x4_t __rev0_151; __rev0_151 = __builtin_shufflevector(__s0_151, __s0_151, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_151; __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, __lane_reverse_64_16); \
|
|
float16x4_t __rev2_151; __rev2_151 = __builtin_shufflevector(__s2_151, __s2_151, __lane_reverse_64_16); \
|
|
__ret_151 = __noswap_vcmla_f16(__rev0_151, __rev1_151, __builtin_bit_cast(float16x4_t, (uint32x2_t) {__noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_151), __p3_151), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_151), __p3_151)})); \
|
|
__ret_151 = __builtin_shufflevector(__ret_151, __ret_151, __lane_reverse_64_16); \
|
|
__ret_151; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_lane_f16(__p0_152, __p1_152, __p2_152, __p3_152) __extension__ ({ \
|
|
float16x8_t __ret_152; \
|
|
float16x8_t __s0_152 = __p0_152; \
|
|
float16x8_t __s1_152 = __p1_152; \
|
|
float16x4_t __s2_152 = __p2_152; \
|
|
__ret_152 = vcmlaq_f16(__s0_152, __s1_152, __builtin_bit_cast(float16x8_t, (uint32x4_t) {vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_152), __p3_152), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_152), __p3_152), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_152), __p3_152), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_152), __p3_152)})); \
|
|
__ret_152; \
|
|
})
|
|
#else
|
|
#define vcmlaq_lane_f16(__p0_153, __p1_153, __p2_153, __p3_153) __extension__ ({ \
|
|
float16x8_t __ret_153; \
|
|
float16x8_t __s0_153 = __p0_153; \
|
|
float16x8_t __s1_153 = __p1_153; \
|
|
float16x4_t __s2_153 = __p2_153; \
|
|
float16x8_t __rev0_153; __rev0_153 = __builtin_shufflevector(__s0_153, __s0_153, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_153; __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, __lane_reverse_128_16); \
|
|
float16x4_t __rev2_153; __rev2_153 = __builtin_shufflevector(__s2_153, __s2_153, __lane_reverse_64_16); \
|
|
__ret_153 = __noswap_vcmlaq_f16(__rev0_153, __rev1_153, __builtin_bit_cast(float16x8_t, (uint32x4_t) {__noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_153), __p3_153), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_153), __p3_153), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_153), __p3_153), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_153), __p3_153)})); \
|
|
__ret_153 = __builtin_shufflevector(__ret_153, __ret_153, __lane_reverse_128_16); \
|
|
__ret_153; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_laneq_f16(__p0_154, __p1_154, __p2_154, __p3_154) __extension__ ({ \
|
|
float16x4_t __ret_154; \
|
|
float16x4_t __s0_154 = __p0_154; \
|
|
float16x4_t __s1_154 = __p1_154; \
|
|
float16x8_t __s2_154 = __p2_154; \
|
|
__ret_154 = vcmla_f16(__s0_154, __s1_154, __builtin_bit_cast(float16x4_t, (uint32x2_t) {vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_154), __p3_154), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_154), __p3_154)})); \
|
|
__ret_154; \
|
|
})
|
|
#else
|
|
#define vcmla_laneq_f16(__p0_155, __p1_155, __p2_155, __p3_155) __extension__ ({ \
|
|
float16x4_t __ret_155; \
|
|
float16x4_t __s0_155 = __p0_155; \
|
|
float16x4_t __s1_155 = __p1_155; \
|
|
float16x8_t __s2_155 = __p2_155; \
|
|
float16x4_t __rev0_155; __rev0_155 = __builtin_shufflevector(__s0_155, __s0_155, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_155; __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, __lane_reverse_64_16); \
|
|
float16x8_t __rev2_155; __rev2_155 = __builtin_shufflevector(__s2_155, __s2_155, __lane_reverse_128_16); \
|
|
__ret_155 = __noswap_vcmla_f16(__rev0_155, __rev1_155, __builtin_bit_cast(float16x4_t, (uint32x2_t) {__noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_155), __p3_155), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_155), __p3_155)})); \
|
|
__ret_155 = __builtin_shufflevector(__ret_155, __ret_155, __lane_reverse_64_16); \
|
|
__ret_155; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_laneq_f16(__p0_156, __p1_156, __p2_156, __p3_156) __extension__ ({ \
|
|
float16x8_t __ret_156; \
|
|
float16x8_t __s0_156 = __p0_156; \
|
|
float16x8_t __s1_156 = __p1_156; \
|
|
float16x8_t __s2_156 = __p2_156; \
|
|
__ret_156 = vcmlaq_f16(__s0_156, __s1_156, __builtin_bit_cast(float16x8_t, (uint32x4_t) {vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_156), __p3_156), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_156), __p3_156), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_156), __p3_156), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_156), __p3_156)})); \
|
|
__ret_156; \
|
|
})
|
|
#else
|
|
#define vcmlaq_laneq_f16(__p0_157, __p1_157, __p2_157, __p3_157) __extension__ ({ \
|
|
float16x8_t __ret_157; \
|
|
float16x8_t __s0_157 = __p0_157; \
|
|
float16x8_t __s1_157 = __p1_157; \
|
|
float16x8_t __s2_157 = __p2_157; \
|
|
float16x8_t __rev0_157; __rev0_157 = __builtin_shufflevector(__s0_157, __s0_157, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_157; __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, __lane_reverse_128_16); \
|
|
float16x8_t __rev2_157; __rev2_157 = __builtin_shufflevector(__s2_157, __s2_157, __lane_reverse_128_16); \
|
|
__ret_157 = __noswap_vcmlaq_f16(__rev0_157, __rev1_157, __builtin_bit_cast(float16x8_t, (uint32x4_t) {__noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_157), __p3_157), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_157), __p3_157), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_157), __p3_157), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_157), __p3_157)})); \
|
|
__ret_157 = __builtin_shufflevector(__ret_157, __ret_157, __lane_reverse_128_16); \
|
|
__ret_157; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_rot180_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_rot180_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_rot180_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_rot180_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_rot180_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_rot180_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot180_lane_f16(__p0_158, __p1_158, __p2_158, __p3_158) __extension__ ({ \
|
|
float16x4_t __ret_158; \
|
|
float16x4_t __s0_158 = __p0_158; \
|
|
float16x4_t __s1_158 = __p1_158; \
|
|
float16x4_t __s2_158 = __p2_158; \
|
|
__ret_158 = vcmla_rot180_f16(__s0_158, __s1_158, __builtin_bit_cast(float16x4_t, (uint32x2_t) {vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_158), __p3_158), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_158), __p3_158)})); \
|
|
__ret_158; \
|
|
})
|
|
#else
|
|
#define vcmla_rot180_lane_f16(__p0_159, __p1_159, __p2_159, __p3_159) __extension__ ({ \
|
|
float16x4_t __ret_159; \
|
|
float16x4_t __s0_159 = __p0_159; \
|
|
float16x4_t __s1_159 = __p1_159; \
|
|
float16x4_t __s2_159 = __p2_159; \
|
|
float16x4_t __rev0_159; __rev0_159 = __builtin_shufflevector(__s0_159, __s0_159, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_159; __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, __lane_reverse_64_16); \
|
|
float16x4_t __rev2_159; __rev2_159 = __builtin_shufflevector(__s2_159, __s2_159, __lane_reverse_64_16); \
|
|
__ret_159 = __noswap_vcmla_rot180_f16(__rev0_159, __rev1_159, __builtin_bit_cast(float16x4_t, (uint32x2_t) {__noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_159), __p3_159), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_159), __p3_159)})); \
|
|
__ret_159 = __builtin_shufflevector(__ret_159, __ret_159, __lane_reverse_64_16); \
|
|
__ret_159; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot180_lane_f16(__p0_160, __p1_160, __p2_160, __p3_160) __extension__ ({ \
|
|
float16x8_t __ret_160; \
|
|
float16x8_t __s0_160 = __p0_160; \
|
|
float16x8_t __s1_160 = __p1_160; \
|
|
float16x4_t __s2_160 = __p2_160; \
|
|
__ret_160 = vcmlaq_rot180_f16(__s0_160, __s1_160, __builtin_bit_cast(float16x8_t, (uint32x4_t) {vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_160), __p3_160), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_160), __p3_160), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_160), __p3_160), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_160), __p3_160)})); \
|
|
__ret_160; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot180_lane_f16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \
|
|
float16x8_t __ret_161; \
|
|
float16x8_t __s0_161 = __p0_161; \
|
|
float16x8_t __s1_161 = __p1_161; \
|
|
float16x4_t __s2_161 = __p2_161; \
|
|
float16x8_t __rev0_161; __rev0_161 = __builtin_shufflevector(__s0_161, __s0_161, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_161; __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, __lane_reverse_128_16); \
|
|
float16x4_t __rev2_161; __rev2_161 = __builtin_shufflevector(__s2_161, __s2_161, __lane_reverse_64_16); \
|
|
__ret_161 = __noswap_vcmlaq_rot180_f16(__rev0_161, __rev1_161, __builtin_bit_cast(float16x8_t, (uint32x4_t) {__noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_161), __p3_161), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_161), __p3_161), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_161), __p3_161), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_161), __p3_161)})); \
|
|
__ret_161 = __builtin_shufflevector(__ret_161, __ret_161, __lane_reverse_128_16); \
|
|
__ret_161; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot180_laneq_f16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \
|
|
float16x4_t __ret_162; \
|
|
float16x4_t __s0_162 = __p0_162; \
|
|
float16x4_t __s1_162 = __p1_162; \
|
|
float16x8_t __s2_162 = __p2_162; \
|
|
__ret_162 = vcmla_rot180_f16(__s0_162, __s1_162, __builtin_bit_cast(float16x4_t, (uint32x2_t) {vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_162), __p3_162), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_162), __p3_162)})); \
|
|
__ret_162; \
|
|
})
|
|
#else
|
|
#define vcmla_rot180_laneq_f16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \
|
|
float16x4_t __ret_163; \
|
|
float16x4_t __s0_163 = __p0_163; \
|
|
float16x4_t __s1_163 = __p1_163; \
|
|
float16x8_t __s2_163 = __p2_163; \
|
|
float16x4_t __rev0_163; __rev0_163 = __builtin_shufflevector(__s0_163, __s0_163, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_163; __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, __lane_reverse_64_16); \
|
|
float16x8_t __rev2_163; __rev2_163 = __builtin_shufflevector(__s2_163, __s2_163, __lane_reverse_128_16); \
|
|
__ret_163 = __noswap_vcmla_rot180_f16(__rev0_163, __rev1_163, __builtin_bit_cast(float16x4_t, (uint32x2_t) {__noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_163), __p3_163), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_163), __p3_163)})); \
|
|
__ret_163 = __builtin_shufflevector(__ret_163, __ret_163, __lane_reverse_64_16); \
|
|
__ret_163; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot180_laneq_f16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \
|
|
float16x8_t __ret_164; \
|
|
float16x8_t __s0_164 = __p0_164; \
|
|
float16x8_t __s1_164 = __p1_164; \
|
|
float16x8_t __s2_164 = __p2_164; \
|
|
__ret_164 = vcmlaq_rot180_f16(__s0_164, __s1_164, __builtin_bit_cast(float16x8_t, (uint32x4_t) {vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_164), __p3_164), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_164), __p3_164), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_164), __p3_164), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_164), __p3_164)})); \
|
|
__ret_164; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot180_laneq_f16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \
|
|
float16x8_t __ret_165; \
|
|
float16x8_t __s0_165 = __p0_165; \
|
|
float16x8_t __s1_165 = __p1_165; \
|
|
float16x8_t __s2_165 = __p2_165; \
|
|
float16x8_t __rev0_165; __rev0_165 = __builtin_shufflevector(__s0_165, __s0_165, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_165; __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, __lane_reverse_128_16); \
|
|
float16x8_t __rev2_165; __rev2_165 = __builtin_shufflevector(__s2_165, __s2_165, __lane_reverse_128_16); \
|
|
__ret_165 = __noswap_vcmlaq_rot180_f16(__rev0_165, __rev1_165, __builtin_bit_cast(float16x8_t, (uint32x4_t) {__noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_165), __p3_165), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_165), __p3_165), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_165), __p3_165), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_165), __p3_165)})); \
|
|
__ret_165 = __builtin_shufflevector(__ret_165, __ret_165, __lane_reverse_128_16); \
|
|
__ret_165; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_rot270_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_rot270_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_rot270_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_rot270_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_rot270_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_rot270_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot270_lane_f16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \
|
|
float16x4_t __ret_166; \
|
|
float16x4_t __s0_166 = __p0_166; \
|
|
float16x4_t __s1_166 = __p1_166; \
|
|
float16x4_t __s2_166 = __p2_166; \
|
|
__ret_166 = vcmla_rot270_f16(__s0_166, __s1_166, __builtin_bit_cast(float16x4_t, (uint32x2_t) {vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_166), __p3_166), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_166), __p3_166)})); \
|
|
__ret_166; \
|
|
})
|
|
#else
|
|
#define vcmla_rot270_lane_f16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \
|
|
float16x4_t __ret_167; \
|
|
float16x4_t __s0_167 = __p0_167; \
|
|
float16x4_t __s1_167 = __p1_167; \
|
|
float16x4_t __s2_167 = __p2_167; \
|
|
float16x4_t __rev0_167; __rev0_167 = __builtin_shufflevector(__s0_167, __s0_167, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_167; __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, __lane_reverse_64_16); \
|
|
float16x4_t __rev2_167; __rev2_167 = __builtin_shufflevector(__s2_167, __s2_167, __lane_reverse_64_16); \
|
|
__ret_167 = __noswap_vcmla_rot270_f16(__rev0_167, __rev1_167, __builtin_bit_cast(float16x4_t, (uint32x2_t) {__noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_167), __p3_167), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_167), __p3_167)})); \
|
|
__ret_167 = __builtin_shufflevector(__ret_167, __ret_167, __lane_reverse_64_16); \
|
|
__ret_167; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot270_lane_f16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \
|
|
float16x8_t __ret_168; \
|
|
float16x8_t __s0_168 = __p0_168; \
|
|
float16x8_t __s1_168 = __p1_168; \
|
|
float16x4_t __s2_168 = __p2_168; \
|
|
__ret_168 = vcmlaq_rot270_f16(__s0_168, __s1_168, __builtin_bit_cast(float16x8_t, (uint32x4_t) {vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_168), __p3_168), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_168), __p3_168), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_168), __p3_168), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_168), __p3_168)})); \
|
|
__ret_168; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot270_lane_f16(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \
|
|
float16x8_t __ret_169; \
|
|
float16x8_t __s0_169 = __p0_169; \
|
|
float16x8_t __s1_169 = __p1_169; \
|
|
float16x4_t __s2_169 = __p2_169; \
|
|
float16x8_t __rev0_169; __rev0_169 = __builtin_shufflevector(__s0_169, __s0_169, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_169; __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, __lane_reverse_128_16); \
|
|
float16x4_t __rev2_169; __rev2_169 = __builtin_shufflevector(__s2_169, __s2_169, __lane_reverse_64_16); \
|
|
__ret_169 = __noswap_vcmlaq_rot270_f16(__rev0_169, __rev1_169, __builtin_bit_cast(float16x8_t, (uint32x4_t) {__noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_169), __p3_169), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_169), __p3_169), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_169), __p3_169), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_169), __p3_169)})); \
|
|
__ret_169 = __builtin_shufflevector(__ret_169, __ret_169, __lane_reverse_128_16); \
|
|
__ret_169; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot270_laneq_f16(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \
|
|
float16x4_t __ret_170; \
|
|
float16x4_t __s0_170 = __p0_170; \
|
|
float16x4_t __s1_170 = __p1_170; \
|
|
float16x8_t __s2_170 = __p2_170; \
|
|
__ret_170 = vcmla_rot270_f16(__s0_170, __s1_170, __builtin_bit_cast(float16x4_t, (uint32x2_t) {vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_170), __p3_170), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_170), __p3_170)})); \
|
|
__ret_170; \
|
|
})
|
|
#else
|
|
#define vcmla_rot270_laneq_f16(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \
|
|
float16x4_t __ret_171; \
|
|
float16x4_t __s0_171 = __p0_171; \
|
|
float16x4_t __s1_171 = __p1_171; \
|
|
float16x8_t __s2_171 = __p2_171; \
|
|
float16x4_t __rev0_171; __rev0_171 = __builtin_shufflevector(__s0_171, __s0_171, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_171; __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, __lane_reverse_64_16); \
|
|
float16x8_t __rev2_171; __rev2_171 = __builtin_shufflevector(__s2_171, __s2_171, __lane_reverse_128_16); \
|
|
__ret_171 = __noswap_vcmla_rot270_f16(__rev0_171, __rev1_171, __builtin_bit_cast(float16x4_t, (uint32x2_t) {__noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_171), __p3_171), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_171), __p3_171)})); \
|
|
__ret_171 = __builtin_shufflevector(__ret_171, __ret_171, __lane_reverse_64_16); \
|
|
__ret_171; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot270_laneq_f16(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \
|
|
float16x8_t __ret_172; \
|
|
float16x8_t __s0_172 = __p0_172; \
|
|
float16x8_t __s1_172 = __p1_172; \
|
|
float16x8_t __s2_172 = __p2_172; \
|
|
__ret_172 = vcmlaq_rot270_f16(__s0_172, __s1_172, __builtin_bit_cast(float16x8_t, (uint32x4_t) {vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_172), __p3_172), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_172), __p3_172), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_172), __p3_172), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_172), __p3_172)})); \
|
|
__ret_172; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot270_laneq_f16(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \
|
|
float16x8_t __ret_173; \
|
|
float16x8_t __s0_173 = __p0_173; \
|
|
float16x8_t __s1_173 = __p1_173; \
|
|
float16x8_t __s2_173 = __p2_173; \
|
|
float16x8_t __rev0_173; __rev0_173 = __builtin_shufflevector(__s0_173, __s0_173, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_173; __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, __lane_reverse_128_16); \
|
|
float16x8_t __rev2_173; __rev2_173 = __builtin_shufflevector(__s2_173, __s2_173, __lane_reverse_128_16); \
|
|
__ret_173 = __noswap_vcmlaq_rot270_f16(__rev0_173, __rev1_173, __builtin_bit_cast(float16x8_t, (uint32x4_t) {__noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_173), __p3_173), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_173), __p3_173), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_173), __p3_173), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_173), __p3_173)})); \
|
|
__ret_173 = __builtin_shufflevector(__ret_173, __ret_173, __lane_reverse_128_16); \
|
|
__ret_173; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_rot90_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_rot90_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcmlaq_rot90_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 40));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_rot90_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_rot90_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcmla_rot90_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 8));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot90_lane_f16(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \
|
|
float16x4_t __ret_174; \
|
|
float16x4_t __s0_174 = __p0_174; \
|
|
float16x4_t __s1_174 = __p1_174; \
|
|
float16x4_t __s2_174 = __p2_174; \
|
|
__ret_174 = vcmla_rot90_f16(__s0_174, __s1_174, __builtin_bit_cast(float16x4_t, (uint32x2_t) {vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_174), __p3_174), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_174), __p3_174)})); \
|
|
__ret_174; \
|
|
})
|
|
#else
|
|
#define vcmla_rot90_lane_f16(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \
|
|
float16x4_t __ret_175; \
|
|
float16x4_t __s0_175 = __p0_175; \
|
|
float16x4_t __s1_175 = __p1_175; \
|
|
float16x4_t __s2_175 = __p2_175; \
|
|
float16x4_t __rev0_175; __rev0_175 = __builtin_shufflevector(__s0_175, __s0_175, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_175; __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, __lane_reverse_64_16); \
|
|
float16x4_t __rev2_175; __rev2_175 = __builtin_shufflevector(__s2_175, __s2_175, __lane_reverse_64_16); \
|
|
__ret_175 = __noswap_vcmla_rot90_f16(__rev0_175, __rev1_175, __builtin_bit_cast(float16x4_t, (uint32x2_t) {__noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_175), __p3_175), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_175), __p3_175)})); \
|
|
__ret_175 = __builtin_shufflevector(__ret_175, __ret_175, __lane_reverse_64_16); \
|
|
__ret_175; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot90_lane_f16(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \
|
|
float16x8_t __ret_176; \
|
|
float16x8_t __s0_176 = __p0_176; \
|
|
float16x8_t __s1_176 = __p1_176; \
|
|
float16x4_t __s2_176 = __p2_176; \
|
|
__ret_176 = vcmlaq_rot90_f16(__s0_176, __s1_176, __builtin_bit_cast(float16x8_t, (uint32x4_t) {vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_176), __p3_176), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_176), __p3_176), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_176), __p3_176), vget_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_176), __p3_176)})); \
|
|
__ret_176; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot90_lane_f16(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \
|
|
float16x8_t __ret_177; \
|
|
float16x8_t __s0_177 = __p0_177; \
|
|
float16x8_t __s1_177 = __p1_177; \
|
|
float16x4_t __s2_177 = __p2_177; \
|
|
float16x8_t __rev0_177; __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_177; __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, __lane_reverse_128_16); \
|
|
float16x4_t __rev2_177; __rev2_177 = __builtin_shufflevector(__s2_177, __s2_177, __lane_reverse_64_16); \
|
|
__ret_177 = __noswap_vcmlaq_rot90_f16(__rev0_177, __rev1_177, __builtin_bit_cast(float16x8_t, (uint32x4_t) {__noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_177), __p3_177), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_177), __p3_177), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_177), __p3_177), __noswap_vget_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_177), __p3_177)})); \
|
|
__ret_177 = __builtin_shufflevector(__ret_177, __ret_177, __lane_reverse_128_16); \
|
|
__ret_177; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot90_laneq_f16(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \
|
|
float16x4_t __ret_178; \
|
|
float16x4_t __s0_178 = __p0_178; \
|
|
float16x4_t __s1_178 = __p1_178; \
|
|
float16x8_t __s2_178 = __p2_178; \
|
|
__ret_178 = vcmla_rot90_f16(__s0_178, __s1_178, __builtin_bit_cast(float16x4_t, (uint32x2_t) {vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_178), __p3_178), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_178), __p3_178)})); \
|
|
__ret_178; \
|
|
})
|
|
#else
|
|
#define vcmla_rot90_laneq_f16(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \
|
|
float16x4_t __ret_179; \
|
|
float16x4_t __s0_179 = __p0_179; \
|
|
float16x4_t __s1_179 = __p1_179; \
|
|
float16x8_t __s2_179 = __p2_179; \
|
|
float16x4_t __rev0_179; __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_179; __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, __lane_reverse_64_16); \
|
|
float16x8_t __rev2_179; __rev2_179 = __builtin_shufflevector(__s2_179, __s2_179, __lane_reverse_128_16); \
|
|
__ret_179 = __noswap_vcmla_rot90_f16(__rev0_179, __rev1_179, __builtin_bit_cast(float16x4_t, (uint32x2_t) {__noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_179), __p3_179), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_179), __p3_179)})); \
|
|
__ret_179 = __builtin_shufflevector(__ret_179, __ret_179, __lane_reverse_64_16); \
|
|
__ret_179; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot90_laneq_f16(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \
|
|
float16x8_t __ret_180; \
|
|
float16x8_t __s0_180 = __p0_180; \
|
|
float16x8_t __s1_180 = __p1_180; \
|
|
float16x8_t __s2_180 = __p2_180; \
|
|
__ret_180 = vcmlaq_rot90_f16(__s0_180, __s1_180, __builtin_bit_cast(float16x8_t, (uint32x4_t) {vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_180), __p3_180), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_180), __p3_180), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_180), __p3_180), vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __s2_180), __p3_180)})); \
|
|
__ret_180; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot90_laneq_f16(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \
|
|
float16x8_t __ret_181; \
|
|
float16x8_t __s0_181 = __p0_181; \
|
|
float16x8_t __s1_181 = __p1_181; \
|
|
float16x8_t __s2_181 = __p2_181; \
|
|
float16x8_t __rev0_181; __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_181; __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, __lane_reverse_128_16); \
|
|
float16x8_t __rev2_181; __rev2_181 = __builtin_shufflevector(__s2_181, __s2_181, __lane_reverse_128_16); \
|
|
__ret_181 = __noswap_vcmlaq_rot90_f16(__rev0_181, __rev1_181, __builtin_bit_cast(float16x8_t, (uint32x4_t) {__noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_181), __p3_181), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_181), __p3_181), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_181), __p3_181), __noswap_vgetq_lane_u32(__builtin_bit_cast(uint32x4_t, __rev2_181), __p3_181)})); \
|
|
__ret_181 = __builtin_shufflevector(__ret_181, __ret_181, __lane_reverse_128_16); \
|
|
__ret_181; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcadd_rot270_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcadd_rot270_f32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcadd_rot90_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcadd_rot90_f32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcaddq_rot270_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcaddq_rot270_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
float32x4_t __ret;
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcaddq_rot90_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
return __ret;
}
#else
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
float32x4_t __ret;
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcaddq_rot90_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
float32x4_t __ret;
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
return __ret;
}
#else
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
float32x4_t __ret;
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
float32x4_t __ret;
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
float32x2_t __ret;
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
return __ret;
}
#else
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
float32x2_t __ret;
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_f32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
float32x2_t __ret;
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
return __ret;
}
#endif
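/* The _lane and _laneq forms that follow are macros so the constant lane index reaches
 * vget_lane_u64 or vgetq_lane_u64 directly. They reinterpret the lane operand as 64-bit
 * elements so that each real/imaginary pair is selected as a unit, broadcast the chosen
 * pair across the destination width, and then call the plain vcmla intrinsic (or its
 * __noswap_ form on big-endian targets, after reversing the operands). */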
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_lane_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \
|
|
float32x2_t __ret_182; \
|
|
float32x2_t __s0_182 = __p0_182; \
|
|
float32x2_t __s1_182 = __p1_182; \
|
|
float32x2_t __s2_182 = __p2_182; \
|
|
__ret_182 = vcmla_f32(__s0_182, __s1_182, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_182), __p3_182)})); \
|
|
__ret_182; \
|
|
})
|
|
#else
|
|
#define vcmla_lane_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \
|
|
float32x2_t __ret_183; \
|
|
float32x2_t __s0_183 = __p0_183; \
|
|
float32x2_t __s1_183 = __p1_183; \
|
|
float32x2_t __s2_183 = __p2_183; \
|
|
float32x2_t __rev0_183; __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_183; __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, __lane_reverse_64_32); \
|
|
float32x2_t __rev2_183; __rev2_183 = __builtin_shufflevector(__s2_183, __s2_183, __lane_reverse_64_32); \
|
|
__ret_183 = __noswap_vcmla_f32(__rev0_183, __rev1_183, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_183), __p3_183)})); \
|
|
__ret_183 = __builtin_shufflevector(__ret_183, __ret_183, __lane_reverse_64_32); \
|
|
__ret_183; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_lane_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \
|
|
float32x4_t __ret_184; \
|
|
float32x4_t __s0_184 = __p0_184; \
|
|
float32x4_t __s1_184 = __p1_184; \
|
|
float32x2_t __s2_184 = __p2_184; \
|
|
__ret_184 = vcmlaq_f32(__s0_184, __s1_184, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_184), __p3_184), vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_184), __p3_184)})); \
|
|
__ret_184; \
|
|
})
|
|
#else
|
|
#define vcmlaq_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \
|
|
float32x4_t __ret_185; \
|
|
float32x4_t __s0_185 = __p0_185; \
|
|
float32x4_t __s1_185 = __p1_185; \
|
|
float32x2_t __s2_185 = __p2_185; \
|
|
float32x4_t __rev0_185; __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_185; __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, __lane_reverse_128_32); \
|
|
float32x2_t __rev2_185; __rev2_185 = __builtin_shufflevector(__s2_185, __s2_185, __lane_reverse_64_32); \
|
|
__ret_185 = __noswap_vcmlaq_f32(__rev0_185, __rev1_185, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_185), __p3_185), vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_185), __p3_185)})); \
|
|
__ret_185 = __builtin_shufflevector(__ret_185, __ret_185, __lane_reverse_128_32); \
|
|
__ret_185; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_laneq_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \
|
|
float32x2_t __ret_186; \
|
|
float32x2_t __s0_186 = __p0_186; \
|
|
float32x2_t __s1_186 = __p1_186; \
|
|
float32x4_t __s2_186 = __p2_186; \
|
|
__ret_186 = vcmla_f32(__s0_186, __s1_186, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_186), __p3_186)})); \
|
|
__ret_186; \
|
|
})
|
|
#else
|
|
#define vcmla_laneq_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \
|
|
float32x2_t __ret_187; \
|
|
float32x2_t __s0_187 = __p0_187; \
|
|
float32x2_t __s1_187 = __p1_187; \
|
|
float32x4_t __s2_187 = __p2_187; \
|
|
float32x2_t __rev0_187; __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_187; __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, __lane_reverse_64_32); \
|
|
float32x4_t __rev2_187; __rev2_187 = __builtin_shufflevector(__s2_187, __s2_187, __lane_reverse_128_32); \
|
|
__ret_187 = __noswap_vcmla_f32(__rev0_187, __rev1_187, __builtin_bit_cast(float32x2_t, (uint64x1_t) {__noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_187), __p3_187)})); \
|
|
__ret_187 = __builtin_shufflevector(__ret_187, __ret_187, __lane_reverse_64_32); \
|
|
__ret_187; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_laneq_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \
|
|
float32x4_t __ret_188; \
|
|
float32x4_t __s0_188 = __p0_188; \
|
|
float32x4_t __s1_188 = __p1_188; \
|
|
float32x4_t __s2_188 = __p2_188; \
|
|
__ret_188 = vcmlaq_f32(__s0_188, __s1_188, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_188), __p3_188), vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_188), __p3_188)})); \
|
|
__ret_188; \
|
|
})
|
|
#else
|
|
#define vcmlaq_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \
|
|
float32x4_t __ret_189; \
|
|
float32x4_t __s0_189 = __p0_189; \
|
|
float32x4_t __s1_189 = __p1_189; \
|
|
float32x4_t __s2_189 = __p2_189; \
|
|
float32x4_t __rev0_189; __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_189; __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, __lane_reverse_128_32); \
|
|
float32x4_t __rev2_189; __rev2_189 = __builtin_shufflevector(__s2_189, __s2_189, __lane_reverse_128_32); \
|
|
__ret_189 = __noswap_vcmlaq_f32(__rev0_189, __rev1_189, __builtin_bit_cast(float32x4_t, (uint64x2_t) {__noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_189), __p3_189), __noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_189), __p3_189)})); \
|
|
__ret_189 = __builtin_shufflevector(__ret_189, __ret_189, __lane_reverse_128_32); \
|
|
__ret_189; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_rot180_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_rot180_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_rot180_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_rot180_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_rot180_f32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_rot180_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot180_lane_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \
|
|
float32x2_t __ret_190; \
|
|
float32x2_t __s0_190 = __p0_190; \
|
|
float32x2_t __s1_190 = __p1_190; \
|
|
float32x2_t __s2_190 = __p2_190; \
|
|
__ret_190 = vcmla_rot180_f32(__s0_190, __s1_190, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_190), __p3_190)})); \
|
|
__ret_190; \
|
|
})
|
|
#else
|
|
#define vcmla_rot180_lane_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \
|
|
float32x2_t __ret_191; \
|
|
float32x2_t __s0_191 = __p0_191; \
|
|
float32x2_t __s1_191 = __p1_191; \
|
|
float32x2_t __s2_191 = __p2_191; \
|
|
float32x2_t __rev0_191; __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_191; __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, __lane_reverse_64_32); \
|
|
float32x2_t __rev2_191; __rev2_191 = __builtin_shufflevector(__s2_191, __s2_191, __lane_reverse_64_32); \
|
|
__ret_191 = __noswap_vcmla_rot180_f32(__rev0_191, __rev1_191, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_191), __p3_191)})); \
|
|
__ret_191 = __builtin_shufflevector(__ret_191, __ret_191, __lane_reverse_64_32); \
|
|
__ret_191; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot180_lane_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \
|
|
float32x4_t __ret_192; \
|
|
float32x4_t __s0_192 = __p0_192; \
|
|
float32x4_t __s1_192 = __p1_192; \
|
|
float32x2_t __s2_192 = __p2_192; \
|
|
__ret_192 = vcmlaq_rot180_f32(__s0_192, __s1_192, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_192), __p3_192), vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_192), __p3_192)})); \
|
|
__ret_192; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot180_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \
|
|
float32x4_t __ret_193; \
|
|
float32x4_t __s0_193 = __p0_193; \
|
|
float32x4_t __s1_193 = __p1_193; \
|
|
float32x2_t __s2_193 = __p2_193; \
|
|
float32x4_t __rev0_193; __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_193; __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, __lane_reverse_128_32); \
|
|
float32x2_t __rev2_193; __rev2_193 = __builtin_shufflevector(__s2_193, __s2_193, __lane_reverse_64_32); \
|
|
__ret_193 = __noswap_vcmlaq_rot180_f32(__rev0_193, __rev1_193, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_193), __p3_193), vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_193), __p3_193)})); \
|
|
__ret_193 = __builtin_shufflevector(__ret_193, __ret_193, __lane_reverse_128_32); \
|
|
__ret_193; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot180_laneq_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \
|
|
float32x2_t __ret_194; \
|
|
float32x2_t __s0_194 = __p0_194; \
|
|
float32x2_t __s1_194 = __p1_194; \
|
|
float32x4_t __s2_194 = __p2_194; \
|
|
__ret_194 = vcmla_rot180_f32(__s0_194, __s1_194, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_194), __p3_194)})); \
|
|
__ret_194; \
|
|
})
|
|
#else
|
|
#define vcmla_rot180_laneq_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \
|
|
float32x2_t __ret_195; \
|
|
float32x2_t __s0_195 = __p0_195; \
|
|
float32x2_t __s1_195 = __p1_195; \
|
|
float32x4_t __s2_195 = __p2_195; \
|
|
float32x2_t __rev0_195; __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_195; __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, __lane_reverse_64_32); \
|
|
float32x4_t __rev2_195; __rev2_195 = __builtin_shufflevector(__s2_195, __s2_195, __lane_reverse_128_32); \
|
|
__ret_195 = __noswap_vcmla_rot180_f32(__rev0_195, __rev1_195, __builtin_bit_cast(float32x2_t, (uint64x1_t) {__noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_195), __p3_195)})); \
|
|
__ret_195 = __builtin_shufflevector(__ret_195, __ret_195, __lane_reverse_64_32); \
|
|
__ret_195; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot180_laneq_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \
|
|
float32x4_t __ret_196; \
|
|
float32x4_t __s0_196 = __p0_196; \
|
|
float32x4_t __s1_196 = __p1_196; \
|
|
float32x4_t __s2_196 = __p2_196; \
|
|
__ret_196 = vcmlaq_rot180_f32(__s0_196, __s1_196, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_196), __p3_196), vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_196), __p3_196)})); \
|
|
__ret_196; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot180_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
|
|
float32x4_t __ret_197; \
|
|
float32x4_t __s0_197 = __p0_197; \
|
|
float32x4_t __s1_197 = __p1_197; \
|
|
float32x4_t __s2_197 = __p2_197; \
|
|
float32x4_t __rev0_197; __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_197; __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, __lane_reverse_128_32); \
|
|
float32x4_t __rev2_197; __rev2_197 = __builtin_shufflevector(__s2_197, __s2_197, __lane_reverse_128_32); \
|
|
__ret_197 = __noswap_vcmlaq_rot180_f32(__rev0_197, __rev1_197, __builtin_bit_cast(float32x4_t, (uint64x2_t) {__noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_197), __p3_197), __noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_197), __p3_197)})); \
|
|
__ret_197 = __builtin_shufflevector(__ret_197, __ret_197, __lane_reverse_128_32); \
|
|
__ret_197; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_rot270_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_rot270_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_rot270_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_rot270_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_rot270_f32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_rot270_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot270_lane_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
|
|
float32x2_t __ret_198; \
|
|
float32x2_t __s0_198 = __p0_198; \
|
|
float32x2_t __s1_198 = __p1_198; \
|
|
float32x2_t __s2_198 = __p2_198; \
|
|
__ret_198 = vcmla_rot270_f32(__s0_198, __s1_198, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_198), __p3_198)})); \
|
|
__ret_198; \
|
|
})
|
|
#else
|
|
#define vcmla_rot270_lane_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
|
|
float32x2_t __ret_199; \
|
|
float32x2_t __s0_199 = __p0_199; \
|
|
float32x2_t __s1_199 = __p1_199; \
|
|
float32x2_t __s2_199 = __p2_199; \
|
|
float32x2_t __rev0_199; __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_199; __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, __lane_reverse_64_32); \
|
|
float32x2_t __rev2_199; __rev2_199 = __builtin_shufflevector(__s2_199, __s2_199, __lane_reverse_64_32); \
|
|
__ret_199 = __noswap_vcmla_rot270_f32(__rev0_199, __rev1_199, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_199), __p3_199)})); \
|
|
__ret_199 = __builtin_shufflevector(__ret_199, __ret_199, __lane_reverse_64_32); \
|
|
__ret_199; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot270_lane_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
|
|
float32x4_t __ret_200; \
|
|
float32x4_t __s0_200 = __p0_200; \
|
|
float32x4_t __s1_200 = __p1_200; \
|
|
float32x2_t __s2_200 = __p2_200; \
|
|
__ret_200 = vcmlaq_rot270_f32(__s0_200, __s1_200, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_200), __p3_200), vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_200), __p3_200)})); \
|
|
__ret_200; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot270_lane_f32(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \
|
|
float32x4_t __ret_201; \
|
|
float32x4_t __s0_201 = __p0_201; \
|
|
float32x4_t __s1_201 = __p1_201; \
|
|
float32x2_t __s2_201 = __p2_201; \
|
|
float32x4_t __rev0_201; __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_201; __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, __lane_reverse_128_32); \
|
|
float32x2_t __rev2_201; __rev2_201 = __builtin_shufflevector(__s2_201, __s2_201, __lane_reverse_64_32); \
|
|
__ret_201 = __noswap_vcmlaq_rot270_f32(__rev0_201, __rev1_201, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_201), __p3_201), vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_201), __p3_201)})); \
|
|
__ret_201 = __builtin_shufflevector(__ret_201, __ret_201, __lane_reverse_128_32); \
|
|
__ret_201; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot270_laneq_f32(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \
|
|
float32x2_t __ret_202; \
|
|
float32x2_t __s0_202 = __p0_202; \
|
|
float32x2_t __s1_202 = __p1_202; \
|
|
float32x4_t __s2_202 = __p2_202; \
|
|
__ret_202 = vcmla_rot270_f32(__s0_202, __s1_202, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_202), __p3_202)})); \
|
|
__ret_202; \
|
|
})
|
|
#else
|
|
#define vcmla_rot270_laneq_f32(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \
|
|
float32x2_t __ret_203; \
|
|
float32x2_t __s0_203 = __p0_203; \
|
|
float32x2_t __s1_203 = __p1_203; \
|
|
float32x4_t __s2_203 = __p2_203; \
|
|
float32x2_t __rev0_203; __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_203; __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, __lane_reverse_64_32); \
|
|
float32x4_t __rev2_203; __rev2_203 = __builtin_shufflevector(__s2_203, __s2_203, __lane_reverse_128_32); \
|
|
__ret_203 = __noswap_vcmla_rot270_f32(__rev0_203, __rev1_203, __builtin_bit_cast(float32x2_t, (uint64x1_t) {__noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_203), __p3_203)})); \
|
|
__ret_203 = __builtin_shufflevector(__ret_203, __ret_203, __lane_reverse_64_32); \
|
|
__ret_203; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot270_laneq_f32(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \
|
|
float32x4_t __ret_204; \
|
|
float32x4_t __s0_204 = __p0_204; \
|
|
float32x4_t __s1_204 = __p1_204; \
|
|
float32x4_t __s2_204 = __p2_204; \
|
|
__ret_204 = vcmlaq_rot270_f32(__s0_204, __s1_204, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_204), __p3_204), vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_204), __p3_204)})); \
|
|
__ret_204; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot270_laneq_f32(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \
|
|
float32x4_t __ret_205; \
|
|
float32x4_t __s0_205 = __p0_205; \
|
|
float32x4_t __s1_205 = __p1_205; \
|
|
float32x4_t __s2_205 = __p2_205; \
|
|
float32x4_t __rev0_205; __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_205; __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, __lane_reverse_128_32); \
|
|
float32x4_t __rev2_205; __rev2_205 = __builtin_shufflevector(__s2_205, __s2_205, __lane_reverse_128_32); \
|
|
__ret_205 = __noswap_vcmlaq_rot270_f32(__rev0_205, __rev1_205, __builtin_bit_cast(float32x4_t, (uint64x2_t) {__noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_205), __p3_205), __noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_205), __p3_205)})); \
|
|
__ret_205 = __builtin_shufflevector(__ret_205, __ret_205, __lane_reverse_128_32); \
|
|
__ret_205; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_rot90_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_rot90_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcmlaq_rot90_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_rot90_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_rot90_f32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcmla_rot90_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot90_lane_f32(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \
|
|
float32x2_t __ret_206; \
|
|
float32x2_t __s0_206 = __p0_206; \
|
|
float32x2_t __s1_206 = __p1_206; \
|
|
float32x2_t __s2_206 = __p2_206; \
|
|
__ret_206 = vcmla_rot90_f32(__s0_206, __s1_206, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_206), __p3_206)})); \
|
|
__ret_206; \
|
|
})
|
|
#else
|
|
#define vcmla_rot90_lane_f32(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \
|
|
float32x2_t __ret_207; \
|
|
float32x2_t __s0_207 = __p0_207; \
|
|
float32x2_t __s1_207 = __p1_207; \
|
|
float32x2_t __s2_207 = __p2_207; \
|
|
float32x2_t __rev0_207; __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_207; __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, __lane_reverse_64_32); \
|
|
float32x2_t __rev2_207; __rev2_207 = __builtin_shufflevector(__s2_207, __s2_207, __lane_reverse_64_32); \
|
|
__ret_207 = __noswap_vcmla_rot90_f32(__rev0_207, __rev1_207, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_207), __p3_207)})); \
|
|
__ret_207 = __builtin_shufflevector(__ret_207, __ret_207, __lane_reverse_64_32); \
|
|
__ret_207; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot90_lane_f32(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \
|
|
float32x4_t __ret_208; \
|
|
float32x4_t __s0_208 = __p0_208; \
|
|
float32x4_t __s1_208 = __p1_208; \
|
|
float32x2_t __s2_208 = __p2_208; \
|
|
__ret_208 = vcmlaq_rot90_f32(__s0_208, __s1_208, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_208), __p3_208), vget_lane_u64(__builtin_bit_cast(uint64x1_t, __s2_208), __p3_208)})); \
|
|
__ret_208; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot90_lane_f32(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \
|
|
float32x4_t __ret_209; \
|
|
float32x4_t __s0_209 = __p0_209; \
|
|
float32x4_t __s1_209 = __p1_209; \
|
|
float32x2_t __s2_209 = __p2_209; \
|
|
float32x4_t __rev0_209; __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_209; __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, __lane_reverse_128_32); \
|
|
float32x2_t __rev2_209; __rev2_209 = __builtin_shufflevector(__s2_209, __s2_209, __lane_reverse_64_32); \
|
|
__ret_209 = __noswap_vcmlaq_rot90_f32(__rev0_209, __rev1_209, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_209), __p3_209), vget_lane_u64(__builtin_bit_cast(uint64x1_t, __rev2_209), __p3_209)})); \
|
|
__ret_209 = __builtin_shufflevector(__ret_209, __ret_209, __lane_reverse_128_32); \
|
|
__ret_209; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmla_rot90_laneq_f32(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \
|
|
float32x2_t __ret_210; \
|
|
float32x2_t __s0_210 = __p0_210; \
|
|
float32x2_t __s1_210 = __p1_210; \
|
|
float32x4_t __s2_210 = __p2_210; \
|
|
__ret_210 = vcmla_rot90_f32(__s0_210, __s1_210, __builtin_bit_cast(float32x2_t, (uint64x1_t) {vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_210), __p3_210)})); \
|
|
__ret_210; \
|
|
})
|
|
#else
|
|
#define vcmla_rot90_laneq_f32(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \
|
|
float32x2_t __ret_211; \
|
|
float32x2_t __s0_211 = __p0_211; \
|
|
float32x2_t __s1_211 = __p1_211; \
|
|
float32x4_t __s2_211 = __p2_211; \
|
|
float32x2_t __rev0_211; __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_211; __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, __lane_reverse_64_32); \
|
|
float32x4_t __rev2_211; __rev2_211 = __builtin_shufflevector(__s2_211, __s2_211, __lane_reverse_128_32); \
|
|
__ret_211 = __noswap_vcmla_rot90_f32(__rev0_211, __rev1_211, __builtin_bit_cast(float32x2_t, (uint64x1_t) {__noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_211), __p3_211)})); \
|
|
__ret_211 = __builtin_shufflevector(__ret_211, __ret_211, __lane_reverse_64_32); \
|
|
__ret_211; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcmlaq_rot90_laneq_f32(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \
|
|
float32x4_t __ret_212; \
|
|
float32x4_t __s0_212 = __p0_212; \
|
|
float32x4_t __s1_212 = __p1_212; \
|
|
float32x4_t __s2_212 = __p2_212; \
|
|
__ret_212 = vcmlaq_rot90_f32(__s0_212, __s1_212, __builtin_bit_cast(float32x4_t, (uint64x2_t) {vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_212), __p3_212), vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __s2_212), __p3_212)})); \
|
|
__ret_212; \
|
|
})
|
|
#else
|
|
#define vcmlaq_rot90_laneq_f32(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
|
|
float32x4_t __ret_213; \
|
|
float32x4_t __s0_213 = __p0_213; \
|
|
float32x4_t __s1_213 = __p1_213; \
|
|
float32x4_t __s2_213 = __p2_213; \
|
|
float32x4_t __rev0_213; __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_213; __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, __lane_reverse_128_32); \
|
|
float32x4_t __rev2_213; __rev2_213 = __builtin_shufflevector(__s2_213, __s2_213, __lane_reverse_128_32); \
|
|
__ret_213 = __noswap_vcmlaq_rot90_f32(__rev0_213, __rev1_213, __builtin_bit_cast(float32x4_t, (uint64x2_t) {__noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_213), __p3_213), __noswap_vgetq_lane_u64(__builtin_bit_cast(uint64x2_t, __rev2_213), __p3_213)})); \
|
|
__ret_213 = __builtin_shufflevector(__ret_213, __ret_213, __lane_reverse_128_32); \
|
|
__ret_213; \
|
|
})
|
|
#endif
#if !defined(__aarch64__) && !defined(__arm64ec__)
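/* Everything in this block is compiled only for 32-bit Arm (AArch32) targets: the bf16
 * conversions built on __builtin_neon___a32_vcvt_bf16_f32, the bfloat16 vreinterpret
 * casts, and the vqdmulh/vqrdmulh lane macros and further vreinterpret casts. */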
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
bfloat16x4_t __ret;
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon___a32_vcvt_bf16_f32(__builtin_bit_cast(int8x16_t, __p0), 11));
return __ret;
}
#else
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
bfloat16x4_t __ret;
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon___a32_vcvt_bf16_f32(__builtin_bit_cast(int8x16_t, __rev0), 11));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
bfloat16x4_t __ret;
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon___a32_vcvt_bf16_f32(__builtin_bit_cast(int8x16_t, __p0), 11));
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
bfloat16x4_t __ret;
__ret = __a32_vcvt_bf16_f32(__p0);
return __ret;
}
#else
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
bfloat16x4_t __ret;
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __noswap___a32_vcvt_bf16_f32(__rev0);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
bfloat16x8_t __ret;
__ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0));
return __ret;
}
#else
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
bfloat16x8_t __ret;
bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
__ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
bfloat16x8_t __ret;
__ret = vcombine_bf16(__builtin_bit_cast(bfloat16x4_t, 0ULL), __a32_vcvt_bf16_f32(__p0));
return __ret;
}
#else
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
bfloat16x8_t __ret;
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __noswap_vcombine_bf16(__builtin_bit_cast(bfloat16x4_t, 0ULL), __noswap___a32_vcvt_bf16_f32(__rev0));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif
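/* vreinterpret casts between bfloat16 vectors and the other 64-bit and 128-bit vector
 * types: pure bit-pattern reinterpretations implemented as __builtin_bit_cast, with no
 * data movement and no endian-dependent variants. */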
__ai __attribute__((target("bf16,neon"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("bf16,neon"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
poly64x1_t __ret;
__ret = __builtin_bit_cast(poly64x1_t, __p0);
return __ret;
}
__ai __attribute__((target("bf16,neon"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
poly16x4_t __ret;
__ret = __builtin_bit_cast(poly16x4_t, __p0);
return __ret;
}
|
|
__ai __attribute__((target("bf16,neon"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
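/* Saturating doubling multiply-high by a single lane. Each macro splats the selected lane
 * of the second operand (splat_lane or splatq_lane) and calls the corresponding
 * whole-vector vqdmulh or vqrdmulh intrinsic; the big-endian variants reverse lanes
 * around the __noswap_ helpers as above. */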
#ifdef __LITTLE_ENDIAN__
#define vqdmulhq_lane_s32(__p0_214, __p1_214, __p2_214) __extension__ ({ \
int32x4_t __ret_214; \
int32x4_t __s0_214 = __p0_214; \
int32x2_t __s1_214 = __p1_214; \
__ret_214 = vqdmulhq_s32(__s0_214, splatq_lane_s32(__s1_214, __p2_214)); \
__ret_214; \
})
#else
#define vqdmulhq_lane_s32(__p0_215, __p1_215, __p2_215) __extension__ ({ \
int32x4_t __ret_215; \
int32x4_t __s0_215 = __p0_215; \
int32x2_t __s1_215 = __p1_215; \
int32x4_t __rev0_215; __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, __lane_reverse_128_32); \
int32x2_t __rev1_215; __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, __lane_reverse_64_32); \
__ret_215 = __noswap_vqdmulhq_s32(__rev0_215, __noswap_splatq_lane_s32(__rev1_215, __p2_215)); \
__ret_215 = __builtin_shufflevector(__ret_215, __ret_215, __lane_reverse_128_32); \
__ret_215; \
})
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulhq_lane_s16(__p0_216, __p1_216, __p2_216) __extension__ ({ \
|
|
int16x8_t __ret_216; \
|
|
int16x8_t __s0_216 = __p0_216; \
|
|
int16x4_t __s1_216 = __p1_216; \
|
|
__ret_216 = vqdmulhq_s16(__s0_216, splatq_lane_s16(__s1_216, __p2_216)); \
|
|
__ret_216; \
|
|
})
|
|
#else
|
|
#define vqdmulhq_lane_s16(__p0_217, __p1_217, __p2_217) __extension__ ({ \
|
|
int16x8_t __ret_217; \
|
|
int16x8_t __s0_217 = __p0_217; \
|
|
int16x4_t __s1_217 = __p1_217; \
|
|
int16x8_t __rev0_217; __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, __lane_reverse_128_16); \
|
|
int16x4_t __rev1_217; __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, __lane_reverse_64_16); \
|
|
__ret_217 = __noswap_vqdmulhq_s16(__rev0_217, __noswap_splatq_lane_s16(__rev1_217, __p2_217)); \
|
|
__ret_217 = __builtin_shufflevector(__ret_217, __ret_217, __lane_reverse_128_16); \
|
|
__ret_217; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulh_lane_s32(__p0_218, __p1_218, __p2_218) __extension__ ({ \
|
|
int32x2_t __ret_218; \
|
|
int32x2_t __s0_218 = __p0_218; \
|
|
int32x2_t __s1_218 = __p1_218; \
|
|
__ret_218 = vqdmulh_s32(__s0_218, splat_lane_s32(__s1_218, __p2_218)); \
|
|
__ret_218; \
|
|
})
|
|
#else
|
|
#define vqdmulh_lane_s32(__p0_219, __p1_219, __p2_219) __extension__ ({ \
|
|
int32x2_t __ret_219; \
|
|
int32x2_t __s0_219 = __p0_219; \
|
|
int32x2_t __s1_219 = __p1_219; \
|
|
int32x2_t __rev0_219; __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, __lane_reverse_64_32); \
|
|
int32x2_t __rev1_219; __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, __lane_reverse_64_32); \
|
|
__ret_219 = __noswap_vqdmulh_s32(__rev0_219, __noswap_splat_lane_s32(__rev1_219, __p2_219)); \
|
|
__ret_219 = __builtin_shufflevector(__ret_219, __ret_219, __lane_reverse_64_32); \
|
|
__ret_219; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulh_lane_s16(__p0_220, __p1_220, __p2_220) __extension__ ({ \
|
|
int16x4_t __ret_220; \
|
|
int16x4_t __s0_220 = __p0_220; \
|
|
int16x4_t __s1_220 = __p1_220; \
|
|
__ret_220 = vqdmulh_s16(__s0_220, splat_lane_s16(__s1_220, __p2_220)); \
|
|
__ret_220; \
|
|
})
|
|
#else
|
|
#define vqdmulh_lane_s16(__p0_221, __p1_221, __p2_221) __extension__ ({ \
|
|
int16x4_t __ret_221; \
|
|
int16x4_t __s0_221 = __p0_221; \
|
|
int16x4_t __s1_221 = __p1_221; \
|
|
int16x4_t __rev0_221; __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, __lane_reverse_64_16); \
|
|
int16x4_t __rev1_221; __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, __lane_reverse_64_16); \
|
|
__ret_221 = __noswap_vqdmulh_s16(__rev0_221, __noswap_splat_lane_s16(__rev1_221, __p2_221)); \
|
|
__ret_221 = __builtin_shufflevector(__ret_221, __ret_221, __lane_reverse_64_16); \
|
|
__ret_221; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulhq_lane_s32(__p0_222, __p1_222, __p2_222) __extension__ ({ \
|
|
int32x4_t __ret_222; \
|
|
int32x4_t __s0_222 = __p0_222; \
|
|
int32x2_t __s1_222 = __p1_222; \
|
|
__ret_222 = vqrdmulhq_s32(__s0_222, splatq_lane_s32(__s1_222, __p2_222)); \
|
|
__ret_222; \
|
|
})
|
|
#else
|
|
#define vqrdmulhq_lane_s32(__p0_223, __p1_223, __p2_223) __extension__ ({ \
|
|
int32x4_t __ret_223; \
|
|
int32x4_t __s0_223 = __p0_223; \
|
|
int32x2_t __s1_223 = __p1_223; \
|
|
int32x4_t __rev0_223; __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, __lane_reverse_128_32); \
|
|
int32x2_t __rev1_223; __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, __lane_reverse_64_32); \
|
|
__ret_223 = __noswap_vqrdmulhq_s32(__rev0_223, __noswap_splatq_lane_s32(__rev1_223, __p2_223)); \
|
|
__ret_223 = __builtin_shufflevector(__ret_223, __ret_223, __lane_reverse_128_32); \
|
|
__ret_223; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulhq_lane_s16(__p0_224, __p1_224, __p2_224) __extension__ ({ \
|
|
int16x8_t __ret_224; \
|
|
int16x8_t __s0_224 = __p0_224; \
|
|
int16x4_t __s1_224 = __p1_224; \
|
|
__ret_224 = vqrdmulhq_s16(__s0_224, splatq_lane_s16(__s1_224, __p2_224)); \
|
|
__ret_224; \
|
|
})
|
|
#else
|
|
#define vqrdmulhq_lane_s16(__p0_225, __p1_225, __p2_225) __extension__ ({ \
|
|
int16x8_t __ret_225; \
|
|
int16x8_t __s0_225 = __p0_225; \
|
|
int16x4_t __s1_225 = __p1_225; \
|
|
int16x8_t __rev0_225; __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, __lane_reverse_128_16); \
|
|
int16x4_t __rev1_225; __rev1_225 = __builtin_shufflevector(__s1_225, __s1_225, __lane_reverse_64_16); \
|
|
__ret_225 = __noswap_vqrdmulhq_s16(__rev0_225, __noswap_splatq_lane_s16(__rev1_225, __p2_225)); \
|
|
__ret_225 = __builtin_shufflevector(__ret_225, __ret_225, __lane_reverse_128_16); \
|
|
__ret_225; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulh_lane_s32(__p0_226, __p1_226, __p2_226) __extension__ ({ \
|
|
int32x2_t __ret_226; \
|
|
int32x2_t __s0_226 = __p0_226; \
|
|
int32x2_t __s1_226 = __p1_226; \
|
|
__ret_226 = vqrdmulh_s32(__s0_226, splat_lane_s32(__s1_226, __p2_226)); \
|
|
__ret_226; \
|
|
})
|
|
#else
|
|
#define vqrdmulh_lane_s32(__p0_227, __p1_227, __p2_227) __extension__ ({ \
|
|
int32x2_t __ret_227; \
|
|
int32x2_t __s0_227 = __p0_227; \
|
|
int32x2_t __s1_227 = __p1_227; \
|
|
int32x2_t __rev0_227; __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, __lane_reverse_64_32); \
|
|
int32x2_t __rev1_227; __rev1_227 = __builtin_shufflevector(__s1_227, __s1_227, __lane_reverse_64_32); \
|
|
__ret_227 = __noswap_vqrdmulh_s32(__rev0_227, __noswap_splat_lane_s32(__rev1_227, __p2_227)); \
|
|
__ret_227 = __builtin_shufflevector(__ret_227, __ret_227, __lane_reverse_64_32); \
|
|
__ret_227; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulh_lane_s16(__p0_228, __p1_228, __p2_228) __extension__ ({ \
|
|
int16x4_t __ret_228; \
|
|
int16x4_t __s0_228 = __p0_228; \
|
|
int16x4_t __s1_228 = __p1_228; \
|
|
__ret_228 = vqrdmulh_s16(__s0_228, splat_lane_s16(__s1_228, __p2_228)); \
|
|
__ret_228; \
|
|
})
|
|
#else
|
|
#define vqrdmulh_lane_s16(__p0_229, __p1_229, __p2_229) __extension__ ({ \
|
|
int16x4_t __ret_229; \
|
|
int16x4_t __s0_229 = __p0_229; \
|
|
int16x4_t __s1_229 = __p1_229; \
|
|
int16x4_t __rev0_229; __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, __lane_reverse_64_16); \
|
|
int16x4_t __rev1_229; __rev1_229 = __builtin_shufflevector(__s1_229, __s1_229, __lane_reverse_64_16); \
|
|
__ret_229 = __noswap_vqrdmulh_s16(__rev0_229, __noswap_splat_lane_s16(__rev1_229, __p2_229)); \
|
|
__ret_229 = __builtin_shufflevector(__ret_229, __ret_229, __lane_reverse_64_16); \
|
|
__ret_229; \
|
|
})
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
#endif
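
/*
 * Usage sketch (illustrative only, not part of the generated header): the
 * vreinterpret[q]_* functions above reinterpret the bits of one vector type
 * as another of the same overall width; no value conversion is performed.
 * The helper function name below is hypothetical.
 *
 *   uint32x4_t float_bits(float32x4_t v) {
 *     return vreinterpretq_u32_f32(v);  // raw IEEE-754 bit patterns
 *   }
 */
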
#if (__ARM_FP & 2)
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float16x4_t vcvt_f16_f32(float32x4_t __p0) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_f16_f32(__builtin_bit_cast(int8x16_t, __p0), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float16x4_t vcvt_f16_f32(float32x4_t __p0) {
  float16x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_f16_f32(__builtin_bit_cast(int8x16_t, __rev0), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
__ai __attribute__((target("neon"))) float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vcvt_f16_f32(__builtin_bit_cast(int8x16_t, __p0), 41));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vcvt_f32_f16(float16x4_t __p0) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvt_f32_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vcvt_f32_f16(float16x4_t __p0) {
  float32x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvt_f32_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("neon"))) float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vcvt_f32_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
  return __ret;
}
#endif
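
/*
 * Usage sketch (illustrative only, not part of the generated header):
 * vcvt_f16_f32 narrows four single-precision lanes to half precision and
 * vcvt_f32_f16 widens them back, so the round trip below is lossy. It
 * assumes half-precision conversion support (__ARM_FP & 2). The helper
 * name is hypothetical.
 *
 *   float32x4_t round_trip_f16(float32x4_t v) {
 *     float16x4_t h = vcvt_f16_f32(v);
 *     return vcvt_f32_f16(h);
 *   }
 */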

#ifdef __LITTLE_ENDIAN__
#define vld1q_f16(__p0) __extension__ ({ \
  float16x8_t __ret; \
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vld1q_v(__p0, 40)); \
  __ret; \
})
#else
#define vld1q_f16(__p0) __extension__ ({ \
  float16x8_t __ret; \
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vld1q_v(__p0, 40)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld1_f16(__p0) __extension__ ({ \
  float16x4_t __ret; \
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vld1_v(__p0, 8)); \
  __ret; \
})
#else
#define vld1_f16(__p0) __extension__ ({ \
  float16x4_t __ret; \
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vld1_v(__p0, 8)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
  __ret; \
})
#endif
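
/*
 * Usage sketch (illustrative only, not part of the generated header):
 * vld1q_f16 and vld1_f16 load 8 or 4 contiguous half-precision values from
 * memory into a single vector register. The helper name below is
 * hypothetical.
 *
 *   float16x8_t load8_f16(const float16_t *p) {
 *     return vld1q_f16(p);
 *   }
 */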
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_f16(__p0) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vld1q_dup_v(__p0, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_f16(__p0) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vld1q_dup_v(__p0, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_f16(__p0) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vld1_dup_v(__p0, 8)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_f16(__p0) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vld1_dup_v(__p0, 8)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s1 = __p1; \
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 8)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s1 = __p1; \
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 8)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
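
/*
 * Usage sketch (illustrative only, not part of the generated header):
 * vld1[q]_lane_f16 load a single half-precision element from memory into
 * one lane of an existing vector, leaving the other lanes untouched. The
 * helper name below is hypothetical.
 *
 *   float16x8_t replace_lane3(float16x8_t v, const float16_t *p) {
 *     return vld1q_lane_f16(p, v, 3);  // overwrite lane 3 only
 *   }
 */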
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_f16_x2(__p0) __extension__ ({ \
|
|
float16x8x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_f16_x2(__p0) __extension__ ({ \
|
|
float16x8x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_f16_x2(__p0) __extension__ ({ \
|
|
float16x4x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_f16_x2(__p0) __extension__ ({ \
|
|
float16x4x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_f16_x3(__p0) __extension__ ({ \
|
|
float16x8x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_f16_x3(__p0) __extension__ ({ \
|
|
float16x8x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
#define vld1_f16_x3(__p0) __extension__ ({ \
float16x4x3_t __ret; \
__builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
__ret; \
})
#else
#define vld1_f16_x3(__p0) __extension__ ({ \
float16x4x3_t __ret; \
__builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
__ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld1q_f16_x4(__p0) __extension__ ({ \
float16x8x4_t __ret; \
__builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
__ret; \
})
#else
#define vld1q_f16_x4(__p0) __extension__ ({ \
float16x8x4_t __ret; \
__builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
__ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld1_f16_x4(__p0) __extension__ ({ \
float16x4x4_t __ret; \
__builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
__ret; \
})
#else
#define vld1_f16_x4(__p0) __extension__ ({ \
float16x4x4_t __ret; \
__builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
__ret; \
})
#endif

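/* The vld1*_f16_x2/_x3/_x4 forms above load 2-4 vectors from consecutive memory
   without de-interleaving; the vld2/vld3/vld4 forms below perform de-interleaving
   structure loads. */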
#ifdef __LITTLE_ENDIAN__
#define vld2q_f16(__p0) __extension__ ({ \
float16x8x2_t __ret; \
__builtin_neon_vld2q_v(&__ret, __p0, 40); \
__ret; \
})
#else
#define vld2q_f16(__p0) __extension__ ({ \
float16x8x2_t __ret; \
__builtin_neon_vld2q_v(&__ret, __p0, 40); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
__ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld2_f16(__p0) __extension__ ({ \
float16x4x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 8); \
__ret; \
})
#else
#define vld2_f16(__p0) __extension__ ({ \
float16x4x2_t __ret; \
__builtin_neon_vld2_v(&__ret, __p0, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
__ret; \
})
#endif

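/* Usage sketch for vld2_f16 (illustrative only, assuming fp16 support is enabled):
 *   float16_t buf[8];                  // interleaved pairs {a0,b0,a1,b1,...}
 *   float16x4x2_t v = vld2_f16(buf);   // v.val[0] = {a0..a3}, v.val[1] = {b0..b3}
 */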
#ifdef __LITTLE_ENDIAN__
#define vld2q_dup_f16(__p0) __extension__ ({ \
float16x8x2_t __ret; \
__builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
__ret; \
})
#else
#define vld2q_dup_f16(__p0) __extension__ ({ \
float16x8x2_t __ret; \
__builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
__ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld2_dup_f16(__p0) __extension__ ({ \
float16x4x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
__ret; \
})
#else
#define vld2_dup_f16(__p0) __extension__ ({ \
float16x4x2_t __ret; \
__builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
__ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
float16x8x2_t __ret; \
float16x8x2_t __s1 = __p1; \
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 40); \
__ret; \
})
#else
#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
float16x8x2_t __ret; \
float16x8x2_t __s1 = __p1; \
float16x8x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 40); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
__ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
float16x4x2_t __ret; \
float16x4x2_t __s1 = __p1; \
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 8); \
__ret; \
})
#else
#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
float16x4x2_t __ret; \
float16x4x2_t __s1 = __p1; \
float16x4x2_t __rev1; \
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 8); \
\
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
__ret; \
})
#endif

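/* The vldN_lane forms load one element per component vector from __p0 into lane
   __p2 of the tuple passed as __p1 and return the updated tuple; all other lanes
   are carried through unchanged. */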
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_f16(__p0) __extension__ ({ \
|
|
float16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 40); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_f16(__p0) __extension__ ({ \
|
|
float16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 40); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_f16(__p0) __extension__ ({ \
|
|
float16x4x3_t __ret; \
|
|
__builtin_neon_vld3_v(&__ret, __p0, 8); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_f16(__p0) __extension__ ({ \
|
|
float16x4x3_t __ret; \
|
|
__builtin_neon_vld3_v(&__ret, __p0, 8); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_f16(__p0) __extension__ ({ \
|
|
float16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_f16(__p0) __extension__ ({ \
|
|
float16x8x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_dup_f16(__p0) __extension__ ({ \
|
|
float16x4x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_dup_f16(__p0) __extension__ ({ \
|
|
float16x4x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8x3_t __ret; \
|
|
float16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 40); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8x3_t __ret; \
|
|
float16x8x3_t __s1 = __p1; \
|
|
float16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 40); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4x3_t __ret; \
|
|
float16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 8); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4x3_t __ret; \
|
|
float16x4x3_t __s1 = __p1; \
|
|
float16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 8); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_f16(__p0) __extension__ ({ \
|
|
float16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 40); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_f16(__p0) __extension__ ({ \
|
|
float16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 40); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_f16(__p0) __extension__ ({ \
|
|
float16x4x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 8); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_f16(__p0) __extension__ ({ \
|
|
float16x4x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 8); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_f16(__p0) __extension__ ({ \
|
|
float16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_f16(__p0) __extension__ ({ \
|
|
float16x8x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_dup_f16(__p0) __extension__ ({ \
|
|
float16x4x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_dup_f16(__p0) __extension__ ({ \
|
|
float16x4x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8x4_t __ret; \
|
|
float16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 40); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8x4_t __ret; \
|
|
float16x8x4_t __s1 = __p1; \
|
|
float16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 40); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4x4_t __ret; \
|
|
float16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 8); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4x4_t __ret; \
|
|
float16x4x4_t __s1 = __p1; \
|
|
float16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 8); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_16); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_16); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_16); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 40); \
|
|
})
|
|
#else
|
|
#define vst1q_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __s1 = __p1; \
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 8); \
|
|
})
|
|
#else
|
|
#define vst1_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __s1 = __p1; \
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 8); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 40); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __s1 = __p1; \
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 8); \
|
|
})
|
|
#else
|
|
#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4_t __s1 = __p1; \
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 8); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
|
|
float16x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 40); \
|
|
})
|
|
#else
|
|
#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
|
|
float16x8x2_t __s1 = __p1; \
|
|
float16x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
|
|
float16x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 8); \
|
|
})
|
|
#else
|
|
#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
|
|
float16x4x2_t __s1 = __p1; \
|
|
float16x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 8); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
|
|
float16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 40); \
|
|
})
|
|
#else
|
|
#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
|
|
float16x8x3_t __s1 = __p1; \
|
|
float16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
|
|
float16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 8); \
|
|
})
|
|
#else
|
|
#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
|
|
float16x4x3_t __s1 = __p1; \
|
|
float16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 8); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
|
|
float16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 40); \
|
|
})
|
|
#else
|
|
#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
|
|
float16x8x4_t __s1 = __p1; \
|
|
float16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
|
|
float16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 8); \
|
|
})
|
|
#else
|
|
#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
|
|
float16x4x4_t __s1 = __p1; \
|
|
float16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 8); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 40); \
|
|
})
|
|
#else
|
|
#define vst2q_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8x2_t __s1 = __p1; \
|
|
float16x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 8); \
|
|
})
|
|
#else
|
|
#define vst2_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4x2_t __s1 = __p1; \
|
|
float16x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 8); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 40); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8x2_t __s1 = __p1; \
|
|
float16x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 8); \
|
|
})
|
|
#else
|
|
#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4x2_t __s1 = __p1; \
|
|
float16x4x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 8); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 40); \
|
|
})
|
|
#else
|
|
#define vst3q_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8x3_t __s1 = __p1; \
|
|
float16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 8); \
|
|
})
|
|
#else
|
|
#define vst3_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4x3_t __s1 = __p1; \
|
|
float16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 8); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 40); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8x3_t __s1 = __p1; \
|
|
float16x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 8); \
|
|
})
|
|
#else
|
|
#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4x3_t __s1 = __p1; \
|
|
float16x4x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 8); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 40); \
|
|
})
|
|
#else
|
|
#define vst4q_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8x4_t __s1 = __p1; \
|
|
float16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 8); \
|
|
})
|
|
#else
|
|
#define vst4_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4x4_t __s1 = __p1; \
|
|
float16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 8); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 40); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8x4_t __s1 = __p1; \
|
|
float16x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_16); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 40); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 8); \
|
|
})
|
|
#else
|
|
#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4x4_t __s1 = __p1; \
|
|
float16x4x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_16); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_16); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_16); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_16); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 8); \
|
|
})
|
|
#endif
|
|
|
|
#endif
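/* The remaining groups are feature-gated: __ARM_FEATURE_NUMERIC_MAXMIN and
   __ARM_FEATURE_DIRECTED_ROUNDING guard the AArch64 float64 maxnm/minnm and
   vrnd* families, and __ARM_ARCH >= 8 guards the AES/SHA crypto intrinsics and
   the vcvta/vcvtm/vcvtn/vcvtp rounding conversions. */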
#if (defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vmaxnmq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vmaxnmq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vmaxnm_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 10));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vminnmq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vminnmq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vminnm_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 10));
return __ret;
}
#endif
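/* vmaxnm/vminnm above implement IEEE 754-2008 maxNum/minNum: when exactly one
 * operand is a quiet NaN the numeric operand is returned, unlike vmax/vmin.
 * Sketch (illustrative only):
 *   float64x2_t a = vdupq_n_f64(1.0);
 *   float64x2_t b = vdupq_n_f64(__builtin_nan(""));
 *   float64x2_t m = vmaxnmq_f64(a, b);   // both lanes == 1.0
 */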
#if (defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vrndq_f64(float64x2_t __p0) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vrndq_f64(float64x2_t __p0) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vrnd_f64(float64x1_t __p0) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrnd_v(__builtin_bit_cast(int8x8_t, __p0), 10));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vrndaq_f64(float64x2_t __p0) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndaq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vrndaq_f64(float64x2_t __p0) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndaq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vrnda_f64(float64x1_t __p0) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrnda_v(__builtin_bit_cast(int8x8_t, __p0), 10));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vrndiq_f64(float64x2_t __p0) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndiq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vrndiq_f64(float64x2_t __p0) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndiq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vrndi_f64(float64x1_t __p0) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrndi_v(__builtin_bit_cast(int8x8_t, __p0), 10));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vrndmq_f64(float64x2_t __p0) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndmq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vrndmq_f64(float64x2_t __p0) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndmq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vrndm_f64(float64x1_t __p0) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrndm_v(__builtin_bit_cast(int8x8_t, __p0), 10));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vrndnq_f64(float64x2_t __p0) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndnq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vrndnq_f64(float64x2_t __p0) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndnq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vrndn_f64(float64x1_t __p0) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrndn_v(__builtin_bit_cast(int8x8_t, __p0), 10));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vrndpq_f64(float64x2_t __p0) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndpq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vrndpq_f64(float64x2_t __p0) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndpq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vrndp_f64(float64x1_t __p0) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrndp_v(__builtin_bit_cast(int8x8_t, __p0), 10));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vrndxq_f64(float64x2_t __p0) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndxq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vrndxq_f64(float64x2_t __p0) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrndxq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vrndx_f64(float64x1_t __p0) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrndx_v(__builtin_bit_cast(int8x8_t, __p0), 10));
return __ret;
}
#endif
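/* The directed-rounding family above maps onto the FRINT* instructions:
   vrnd = toward zero, vrnda = to nearest (ties away from zero), vrndi = current
   rounding mode, vrndm = toward minus infinity, vrndn = to nearest (ties to even),
   vrndp = toward plus infinity, vrndx = current mode, raising the inexact
   exception. */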
#if __ARM_ARCH >= 8
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("aes,neon"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vaesdq_u8(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("aes,neon"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vaesdq_u8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("aes,neon"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vaeseq_u8(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("aes,neon"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vaeseq_u8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("aes,neon"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vaesimcq_u8(__builtin_bit_cast(int8x16_t, __p0), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("aes,neon"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vaesimcq_u8(__builtin_bit_cast(int8x16_t, __rev0), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("aes,neon"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vaesmcq_u8(__builtin_bit_cast(int8x16_t, __p0), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("aes,neon"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vaesmcq_u8(__builtin_bit_cast(int8x16_t, __rev0), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
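/* vaeseq_u8/vaesdq_u8 perform one AES encrypt/decrypt round step (AddRoundKey
   followed by (inverse) SubBytes and ShiftRows) and vaesmcq_u8/vaesimcq_u8 the
   (inverse) MixColumns step; all four require the "aes" target feature, as the
   target attributes indicate. */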
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtaq_s32_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtaq_s32_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vcvta_s32_f32(float32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvta_s32_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vcvta_s32_f32(float32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvta_s32_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtaq_u32_v(__builtin_bit_cast(int8x16_t, __p0), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtaq_u32_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvta_u32_v(__builtin_bit_cast(int8x8_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvta_u32_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtmq_s32_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtmq_s32_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvtm_s32_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvtm_s32_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtmq_u32_v(__builtin_bit_cast(int8x16_t, __p0), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtmq_u32_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvtm_u32_v(__builtin_bit_cast(int8x8_t, __p0), 18));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
  uint32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvtm_u32_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtnq_s32_v(__builtin_bit_cast(int8x16_t, __p0), 34));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
  int32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtnq_s32_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
  int32x2_t __ret;
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvtn_s32_v(__builtin_bit_cast(int8x8_t, __p0), 2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
  int32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvtn_s32_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtnq_u32_v(__builtin_bit_cast(int8x16_t, __p0), 50));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
  uint32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtnq_u32_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
  uint32x2_t __ret;
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvtn_u32_v(__builtin_bit_cast(int8x8_t, __p0), 18));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
  uint32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvtn_u32_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
  int32x4_t __ret;
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtpq_s32_v(__builtin_bit_cast(int8x16_t, __p0), 34));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
  int32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vcvtpq_s32_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
  int32x2_t __ret;
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvtp_s32_v(__builtin_bit_cast(int8x8_t, __p0), 2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
  int32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vcvtp_s32_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtpq_u32_v(__builtin_bit_cast(int8x16_t, __p0), 50));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
  uint32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcvtpq_u32_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
  uint32x2_t __ret;
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvtp_u32_v(__builtin_bit_cast(int8x8_t, __p0), 18));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
  uint32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcvtp_u32_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha1cq_u32(__p0, __p1, __p2));
  return __ret;
}
#else
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

__ai __attribute__((target("sha2,neon"))) uint32_t vsha1h_u32(uint32_t __p0) {
  uint32_t __ret;
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vsha1h_u32(__p0));
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha1mq_u32(__p0, __p1, __p2));
  return __ret;
}
#else
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha1pq_u32(__p0, __p1, __p2));
  return __ret;
}
#else
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha1su0q_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
  return __ret;
}
#else
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha1su0q_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha1su1q_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
  return __ret;
}
#else
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha1su1q_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha256hq_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
  return __ret;
}
#else
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha256hq_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha256h2q_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
  return __ret;
}
#else
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha256h2q_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha256su0q_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
  return __ret;
}
#else
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha256su0q_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha256su1q_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
  return __ret;
}
#else
__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsha256su1q_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#endif
#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnd_f16(float16x4_t __p0) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrnd_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnd_f16(float16x4_t __p0) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrnd_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndaq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndaq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndaq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndaq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnda_f16(float16x4_t __p0) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrnda_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnda_f16(float16x4_t __p0) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrnda_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndmq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndmq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndmq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndmq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndm_f16(float16x4_t __p0) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrndm_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndm_f16(float16x4_t __p0) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrndm_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndnq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndnq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndnq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndnq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndn_f16(float16x4_t __p0) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrndn_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndn_f16(float16x4_t __p0) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrndn_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndpq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndpq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndpq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndpq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndp_f16(float16x4_t __p0) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrndp_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndp_f16(float16x4_t __p0) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrndp_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndxq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndxq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndxq_f16(float16x8_t __p0) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndxq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndx_f16(float16x4_t __p0) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrndx_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndx_f16(float16x4_t __p0) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrndx_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vrndq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vrndq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vrnd_f32(float32x2_t __p0) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnd_v(__builtin_bit_cast(int8x8_t, __p0), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vrnd_f32(float32x2_t __p0) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnd_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vrndaq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndaq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vrndaq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndaq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vrnda_f32(float32x2_t __p0) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnda_v(__builtin_bit_cast(int8x8_t, __p0), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vrnda_f32(float32x2_t __p0) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnda_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vrndiq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndiq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vrndiq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndiq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vrndi_f32(float32x2_t __p0) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrndi_v(__builtin_bit_cast(int8x8_t, __p0), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vrndi_f32(float32x2_t __p0) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrndi_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vrndmq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndmq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vrndmq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndmq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vrndm_f32(float32x2_t __p0) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrndm_v(__builtin_bit_cast(int8x8_t, __p0), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vrndm_f32(float32x2_t __p0) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrndm_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vrndnq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndnq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vrndnq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndnq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vrndn_f32(float32x2_t __p0) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrndn_v(__builtin_bit_cast(int8x8_t, __p0), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vrndn_f32(float32x2_t __p0) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrndn_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) float32_t vrndns_f32(float32_t __p0) {
  float32_t __ret;
  __ret = __builtin_bit_cast(float32_t, __builtin_neon_vrndns_f32(__p0));
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vrndpq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndpq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vrndpq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndpq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vrndp_f32(float32x2_t __p0) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrndp_v(__builtin_bit_cast(int8x8_t, __p0), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vrndp_f32(float32x2_t __p0) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrndp_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vrndxq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndxq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vrndxq_f32(float32x4_t __p0) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrndxq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vrndx_f32(float32x2_t __p0) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrndx_v(__builtin_bit_cast(int8x8_t, __p0), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vrndx_f32(float32x2_t __p0) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrndx_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#endif
#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmaxnmq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmaxnmq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vmaxnm_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vmaxnm_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vminnmq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vminnmq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vminnm_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vminnm_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmaxnmq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmaxnmq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vmaxnm_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vmaxnm_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vminnmq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vminnmq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vminnm_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vminnm_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#endif
#if defined(__ARM_FEATURE_FMA)
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmaq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmaq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("neon"))) float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmaq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfma_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfma_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
__ai __attribute__((target("neon"))) float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfma_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  float32x4_t __ret;
  __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  float32x2_t __ret;
  __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  float32x4_t __ret;
  __ret = vfmaq_f32(__p0, -__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  float32x2_t __ret;
  __ret = vfma_f32(__p0, -__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#endif
#if defined(__aarch64__)
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt1_bf16_mf8_fpm(mfloat8x8_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt1_bf16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt1_bf16_mf8_fpm(mfloat8x8_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt1_bf16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt1_f16_mf8_fpm(mfloat8x8_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt1_f16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt1_f16_mf8_fpm(mfloat8x8_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt1_f16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt1_high_bf16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt1_high_bf16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt1_high_bf16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt1_high_bf16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt1_high_f16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt1_high_f16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt1_high_f16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt1_high_f16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt1_low_bf16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt1_low_bf16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt1_low_bf16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt1_low_bf16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt1_low_f16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt1_low_f16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt1_low_f16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt1_low_f16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt2_bf16_mf8_fpm(mfloat8x8_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt2_bf16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt2_bf16_mf8_fpm(mfloat8x8_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt2_bf16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt2_f16_mf8_fpm(mfloat8x8_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt2_f16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt2_f16_mf8_fpm(mfloat8x8_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt2_f16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt2_high_bf16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt2_high_bf16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt2_high_bf16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt2_high_bf16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt2_high_f16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt2_high_f16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt2_high_f16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt2_high_f16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt2_low_bf16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt2_low_bf16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) bfloat16x8_t vcvt2_low_bf16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvt2_low_bf16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt2_low_f16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt2_low_f16_mf8_fpm(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) float16x8_t vcvt2_low_f16_mf8_fpm(mfloat8x16_t __p0, fpm_t __p1) {
|
|
float16x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vcvt2_low_f16_mf8_fpm(__rev0, __p1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) mfloat8x16_t vcvt_high_mf8_f32_fpm(mfloat8x8_t __p0, float32x4_t __p1, float32x4_t __p2, fpm_t __p3) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vcvt_high_mf8_f32_fpm(__p0, __p1, __p2, __p3));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) mfloat8x16_t vcvt_high_mf8_f32_fpm(mfloat8x8_t __p0, float32x4_t __p1, float32x4_t __p2, fpm_t __p3) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vcvt_high_mf8_f32_fpm(__rev0, __rev1, __rev2, __p3));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) mfloat8x16_t vcvtq_mf8_f16_fpm(float16x8_t __p0, float16x8_t __p1, fpm_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vcvtq_mf8_f16_fpm(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) mfloat8x16_t vcvtq_mf8_f16_fpm(float16x8_t __p0, float16x8_t __p1, fpm_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vcvtq_mf8_f16_fpm(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) mfloat8x8_t vcvt_mf8_f16_fpm(float16x4_t __p0, float16x4_t __p1, fpm_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vcvt_mf8_f16_fpm(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) mfloat8x8_t vcvt_mf8_f16_fpm(float16x4_t __p0, float16x4_t __p1, fpm_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vcvt_mf8_f16_fpm(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) mfloat8x8_t vcvt_mf8_f32_fpm(float32x4_t __p0, float32x4_t __p1, fpm_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vcvt_mf8_f32_fpm(__p0, __p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8,neon"))) mfloat8x8_t vcvt_mf8_f32_fpm(float32x4_t __p0, float32x4_t __p1, fpm_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vcvt_mf8_f32_fpm(__rev0, __rev1, __p2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8,neon"))) float32x2_t vscale_f32(float32x2_t __p0, int32x2_t __p1) {
|
|
float32x2_t __ret;
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vscale_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
return __ret;
}
#else
__ai __attribute__((target("fp8,neon"))) float32x2_t vscale_f32(float32x2_t __p0, int32x2_t __p1) {
float32x2_t __ret;
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vscale_f32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp8,neon"))) float16x4_t vscale_f16(float16x4_t __p0, int16x4_t __p1) {
float16x4_t __ret;
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vscale_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
return __ret;
}
#else
__ai __attribute__((target("fp8,neon"))) float16x4_t vscale_f16(float16x4_t __p0, int16x4_t __p1) {
float16x4_t __ret;
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vscale_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp8,neon"))) float64x2_t vscaleq_f64(float64x2_t __p0, int64x2_t __p1) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vscaleq_f64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
return __ret;
}
#else
__ai __attribute__((target("fp8,neon"))) float64x2_t vscaleq_f64(float64x2_t __p0, int64x2_t __p1) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vscaleq_f64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp8,neon"))) float32x4_t vscaleq_f32(float32x4_t __p0, int32x4_t __p1) {
float32x4_t __ret;
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vscaleq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
return __ret;
}
#else
__ai __attribute__((target("fp8,neon"))) float32x4_t vscaleq_f32(float32x4_t __p0, int32x4_t __p1) {
float32x4_t __ret;
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vscaleq_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp8,neon"))) float16x8_t vscaleq_f16(float16x8_t __p0, int16x8_t __p1) {
float16x8_t __ret;
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vscaleq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
return __ret;
}
#else
__ai __attribute__((target("fp8,neon"))) float16x8_t vscaleq_f16(float16x8_t __p0, int16x8_t __p1) {
float16x8_t __ret;
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vscaleq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif
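/* Usage sketch (illustrative only; assumes the "fp8" and "neon" target
 * features are enabled): the vscale*_f16/_f32/_f64 intrinsics correspond to
 * the FSCALE instruction and multiply each floating-point lane by 2 raised
 * to the matching signed integer exponent, i.e. a per-lane ldexp.
 *
 *   float32x4_t scale_by_eight(float32x4_t v) {
 *     int32x4_t exp = vdupq_n_s32(3);   // every lane scaled by 2^3
 *     return vscaleq_f32(v, exp);       // { v[0]*8.0f, v[1]*8.0f, ... }
 *   }
 */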
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp8dot2,neon"))) float16x8_t vdotq_f16_mf8_fpm(float16x8_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
float16x8_t __ret;
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vdotq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __p0), __p1, __p2, __p3));
return __ret;
}
#else
__ai __attribute__((target("fp8dot2,neon"))) float16x8_t vdotq_f16_mf8_fpm(float16x8_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
float16x8_t __ret;
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vdotq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __rev0), __rev1, __rev2, __p3));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp8dot2,neon"))) float16x4_t vdot_f16_mf8_fpm(float16x4_t __p0, mfloat8x8_t __p1, mfloat8x8_t __p2, fpm_t __p3) {
float16x4_t __ret;
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vdot_f16_mf8_fpm(__builtin_bit_cast(int8x8_t, __p0), __p1, __p2, __p3));
return __ret;
}
#else
__ai __attribute__((target("fp8dot2,neon"))) float16x4_t vdot_f16_mf8_fpm(float16x4_t __p0, mfloat8x8_t __p1, mfloat8x8_t __p2, fpm_t __p3) {
float16x4_t __ret;
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vdot_f16_mf8_fpm(__builtin_bit_cast(int8x8_t, __rev0), __rev1, __rev2, __p3));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
return __ret;
}
#endif
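/* Usage sketch (illustrative only; assumes the "fp8dot2" and "neon" target
 * features are enabled): vdotq_f16_mf8_fpm accumulates a 2-way dot product
 * of adjacent FP8 element pairs into each float16 lane; the fpm_t operand
 * carries the FP8 format/scaling configuration (FPMR) and is threaded
 * through unchanged here.
 *
 *   float16x8_t fp8_dot_acc(float16x8_t acc, mfloat8x16_t a, mfloat8x16_t b,
 *                           fpm_t fpm) {
 *     // acc[i] += a[2*i]*b[2*i] + a[2*i+1]*b[2*i+1]
 *     return vdotq_f16_mf8_fpm(acc, a, b, fpm);
 *   }
 */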
#ifdef __LITTLE_ENDIAN__
#define vdotq_lane_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
float16x8_t __ret; \
float16x8_t __s0 = __p0; \
mfloat8x16_t __s1 = __p1; \
mfloat8x8_t __s2 = __p2; \
fpm_t __s4 = __p4; \
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vdotq_lane_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __s0), __s1, __s2, __p3, __s4)); \
__ret; \
})
#else
#define vdotq_lane_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
float16x8_t __ret; \
float16x8_t __s0 = __p0; \
mfloat8x16_t __s1 = __p1; \
mfloat8x8_t __s2 = __p2; \
fpm_t __s4 = __p4; \
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_8); \
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vdotq_lane_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __rev0), __rev1, __rev2, __p3, __s4)); \
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vdot_lane_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
float16x4_t __ret; \
float16x4_t __s0 = __p0; \
mfloat8x8_t __s1 = __p1; \
mfloat8x8_t __s2 = __p2; \
fpm_t __s4 = __p4; \
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vdot_lane_f16_mf8_fpm(__builtin_bit_cast(int8x8_t, __s0), __s1, __s2, __p3, __s4)); \
__ret; \
})
#else
#define vdot_lane_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
float16x4_t __ret; \
float16x4_t __s0 = __p0; \
mfloat8x8_t __s1 = __p1; \
mfloat8x8_t __s2 = __p2; \
fpm_t __s4 = __p4; \
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_8); \
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vdot_lane_f16_mf8_fpm(__builtin_bit_cast(int8x8_t, __rev0), __rev1, __rev2, __p3, __s4)); \
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vdotq_laneq_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vdotq_laneq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __s0), __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdotq_laneq_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vdotq_laneq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __rev0), __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdot_laneq_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vdot_laneq_f16_mf8_fpm(__builtin_bit_cast(int8x8_t, __s0), __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdot_laneq_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vdot_laneq_f16_mf8_fpm(__builtin_bit_cast(int8x8_t, __rev0), __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8dot4,neon"))) float32x4_t vdotq_f32_mf8_fpm(float32x4_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vdotq_f32_mf8_fpm(__p0, __p1, __p2, __p3));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8dot4,neon"))) float32x4_t vdotq_f32_mf8_fpm(float32x4_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vdotq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8dot4,neon"))) float32x2_t vdot_f32_mf8_fpm(float32x2_t __p0, mfloat8x8_t __p1, mfloat8x8_t __p2, fpm_t __p3) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vdot_f32_mf8_fpm(__p0, __p1, __p2, __p3));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8dot4,neon"))) float32x2_t vdot_f32_mf8_fpm(float32x2_t __p0, mfloat8x8_t __p1, mfloat8x8_t __p2, fpm_t __p3) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vdot_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdotq_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vdotq_lane_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdotq_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vdotq_lane_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdot_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vdot_lane_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdot_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vdot_lane_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdotq_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vdotq_laneq_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdotq_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vdotq_laneq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdot_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vdot_laneq_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdot_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vdot_laneq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8fma,neon"))) float16x8_t vmlalbq_f16_mf8_fpm(float16x8_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlalbq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __p0), __p1, __p2, __p3));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8fma,neon"))) float16x8_t vmlalbq_f16_mf8_fpm(float16x8_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlalbq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __rev0), __rev1, __rev2, __p3));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlalbq_lane_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlalbq_lane_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __s0), __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlalbq_lane_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlalbq_lane_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __rev0), __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlalbq_laneq_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlalbq_laneq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __s0), __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlalbq_laneq_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlalbq_laneq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __rev0), __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8fma,neon"))) float32x4_t vmlallbbq_f32_mf8_fpm(float32x4_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbbq_f32_mf8_fpm(__p0, __p1, __p2, __p3));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8fma,neon"))) float32x4_t vmlallbbq_f32_mf8_fpm(float32x4_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbbq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlallbbq_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbbq_lane_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlallbbq_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbbq_lane_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlallbbq_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbbq_laneq_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlallbbq_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbbq_laneq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8fma,neon"))) float32x4_t vmlallbtq_f32_mf8_fpm(float32x4_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbtq_f32_mf8_fpm(__p0, __p1, __p2, __p3));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8fma,neon"))) float32x4_t vmlallbtq_f32_mf8_fpm(float32x4_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbtq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlallbtq_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbtq_lane_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlallbtq_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbtq_lane_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlallbtq_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbtq_laneq_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlallbtq_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallbtq_laneq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8fma,neon"))) float32x4_t vmlalltbq_f32_mf8_fpm(float32x4_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlalltbq_f32_mf8_fpm(__p0, __p1, __p2, __p3));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8fma,neon"))) float32x4_t vmlalltbq_f32_mf8_fpm(float32x4_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlalltbq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlalltbq_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlalltbq_lane_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlalltbq_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlalltbq_lane_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlalltbq_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlalltbq_laneq_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlalltbq_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlalltbq_laneq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8fma,neon"))) float32x4_t vmlallttq_f32_mf8_fpm(float32x4_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallttq_f32_mf8_fpm(__p0, __p1, __p2, __p3));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8fma,neon"))) float32x4_t vmlallttq_f32_mf8_fpm(float32x4_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallttq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlallttq_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallttq_lane_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlallttq_lane_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallttq_lane_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlallttq_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallttq_laneq_f32_mf8_fpm(__s0, __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlallttq_laneq_f32_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmlallttq_laneq_f32_mf8_fpm(__rev0, __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp8fma,neon"))) float16x8_t vmlaltq_f16_mf8_fpm(float16x8_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlaltq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __p0), __p1, __p2, __p3));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp8fma,neon"))) float16x8_t vmlaltq_f16_mf8_fpm(float16x8_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2, fpm_t __p3) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlaltq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __rev0), __rev1, __rev2, __p3));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaltq_lane_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlaltq_lane_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __s0), __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlaltq_lane_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlaltq_lane_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __rev0), __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaltq_laneq_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlaltq_laneq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __s0), __s1, __s2, __p3, __s4)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmlaltq_laneq_f16_mf8_fpm(__p0, __p1, __p2, __p3, __p4) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __s2 = __p2; \
|
|
fpm_t __s4 = __p4; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmlaltq_laneq_f16_mf8_fpm(__builtin_bit_cast(int8x16_t, __rev0), __rev1, __rev2, __p3, __s4)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti2_lane_p8(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti2_lane_p8(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti2q_lane_p8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti2q_lane_p8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti2q_lane_u8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti2q_lane_u8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti2q_lane_s8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti2q_lane_s8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti2q_lane_mf8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 44)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti2q_lane_mf8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 44)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti2_lane_u8(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti2_lane_u8(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti2_lane_s8(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti2_lane_s8(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti2_lane_mf8(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 44)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti2_lane_mf8(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 44)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti2_lane_p16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti2_lane_p16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti2q_lane_p16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti2q_lane_p16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti2q_lane_u16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti2q_lane_u16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti2q_lane_f16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti2q_lane_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti2q_lane_s16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti2q_lane_s16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti2_lane_u16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti2_lane_u16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti2_lane_f16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti2_lane_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti2_lane_s16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti2_lane_s16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_laneq_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti2_laneq_p8(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_laneq_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti2_laneq_p8(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_laneq_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti2q_laneq_p8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_laneq_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti2q_laneq_p8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_laneq_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti2q_laneq_u8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_laneq_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti2q_laneq_u8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 48)); \
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vluti2q_laneq_s8(__p0, __p1, __p2) __extension__ ({ \
int8x16_t __ret; \
int8x16_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti2q_laneq_s8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 32)); \
__ret; \
})
#else
#define vluti2q_laneq_s8(__p0, __p1, __p2) __extension__ ({ \
int8x16_t __ret; \
int8x16_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti2q_laneq_s8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 32)); \
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vluti2q_laneq_mf8(__p0, __p1, __p2) __extension__ ({ \
mfloat8x16_t __ret; \
mfloat8x16_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti2q_laneq_mf8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 44)); \
__ret; \
})
#else
#define vluti2q_laneq_mf8(__p0, __p1, __p2) __extension__ ({ \
mfloat8x16_t __ret; \
mfloat8x16_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti2q_laneq_mf8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 44)); \
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_laneq_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti2_laneq_u8(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_laneq_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti2_laneq_u8(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_laneq_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti2_laneq_s8(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_laneq_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti2_laneq_s8(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_laneq_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti2_laneq_mf8(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 44)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_laneq_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti2_laneq_mf8(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 44)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_laneq_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti2_laneq_p16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_laneq_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti2_laneq_p16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_laneq_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti2q_laneq_p16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_laneq_p16(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti2q_laneq_p16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti2q_laneq_u16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti2q_laneq_u16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti2q_laneq_f16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti2q_laneq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti2q_laneq_s16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti2q_laneq_s16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti2_laneq_u16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti2_laneq_u16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti2_laneq_f16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti2_laneq_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
#define vluti2_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
int16x8_t __ret; \
int16x4_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti2_laneq_s16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
__ret; \
})
#else
#define vluti2_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
int16x8_t __ret; \
int16x4_t __s0 = __p0; \
uint8x16_t __s1 = __p1; \
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti2_laneq_s16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
__ret; \
})
#endif
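/* The vluti2[q]_lane[q]_* wrappers above all follow the same shape: the table and
 * index operands are bit_cast to byte vectors, handed to the matching
 * __builtin_neon_vluti2* builtin together with the constant lane and a type code,
 * and the result is bit_cast back. On big-endian targets the operands are
 * lane-reversed first and the result reversed afterwards, so the builtin always
 * sees little-endian lane order. A minimal usage sketch (the variable names and
 * lane constant are illustrative only, not part of this header):
 *
 *   int16x4_t tbl  = vdup_n_s16(42);
 *   uint8x16_t idx = vdupq_n_u8(0);
 *   int16x8_t r    = vluti2_laneq_s16(tbl, idx, 0);
 */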
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti4q_lane_p8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti4q_lane_p8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti4q_lane_u8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti4q_lane_u8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti4q_lane_s8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti4q_lane_s8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti4q_lane_mf8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 44)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti4q_lane_mf8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 44)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_lane_p16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8x2_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti4q_lane_p16_x2(__builtin_bit_cast(int8x16_t, __s0.val[0]), __builtin_bit_cast(int8x16_t, __s0.val[1]), __builtin_bit_cast(int8x8_t, __s1), __p2, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_lane_p16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8x2_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
poly16x8x2_t __rev0; \
|
|
__rev0.val[0] = __builtin_shufflevector(__s0.val[0], __s0.val[0], __lane_reverse_128_16); \
|
|
__rev0.val[1] = __builtin_shufflevector(__s0.val[1], __s0.val[1], __lane_reverse_128_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti4q_lane_p16_x2(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), __p2, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_lane_u16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8x2_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti4q_lane_u16_x2(__builtin_bit_cast(int8x16_t, __s0.val[0]), __builtin_bit_cast(int8x16_t, __s0.val[1]), __builtin_bit_cast(int8x8_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_lane_u16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8x2_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
uint16x8x2_t __rev0; \
|
|
__rev0.val[0] = __builtin_shufflevector(__s0.val[0], __s0.val[0], __lane_reverse_128_16); \
|
|
__rev0.val[1] = __builtin_shufflevector(__s0.val[1], __s0.val[1], __lane_reverse_128_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti4q_lane_u16_x2(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_lane_f16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8x2_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti4q_lane_f16_x2(__builtin_bit_cast(int8x16_t, __s0.val[0]), __builtin_bit_cast(int8x16_t, __s0.val[1]), __builtin_bit_cast(int8x8_t, __s1), __p2, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_lane_f16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8x2_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
float16x8x2_t __rev0; \
|
|
__rev0.val[0] = __builtin_shufflevector(__s0.val[0], __s0.val[0], __lane_reverse_128_16); \
|
|
__rev0.val[1] = __builtin_shufflevector(__s0.val[1], __s0.val[1], __lane_reverse_128_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti4q_lane_f16_x2(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), __p2, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_lane_s16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8x2_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti4q_lane_s16_x2(__builtin_bit_cast(int8x16_t, __s0.val[0]), __builtin_bit_cast(int8x16_t, __s0.val[1]), __builtin_bit_cast(int8x8_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_lane_s16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8x2_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
int16x8x2_t __rev0; \
|
|
__rev0.val[0] = __builtin_shufflevector(__s0.val[0], __s0.val[0], __lane_reverse_128_16); \
|
|
__rev0.val[1] = __builtin_shufflevector(__s0.val[1], __s0.val[1], __lane_reverse_128_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti4q_lane_s16_x2(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_laneq_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti4q_laneq_p8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 36)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_laneq_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vluti4q_laneq_p8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 36)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_laneq_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti4q_laneq_u8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 48)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_laneq_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vluti4q_laneq_u8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 48)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_laneq_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti4q_laneq_s8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 32)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_laneq_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vluti4q_laneq_s8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 32)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_laneq_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti4q_laneq_mf8(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 44)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_laneq_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vluti4q_laneq_mf8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 44)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_laneq_p16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8x2_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti4q_laneq_p16_x2(__builtin_bit_cast(int8x16_t, __s0.val[0]), __builtin_bit_cast(int8x16_t, __s0.val[1]), __builtin_bit_cast(int8x16_t, __s1), __p2, 37)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_laneq_p16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
poly16x8_t __ret; \
|
|
poly16x8x2_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
poly16x8x2_t __rev0; \
|
|
__rev0.val[0] = __builtin_shufflevector(__s0.val[0], __s0.val[0], __lane_reverse_128_16); \
|
|
__rev0.val[1] = __builtin_shufflevector(__s0.val[1], __s0.val[1], __lane_reverse_128_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly16x8_t, __builtin_neon_vluti4q_laneq_p16_x2(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev1), __p2, 37)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_laneq_u16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8x2_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti4q_laneq_u16_x2(__builtin_bit_cast(int8x16_t, __s0.val[0]), __builtin_bit_cast(int8x16_t, __s0.val[1]), __builtin_bit_cast(int8x16_t, __s1), __p2, 49)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_laneq_u16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
uint16x8_t __ret; \
|
|
uint16x8x2_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
uint16x8x2_t __rev0; \
|
|
__rev0.val[0] = __builtin_shufflevector(__s0.val[0], __s0.val[0], __lane_reverse_128_16); \
|
|
__rev0.val[1] = __builtin_shufflevector(__s0.val[1], __s0.val[1], __lane_reverse_128_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vluti4q_laneq_u16_x2(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev1), __p2, 49)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_laneq_f16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8x2_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti4q_laneq_f16_x2(__builtin_bit_cast(int8x16_t, __s0.val[0]), __builtin_bit_cast(int8x16_t, __s0.val[1]), __builtin_bit_cast(int8x16_t, __s1), __p2, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_laneq_f16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8x2_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
float16x8x2_t __rev0; \
|
|
__rev0.val[0] = __builtin_shufflevector(__s0.val[0], __s0.val[0], __lane_reverse_128_16); \
|
|
__rev0.val[1] = __builtin_shufflevector(__s0.val[1], __s0.val[1], __lane_reverse_128_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vluti4q_laneq_f16_x2(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev1), __p2, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_laneq_s16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8x2_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti4q_laneq_s16_x2(__builtin_bit_cast(int8x16_t, __s0.val[0]), __builtin_bit_cast(int8x16_t, __s0.val[1]), __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_laneq_s16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8x2_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
int16x8x2_t __rev0; \
|
|
__rev0.val[0] = __builtin_shufflevector(__s0.val[0], __s0.val[0], __lane_reverse_128_16); \
|
|
__rev0.val[1] = __builtin_shufflevector(__s0.val[1], __s0.val[1], __lane_reverse_128_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vluti4q_laneq_s16_x2(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti2q_lane_bf16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 43)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x8_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti2q_lane_bf16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 43)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x4_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti2_lane_bf16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 43)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x4_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti2_lane_bf16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 43)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2q_laneq_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti2q_laneq_bf16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 43)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2q_laneq_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x8_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti2q_laneq_bf16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 43)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti2_laneq_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x4_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti2_laneq_bf16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 43)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti2_laneq_bf16(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x4_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti2_laneq_bf16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 43)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_lane_bf16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x8x2_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti4q_lane_bf16_x2(__builtin_bit_cast(int8x16_t, __s0.val[0]), __builtin_bit_cast(int8x16_t, __s0.val[1]), __builtin_bit_cast(int8x8_t, __s1), __p2, 43)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_lane_bf16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x8x2_t __s0 = __p0; \
|
|
uint8x8_t __s1 = __p1; \
|
|
bfloat16x8x2_t __rev0; \
|
|
__rev0.val[0] = __builtin_shufflevector(__s0.val[0], __s0.val[0], __lane_reverse_128_16); \
|
|
__rev0.val[1] = __builtin_shufflevector(__s0.val[1], __s0.val[1], __lane_reverse_128_16); \
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti4q_lane_bf16_x2(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), __p2, 43)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vluti4q_laneq_bf16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x8x2_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti4q_laneq_bf16_x2(__builtin_bit_cast(int8x16_t, __s0.val[0]), __builtin_bit_cast(int8x16_t, __s0.val[1]), __builtin_bit_cast(int8x16_t, __s1), __p2, 43)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vluti4q_laneq_bf16_x2(__p0, __p1, __p2) __extension__ ({ \
|
|
bfloat16x8_t __ret; \
|
|
bfloat16x8x2_t __s0 = __p0; \
|
|
uint8x16_t __s1 = __p1; \
|
|
bfloat16x8x2_t __rev0; \
|
|
__rev0.val[0] = __builtin_shufflevector(__s0.val[0], __s0.val[0], __lane_reverse_128_16); \
|
|
__rev0.val[1] = __builtin_shufflevector(__s0.val[1], __s0.val[1], __lane_reverse_128_16); \
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vluti4q_laneq_bf16_x2(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev1), __p2, 43)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 12)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 12)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_splatq_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 12)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splat_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 12)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splat_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __p1, 12)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splat_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_splat_lane_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 12)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define splatq_laneq_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 44)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define splatq_laneq_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 44)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_splatq_laneq_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_splatq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 44)); \
|
|
__ret; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
#define splat_laneq_mf8(__p0, __p1) __extension__ ({ \
mfloat8x8_t __ret; \
mfloat8x16_t __s0 = __p0; \
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 44)); \
__ret; \
})
#else
#define splat_laneq_mf8(__p0, __p1) __extension__ ({ \
mfloat8x8_t __ret; \
mfloat8x16_t __s0 = __p0; \
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 44)); \
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
__ret; \
})
#define __noswap_splat_laneq_mf8(__p0, __p1) __extension__ ({ \
mfloat8x8_t __ret; \
mfloat8x16_t __s0 = __p0; \
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_splat_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 44)); \
__ret; \
})
#endif
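/* Note: the __noswap_splat*_mf8 variants defined in the big-endian branches above
 * perform the splat without the usual operand/result lane reversal. They exist so
 * that callers such as the vdup*_lane_mf8 macros further below, which have already
 * reversed their input themselves, can reuse the splat builtins without the lanes
 * being swapped twice.
 */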
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vbslq_mf8(uint8x16_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 44));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vbslq_mf8(uint8x16_t __p0, mfloat8x16_t __p1, mfloat8x16_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 44));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) mfloat8x8_t vbsl_mf8(uint8x8_t __p0, mfloat8x8_t __p1, mfloat8x8_t __p2) {
mfloat8x8_t __ret;
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 12));
return __ret;
}
#else
__ai __attribute__((target("neon"))) mfloat8x8_t vbsl_mf8(uint8x8_t __p0, mfloat8x8_t __p1, mfloat8x8_t __p2) {
mfloat8x8_t __ret;
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 12));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
return __ret;
}
#endif
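/* vbslq_mf8 and vbsl_mf8 forward to the generic __builtin_neon_vbslq_v /
 * __builtin_neon_vbsl_v bit-select builtins using the mfloat8 type codes (44 and
 * 12). As with the other vbsl intrinsics, each result bit is conventionally taken
 * from the second operand where the corresponding mask bit is set and from the
 * third operand otherwise. Illustrative use (a and b stand for mfloat8x16_t values
 * defined elsewhere):
 *
 *   uint8x16_t mask = vdupq_n_u8(0xF0);
 *   mfloat8x16_t r  = vbslq_mf8(mask, a, b);
 */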
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vcombine_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vcombine_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#define vcreate_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
uint64_t __promote = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __promote); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_mf8(__p0_230, __p1_230) __extension__ ({ \
|
|
mfloat8x16_t __ret_230; \
|
|
mfloat8x8_t __s0_230 = __p0_230; \
|
|
__ret_230 = splatq_lane_mf8(__s0_230, __p1_230); \
|
|
__ret_230; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_mf8(__p0_231, __p1_231) __extension__ ({ \
|
|
mfloat8x16_t __ret_231; \
|
|
mfloat8x8_t __s0_231 = __p0_231; \
|
|
mfloat8x8_t __rev0_231; __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, __lane_reverse_64_8); \
|
|
__ret_231 = __noswap_splatq_lane_mf8(__rev0_231, __p1_231); \
|
|
__ret_231 = __builtin_shufflevector(__ret_231, __ret_231, __lane_reverse_128_8); \
|
|
__ret_231; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_lane_mf8(__p0_232, __p1_232) __extension__ ({ \
|
|
mfloat8x8_t __ret_232; \
|
|
mfloat8x8_t __s0_232 = __p0_232; \
|
|
__ret_232 = splat_lane_mf8(__s0_232, __p1_232); \
|
|
__ret_232; \
|
|
})
|
|
#else
|
|
#define vdup_lane_mf8(__p0_233, __p1_233) __extension__ ({ \
|
|
mfloat8x8_t __ret_233; \
|
|
mfloat8x8_t __s0_233 = __p0_233; \
|
|
mfloat8x8_t __rev0_233; __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, __lane_reverse_64_8); \
|
|
__ret_233 = __noswap_splat_lane_mf8(__rev0_233, __p1_233); \
|
|
__ret_233 = __builtin_shufflevector(__ret_233, __ret_233, __lane_reverse_64_8); \
|
|
__ret_233; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vdupq_n_mf8(mfloat8_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = (mfloat8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vdupq_n_mf8(mfloat8_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = (mfloat8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vdup_n_mf8(mfloat8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = (mfloat8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vdup_n_mf8(mfloat8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = (mfloat8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vextq_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 44)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vextq_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 44)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vext_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 12)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vext_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 12)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vget_high_mf8(mfloat8x16_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vget_high_mf8(mfloat8x16_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8_t, __builtin_neon_vgetq_lane_mf8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(mfloat8_t, __builtin_neon_vgetq_lane_mf8(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8_t, __builtin_neon_vgetq_lane_mf8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vget_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8_t, __builtin_neon_vget_lane_mf8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vget_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(mfloat8_t, __builtin_neon_vget_lane_mf8(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vget_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8_t, __builtin_neon_vget_lane_mf8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vget_low_mf8(mfloat8x16_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vget_low_mf8(mfloat8x16_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vmovq_n_mf8(mfloat8_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = (mfloat8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vmovq_n_mf8(mfloat8_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = (mfloat8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vmov_n_mf8(mfloat8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = (mfloat8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vmov_n_mf8(mfloat8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = (mfloat8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vrev16q_mf8(mfloat8x16_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vrev16q_mf8(mfloat8x16_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vrev16_mf8(mfloat8x8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vrev16_mf8(mfloat8x8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vrev32q_mf8(mfloat8x16_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vrev32q_mf8(mfloat8x16_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vrev32_mf8(mfloat8x8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vrev32_mf8(mfloat8x8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vrev64q_mf8(mfloat8x16_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vrev64q_mf8(mfloat8x16_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vrev64_mf8(mfloat8x8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vrev64_mf8(mfloat8x8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vsetq_lane_mf8(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vsetq_lane_mf8(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8_t __s0 = __p0; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vsetq_lane_mf8(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vset_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
mfloat8_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vset_lane_mf8(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vset_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
mfloat8_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vset_lane_mf8(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vset_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
mfloat8_t __s0 = __p0; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vset_lane_mf8(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbl1_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbl1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbl1_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbl1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbl2_mf8(mfloat8x8x2_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbl2_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p1), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbl2_mf8(mfloat8x8x2_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbl2_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbl3_mf8(mfloat8x8x3_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbl3_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p1), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbl3_mf8(mfloat8x8x3_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbl3_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev1), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbl4_mf8(mfloat8x8x4_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbl4_v(__builtin_bit_cast(int8x8_t, __p0.val[0]), __builtin_bit_cast(int8x8_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p0.val[3]), __builtin_bit_cast(int8x8_t, __p1), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbl4_mf8(mfloat8x8x4_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_64_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_64_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_64_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbl4_v(__builtin_bit_cast(int8x8_t, __rev0.val[0]), __builtin_bit_cast(int8x8_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev0.val[3]), __builtin_bit_cast(int8x8_t, __rev1), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbx1_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1, mfloat8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbx1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbx1_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1, mfloat8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbx1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbx2_mf8(mfloat8x8_t __p0, mfloat8x8x2_t __p1, mfloat8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbx2_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p2), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbx2_mf8(mfloat8x8_t __p0, mfloat8x8x2_t __p1, mfloat8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbx2_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev2), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbx3_mf8(mfloat8x8_t __p0, mfloat8x8x3_t __p1, mfloat8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbx3_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p2), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbx3_mf8(mfloat8x8_t __p0, mfloat8x8x3_t __p1, mfloat8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8x3_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_64_8);
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbx3_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev2), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbx4_mf8(mfloat8x8_t __p0, mfloat8x8x4_t __p1, mfloat8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbx4_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1.val[0]), __builtin_bit_cast(int8x8_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p1.val[3]), __builtin_bit_cast(int8x8_t, __p2), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtbx4_mf8(mfloat8x8_t __p0, mfloat8x8x4_t __p1, mfloat8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_64_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_64_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_64_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_64_8);
|
|
mfloat8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vtbx4_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __builtin_bit_cast(int8x8_t, __rev2), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16x2_t vtrnq_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16x2_t __ret;
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 44);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16x2_t vtrnq_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16x2_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vtrnq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 44);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8x2_t vtrn_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8x2_t __ret;
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 12);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8x2_t vtrn_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8x2_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__builtin_neon_vtrn_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 12);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16x2_t vuzpq_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16x2_t __ret;
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 44);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16x2_t vuzpq_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16x2_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vuzpq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 44);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8x2_t vuzp_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8x2_t __ret;
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 12);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8x2_t vuzp_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8x2_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__builtin_neon_vuzp_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 12);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16x2_t vzipq_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16x2_t __ret;
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 44);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16x2_t vzipq_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16x2_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__builtin_neon_vzipq_v(&__ret, __builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 44);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8x2_t vzip_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8x2_t __ret;
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 12);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8x2_t vzip_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8x2_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__builtin_neon_vzip_v(&__ret, __builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 12);
|
|
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8);
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon,faminmax"))) float64x2_t vamaxq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vamaxq_f64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon,faminmax"))) float64x2_t vamaxq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vamaxq_f64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon,faminmax"))) float32x4_t vamaxq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vamaxq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon,faminmax"))) float32x4_t vamaxq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vamaxq_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon,faminmax"))) float16x8_t vamaxq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vamaxq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon,faminmax"))) float16x8_t vamaxq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vamaxq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon,faminmax"))) float32x2_t vamax_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vamax_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon,faminmax"))) float32x2_t vamax_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vamax_f32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon,faminmax"))) float16x4_t vamax_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vamax_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon,faminmax"))) float16x4_t vamax_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vamax_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon,faminmax"))) float64x2_t vaminq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vaminq_f64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon,faminmax"))) float64x2_t vaminq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vaminq_f64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon,faminmax"))) float32x4_t vaminq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vaminq_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon,faminmax"))) float32x4_t vaminq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vaminq_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon,faminmax"))) float16x8_t vaminq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vaminq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon,faminmax"))) float16x8_t vaminq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vaminq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon,faminmax"))) float32x2_t vamin_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vamin_f32(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon,faminmax"))) float32x2_t vamin_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vamin_f32(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon,faminmax"))) float16x4_t vamin_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vamin_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon,faminmax"))) float16x4_t vamin_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vamin_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#endif
|
|
#if defined(__aarch64__) || defined(__arm64ec__)
|
|
__ai __attribute__((target("aes,neon"))) poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __builtin_neon_vmull_p64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_bf16(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
|
|
bfloat16x8_t __ret_234; \
|
|
bfloat16x8_t __s0_234 = __p0_234; \
|
|
bfloat16x4_t __s2_234 = __p2_234; \
|
|
__ret_234 = vsetq_lane_bf16(vget_lane_bf16(__s2_234, __p3_234), __s0_234, __p1_234); \
|
|
__ret_234; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_bf16(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
|
|
bfloat16x8_t __ret_235; \
|
|
bfloat16x8_t __s0_235 = __p0_235; \
|
|
bfloat16x4_t __s2_235 = __p2_235; \
|
|
bfloat16x8_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, __lane_reverse_128_16); \
|
|
bfloat16x4_t __rev2_235; __rev2_235 = __builtin_shufflevector(__s2_235, __s2_235, __lane_reverse_64_16); \
|
|
__ret_235 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_235, __p3_235), __rev0_235, __p1_235); \
|
|
__ret_235 = __builtin_shufflevector(__ret_235, __ret_235, __lane_reverse_128_16); \
|
|
__ret_235; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_bf16(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
|
|
bfloat16x4_t __ret_236; \
|
|
bfloat16x4_t __s0_236 = __p0_236; \
|
|
bfloat16x4_t __s2_236 = __p2_236; \
|
|
__ret_236 = vset_lane_bf16(vget_lane_bf16(__s2_236, __p3_236), __s0_236, __p1_236); \
|
|
__ret_236; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_bf16(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
|
|
bfloat16x4_t __ret_237; \
|
|
bfloat16x4_t __s0_237 = __p0_237; \
|
|
bfloat16x4_t __s2_237 = __p2_237; \
|
|
bfloat16x4_t __rev0_237; __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, __lane_reverse_64_16); \
|
|
bfloat16x4_t __rev2_237; __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, __lane_reverse_64_16); \
|
|
__ret_237 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_237, __p3_237), __rev0_237, __p1_237); \
|
|
__ret_237 = __builtin_shufflevector(__ret_237, __ret_237, __lane_reverse_64_16); \
|
|
__ret_237; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_bf16(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
|
|
bfloat16x8_t __ret_238; \
|
|
bfloat16x8_t __s0_238 = __p0_238; \
|
|
bfloat16x8_t __s2_238 = __p2_238; \
|
|
__ret_238 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_238, __p3_238), __s0_238, __p1_238); \
|
|
__ret_238; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_bf16(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
|
|
bfloat16x8_t __ret_239; \
|
|
bfloat16x8_t __s0_239 = __p0_239; \
|
|
bfloat16x8_t __s2_239 = __p2_239; \
|
|
bfloat16x8_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, __lane_reverse_128_16); \
|
|
bfloat16x8_t __rev2_239; __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, __lane_reverse_128_16); \
|
|
__ret_239 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_239, __p3_239), __rev0_239, __p1_239); \
|
|
__ret_239 = __builtin_shufflevector(__ret_239, __ret_239, __lane_reverse_128_16); \
|
|
__ret_239; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_bf16(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
|
|
bfloat16x4_t __ret_240; \
|
|
bfloat16x4_t __s0_240 = __p0_240; \
|
|
bfloat16x8_t __s2_240 = __p2_240; \
|
|
__ret_240 = vset_lane_bf16(vgetq_lane_bf16(__s2_240, __p3_240), __s0_240, __p1_240); \
|
|
__ret_240; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_bf16(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \
|
|
bfloat16x4_t __ret_241; \
|
|
bfloat16x4_t __s0_241 = __p0_241; \
|
|
bfloat16x8_t __s2_241 = __p2_241; \
|
|
bfloat16x4_t __rev0_241; __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, __lane_reverse_64_16); \
|
|
bfloat16x8_t __rev2_241; __rev2_241 = __builtin_shufflevector(__s2_241, __s2_241, __lane_reverse_128_16); \
|
|
__ret_241 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_241, __p3_241), __rev0_241, __p1_241); \
|
|
__ret_241 = __builtin_shufflevector(__ret_241, __ret_241, __lane_reverse_64_16); \
|
|
__ret_241; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vcvt_bf16_f32(__builtin_bit_cast(int8x16_t, __p0), 11));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __builtin_neon_vcvt_bf16_f32(__builtin_bit_cast(int8x16_t, __rev0), 11));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvtq_high_bf16_f32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 43));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
|
|
bfloat16x8_t __ret;
|
|
bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvtq_high_bf16_f32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 43));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvtq_low_bf16_f32(__builtin_bit_cast(int8x16_t, __p0), 43));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __builtin_neon_vcvtq_low_bf16_f32(__builtin_bit_cast(int8x16_t, __rev0), 43));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("bf16,neon"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
|
|
bfloat16x8_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
|
|
bfloat16x4_t __ret;
|
|
__ret = __builtin_bit_cast(bfloat16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdotq_laneq_u32(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \
|
|
uint32x4_t __ret_242; \
|
|
uint32x4_t __s0_242 = __p0_242; \
|
|
uint8x16_t __s1_242 = __p1_242; \
|
|
uint8x16_t __s2_242 = __p2_242; \
|
|
__ret_242 = vdotq_u32(__s0_242, __s1_242, __builtin_bit_cast(uint8x16_t, splatq_laneq_u32(__builtin_bit_cast(uint32x4_t, __s2_242), __p3_242))); \
|
|
__ret_242; \
|
|
})
|
|
#else
|
|
#define vdotq_laneq_u32(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \
|
|
uint32x4_t __ret_243; \
|
|
uint32x4_t __s0_243 = __p0_243; \
|
|
uint8x16_t __s1_243 = __p1_243; \
|
|
uint8x16_t __s2_243 = __p2_243; \
|
|
uint32x4_t __rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, __lane_reverse_128_32); \
|
|
uint8x16_t __rev1_243; __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, __lane_reverse_128_8); \
|
|
uint8x16_t __rev2_243; __rev2_243 = __builtin_shufflevector(__s2_243, __s2_243, __lane_reverse_128_8); \
|
|
__ret_243 = __noswap_vdotq_u32(__rev0_243, __rev1_243, __builtin_bit_cast(uint8x16_t, __noswap_splatq_laneq_u32(__builtin_bit_cast(uint32x4_t, __rev2_243), __p3_243))); \
|
|
__ret_243 = __builtin_shufflevector(__ret_243, __ret_243, __lane_reverse_128_32); \
|
|
__ret_243; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdotq_laneq_s32(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \
|
|
int32x4_t __ret_244; \
|
|
int32x4_t __s0_244 = __p0_244; \
|
|
int8x16_t __s1_244 = __p1_244; \
|
|
int8x16_t __s2_244 = __p2_244; \
|
|
__ret_244 = vdotq_s32(__s0_244, __s1_244, __builtin_bit_cast(int8x16_t, splatq_laneq_s32(__builtin_bit_cast(int32x4_t, __s2_244), __p3_244))); \
|
|
__ret_244; \
|
|
})
|
|
#else
|
|
#define vdotq_laneq_s32(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \
|
|
int32x4_t __ret_245; \
|
|
int32x4_t __s0_245 = __p0_245; \
|
|
int8x16_t __s1_245 = __p1_245; \
|
|
int8x16_t __s2_245 = __p2_245; \
|
|
int32x4_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, __lane_reverse_128_32); \
|
|
int8x16_t __rev1_245; __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, __lane_reverse_128_8); \
|
|
int8x16_t __rev2_245; __rev2_245 = __builtin_shufflevector(__s2_245, __s2_245, __lane_reverse_128_8); \
|
|
__ret_245 = __noswap_vdotq_s32(__rev0_245, __rev1_245, __builtin_bit_cast(int8x16_t, __noswap_splatq_laneq_s32(__builtin_bit_cast(int32x4_t, __rev2_245), __p3_245))); \
|
|
__ret_245 = __builtin_shufflevector(__ret_245, __ret_245, __lane_reverse_128_32); \
|
|
__ret_245; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdot_laneq_u32(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \
|
|
uint32x2_t __ret_246; \
|
|
uint32x2_t __s0_246 = __p0_246; \
|
|
uint8x8_t __s1_246 = __p1_246; \
|
|
uint8x16_t __s2_246 = __p2_246; \
|
|
__ret_246 = vdot_u32(__s0_246, __s1_246, __builtin_bit_cast(uint8x8_t, splat_laneq_u32(__builtin_bit_cast(uint32x4_t, __s2_246), __p3_246))); \
|
|
__ret_246; \
|
|
})
|
|
#else
|
|
#define vdot_laneq_u32(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \
|
|
uint32x2_t __ret_247; \
|
|
uint32x2_t __s0_247 = __p0_247; \
|
|
uint8x8_t __s1_247 = __p1_247; \
|
|
uint8x16_t __s2_247 = __p2_247; \
|
|
uint32x2_t __rev0_247; __rev0_247 = __builtin_shufflevector(__s0_247, __s0_247, __lane_reverse_64_32); \
|
|
uint8x8_t __rev1_247; __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, __lane_reverse_64_8); \
|
|
uint8x16_t __rev2_247; __rev2_247 = __builtin_shufflevector(__s2_247, __s2_247, __lane_reverse_128_8); \
|
|
__ret_247 = __noswap_vdot_u32(__rev0_247, __rev1_247, __builtin_bit_cast(uint8x8_t, __noswap_splat_laneq_u32(__builtin_bit_cast(uint32x4_t, __rev2_247), __p3_247))); \
|
|
__ret_247 = __builtin_shufflevector(__ret_247, __ret_247, __lane_reverse_64_32); \
|
|
__ret_247; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdot_laneq_s32(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \
|
|
int32x2_t __ret_248; \
|
|
int32x2_t __s0_248 = __p0_248; \
|
|
int8x8_t __s1_248 = __p1_248; \
|
|
int8x16_t __s2_248 = __p2_248; \
|
|
__ret_248 = vdot_s32(__s0_248, __s1_248, __builtin_bit_cast(int8x8_t, splat_laneq_s32(__builtin_bit_cast(int32x4_t, __s2_248), __p3_248))); \
|
|
__ret_248; \
|
|
})
|
|
#else
|
|
#define vdot_laneq_s32(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \
|
|
int32x2_t __ret_249; \
|
|
int32x2_t __s0_249 = __p0_249; \
|
|
int8x8_t __s1_249 = __p1_249; \
|
|
int8x16_t __s2_249 = __p2_249; \
|
|
int32x2_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, __lane_reverse_64_32); \
|
|
int8x8_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, __lane_reverse_64_8); \
|
|
int8x16_t __rev2_249; __rev2_249 = __builtin_shufflevector(__s2_249, __s2_249, __lane_reverse_128_8); \
|
|
__ret_249 = __noswap_vdot_s32(__rev0_249, __rev1_249, __builtin_bit_cast(int8x8_t, __noswap_splat_laneq_s32(__builtin_bit_cast(int32x4_t, __rev2_249), __p3_249))); \
|
|
__ret_249 = __builtin_shufflevector(__ret_249, __ret_249, __lane_reverse_64_32); \
|
|
__ret_249; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlalq_high_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlalq_high_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlalq_high_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlal_high_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlal_high_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlal_high_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlalq_low_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlalq_low_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlalq_low_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlal_low_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#else
__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlal_low_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlal_low_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlslq_high_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#else
__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlslq_high_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlslq_high_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlsl_high_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#else
__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlsl_high_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlsl_high_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlslq_low_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#else
__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlslq_low_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
  float32x4_t __ret;
  __ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmlslq_low_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 41));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlsl_low_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#else
__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlsl_low_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
  float32x2_t __ret;
  __ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfmlsl_low_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 9));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  __ret = __p0 / __p1;
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __rev0 / __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  __ret = __p0 / __p1;
  return __ret;
}
#else
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __rev0 / __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  float16x4_t __s2 = __p2; \
  __ret = __builtin_bit_cast(float16_t, __builtin_neon_vfmah_lane_f16(__s0, __s1, __s2, __p3)); \
  __ret; \
})
#else
#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  float16x4_t __s2 = __p2; \
  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_16); \
  __ret = __builtin_bit_cast(float16_t, __builtin_neon_vfmah_lane_f16(__s0, __s1, __rev2, __p3)); \
  __ret; \
})
#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  float16_t __ret; \
  float16_t __s0 = __p0; \
  float16_t __s1 = __p1; \
  float16x4_t __s2 = __p2; \
  __ret = __builtin_bit_cast(float16_t, __builtin_neon_vfmah_lane_f16(__s0, __s1, __s2, __p3)); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  float16x8_t __ret; \
  float16x8_t __s0 = __p0; \
  float16x8_t __s1 = __p1; \
  float16x4_t __s2 = __p2; \
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vfmaq_lane_f16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 40)); \
  __ret; \
})
#else
#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  float16x8_t __ret; \
  float16x8_t __s0 = __p0; \
  float16x8_t __s1 = __p1; \
  float16x4_t __s2 = __p2; \
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_16); \
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vfmaq_lane_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), __p3, 40)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
  __ret; \
})
#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  float16x8_t __ret; \
  float16x8_t __s0 = __p0; \
  float16x8_t __s1 = __p1; \
  float16x4_t __s2 = __p2; \
  __ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vfmaq_lane_f16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 40)); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  float16x4_t __ret; \
  float16x4_t __s0 = __p0; \
  float16x4_t __s1 = __p1; \
  float16x4_t __s2 = __p2; \
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vfma_lane_f16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 8)); \
  __ret; \
})
#else
#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  float16x4_t __ret; \
  float16x4_t __s0 = __p0; \
  float16x4_t __s1 = __p1; \
  float16x4_t __s2 = __p2; \
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
  float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_16); \
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vfma_lane_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), __p3, 8)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
  __ret; \
})
#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  float16x4_t __ret; \
  float16x4_t __s0 = __p0; \
  float16x4_t __s1 = __p1; \
  float16x4_t __s2 = __p2; \
  __ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vfma_lane_f16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 8)); \
  __ret; \
})
#endif
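
/* The *_lane_f16 macros above take their lane operand from a 64-bit
 * float16x4_t; the *_laneq_f16 macros that follow index into a 128-bit
 * float16x8_t instead.  They are macros rather than inline functions so that
 * the lane index __p3 reaches the builtin as a compile-time constant. */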
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
float16x8_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vfmah_laneq_f16(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
float16x8_t __s2 = __p2; \
|
|
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vfmah_laneq_f16(__s0, __s1, __rev2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
float16x8_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vfmah_laneq_f16(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
float16x8_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vfmaq_laneq_f16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 40)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
float16x8_t __s2 = __p2; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vfmaq_laneq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), __p3, 40)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
float16x8_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vfmaq_laneq_f16(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 40)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
float16x8_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vfma_laneq_f16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 8)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
float16x8_t __s2 = __p2; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vfma_laneq_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), __p3, 8)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
float16x8_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vfma_laneq_f16(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 8)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
float16_t __s2 = __p2; \
|
|
__ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
float16_t __s2 = __p2; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
float16_t __s2 = __p2; \
|
|
__ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
float16_t __s2 = __p2; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmsh_lane_f16(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
|
|
float16_t __ret_250; \
|
|
float16_t __s0_250 = __p0_250; \
|
|
float16_t __s1_250 = __p1_250; \
|
|
float16x4_t __s2_250 = __p2_250; \
|
|
__ret_250 = vfmah_lane_f16(__s0_250, -__s1_250, __s2_250, __p3_250); \
|
|
__ret_250; \
|
|
})
|
|
#else
|
|
#define vfmsh_lane_f16(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
|
|
float16_t __ret_251; \
|
|
float16_t __s0_251 = __p0_251; \
|
|
float16_t __s1_251 = __p1_251; \
|
|
float16x4_t __s2_251 = __p2_251; \
|
|
float16x4_t __rev2_251; __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, __lane_reverse_64_16); \
|
|
__ret_251 = __noswap_vfmah_lane_f16(__s0_251, -__s1_251, __rev2_251, __p3_251); \
|
|
__ret_251; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmsq_lane_f16(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
|
|
float16x8_t __ret_252; \
|
|
float16x8_t __s0_252 = __p0_252; \
|
|
float16x8_t __s1_252 = __p1_252; \
|
|
float16x4_t __s2_252 = __p2_252; \
|
|
__ret_252 = vfmaq_lane_f16(__s0_252, -__s1_252, __s2_252, __p3_252); \
|
|
__ret_252; \
|
|
})
|
|
#else
|
|
#define vfmsq_lane_f16(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
|
|
float16x8_t __ret_253; \
|
|
float16x8_t __s0_253 = __p0_253; \
|
|
float16x8_t __s1_253 = __p1_253; \
|
|
float16x4_t __s2_253 = __p2_253; \
|
|
float16x8_t __rev0_253; __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_253; __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, __lane_reverse_128_16); \
|
|
float16x4_t __rev2_253; __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, __lane_reverse_64_16); \
|
|
__ret_253 = __noswap_vfmaq_lane_f16(__rev0_253, -__rev1_253, __rev2_253, __p3_253); \
|
|
__ret_253 = __builtin_shufflevector(__ret_253, __ret_253, __lane_reverse_128_16); \
|
|
__ret_253; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfms_lane_f16(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
|
|
float16x4_t __ret_254; \
|
|
float16x4_t __s0_254 = __p0_254; \
|
|
float16x4_t __s1_254 = __p1_254; \
|
|
float16x4_t __s2_254 = __p2_254; \
|
|
__ret_254 = vfma_lane_f16(__s0_254, -__s1_254, __s2_254, __p3_254); \
|
|
__ret_254; \
|
|
})
|
|
#else
|
|
#define vfms_lane_f16(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
|
|
float16x4_t __ret_255; \
|
|
float16x4_t __s0_255 = __p0_255; \
|
|
float16x4_t __s1_255 = __p1_255; \
|
|
float16x4_t __s2_255 = __p2_255; \
|
|
float16x4_t __rev0_255; __rev0_255 = __builtin_shufflevector(__s0_255, __s0_255, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_255; __rev1_255 = __builtin_shufflevector(__s1_255, __s1_255, __lane_reverse_64_16); \
|
|
float16x4_t __rev2_255; __rev2_255 = __builtin_shufflevector(__s2_255, __s2_255, __lane_reverse_64_16); \
|
|
__ret_255 = __noswap_vfma_lane_f16(__rev0_255, -__rev1_255, __rev2_255, __p3_255); \
|
|
__ret_255 = __builtin_shufflevector(__ret_255, __ret_255, __lane_reverse_64_16); \
|
|
__ret_255; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmsh_laneq_f16(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
|
|
float16_t __ret_256; \
|
|
float16_t __s0_256 = __p0_256; \
|
|
float16_t __s1_256 = __p1_256; \
|
|
float16x8_t __s2_256 = __p2_256; \
|
|
__ret_256 = vfmah_laneq_f16(__s0_256, -__s1_256, __s2_256, __p3_256); \
|
|
__ret_256; \
|
|
})
|
|
#else
|
|
#define vfmsh_laneq_f16(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
|
|
float16_t __ret_257; \
|
|
float16_t __s0_257 = __p0_257; \
|
|
float16_t __s1_257 = __p1_257; \
|
|
float16x8_t __s2_257 = __p2_257; \
|
|
float16x8_t __rev2_257; __rev2_257 = __builtin_shufflevector(__s2_257, __s2_257, __lane_reverse_128_16); \
|
|
__ret_257 = __noswap_vfmah_laneq_f16(__s0_257, -__s1_257, __rev2_257, __p3_257); \
|
|
__ret_257; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmsq_laneq_f16(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
|
|
float16x8_t __ret_258; \
|
|
float16x8_t __s0_258 = __p0_258; \
|
|
float16x8_t __s1_258 = __p1_258; \
|
|
float16x8_t __s2_258 = __p2_258; \
|
|
__ret_258 = vfmaq_laneq_f16(__s0_258, -__s1_258, __s2_258, __p3_258); \
|
|
__ret_258; \
|
|
})
|
|
#else
|
|
#define vfmsq_laneq_f16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
|
|
float16x8_t __ret_259; \
|
|
float16x8_t __s0_259 = __p0_259; \
|
|
float16x8_t __s1_259 = __p1_259; \
|
|
float16x8_t __s2_259 = __p2_259; \
|
|
float16x8_t __rev0_259; __rev0_259 = __builtin_shufflevector(__s0_259, __s0_259, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_259; __rev1_259 = __builtin_shufflevector(__s1_259, __s1_259, __lane_reverse_128_16); \
|
|
float16x8_t __rev2_259; __rev2_259 = __builtin_shufflevector(__s2_259, __s2_259, __lane_reverse_128_16); \
|
|
__ret_259 = __noswap_vfmaq_laneq_f16(__rev0_259, -__rev1_259, __rev2_259, __p3_259); \
|
|
__ret_259 = __builtin_shufflevector(__ret_259, __ret_259, __lane_reverse_128_16); \
|
|
__ret_259; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfms_laneq_f16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
|
|
float16x4_t __ret_260; \
|
|
float16x4_t __s0_260 = __p0_260; \
|
|
float16x4_t __s1_260 = __p1_260; \
|
|
float16x8_t __s2_260 = __p2_260; \
|
|
__ret_260 = vfma_laneq_f16(__s0_260, -__s1_260, __s2_260, __p3_260); \
|
|
__ret_260; \
|
|
})
|
|
#else
|
|
#define vfms_laneq_f16(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
|
|
float16x4_t __ret_261; \
|
|
float16x4_t __s0_261 = __p0_261; \
|
|
float16x4_t __s1_261 = __p1_261; \
|
|
float16x8_t __s2_261 = __p2_261; \
|
|
float16x4_t __rev0_261; __rev0_261 = __builtin_shufflevector(__s0_261, __s0_261, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_261; __rev1_261 = __builtin_shufflevector(__s1_261, __s1_261, __lane_reverse_64_16); \
|
|
float16x8_t __rev2_261; __rev2_261 = __builtin_shufflevector(__s2_261, __s2_261, __lane_reverse_128_16); \
|
|
__ret_261 = __noswap_vfma_laneq_f16(__rev0_261, -__rev1_261, __rev2_261, __p3_261); \
|
|
__ret_261 = __builtin_shufflevector(__ret_261, __ret_261, __lane_reverse_64_16); \
|
|
__ret_261; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
float16_t __s2 = __p2; \
|
|
__ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
float16_t __s2 = __p2; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
float16_t __s2 = __p2; \
|
|
__ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
float16_t __s2 = __p2; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmaxnmvq_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxnmvq_f16(__builtin_bit_cast(int8x16_t, __s0))); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmaxnmvq_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxnmvq_f16(__builtin_bit_cast(int8x16_t, __rev0))); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmaxnmv_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxnmv_f16(__builtin_bit_cast(int8x8_t, __s0))); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmaxnmv_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxnmv_f16(__builtin_bit_cast(int8x8_t, __rev0))); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmaxvq_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxvq_f16(__builtin_bit_cast(int8x16_t, __s0))); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmaxvq_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxvq_f16(__builtin_bit_cast(int8x16_t, __rev0))); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmaxv_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxv_f16(__builtin_bit_cast(int8x8_t, __s0))); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmaxv_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxv_f16(__builtin_bit_cast(int8x8_t, __rev0))); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vminnmvq_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminnmvq_f16(__builtin_bit_cast(int8x16_t, __s0))); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vminnmvq_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminnmvq_f16(__builtin_bit_cast(int8x16_t, __rev0))); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vminnmv_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminnmv_f16(__builtin_bit_cast(int8x8_t, __s0))); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vminnmv_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminnmv_f16(__builtin_bit_cast(int8x8_t, __rev0))); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vminvq_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminvq_f16(__builtin_bit_cast(int8x16_t, __s0))); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vminvq_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminvq_f16(__builtin_bit_cast(int8x16_t, __rev0))); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vminv_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminv_f16(__builtin_bit_cast(int8x8_t, __s0))); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vminv_f16(__p0) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminv_f16(__builtin_bit_cast(int8x8_t, __rev0))); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_laneq_f16(__p0_262, __p1_262, __p2_262) __extension__ ({ \
|
|
float16x8_t __ret_262; \
|
|
float16x8_t __s0_262 = __p0_262; \
|
|
float16x8_t __s1_262 = __p1_262; \
|
|
__ret_262 = __s0_262 * splatq_laneq_f16(__s1_262, __p2_262); \
|
|
__ret_262; \
|
|
})
|
|
#else
|
|
#define vmulq_laneq_f16(__p0_263, __p1_263, __p2_263) __extension__ ({ \
|
|
float16x8_t __ret_263; \
|
|
float16x8_t __s0_263 = __p0_263; \
|
|
float16x8_t __s1_263 = __p1_263; \
|
|
float16x8_t __rev0_263; __rev0_263 = __builtin_shufflevector(__s0_263, __s0_263, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_263; __rev1_263 = __builtin_shufflevector(__s1_263, __s1_263, __lane_reverse_128_16); \
|
|
__ret_263 = __rev0_263 * __noswap_splatq_laneq_f16(__rev1_263, __p2_263); \
|
|
__ret_263 = __builtin_shufflevector(__ret_263, __ret_263, __lane_reverse_128_16); \
|
|
__ret_263; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_laneq_f16(__p0_264, __p1_264, __p2_264) __extension__ ({ \
|
|
float16x4_t __ret_264; \
|
|
float16x4_t __s0_264 = __p0_264; \
|
|
float16x8_t __s1_264 = __p1_264; \
|
|
__ret_264 = __s0_264 * splat_laneq_f16(__s1_264, __p2_264); \
|
|
__ret_264; \
|
|
})
|
|
#else
|
|
#define vmul_laneq_f16(__p0_265, __p1_265, __p2_265) __extension__ ({ \
|
|
float16x4_t __ret_265; \
|
|
float16x4_t __s0_265 = __p0_265; \
|
|
float16x8_t __s1_265 = __p1_265; \
|
|
float16x4_t __rev0_265; __rev0_265 = __builtin_shufflevector(__s0_265, __s0_265, __lane_reverse_64_16); \
|
|
float16x8_t __rev1_265; __rev1_265 = __builtin_shufflevector(__s1_265, __s1_265, __lane_reverse_128_16); \
|
|
__ret_265 = __rev0_265 * __noswap_splat_laneq_f16(__rev1_265, __p2_265); \
|
|
__ret_265 = __builtin_shufflevector(__ret_265, __ret_265, __lane_reverse_64_16); \
|
|
__ret_265; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmulxq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmulxq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vmulxq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vmulx_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vmulx_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vmulx_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmulxh_lane_f16(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
float16x4_t __s1 = __p1; \
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmulxh_lane_f16(__s0, __rev1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxq_lane_f16(__p0_266, __p1_266, __p2_266) __extension__ ({ \
|
|
float16x8_t __ret_266; \
|
|
float16x8_t __s0_266 = __p0_266; \
|
|
float16x4_t __s1_266 = __p1_266; \
|
|
__ret_266 = vmulxq_f16(__s0_266, splatq_lane_f16(__s1_266, __p2_266)); \
|
|
__ret_266; \
|
|
})
|
|
#else
|
|
#define vmulxq_lane_f16(__p0_267, __p1_267, __p2_267) __extension__ ({ \
|
|
float16x8_t __ret_267; \
|
|
float16x8_t __s0_267 = __p0_267; \
|
|
float16x4_t __s1_267 = __p1_267; \
|
|
float16x8_t __rev0_267; __rev0_267 = __builtin_shufflevector(__s0_267, __s0_267, __lane_reverse_128_16); \
|
|
float16x4_t __rev1_267; __rev1_267 = __builtin_shufflevector(__s1_267, __s1_267, __lane_reverse_64_16); \
|
|
__ret_267 = __noswap_vmulxq_f16(__rev0_267, __noswap_splatq_lane_f16(__rev1_267, __p2_267)); \
|
|
__ret_267 = __builtin_shufflevector(__ret_267, __ret_267, __lane_reverse_128_16); \
|
|
__ret_267; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulx_lane_f16(__p0_268, __p1_268, __p2_268) __extension__ ({ \
|
|
float16x4_t __ret_268; \
|
|
float16x4_t __s0_268 = __p0_268; \
|
|
float16x4_t __s1_268 = __p1_268; \
|
|
__ret_268 = vmulx_f16(__s0_268, splat_lane_f16(__s1_268, __p2_268)); \
|
|
__ret_268; \
|
|
})
|
|
#else
|
|
#define vmulx_lane_f16(__p0_269, __p1_269, __p2_269) __extension__ ({ \
|
|
float16x4_t __ret_269; \
|
|
float16x4_t __s0_269 = __p0_269; \
|
|
float16x4_t __s1_269 = __p1_269; \
|
|
float16x4_t __rev0_269; __rev0_269 = __builtin_shufflevector(__s0_269, __s0_269, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_269; __rev1_269 = __builtin_shufflevector(__s1_269, __s1_269, __lane_reverse_64_16); \
|
|
__ret_269 = __noswap_vmulx_f16(__rev0_269, __noswap_splat_lane_f16(__rev1_269, __p2_269)); \
|
|
__ret_269 = __builtin_shufflevector(__ret_269, __ret_269, __lane_reverse_64_16); \
|
|
__ret_269; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmulxh_laneq_f16(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16_t __s0 = __p0; \
|
|
float16x8_t __s1 = __p1; \
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmulxh_laneq_f16(__s0, __rev1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxq_laneq_f16(__p0_270, __p1_270, __p2_270) __extension__ ({ \
|
|
float16x8_t __ret_270; \
|
|
float16x8_t __s0_270 = __p0_270; \
|
|
float16x8_t __s1_270 = __p1_270; \
|
|
__ret_270 = vmulxq_f16(__s0_270, splatq_laneq_f16(__s1_270, __p2_270)); \
|
|
__ret_270; \
|
|
})
|
|
#else
|
|
#define vmulxq_laneq_f16(__p0_271, __p1_271, __p2_271) __extension__ ({ \
|
|
float16x8_t __ret_271; \
|
|
float16x8_t __s0_271 = __p0_271; \
|
|
float16x8_t __s1_271 = __p1_271; \
|
|
float16x8_t __rev0_271; __rev0_271 = __builtin_shufflevector(__s0_271, __s0_271, __lane_reverse_128_16); \
|
|
float16x8_t __rev1_271; __rev1_271 = __builtin_shufflevector(__s1_271, __s1_271, __lane_reverse_128_16); \
|
|
__ret_271 = __noswap_vmulxq_f16(__rev0_271, __noswap_splatq_laneq_f16(__rev1_271, __p2_271)); \
|
|
__ret_271 = __builtin_shufflevector(__ret_271, __ret_271, __lane_reverse_128_16); \
|
|
__ret_271; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulx_laneq_f16(__p0_272, __p1_272, __p2_272) __extension__ ({ \
|
|
float16x4_t __ret_272; \
|
|
float16x4_t __s0_272 = __p0_272; \
|
|
float16x8_t __s1_272 = __p1_272; \
|
|
__ret_272 = vmulx_f16(__s0_272, splat_laneq_f16(__s1_272, __p2_272)); \
|
|
__ret_272; \
|
|
})
|
|
#else
|
|
#define vmulx_laneq_f16(__p0_273, __p1_273, __p2_273) __extension__ ({ \
|
|
float16x4_t __ret_273; \
|
|
float16x4_t __s0_273 = __p0_273; \
|
|
float16x8_t __s1_273 = __p1_273; \
|
|
float16x4_t __rev0_273; __rev0_273 = __builtin_shufflevector(__s0_273, __s0_273, __lane_reverse_64_16); \
|
|
float16x8_t __rev1_273; __rev1_273 = __builtin_shufflevector(__s1_273, __s1_273, __lane_reverse_128_16); \
|
|
__ret_273 = __noswap_vmulx_f16(__rev0_273, __noswap_splat_laneq_f16(__rev1_273, __p2_273)); \
|
|
__ret_273 = __builtin_shufflevector(__ret_273, __ret_273, __lane_reverse_64_16); \
|
|
__ret_273; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
__ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
|
|
float16x8_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
__ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
|
|
float16x4_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16_t __s1 = __p1; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vpaddq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vpaddq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vpmaxq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vpmaxq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vpmaxnmq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vpmaxnmq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vpmaxnm_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vpmaxnm_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vpminq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vpminq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vpminnmq_f16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vpminnmq_f16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vpminnm_f16(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vpminnm_f16(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndiq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndiq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndiq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vrndiq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndi_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrndi_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndi_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vrndi_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsqrtq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vsqrtq_f16(__builtin_bit_cast(int8x16_t, __p0), 40));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsqrtq_f16(float16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(float16x8_t, __builtin_neon_vsqrtq_f16(__builtin_bit_cast(int8x16_t, __rev0), 40));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsqrt_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vsqrt_f16(__builtin_bit_cast(int8x8_t, __p0), 8));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsqrt_f16(float16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(float16x4_t, __builtin_neon_vsqrt_f16(__builtin_bit_cast(int8x8_t, __rev0), 8));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsudotq_laneq_s32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
|
|
int32x4_t __ret_274; \
|
|
int32x4_t __s0_274 = __p0_274; \
|
|
int8x16_t __s1_274 = __p1_274; \
|
|
uint8x16_t __s2_274 = __p2_274; \
|
|
__ret_274 = vusdotq_s32(__s0_274, __builtin_bit_cast(uint8x16_t, splatq_laneq_s32(__builtin_bit_cast(int32x4_t, __s2_274), __p3_274)), __s1_274); \
|
|
__ret_274; \
|
|
})
|
|
#else
|
|
#define vsudotq_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
|
|
int32x4_t __ret_275; \
|
|
int32x4_t __s0_275 = __p0_275; \
|
|
int8x16_t __s1_275 = __p1_275; \
|
|
uint8x16_t __s2_275 = __p2_275; \
|
|
int32x4_t __rev0_275; __rev0_275 = __builtin_shufflevector(__s0_275, __s0_275, __lane_reverse_128_32); \
|
|
int8x16_t __rev1_275; __rev1_275 = __builtin_shufflevector(__s1_275, __s1_275, __lane_reverse_128_8); \
|
|
uint8x16_t __rev2_275; __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, __lane_reverse_128_8); \
|
|
__ret_275 = __noswap_vusdotq_s32(__rev0_275, __builtin_bit_cast(uint8x16_t, __noswap_splatq_laneq_s32(__builtin_bit_cast(int32x4_t, __rev2_275), __p3_275)), __rev1_275); \
|
|
__ret_275 = __builtin_shufflevector(__ret_275, __ret_275, __lane_reverse_128_32); \
|
|
__ret_275; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsudot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
|
|
int32x2_t __ret_276; \
|
|
int32x2_t __s0_276 = __p0_276; \
|
|
int8x8_t __s1_276 = __p1_276; \
|
|
uint8x16_t __s2_276 = __p2_276; \
|
|
__ret_276 = vusdot_s32(__s0_276, __builtin_bit_cast(uint8x8_t, splat_laneq_s32(__builtin_bit_cast(int32x4_t, __s2_276), __p3_276)), __s1_276); \
|
|
__ret_276; \
|
|
})
|
|
#else
|
|
#define vsudot_laneq_s32(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
|
|
int32x2_t __ret_277; \
|
|
int32x2_t __s0_277 = __p0_277; \
|
|
int8x8_t __s1_277 = __p1_277; \
|
|
uint8x16_t __s2_277 = __p2_277; \
|
|
int32x2_t __rev0_277; __rev0_277 = __builtin_shufflevector(__s0_277, __s0_277, __lane_reverse_64_32); \
|
|
int8x8_t __rev1_277; __rev1_277 = __builtin_shufflevector(__s1_277, __s1_277, __lane_reverse_64_8); \
|
|
uint8x16_t __rev2_277; __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, __lane_reverse_128_8); \
|
|
__ret_277 = __noswap_vusdot_s32(__rev0_277, __builtin_bit_cast(uint8x8_t, __noswap_splat_laneq_s32(__builtin_bit_cast(int32x4_t, __rev2_277), __p3_277)), __rev1_277); \
|
|
__ret_277 = __builtin_shufflevector(__ret_277, __ret_277, __lane_reverse_64_32); \
|
|
__ret_277; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vusdotq_laneq_s32(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
|
|
int32x4_t __ret_278; \
|
|
int32x4_t __s0_278 = __p0_278; \
|
|
uint8x16_t __s1_278 = __p1_278; \
|
|
int8x16_t __s2_278 = __p2_278; \
|
|
__ret_278 = vusdotq_s32(__s0_278, __s1_278, __builtin_bit_cast(int8x16_t, splatq_laneq_s32(__builtin_bit_cast(int32x4_t, __s2_278), __p3_278))); \
|
|
__ret_278; \
|
|
})
|
|
#else
|
|
#define vusdotq_laneq_s32(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
|
|
int32x4_t __ret_279; \
|
|
int32x4_t __s0_279 = __p0_279; \
|
|
uint8x16_t __s1_279 = __p1_279; \
|
|
int8x16_t __s2_279 = __p2_279; \
|
|
int32x4_t __rev0_279; __rev0_279 = __builtin_shufflevector(__s0_279, __s0_279, __lane_reverse_128_32); \
|
|
uint8x16_t __rev1_279; __rev1_279 = __builtin_shufflevector(__s1_279, __s1_279, __lane_reverse_128_8); \
|
|
int8x16_t __rev2_279; __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, __lane_reverse_128_8); \
|
|
__ret_279 = __noswap_vusdotq_s32(__rev0_279, __rev1_279, __builtin_bit_cast(int8x16_t, __noswap_splatq_laneq_s32(__builtin_bit_cast(int32x4_t, __rev2_279), __p3_279))); \
|
|
__ret_279 = __builtin_shufflevector(__ret_279, __ret_279, __lane_reverse_128_32); \
|
|
__ret_279; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vusdot_laneq_s32(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
|
|
int32x2_t __ret_280; \
|
|
int32x2_t __s0_280 = __p0_280; \
|
|
uint8x8_t __s1_280 = __p1_280; \
|
|
int8x16_t __s2_280 = __p2_280; \
|
|
__ret_280 = vusdot_s32(__s0_280, __s1_280, __builtin_bit_cast(int8x8_t, splat_laneq_s32(__builtin_bit_cast(int32x4_t, __s2_280), __p3_280))); \
|
|
__ret_280; \
|
|
})
|
|
#else
|
|
#define vusdot_laneq_s32(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
|
|
int32x2_t __ret_281; \
|
|
int32x2_t __s0_281 = __p0_281; \
|
|
uint8x8_t __s1_281 = __p1_281; \
|
|
int8x16_t __s2_281 = __p2_281; \
|
|
int32x2_t __rev0_281; __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, __lane_reverse_64_32); \
|
|
uint8x8_t __rev1_281; __rev1_281 = __builtin_shufflevector(__s1_281, __s1_281, __lane_reverse_64_8); \
|
|
int8x16_t __rev2_281; __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, __lane_reverse_128_8); \
|
|
__ret_281 = __noswap_vusdot_s32(__rev0_281, __rev1_281, __builtin_bit_cast(int8x8_t, __noswap_splat_laneq_s32(__builtin_bit_cast(int32x4_t, __rev2_281), __p3_281))); \
|
|
__ret_281 = __builtin_shufflevector(__ret_281, __ret_281, __lane_reverse_64_32); \
|
|
__ret_281; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vabdq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vabd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 10));
return __ret;
}
__ai __attribute__((target("neon"))) float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
float64_t __ret;
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vabdd_f64(__p0, __p1));
return __ret;
}
__ai __attribute__((target("neon"))) float32_t vabds_f32(float32_t __p0, float32_t __p1) {
float32_t __ret;
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vabds_f32(__p0, __p1));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vabsq_f64(float64x2_t __p0) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vabsq_f64(float64x2_t __p0) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vabsq_s64(int64x2_t __p0) {
int64x2_t __ret;
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __p0), 35));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vabsq_s64(int64x2_t __p0) {
int64x2_t __ret;
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vabsq_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vabs_f64(float64x1_t __p0) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vabs_v(__builtin_bit_cast(int8x8_t, __p0), 10));
return __ret;
}
__ai __attribute__((target("neon"))) int64x1_t vabs_s64(int64x1_t __p0) {
int64x1_t __ret;
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vabs_v(__builtin_bit_cast(int8x8_t, __p0), 3));
return __ret;
}
__ai __attribute__((target("neon"))) int64_t vabsd_s64(int64_t __p0) {
int64_t __ret;
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vabsd_s64(__p0));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
__ret = __p0 + __p1;
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
__ret = __rev0 + __rev1;
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
float64x1_t __ret;
__ret = __p0 + __p1;
return __ret;
}
__ai __attribute__((target("neon"))) uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
uint64_t __ret;
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vaddd_u64(__p0, __p1));
return __ret;
}
__ai __attribute__((target("neon"))) int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
int64_t __ret;
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vaddd_s64(__p0, __p1));
return __ret;
}
__ai __attribute__((target("neon"))) poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) {
poly128_t __ret;
__ret = __builtin_bit_cast(poly128_t, __builtin_neon_vaddq_p128(__p0, __p1));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
uint16x8_t __ret;
__ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
uint16x8_t __ret;
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
__ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
uint32x4_t __ret;
__ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
uint32x4_t __ret;
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
__ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
uint8x16_t __ret;
__ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
uint8x16_t __ret;
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
__ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
int16x8_t __ret;
__ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
int16x8_t __ret;
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
__ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
int32x4_t __ret;
__ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
int32x4_t __ret;
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
__ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
int8x16_t __ret;
__ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
int8x16_t __ret;
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
__ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16_t vaddlvq_u8(uint8x16_t __p0) {
uint16_t __ret;
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vaddlvq_u8(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16_t vaddlvq_u8(uint8x16_t __p0) {
uint16_t __ret;
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vaddlvq_u8(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64_t vaddlvq_u32(uint32x4_t __p0) {
uint64_t __ret;
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vaddlvq_u32(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64_t vaddlvq_u32(uint32x4_t __p0) {
uint64_t __ret;
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vaddlvq_u32(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32_t vaddlvq_u16(uint16x8_t __p0) {
uint32_t __ret;
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vaddlvq_u16(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32_t vaddlvq_u16(uint16x8_t __p0) {
uint32_t __ret;
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vaddlvq_u16(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16_t vaddlvq_s8(int8x16_t __p0) {
int16_t __ret;
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vaddlvq_s8(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16_t vaddlvq_s8(int8x16_t __p0) {
int16_t __ret;
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vaddlvq_s8(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64_t vaddlvq_s32(int32x4_t __p0) {
int64_t __ret;
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vaddlvq_s32(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64_t vaddlvq_s32(int32x4_t __p0) {
int64_t __ret;
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vaddlvq_s32(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32_t vaddlvq_s16(int16x8_t __p0) {
int32_t __ret;
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vaddlvq_s16(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32_t vaddlvq_s16(int16x8_t __p0) {
int32_t __ret;
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vaddlvq_s16(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16_t vaddlv_u8(uint8x8_t __p0) {
uint16_t __ret;
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vaddlv_u8(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16_t vaddlv_u8(uint8x8_t __p0) {
uint16_t __ret;
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vaddlv_u8(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64_t vaddlv_u32(uint32x2_t __p0) {
uint64_t __ret;
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vaddlv_u32(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64_t vaddlv_u32(uint32x2_t __p0) {
uint64_t __ret;
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vaddlv_u32(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32_t vaddlv_u16(uint16x4_t __p0) {
uint32_t __ret;
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vaddlv_u16(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32_t vaddlv_u16(uint16x4_t __p0) {
uint32_t __ret;
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vaddlv_u16(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16_t vaddlv_s8(int8x8_t __p0) {
int16_t __ret;
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vaddlv_s8(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16_t vaddlv_s8(int8x8_t __p0) {
int16_t __ret;
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vaddlv_s8(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64_t vaddlv_s32(int32x2_t __p0) {
int64_t __ret;
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vaddlv_s32(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64_t vaddlv_s32(int32x2_t __p0) {
int64_t __ret;
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vaddlv_s32(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32_t vaddlv_s16(int16x4_t __p0) {
int32_t __ret;
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vaddlv_s16(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32_t vaddlv_s16(int16x4_t __p0) {
int32_t __ret;
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vaddlv_s16(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8_t vaddvq_u8(uint8x16_t __p0) {
uint8_t __ret;
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vaddvq_u8(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8_t vaddvq_u8(uint8x16_t __p0) {
uint8_t __ret;
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vaddvq_u8(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32_t vaddvq_u32(uint32x4_t __p0) {
uint32_t __ret;
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vaddvq_u32(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32_t vaddvq_u32(uint32x4_t __p0) {
uint32_t __ret;
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vaddvq_u32(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64_t vaddvq_u64(uint64x2_t __p0) {
uint64_t __ret;
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vaddvq_u64(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64_t vaddvq_u64(uint64x2_t __p0) {
uint64_t __ret;
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vaddvq_u64(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16_t vaddvq_u16(uint16x8_t __p0) {
uint16_t __ret;
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vaddvq_u16(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16_t vaddvq_u16(uint16x8_t __p0) {
uint16_t __ret;
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vaddvq_u16(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8_t vaddvq_s8(int8x16_t __p0) {
int8_t __ret;
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vaddvq_s8(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int8_t vaddvq_s8(int8x16_t __p0) {
int8_t __ret;
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vaddvq_s8(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64_t vaddvq_f64(float64x2_t __p0) {
float64_t __ret;
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vaddvq_f64(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64_t vaddvq_f64(float64x2_t __p0) {
float64_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vaddvq_f64(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32_t vaddvq_f32(float32x4_t __p0) {
float32_t __ret;
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vaddvq_f32(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float32_t vaddvq_f32(float32x4_t __p0) {
float32_t __ret;
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vaddvq_f32(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32_t vaddvq_s32(int32x4_t __p0) {
int32_t __ret;
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vaddvq_s32(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32_t vaddvq_s32(int32x4_t __p0) {
int32_t __ret;
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vaddvq_s32(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64_t vaddvq_s64(int64x2_t __p0) {
int64_t __ret;
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vaddvq_s64(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64_t vaddvq_s64(int64x2_t __p0) {
int64_t __ret;
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vaddvq_s64(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16_t vaddvq_s16(int16x8_t __p0) {
int16_t __ret;
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vaddvq_s16(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16_t vaddvq_s16(int16x8_t __p0) {
int16_t __ret;
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vaddvq_s16(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8_t vaddv_u8(uint8x8_t __p0) {
uint8_t __ret;
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vaddv_u8(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8_t vaddv_u8(uint8x8_t __p0) {
uint8_t __ret;
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vaddv_u8(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32_t vaddv_u32(uint32x2_t __p0) {
uint32_t __ret;
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vaddv_u32(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32_t vaddv_u32(uint32x2_t __p0) {
uint32_t __ret;
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vaddv_u32(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16_t vaddv_u16(uint16x4_t __p0) {
uint16_t __ret;
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vaddv_u16(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16_t vaddv_u16(uint16x4_t __p0) {
uint16_t __ret;
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vaddv_u16(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8_t vaddv_s8(int8x8_t __p0) {
int8_t __ret;
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vaddv_s8(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int8_t vaddv_s8(int8x8_t __p0) {
int8_t __ret;
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vaddv_s8(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32_t vaddv_f32(float32x2_t __p0) {
float32_t __ret;
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vaddv_f32(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float32_t vaddv_f32(float32x2_t __p0) {
float32_t __ret;
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vaddv_f32(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32_t vaddv_s32(int32x2_t __p0) {
int32_t __ret;
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vaddv_s32(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32_t vaddv_s32(int32x2_t __p0) {
int32_t __ret;
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vaddv_s32(__rev0));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16_t vaddv_s16(int16x4_t __p0) {
int16_t __ret;
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vaddv_s16(__p0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16_t vaddv_s16(int16x4_t __p0) {
int16_t __ret;
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vaddv_s16(__rev0));
return __ret;
}
#endif

__ai __attribute__((target("neon"))) poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 6));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 38));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
|
|
poly64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 38));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vbslq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vbsl_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 10));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcageq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcageq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcage_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcaged_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcages_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcagtq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcagtq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcagt_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcagtd_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcagts_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcaleq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcaleq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcale_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcaled_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcales_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcaltq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcaltq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcalt_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcaltd_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcalts_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 == __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 == __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vceqd_u64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vceqd_s64(int64_t __p0, int64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vceqd_s64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vceqd_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vceqs_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceqz_p8(poly8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceqz_p8(poly8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __rev0), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vceqz_p64(poly64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 6));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqzq_p8(poly8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqzq_p8(poly8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqzq_p64(poly64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 38));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqzq_p64(poly64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 38));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqzq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqzq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqzq_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqzq_u32(uint32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqzq_u64(uint64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqzq_u64(uint64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vceqzq_u16(uint16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vceqzq_u16(uint16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqzq_s8(int8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vceqzq_s8(int8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqzq_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqzq_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqzq_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqzq_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqzq_s32(int32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vceqzq_s32(int32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqzq_s64(int64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vceqzq_s64(int64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vceqzq_s16(int16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vceqzq_s16(int16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vceqzq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceqz_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceqz_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __rev0), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceqz_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceqz_u32(uint32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __rev0), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vceqz_u64(uint64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 19));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vceqz_u16(uint16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vceqz_u16(uint16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __rev0), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceqz_s8(int8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vceqz_s8(int8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vceqz_f64(float64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 10));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceqz_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceqz_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceqz_s32(int32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vceqz_s32(int32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vceqz_s64(int64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vceqz_s16(int16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vceqz_s16(int16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vceqz_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64_t vceqzd_u64(uint64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vceqzd_u64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vceqzd_s64(int64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vceqzd_s64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vceqzd_f64(float64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vceqzd_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vceqzs_f32(float32_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vceqzs_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 >= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
  uint64x2_t __ret;
  __ret = __builtin_bit_cast(uint64x2_t, __p0 >= __p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
  uint64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(uint64x2_t, __rev0 >= __rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 >= __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcged_s64(int64_t __p0, int64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcged_s64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcged_u64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcged_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcges_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vcgezq_s8(int8x16_t __p0) {
  uint8x16_t __ret;
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vcgezq_s8(int8x16_t __p0) {
  uint8x16_t __ret;
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vcgezq_f64(float64x2_t __p0) {
  uint64x2_t __ret;
  __ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vcgezq_f64(float64x2_t __p0) {
  uint64x2_t __ret;
  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vcgezq_f32(float32x4_t __p0) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vcgezq_f32(float32x4_t __p0) {
  uint32x4_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vcgezq_s32(int32x4_t __p0) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vcgezq_s32(int32x4_t __p0) {
  uint32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vcgezq_s64(int64x2_t __p0) {
  uint64x2_t __ret;
  __ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __p0), 35));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vcgezq_s64(int64x2_t __p0) {
  uint64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vcgezq_s16(int16x8_t __p0) {
  uint16x8_t __ret;
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vcgezq_s16(int16x8_t __p0) {
  uint16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcgezq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vcgez_s8(int8x8_t __p0) {
  uint8x8_t __ret;
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vcgez_v(__builtin_bit_cast(int8x8_t, __p0), 0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vcgez_s8(int8x8_t __p0) {
  uint8x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vcgez_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) uint64x1_t vcgez_f64(float64x1_t __p0) {
  uint64x1_t __ret;
  __ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcgez_v(__builtin_bit_cast(int8x8_t, __p0), 10));
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vcgez_f32(float32x2_t __p0) {
  uint32x2_t __ret;
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcgez_v(__builtin_bit_cast(int8x8_t, __p0), 9));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vcgez_f32(float32x2_t __p0) {
  uint32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcgez_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x2_t vcgez_s32(int32x2_t __p0) {
  uint32x2_t __ret;
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcgez_v(__builtin_bit_cast(int8x8_t, __p0), 2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x2_t vcgez_s32(int32x2_t __p0) {
  uint32x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcgez_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) uint64x1_t vcgez_s64(int64x1_t __p0) {
  uint64x1_t __ret;
  __ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcgez_v(__builtin_bit_cast(int8x8_t, __p0), 3));
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x4_t vcgez_s16(int16x4_t __p0) {
  uint16x4_t __ret;
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcgez_v(__builtin_bit_cast(int8x8_t, __p0), 1));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x4_t vcgez_s16(int16x4_t __p0) {
  uint16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcgez_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) uint64_t vcgezd_s64(int64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcgezd_s64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcgezd_f64(float64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcgezd_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcgezs_f32(float32_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcgezs_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 > __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 > __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcgtd_s64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcgtd_u64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcgtd_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcgts_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcgtzq_s8(int8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcgtzq_s8(int8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_s32(int32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_s32(int32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_s64(int64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_s64(int64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcgtzq_s16(int16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcgtzq_s16(int16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcgtzq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcgtz_s8(int8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vcgtz_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcgtz_s8(int8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vcgtz_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcgtz_f64(float64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcgtz_v(__builtin_bit_cast(int8x8_t, __p0), 10));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcgtz_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcgtz_v(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcgtz_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcgtz_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcgtz_s32(int32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcgtz_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcgtz_s32(int32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcgtz_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcgtz_s64(int64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcgtz_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcgtz_s16(int16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcgtz_v(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcgtz_s16(int16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcgtz_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64_t vcgtzd_s64(int64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcgtzd_s64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcgtzd_f64(float64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcgtzd_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcgtzs_f32(float32_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcgtzs_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 <= __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 <= __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcled_u64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcled_s64(int64_t __p0, int64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcled_s64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcled_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcles_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vclezq_s8(int8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vclezq_s8(int8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vclezq_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vclezq_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vclezq_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vclezq_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vclezq_s32(int32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vclezq_s32(int32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vclezq_s64(int64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vclezq_s64(int64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vclezq_s16(int16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vclezq_s16(int16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vclezq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vclez_s8(int8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vclez_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vclez_s8(int8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vclez_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vclez_f64(float64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vclez_v(__builtin_bit_cast(int8x8_t, __p0), 10));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclez_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vclez_v(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclez_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vclez_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclez_s32(int32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vclez_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vclez_s32(int32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vclez_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vclez_s64(int64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vclez_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vclez_s16(int16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vclez_v(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vclez_s16(int16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vclez_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64_t vclezd_s64(int64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vclezd_s64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vclezd_f64(float64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vclezd_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vclezs_f32(float32_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vclezs_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __rev0 < __rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0 < __p1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcltd_u64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcltd_s64(int64_t __p0, int64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcltd_s64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcltd_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vclts_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcltzq_s8(int8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vcltzq_s8(int8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcltzq_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcltzq_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcltzq_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcltzq_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcltzq_s32(int32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __p0), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vcltzq_s32(int32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __rev0), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcltzq_s64(int64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcltzq_s64(int64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcltzq_s16(int16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __p0), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vcltzq_s16(int16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vcltzq_v(__builtin_bit_cast(int8x16_t, __rev0), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcltz_s8(int8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vcltz_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vcltz_s8(int8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vcltz_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcltz_f64(float64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcltz_v(__builtin_bit_cast(int8x8_t, __p0), 10));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcltz_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcltz_v(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcltz_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcltz_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcltz_s32(int32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcltz_v(__builtin_bit_cast(int8x8_t, __p0), 2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vcltz_s32(int32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vcltz_v(__builtin_bit_cast(int8x8_t, __rev0), 2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcltz_s64(int64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcltz_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcltz_s16(int16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcltz_v(__builtin_bit_cast(int8x8_t, __p0), 1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vcltz_s16(int16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vcltz_v(__builtin_bit_cast(int8x8_t, __rev0), 1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64_t vcltzd_s64(int64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcltzd_s64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcltzd_f64(float64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcltzd_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcltzs_f32(float32_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcltzs_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_p8(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
|
|
poly8x16_t __ret_282; \
|
|
poly8x16_t __s0_282 = __p0_282; \
|
|
poly8x8_t __s2_282 = __p2_282; \
|
|
__ret_282 = vsetq_lane_p8(vget_lane_p8(__s2_282, __p3_282), __s0_282, __p1_282); \
|
|
__ret_282; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_p8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
|
|
poly8x16_t __ret_283; \
|
|
poly8x16_t __s0_283 = __p0_283; \
|
|
poly8x8_t __s2_283 = __p2_283; \
|
|
poly8x16_t __rev0_283; __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, __lane_reverse_128_8); \
|
|
poly8x8_t __rev2_283; __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, __lane_reverse_64_8); \
|
|
__ret_283 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_283, __p3_283), __rev0_283, __p1_283); \
|
|
__ret_283 = __builtin_shufflevector(__ret_283, __ret_283, __lane_reverse_128_8); \
|
|
__ret_283; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_p16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
|
|
poly16x8_t __ret_284; \
|
|
poly16x8_t __s0_284 = __p0_284; \
|
|
poly16x4_t __s2_284 = __p2_284; \
|
|
__ret_284 = vsetq_lane_p16(vget_lane_p16(__s2_284, __p3_284), __s0_284, __p1_284); \
|
|
__ret_284; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_p16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
|
|
poly16x8_t __ret_285; \
|
|
poly16x8_t __s0_285 = __p0_285; \
|
|
poly16x4_t __s2_285 = __p2_285; \
|
|
poly16x8_t __rev0_285; __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, __lane_reverse_128_16); \
|
|
poly16x4_t __rev2_285; __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, __lane_reverse_64_16); \
|
|
__ret_285 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_285, __p3_285), __rev0_285, __p1_285); \
|
|
__ret_285 = __builtin_shufflevector(__ret_285, __ret_285, __lane_reverse_128_16); \
|
|
__ret_285; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_u8(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
|
|
uint8x16_t __ret_286; \
|
|
uint8x16_t __s0_286 = __p0_286; \
|
|
uint8x8_t __s2_286 = __p2_286; \
|
|
__ret_286 = vsetq_lane_u8(vget_lane_u8(__s2_286, __p3_286), __s0_286, __p1_286); \
|
|
__ret_286; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_u8(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
|
|
uint8x16_t __ret_287; \
|
|
uint8x16_t __s0_287 = __p0_287; \
|
|
uint8x8_t __s2_287 = __p2_287; \
|
|
uint8x16_t __rev0_287; __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, __lane_reverse_128_8); \
|
|
uint8x8_t __rev2_287; __rev2_287 = __builtin_shufflevector(__s2_287, __s2_287, __lane_reverse_64_8); \
|
|
__ret_287 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_287, __p3_287), __rev0_287, __p1_287); \
|
|
__ret_287 = __builtin_shufflevector(__ret_287, __ret_287, __lane_reverse_128_8); \
|
|
__ret_287; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_u32(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
|
|
uint32x4_t __ret_288; \
|
|
uint32x4_t __s0_288 = __p0_288; \
|
|
uint32x2_t __s2_288 = __p2_288; \
|
|
__ret_288 = vsetq_lane_u32(vget_lane_u32(__s2_288, __p3_288), __s0_288, __p1_288); \
|
|
__ret_288; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_u32(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
|
|
uint32x4_t __ret_289; \
|
|
uint32x4_t __s0_289 = __p0_289; \
|
|
uint32x2_t __s2_289 = __p2_289; \
|
|
uint32x4_t __rev0_289; __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, __lane_reverse_128_32); \
|
|
uint32x2_t __rev2_289; __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, __lane_reverse_64_32); \
|
|
__ret_289 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_289, __p3_289), __rev0_289, __p1_289); \
|
|
__ret_289 = __builtin_shufflevector(__ret_289, __ret_289, __lane_reverse_128_32); \
|
|
__ret_289; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_u64(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
|
|
uint64x2_t __ret_290; \
|
|
uint64x2_t __s0_290 = __p0_290; \
|
|
uint64x1_t __s2_290 = __p2_290; \
|
|
__ret_290 = vsetq_lane_u64(vget_lane_u64(__s2_290, __p3_290), __s0_290, __p1_290); \
|
|
__ret_290; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_u64(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
|
|
uint64x2_t __ret_291; \
|
|
uint64x2_t __s0_291 = __p0_291; \
|
|
uint64x1_t __s2_291 = __p2_291; \
|
|
uint64x2_t __rev0_291; __rev0_291 = __builtin_shufflevector(__s0_291, __s0_291, __lane_reverse_128_64); \
|
|
__ret_291 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_291, __p3_291), __rev0_291, __p1_291); \
|
|
__ret_291 = __builtin_shufflevector(__ret_291, __ret_291, __lane_reverse_128_64); \
|
|
__ret_291; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_u16(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
|
|
uint16x8_t __ret_292; \
|
|
uint16x8_t __s0_292 = __p0_292; \
|
|
uint16x4_t __s2_292 = __p2_292; \
|
|
__ret_292 = vsetq_lane_u16(vget_lane_u16(__s2_292, __p3_292), __s0_292, __p1_292); \
|
|
__ret_292; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_u16(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \
|
|
uint16x8_t __ret_293; \
|
|
uint16x8_t __s0_293 = __p0_293; \
|
|
uint16x4_t __s2_293 = __p2_293; \
|
|
uint16x8_t __rev0_293; __rev0_293 = __builtin_shufflevector(__s0_293, __s0_293, __lane_reverse_128_16); \
|
|
uint16x4_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, __lane_reverse_64_16); \
|
|
__ret_293 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_293, __p3_293), __rev0_293, __p1_293); \
|
|
__ret_293 = __builtin_shufflevector(__ret_293, __ret_293, __lane_reverse_128_16); \
|
|
__ret_293; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_s8(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \
|
|
int8x16_t __ret_294; \
|
|
int8x16_t __s0_294 = __p0_294; \
|
|
int8x8_t __s2_294 = __p2_294; \
|
|
__ret_294 = vsetq_lane_s8(vget_lane_s8(__s2_294, __p3_294), __s0_294, __p1_294); \
|
|
__ret_294; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_s8(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \
|
|
int8x16_t __ret_295; \
|
|
int8x16_t __s0_295 = __p0_295; \
|
|
int8x8_t __s2_295 = __p2_295; \
|
|
int8x16_t __rev0_295; __rev0_295 = __builtin_shufflevector(__s0_295, __s0_295, __lane_reverse_128_8); \
|
|
int8x8_t __rev2_295; __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, __lane_reverse_64_8); \
|
|
__ret_295 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_295, __p3_295), __rev0_295, __p1_295); \
|
|
__ret_295 = __builtin_shufflevector(__ret_295, __ret_295, __lane_reverse_128_8); \
|
|
__ret_295; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_f32(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \
|
|
float32x4_t __ret_296; \
|
|
float32x4_t __s0_296 = __p0_296; \
|
|
float32x2_t __s2_296 = __p2_296; \
|
|
__ret_296 = vsetq_lane_f32(vget_lane_f32(__s2_296, __p3_296), __s0_296, __p1_296); \
|
|
__ret_296; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_f32(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \
|
|
float32x4_t __ret_297; \
|
|
float32x4_t __s0_297 = __p0_297; \
|
|
float32x2_t __s2_297 = __p2_297; \
|
|
float32x4_t __rev0_297; __rev0_297 = __builtin_shufflevector(__s0_297, __s0_297, __lane_reverse_128_32); \
|
|
float32x2_t __rev2_297; __rev2_297 = __builtin_shufflevector(__s2_297, __s2_297, __lane_reverse_64_32); \
|
|
__ret_297 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_297, __p3_297), __rev0_297, __p1_297); \
|
|
__ret_297 = __builtin_shufflevector(__ret_297, __ret_297, __lane_reverse_128_32); \
|
|
__ret_297; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_s32(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \
|
|
int32x4_t __ret_298; \
|
|
int32x4_t __s0_298 = __p0_298; \
|
|
int32x2_t __s2_298 = __p2_298; \
|
|
__ret_298 = vsetq_lane_s32(vget_lane_s32(__s2_298, __p3_298), __s0_298, __p1_298); \
|
|
__ret_298; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_s32(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \
|
|
int32x4_t __ret_299; \
|
|
int32x4_t __s0_299 = __p0_299; \
|
|
int32x2_t __s2_299 = __p2_299; \
|
|
int32x4_t __rev0_299; __rev0_299 = __builtin_shufflevector(__s0_299, __s0_299, __lane_reverse_128_32); \
|
|
int32x2_t __rev2_299; __rev2_299 = __builtin_shufflevector(__s2_299, __s2_299, __lane_reverse_64_32); \
|
|
__ret_299 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_299, __p3_299), __rev0_299, __p1_299); \
|
|
__ret_299 = __builtin_shufflevector(__ret_299, __ret_299, __lane_reverse_128_32); \
|
|
__ret_299; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_s64(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \
|
|
int64x2_t __ret_300; \
|
|
int64x2_t __s0_300 = __p0_300; \
|
|
int64x1_t __s2_300 = __p2_300; \
|
|
__ret_300 = vsetq_lane_s64(vget_lane_s64(__s2_300, __p3_300), __s0_300, __p1_300); \
|
|
__ret_300; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_s64(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \
|
|
int64x2_t __ret_301; \
|
|
int64x2_t __s0_301 = __p0_301; \
|
|
int64x1_t __s2_301 = __p2_301; \
|
|
int64x2_t __rev0_301; __rev0_301 = __builtin_shufflevector(__s0_301, __s0_301, __lane_reverse_128_64); \
|
|
__ret_301 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_301, __p3_301), __rev0_301, __p1_301); \
|
|
__ret_301 = __builtin_shufflevector(__ret_301, __ret_301, __lane_reverse_128_64); \
|
|
__ret_301; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_mf8(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \
|
|
mfloat8x16_t __ret_302; \
|
|
mfloat8x16_t __s0_302 = __p0_302; \
|
|
mfloat8x8_t __s2_302 = __p2_302; \
|
|
__ret_302 = vsetq_lane_mf8(vget_lane_mf8(__s2_302, __p3_302), __s0_302, __p1_302); \
|
|
__ret_302; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_mf8(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \
|
|
mfloat8x16_t __ret_303; \
|
|
mfloat8x16_t __s0_303 = __p0_303; \
|
|
mfloat8x8_t __s2_303 = __p2_303; \
|
|
mfloat8x16_t __rev0_303; __rev0_303 = __builtin_shufflevector(__s0_303, __s0_303, __lane_reverse_128_8); \
|
|
mfloat8x8_t __rev2_303; __rev2_303 = __builtin_shufflevector(__s2_303, __s2_303, __lane_reverse_64_8); \
|
|
__ret_303 = __noswap_vsetq_lane_mf8(__noswap_vget_lane_mf8(__rev2_303, __p3_303), __rev0_303, __p1_303); \
|
|
__ret_303 = __builtin_shufflevector(__ret_303, __ret_303, __lane_reverse_128_8); \
|
|
__ret_303; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_lane_s16(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \
|
|
int16x8_t __ret_304; \
|
|
int16x8_t __s0_304 = __p0_304; \
|
|
int16x4_t __s2_304 = __p2_304; \
|
|
__ret_304 = vsetq_lane_s16(vget_lane_s16(__s2_304, __p3_304), __s0_304, __p1_304); \
|
|
__ret_304; \
|
|
})
|
|
#else
|
|
#define vcopyq_lane_s16(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
|
|
int16x8_t __ret_305; \
|
|
int16x8_t __s0_305 = __p0_305; \
|
|
int16x4_t __s2_305 = __p2_305; \
|
|
int16x8_t __rev0_305; __rev0_305 = __builtin_shufflevector(__s0_305, __s0_305, __lane_reverse_128_16); \
|
|
int16x4_t __rev2_305; __rev2_305 = __builtin_shufflevector(__s2_305, __s2_305, __lane_reverse_64_16); \
|
|
__ret_305 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_305, __p3_305), __rev0_305, __p1_305); \
|
|
__ret_305 = __builtin_shufflevector(__ret_305, __ret_305, __lane_reverse_128_16); \
|
|
__ret_305; \
|
|
})
|
|
#endif
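/* vcopy_lane_<type>: the same lane copy with a 64-bit destination vector. */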
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_p8(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
|
|
poly8x8_t __ret_306; \
|
|
poly8x8_t __s0_306 = __p0_306; \
|
|
poly8x8_t __s2_306 = __p2_306; \
|
|
__ret_306 = vset_lane_p8(vget_lane_p8(__s2_306, __p3_306), __s0_306, __p1_306); \
|
|
__ret_306; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_p8(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
|
|
poly8x8_t __ret_307; \
|
|
poly8x8_t __s0_307 = __p0_307; \
|
|
poly8x8_t __s2_307 = __p2_307; \
|
|
poly8x8_t __rev0_307; __rev0_307 = __builtin_shufflevector(__s0_307, __s0_307, __lane_reverse_64_8); \
|
|
poly8x8_t __rev2_307; __rev2_307 = __builtin_shufflevector(__s2_307, __s2_307, __lane_reverse_64_8); \
|
|
__ret_307 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_307, __p3_307), __rev0_307, __p1_307); \
|
|
__ret_307 = __builtin_shufflevector(__ret_307, __ret_307, __lane_reverse_64_8); \
|
|
__ret_307; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_p16(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
|
|
poly16x4_t __ret_308; \
|
|
poly16x4_t __s0_308 = __p0_308; \
|
|
poly16x4_t __s2_308 = __p2_308; \
|
|
__ret_308 = vset_lane_p16(vget_lane_p16(__s2_308, __p3_308), __s0_308, __p1_308); \
|
|
__ret_308; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_p16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
|
|
poly16x4_t __ret_309; \
|
|
poly16x4_t __s0_309 = __p0_309; \
|
|
poly16x4_t __s2_309 = __p2_309; \
|
|
poly16x4_t __rev0_309; __rev0_309 = __builtin_shufflevector(__s0_309, __s0_309, __lane_reverse_64_16); \
|
|
poly16x4_t __rev2_309; __rev2_309 = __builtin_shufflevector(__s2_309, __s2_309, __lane_reverse_64_16); \
|
|
__ret_309 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_309, __p3_309), __rev0_309, __p1_309); \
|
|
__ret_309 = __builtin_shufflevector(__ret_309, __ret_309, __lane_reverse_64_16); \
|
|
__ret_309; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_u8(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
|
|
uint8x8_t __ret_310; \
|
|
uint8x8_t __s0_310 = __p0_310; \
|
|
uint8x8_t __s2_310 = __p2_310; \
|
|
__ret_310 = vset_lane_u8(vget_lane_u8(__s2_310, __p3_310), __s0_310, __p1_310); \
|
|
__ret_310; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_u8(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
|
|
uint8x8_t __ret_311; \
|
|
uint8x8_t __s0_311 = __p0_311; \
|
|
uint8x8_t __s2_311 = __p2_311; \
|
|
uint8x8_t __rev0_311; __rev0_311 = __builtin_shufflevector(__s0_311, __s0_311, __lane_reverse_64_8); \
|
|
uint8x8_t __rev2_311; __rev2_311 = __builtin_shufflevector(__s2_311, __s2_311, __lane_reverse_64_8); \
|
|
__ret_311 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_311, __p3_311), __rev0_311, __p1_311); \
|
|
__ret_311 = __builtin_shufflevector(__ret_311, __ret_311, __lane_reverse_64_8); \
|
|
__ret_311; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_u32(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
|
|
uint32x2_t __ret_312; \
|
|
uint32x2_t __s0_312 = __p0_312; \
|
|
uint32x2_t __s2_312 = __p2_312; \
|
|
__ret_312 = vset_lane_u32(vget_lane_u32(__s2_312, __p3_312), __s0_312, __p1_312); \
|
|
__ret_312; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_u32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
|
|
uint32x2_t __ret_313; \
|
|
uint32x2_t __s0_313 = __p0_313; \
|
|
uint32x2_t __s2_313 = __p2_313; \
|
|
uint32x2_t __rev0_313; __rev0_313 = __builtin_shufflevector(__s0_313, __s0_313, __lane_reverse_64_32); \
|
|
uint32x2_t __rev2_313; __rev2_313 = __builtin_shufflevector(__s2_313, __s2_313, __lane_reverse_64_32); \
|
|
__ret_313 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_313, __p3_313), __rev0_313, __p1_313); \
|
|
__ret_313 = __builtin_shufflevector(__ret_313, __ret_313, __lane_reverse_64_32); \
|
|
__ret_313; \
|
|
})
|
|
#endif
|
|
|
|
#define vcopy_lane_u64(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
|
|
uint64x1_t __ret_314; \
|
|
uint64x1_t __s0_314 = __p0_314; \
|
|
uint64x1_t __s2_314 = __p2_314; \
|
|
__ret_314 = vset_lane_u64(vget_lane_u64(__s2_314, __p3_314), __s0_314, __p1_314); \
|
|
__ret_314; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_u16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
|
|
uint16x4_t __ret_315; \
|
|
uint16x4_t __s0_315 = __p0_315; \
|
|
uint16x4_t __s2_315 = __p2_315; \
|
|
__ret_315 = vset_lane_u16(vget_lane_u16(__s2_315, __p3_315), __s0_315, __p1_315); \
|
|
__ret_315; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_u16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
|
|
uint16x4_t __ret_316; \
|
|
uint16x4_t __s0_316 = __p0_316; \
|
|
uint16x4_t __s2_316 = __p2_316; \
|
|
uint16x4_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, __lane_reverse_64_16); \
|
|
uint16x4_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, __lane_reverse_64_16); \
|
|
__ret_316 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_316, __p3_316), __rev0_316, __p1_316); \
|
|
__ret_316 = __builtin_shufflevector(__ret_316, __ret_316, __lane_reverse_64_16); \
|
|
__ret_316; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_s8(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
|
|
int8x8_t __ret_317; \
|
|
int8x8_t __s0_317 = __p0_317; \
|
|
int8x8_t __s2_317 = __p2_317; \
|
|
__ret_317 = vset_lane_s8(vget_lane_s8(__s2_317, __p3_317), __s0_317, __p1_317); \
|
|
__ret_317; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_s8(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
|
|
int8x8_t __ret_318; \
|
|
int8x8_t __s0_318 = __p0_318; \
|
|
int8x8_t __s2_318 = __p2_318; \
|
|
int8x8_t __rev0_318; __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, __lane_reverse_64_8); \
|
|
int8x8_t __rev2_318; __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, __lane_reverse_64_8); \
|
|
__ret_318 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_318, __p3_318), __rev0_318, __p1_318); \
|
|
__ret_318 = __builtin_shufflevector(__ret_318, __ret_318, __lane_reverse_64_8); \
|
|
__ret_318; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_f32(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
|
|
float32x2_t __ret_319; \
|
|
float32x2_t __s0_319 = __p0_319; \
|
|
float32x2_t __s2_319 = __p2_319; \
|
|
__ret_319 = vset_lane_f32(vget_lane_f32(__s2_319, __p3_319), __s0_319, __p1_319); \
|
|
__ret_319; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_f32(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
|
|
float32x2_t __ret_320; \
|
|
float32x2_t __s0_320 = __p0_320; \
|
|
float32x2_t __s2_320 = __p2_320; \
|
|
float32x2_t __rev0_320; __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, __lane_reverse_64_32); \
|
|
float32x2_t __rev2_320; __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, __lane_reverse_64_32); \
|
|
__ret_320 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_320, __p3_320), __rev0_320, __p1_320); \
|
|
__ret_320 = __builtin_shufflevector(__ret_320, __ret_320, __lane_reverse_64_32); \
|
|
__ret_320; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_s32(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
|
|
int32x2_t __ret_321; \
|
|
int32x2_t __s0_321 = __p0_321; \
|
|
int32x2_t __s2_321 = __p2_321; \
|
|
__ret_321 = vset_lane_s32(vget_lane_s32(__s2_321, __p3_321), __s0_321, __p1_321); \
|
|
__ret_321; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_s32(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
|
|
int32x2_t __ret_322; \
|
|
int32x2_t __s0_322 = __p0_322; \
|
|
int32x2_t __s2_322 = __p2_322; \
|
|
int32x2_t __rev0_322; __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, __lane_reverse_64_32); \
|
|
int32x2_t __rev2_322; __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, __lane_reverse_64_32); \
|
|
__ret_322 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_322, __p3_322), __rev0_322, __p1_322); \
|
|
__ret_322 = __builtin_shufflevector(__ret_322, __ret_322, __lane_reverse_64_32); \
|
|
__ret_322; \
|
|
})
|
|
#endif
|
|
|
|
#define vcopy_lane_s64(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
|
|
int64x1_t __ret_323; \
|
|
int64x1_t __s0_323 = __p0_323; \
|
|
int64x1_t __s2_323 = __p2_323; \
|
|
__ret_323 = vset_lane_s64(vget_lane_s64(__s2_323, __p3_323), __s0_323, __p1_323); \
|
|
__ret_323; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_mf8(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \
|
|
mfloat8x8_t __ret_324; \
|
|
mfloat8x8_t __s0_324 = __p0_324; \
|
|
mfloat8x8_t __s2_324 = __p2_324; \
|
|
__ret_324 = vset_lane_mf8(vget_lane_mf8(__s2_324, __p3_324), __s0_324, __p1_324); \
|
|
__ret_324; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_mf8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
|
|
mfloat8x8_t __ret_325; \
|
|
mfloat8x8_t __s0_325 = __p0_325; \
|
|
mfloat8x8_t __s2_325 = __p2_325; \
|
|
mfloat8x8_t __rev0_325; __rev0_325 = __builtin_shufflevector(__s0_325, __s0_325, __lane_reverse_64_8); \
|
|
mfloat8x8_t __rev2_325; __rev2_325 = __builtin_shufflevector(__s2_325, __s2_325, __lane_reverse_64_8); \
|
|
__ret_325 = __noswap_vset_lane_mf8(__noswap_vget_lane_mf8(__rev2_325, __p3_325), __rev0_325, __p1_325); \
|
|
__ret_325 = __builtin_shufflevector(__ret_325, __ret_325, __lane_reverse_64_8); \
|
|
__ret_325; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_lane_s16(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
|
|
int16x4_t __ret_326; \
|
|
int16x4_t __s0_326 = __p0_326; \
|
|
int16x4_t __s2_326 = __p2_326; \
|
|
__ret_326 = vset_lane_s16(vget_lane_s16(__s2_326, __p3_326), __s0_326, __p1_326); \
|
|
__ret_326; \
|
|
})
|
|
#else
|
|
#define vcopy_lane_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
|
|
int16x4_t __ret_327; \
|
|
int16x4_t __s0_327 = __p0_327; \
|
|
int16x4_t __s2_327 = __p2_327; \
|
|
int16x4_t __rev0_327; __rev0_327 = __builtin_shufflevector(__s0_327, __s0_327, __lane_reverse_64_16); \
|
|
int16x4_t __rev2_327; __rev2_327 = __builtin_shufflevector(__s2_327, __s2_327, __lane_reverse_64_16); \
|
|
__ret_327 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_327, __p3_327), __rev0_327, __p1_327); \
|
|
__ret_327 = __builtin_shufflevector(__ret_327, __ret_327, __lane_reverse_64_16); \
|
|
__ret_327; \
|
|
})
|
|
#endif
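/* vcopyq_laneq_<type> and vcopy_laneq_<type>: lane copies that take their
   source lane from a 128-bit vector. */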
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_p8(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
|
|
poly8x16_t __ret_328; \
|
|
poly8x16_t __s0_328 = __p0_328; \
|
|
poly8x16_t __s2_328 = __p2_328; \
|
|
__ret_328 = vsetq_lane_p8(vgetq_lane_p8(__s2_328, __p3_328), __s0_328, __p1_328); \
|
|
__ret_328; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_p8(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
|
|
poly8x16_t __ret_329; \
|
|
poly8x16_t __s0_329 = __p0_329; \
|
|
poly8x16_t __s2_329 = __p2_329; \
|
|
poly8x16_t __rev0_329; __rev0_329 = __builtin_shufflevector(__s0_329, __s0_329, __lane_reverse_128_8); \
|
|
poly8x16_t __rev2_329; __rev2_329 = __builtin_shufflevector(__s2_329, __s2_329, __lane_reverse_128_8); \
|
|
__ret_329 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_329, __p3_329), __rev0_329, __p1_329); \
|
|
__ret_329 = __builtin_shufflevector(__ret_329, __ret_329, __lane_reverse_128_8); \
|
|
__ret_329; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_p16(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
|
|
poly16x8_t __ret_330; \
|
|
poly16x8_t __s0_330 = __p0_330; \
|
|
poly16x8_t __s2_330 = __p2_330; \
|
|
__ret_330 = vsetq_lane_p16(vgetq_lane_p16(__s2_330, __p3_330), __s0_330, __p1_330); \
|
|
__ret_330; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_p16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
|
|
poly16x8_t __ret_331; \
|
|
poly16x8_t __s0_331 = __p0_331; \
|
|
poly16x8_t __s2_331 = __p2_331; \
|
|
poly16x8_t __rev0_331; __rev0_331 = __builtin_shufflevector(__s0_331, __s0_331, __lane_reverse_128_16); \
|
|
poly16x8_t __rev2_331; __rev2_331 = __builtin_shufflevector(__s2_331, __s2_331, __lane_reverse_128_16); \
|
|
__ret_331 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_331, __p3_331), __rev0_331, __p1_331); \
|
|
__ret_331 = __builtin_shufflevector(__ret_331, __ret_331, __lane_reverse_128_16); \
|
|
__ret_331; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_u8(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
|
|
uint8x16_t __ret_332; \
|
|
uint8x16_t __s0_332 = __p0_332; \
|
|
uint8x16_t __s2_332 = __p2_332; \
|
|
__ret_332 = vsetq_lane_u8(vgetq_lane_u8(__s2_332, __p3_332), __s0_332, __p1_332); \
|
|
__ret_332; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_u8(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
|
|
uint8x16_t __ret_333; \
|
|
uint8x16_t __s0_333 = __p0_333; \
|
|
uint8x16_t __s2_333 = __p2_333; \
|
|
uint8x16_t __rev0_333; __rev0_333 = __builtin_shufflevector(__s0_333, __s0_333, __lane_reverse_128_8); \
|
|
uint8x16_t __rev2_333; __rev2_333 = __builtin_shufflevector(__s2_333, __s2_333, __lane_reverse_128_8); \
|
|
__ret_333 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_333, __p3_333), __rev0_333, __p1_333); \
|
|
__ret_333 = __builtin_shufflevector(__ret_333, __ret_333, __lane_reverse_128_8); \
|
|
__ret_333; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_u32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
|
|
uint32x4_t __ret_334; \
|
|
uint32x4_t __s0_334 = __p0_334; \
|
|
uint32x4_t __s2_334 = __p2_334; \
|
|
__ret_334 = vsetq_lane_u32(vgetq_lane_u32(__s2_334, __p3_334), __s0_334, __p1_334); \
|
|
__ret_334; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_u32(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \
|
|
uint32x4_t __ret_335; \
|
|
uint32x4_t __s0_335 = __p0_335; \
|
|
uint32x4_t __s2_335 = __p2_335; \
|
|
uint32x4_t __rev0_335; __rev0_335 = __builtin_shufflevector(__s0_335, __s0_335, __lane_reverse_128_32); \
|
|
uint32x4_t __rev2_335; __rev2_335 = __builtin_shufflevector(__s2_335, __s2_335, __lane_reverse_128_32); \
|
|
__ret_335 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_335, __p3_335), __rev0_335, __p1_335); \
|
|
__ret_335 = __builtin_shufflevector(__ret_335, __ret_335, __lane_reverse_128_32); \
|
|
__ret_335; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_u64(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \
|
|
uint64x2_t __ret_336; \
|
|
uint64x2_t __s0_336 = __p0_336; \
|
|
uint64x2_t __s2_336 = __p2_336; \
|
|
__ret_336 = vsetq_lane_u64(vgetq_lane_u64(__s2_336, __p3_336), __s0_336, __p1_336); \
|
|
__ret_336; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_u64(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \
|
|
uint64x2_t __ret_337; \
|
|
uint64x2_t __s0_337 = __p0_337; \
|
|
uint64x2_t __s2_337 = __p2_337; \
|
|
uint64x2_t __rev0_337; __rev0_337 = __builtin_shufflevector(__s0_337, __s0_337, __lane_reverse_128_64); \
|
|
uint64x2_t __rev2_337; __rev2_337 = __builtin_shufflevector(__s2_337, __s2_337, __lane_reverse_128_64); \
|
|
__ret_337 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_337, __p3_337), __rev0_337, __p1_337); \
|
|
__ret_337 = __builtin_shufflevector(__ret_337, __ret_337, __lane_reverse_128_64); \
|
|
__ret_337; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_u16(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \
|
|
uint16x8_t __ret_338; \
|
|
uint16x8_t __s0_338 = __p0_338; \
|
|
uint16x8_t __s2_338 = __p2_338; \
|
|
__ret_338 = vsetq_lane_u16(vgetq_lane_u16(__s2_338, __p3_338), __s0_338, __p1_338); \
|
|
__ret_338; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_u16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \
|
|
uint16x8_t __ret_339; \
|
|
uint16x8_t __s0_339 = __p0_339; \
|
|
uint16x8_t __s2_339 = __p2_339; \
|
|
uint16x8_t __rev0_339; __rev0_339 = __builtin_shufflevector(__s0_339, __s0_339, __lane_reverse_128_16); \
|
|
uint16x8_t __rev2_339; __rev2_339 = __builtin_shufflevector(__s2_339, __s2_339, __lane_reverse_128_16); \
|
|
__ret_339 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_339, __p3_339), __rev0_339, __p1_339); \
|
|
__ret_339 = __builtin_shufflevector(__ret_339, __ret_339, __lane_reverse_128_16); \
|
|
__ret_339; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_s8(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \
|
|
int8x16_t __ret_340; \
|
|
int8x16_t __s0_340 = __p0_340; \
|
|
int8x16_t __s2_340 = __p2_340; \
|
|
__ret_340 = vsetq_lane_s8(vgetq_lane_s8(__s2_340, __p3_340), __s0_340, __p1_340); \
|
|
__ret_340; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_s8(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \
|
|
int8x16_t __ret_341; \
|
|
int8x16_t __s0_341 = __p0_341; \
|
|
int8x16_t __s2_341 = __p2_341; \
|
|
int8x16_t __rev0_341; __rev0_341 = __builtin_shufflevector(__s0_341, __s0_341, __lane_reverse_128_8); \
|
|
int8x16_t __rev2_341; __rev2_341 = __builtin_shufflevector(__s2_341, __s2_341, __lane_reverse_128_8); \
|
|
__ret_341 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_341, __p3_341), __rev0_341, __p1_341); \
|
|
__ret_341 = __builtin_shufflevector(__ret_341, __ret_341, __lane_reverse_128_8); \
|
|
__ret_341; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_f32(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \
|
|
float32x4_t __ret_342; \
|
|
float32x4_t __s0_342 = __p0_342; \
|
|
float32x4_t __s2_342 = __p2_342; \
|
|
__ret_342 = vsetq_lane_f32(vgetq_lane_f32(__s2_342, __p3_342), __s0_342, __p1_342); \
|
|
__ret_342; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_f32(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \
|
|
float32x4_t __ret_343; \
|
|
float32x4_t __s0_343 = __p0_343; \
|
|
float32x4_t __s2_343 = __p2_343; \
|
|
float32x4_t __rev0_343; __rev0_343 = __builtin_shufflevector(__s0_343, __s0_343, __lane_reverse_128_32); \
|
|
float32x4_t __rev2_343; __rev2_343 = __builtin_shufflevector(__s2_343, __s2_343, __lane_reverse_128_32); \
|
|
__ret_343 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_343, __p3_343), __rev0_343, __p1_343); \
|
|
__ret_343 = __builtin_shufflevector(__ret_343, __ret_343, __lane_reverse_128_32); \
|
|
__ret_343; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_s32(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \
|
|
int32x4_t __ret_344; \
|
|
int32x4_t __s0_344 = __p0_344; \
|
|
int32x4_t __s2_344 = __p2_344; \
|
|
__ret_344 = vsetq_lane_s32(vgetq_lane_s32(__s2_344, __p3_344), __s0_344, __p1_344); \
|
|
__ret_344; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_s32(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \
|
|
int32x4_t __ret_345; \
|
|
int32x4_t __s0_345 = __p0_345; \
|
|
int32x4_t __s2_345 = __p2_345; \
|
|
int32x4_t __rev0_345; __rev0_345 = __builtin_shufflevector(__s0_345, __s0_345, __lane_reverse_128_32); \
|
|
int32x4_t __rev2_345; __rev2_345 = __builtin_shufflevector(__s2_345, __s2_345, __lane_reverse_128_32); \
|
|
__ret_345 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_345, __p3_345), __rev0_345, __p1_345); \
|
|
__ret_345 = __builtin_shufflevector(__ret_345, __ret_345, __lane_reverse_128_32); \
|
|
__ret_345; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_s64(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \
|
|
int64x2_t __ret_346; \
|
|
int64x2_t __s0_346 = __p0_346; \
|
|
int64x2_t __s2_346 = __p2_346; \
|
|
__ret_346 = vsetq_lane_s64(vgetq_lane_s64(__s2_346, __p3_346), __s0_346, __p1_346); \
|
|
__ret_346; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_s64(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \
|
|
int64x2_t __ret_347; \
|
|
int64x2_t __s0_347 = __p0_347; \
|
|
int64x2_t __s2_347 = __p2_347; \
|
|
int64x2_t __rev0_347; __rev0_347 = __builtin_shufflevector(__s0_347, __s0_347, __lane_reverse_128_64); \
|
|
int64x2_t __rev2_347; __rev2_347 = __builtin_shufflevector(__s2_347, __s2_347, __lane_reverse_128_64); \
|
|
__ret_347 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_347, __p3_347), __rev0_347, __p1_347); \
|
|
__ret_347 = __builtin_shufflevector(__ret_347, __ret_347, __lane_reverse_128_64); \
|
|
__ret_347; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_mf8(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \
|
|
mfloat8x16_t __ret_348; \
|
|
mfloat8x16_t __s0_348 = __p0_348; \
|
|
mfloat8x16_t __s2_348 = __p2_348; \
|
|
__ret_348 = vsetq_lane_mf8(vgetq_lane_mf8(__s2_348, __p3_348), __s0_348, __p1_348); \
|
|
__ret_348; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_mf8(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \
|
|
mfloat8x16_t __ret_349; \
|
|
mfloat8x16_t __s0_349 = __p0_349; \
|
|
mfloat8x16_t __s2_349 = __p2_349; \
|
|
mfloat8x16_t __rev0_349; __rev0_349 = __builtin_shufflevector(__s0_349, __s0_349, __lane_reverse_128_8); \
|
|
mfloat8x16_t __rev2_349; __rev2_349 = __builtin_shufflevector(__s2_349, __s2_349, __lane_reverse_128_8); \
|
|
__ret_349 = __noswap_vsetq_lane_mf8(__noswap_vgetq_lane_mf8(__rev2_349, __p3_349), __rev0_349, __p1_349); \
|
|
__ret_349 = __builtin_shufflevector(__ret_349, __ret_349, __lane_reverse_128_8); \
|
|
__ret_349; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_s16(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \
|
|
int16x8_t __ret_350; \
|
|
int16x8_t __s0_350 = __p0_350; \
|
|
int16x8_t __s2_350 = __p2_350; \
|
|
__ret_350 = vsetq_lane_s16(vgetq_lane_s16(__s2_350, __p3_350), __s0_350, __p1_350); \
|
|
__ret_350; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_s16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \
|
|
int16x8_t __ret_351; \
|
|
int16x8_t __s0_351 = __p0_351; \
|
|
int16x8_t __s2_351 = __p2_351; \
|
|
int16x8_t __rev0_351; __rev0_351 = __builtin_shufflevector(__s0_351, __s0_351, __lane_reverse_128_16); \
|
|
int16x8_t __rev2_351; __rev2_351 = __builtin_shufflevector(__s2_351, __s2_351, __lane_reverse_128_16); \
|
|
__ret_351 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_351, __p3_351), __rev0_351, __p1_351); \
|
|
__ret_351 = __builtin_shufflevector(__ret_351, __ret_351, __lane_reverse_128_16); \
|
|
__ret_351; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_p8(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \
|
|
poly8x8_t __ret_352; \
|
|
poly8x8_t __s0_352 = __p0_352; \
|
|
poly8x16_t __s2_352 = __p2_352; \
|
|
__ret_352 = vset_lane_p8(vgetq_lane_p8(__s2_352, __p3_352), __s0_352, __p1_352); \
|
|
__ret_352; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_p8(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \
|
|
poly8x8_t __ret_353; \
|
|
poly8x8_t __s0_353 = __p0_353; \
|
|
poly8x16_t __s2_353 = __p2_353; \
|
|
poly8x8_t __rev0_353; __rev0_353 = __builtin_shufflevector(__s0_353, __s0_353, __lane_reverse_64_8); \
|
|
poly8x16_t __rev2_353; __rev2_353 = __builtin_shufflevector(__s2_353, __s2_353, __lane_reverse_128_8); \
|
|
__ret_353 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_353, __p3_353), __rev0_353, __p1_353); \
|
|
__ret_353 = __builtin_shufflevector(__ret_353, __ret_353, __lane_reverse_64_8); \
|
|
__ret_353; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_p16(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \
|
|
poly16x4_t __ret_354; \
|
|
poly16x4_t __s0_354 = __p0_354; \
|
|
poly16x8_t __s2_354 = __p2_354; \
|
|
__ret_354 = vset_lane_p16(vgetq_lane_p16(__s2_354, __p3_354), __s0_354, __p1_354); \
|
|
__ret_354; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_p16(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \
|
|
poly16x4_t __ret_355; \
|
|
poly16x4_t __s0_355 = __p0_355; \
|
|
poly16x8_t __s2_355 = __p2_355; \
|
|
poly16x4_t __rev0_355; __rev0_355 = __builtin_shufflevector(__s0_355, __s0_355, __lane_reverse_64_16); \
|
|
poly16x8_t __rev2_355; __rev2_355 = __builtin_shufflevector(__s2_355, __s2_355, __lane_reverse_128_16); \
|
|
__ret_355 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_355, __p3_355), __rev0_355, __p1_355); \
|
|
__ret_355 = __builtin_shufflevector(__ret_355, __ret_355, __lane_reverse_64_16); \
|
|
__ret_355; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_u8(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \
|
|
uint8x8_t __ret_356; \
|
|
uint8x8_t __s0_356 = __p0_356; \
|
|
uint8x16_t __s2_356 = __p2_356; \
|
|
__ret_356 = vset_lane_u8(vgetq_lane_u8(__s2_356, __p3_356), __s0_356, __p1_356); \
|
|
__ret_356; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_u8(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \
|
|
uint8x8_t __ret_357; \
|
|
uint8x8_t __s0_357 = __p0_357; \
|
|
uint8x16_t __s2_357 = __p2_357; \
|
|
uint8x8_t __rev0_357; __rev0_357 = __builtin_shufflevector(__s0_357, __s0_357, __lane_reverse_64_8); \
|
|
uint8x16_t __rev2_357; __rev2_357 = __builtin_shufflevector(__s2_357, __s2_357, __lane_reverse_128_8); \
|
|
__ret_357 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_357, __p3_357), __rev0_357, __p1_357); \
|
|
__ret_357 = __builtin_shufflevector(__ret_357, __ret_357, __lane_reverse_64_8); \
|
|
__ret_357; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_u32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \
|
|
uint32x2_t __ret_358; \
|
|
uint32x2_t __s0_358 = __p0_358; \
|
|
uint32x4_t __s2_358 = __p2_358; \
|
|
__ret_358 = vset_lane_u32(vgetq_lane_u32(__s2_358, __p3_358), __s0_358, __p1_358); \
|
|
__ret_358; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_u32(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \
|
|
uint32x2_t __ret_359; \
|
|
uint32x2_t __s0_359 = __p0_359; \
|
|
uint32x4_t __s2_359 = __p2_359; \
|
|
uint32x2_t __rev0_359; __rev0_359 = __builtin_shufflevector(__s0_359, __s0_359, __lane_reverse_64_32); \
|
|
uint32x4_t __rev2_359; __rev2_359 = __builtin_shufflevector(__s2_359, __s2_359, __lane_reverse_128_32); \
|
|
__ret_359 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_359, __p3_359), __rev0_359, __p1_359); \
|
|
__ret_359 = __builtin_shufflevector(__ret_359, __ret_359, __lane_reverse_64_32); \
|
|
__ret_359; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_u64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \
|
|
uint64x1_t __ret_360; \
|
|
uint64x1_t __s0_360 = __p0_360; \
|
|
uint64x2_t __s2_360 = __p2_360; \
|
|
__ret_360 = vset_lane_u64(vgetq_lane_u64(__s2_360, __p3_360), __s0_360, __p1_360); \
|
|
__ret_360; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_u64(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \
|
|
uint64x1_t __ret_361; \
|
|
uint64x1_t __s0_361 = __p0_361; \
|
|
uint64x2_t __s2_361 = __p2_361; \
|
|
uint64x2_t __rev2_361; __rev2_361 = __builtin_shufflevector(__s2_361, __s2_361, __lane_reverse_128_64); \
|
|
__ret_361 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_361, __p3_361), __s0_361, __p1_361); \
|
|
__ret_361; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_u16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \
|
|
uint16x4_t __ret_362; \
|
|
uint16x4_t __s0_362 = __p0_362; \
|
|
uint16x8_t __s2_362 = __p2_362; \
|
|
__ret_362 = vset_lane_u16(vgetq_lane_u16(__s2_362, __p3_362), __s0_362, __p1_362); \
|
|
__ret_362; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_u16(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \
|
|
uint16x4_t __ret_363; \
|
|
uint16x4_t __s0_363 = __p0_363; \
|
|
uint16x8_t __s2_363 = __p2_363; \
|
|
uint16x4_t __rev0_363; __rev0_363 = __builtin_shufflevector(__s0_363, __s0_363, __lane_reverse_64_16); \
|
|
uint16x8_t __rev2_363; __rev2_363 = __builtin_shufflevector(__s2_363, __s2_363, __lane_reverse_128_16); \
|
|
__ret_363 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_363, __p3_363), __rev0_363, __p1_363); \
|
|
__ret_363 = __builtin_shufflevector(__ret_363, __ret_363, __lane_reverse_64_16); \
|
|
__ret_363; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_s8(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \
|
|
int8x8_t __ret_364; \
|
|
int8x8_t __s0_364 = __p0_364; \
|
|
int8x16_t __s2_364 = __p2_364; \
|
|
__ret_364 = vset_lane_s8(vgetq_lane_s8(__s2_364, __p3_364), __s0_364, __p1_364); \
|
|
__ret_364; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_s8(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \
|
|
int8x8_t __ret_365; \
|
|
int8x8_t __s0_365 = __p0_365; \
|
|
int8x16_t __s2_365 = __p2_365; \
|
|
int8x8_t __rev0_365; __rev0_365 = __builtin_shufflevector(__s0_365, __s0_365, __lane_reverse_64_8); \
|
|
int8x16_t __rev2_365; __rev2_365 = __builtin_shufflevector(__s2_365, __s2_365, __lane_reverse_128_8); \
|
|
__ret_365 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_365, __p3_365), __rev0_365, __p1_365); \
|
|
__ret_365 = __builtin_shufflevector(__ret_365, __ret_365, __lane_reverse_64_8); \
|
|
__ret_365; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_f32(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \
|
|
float32x2_t __ret_366; \
|
|
float32x2_t __s0_366 = __p0_366; \
|
|
float32x4_t __s2_366 = __p2_366; \
|
|
__ret_366 = vset_lane_f32(vgetq_lane_f32(__s2_366, __p3_366), __s0_366, __p1_366); \
|
|
__ret_366; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_f32(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \
|
|
float32x2_t __ret_367; \
|
|
float32x2_t __s0_367 = __p0_367; \
|
|
float32x4_t __s2_367 = __p2_367; \
|
|
float32x2_t __rev0_367; __rev0_367 = __builtin_shufflevector(__s0_367, __s0_367, __lane_reverse_64_32); \
|
|
float32x4_t __rev2_367; __rev2_367 = __builtin_shufflevector(__s2_367, __s2_367, __lane_reverse_128_32); \
|
|
__ret_367 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_367, __p3_367), __rev0_367, __p1_367); \
|
|
__ret_367 = __builtin_shufflevector(__ret_367, __ret_367, __lane_reverse_64_32); \
|
|
__ret_367; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_s32(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \
|
|
int32x2_t __ret_368; \
|
|
int32x2_t __s0_368 = __p0_368; \
|
|
int32x4_t __s2_368 = __p2_368; \
|
|
__ret_368 = vset_lane_s32(vgetq_lane_s32(__s2_368, __p3_368), __s0_368, __p1_368); \
|
|
__ret_368; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_s32(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \
|
|
int32x2_t __ret_369; \
|
|
int32x2_t __s0_369 = __p0_369; \
|
|
int32x4_t __s2_369 = __p2_369; \
|
|
int32x2_t __rev0_369; __rev0_369 = __builtin_shufflevector(__s0_369, __s0_369, __lane_reverse_64_32); \
|
|
int32x4_t __rev2_369; __rev2_369 = __builtin_shufflevector(__s2_369, __s2_369, __lane_reverse_128_32); \
|
|
__ret_369 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_369, __p3_369), __rev0_369, __p1_369); \
|
|
__ret_369 = __builtin_shufflevector(__ret_369, __ret_369, __lane_reverse_64_32); \
|
|
__ret_369; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_s64(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \
|
|
int64x1_t __ret_370; \
|
|
int64x1_t __s0_370 = __p0_370; \
|
|
int64x2_t __s2_370 = __p2_370; \
|
|
__ret_370 = vset_lane_s64(vgetq_lane_s64(__s2_370, __p3_370), __s0_370, __p1_370); \
|
|
__ret_370; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_s64(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \
|
|
int64x1_t __ret_371; \
|
|
int64x1_t __s0_371 = __p0_371; \
|
|
int64x2_t __s2_371 = __p2_371; \
|
|
int64x2_t __rev2_371; __rev2_371 = __builtin_shufflevector(__s2_371, __s2_371, __lane_reverse_128_64); \
|
|
__ret_371 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_371, __p3_371), __s0_371, __p1_371); \
|
|
__ret_371; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_mf8(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \
|
|
mfloat8x8_t __ret_372; \
|
|
mfloat8x8_t __s0_372 = __p0_372; \
|
|
mfloat8x16_t __s2_372 = __p2_372; \
|
|
__ret_372 = vset_lane_mf8(vgetq_lane_mf8(__s2_372, __p3_372), __s0_372, __p1_372); \
|
|
__ret_372; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_mf8(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \
|
|
mfloat8x8_t __ret_373; \
|
|
mfloat8x8_t __s0_373 = __p0_373; \
|
|
mfloat8x16_t __s2_373 = __p2_373; \
|
|
mfloat8x8_t __rev0_373; __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, __lane_reverse_64_8); \
|
|
mfloat8x16_t __rev2_373; __rev2_373 = __builtin_shufflevector(__s2_373, __s2_373, __lane_reverse_128_8); \
|
|
__ret_373 = __noswap_vset_lane_mf8(__noswap_vgetq_lane_mf8(__rev2_373, __p3_373), __rev0_373, __p1_373); \
|
|
__ret_373 = __builtin_shufflevector(__ret_373, __ret_373, __lane_reverse_64_8); \
|
|
__ret_373; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_s16(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \
|
|
int16x4_t __ret_374; \
|
|
int16x4_t __s0_374 = __p0_374; \
|
|
int16x8_t __s2_374 = __p2_374; \
|
|
__ret_374 = vset_lane_s16(vgetq_lane_s16(__s2_374, __p3_374), __s0_374, __p1_374); \
|
|
__ret_374; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_s16(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \
|
|
int16x4_t __ret_375; \
|
|
int16x4_t __s0_375 = __p0_375; \
|
|
int16x8_t __s2_375 = __p2_375; \
|
|
int16x4_t __rev0_375; __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, __lane_reverse_64_16); \
|
|
int16x8_t __rev2_375; __rev2_375 = __builtin_shufflevector(__s2_375, __s2_375, __lane_reverse_128_16); \
|
|
__ret_375 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_375, __p3_375), __rev0_375, __p1_375); \
|
|
__ret_375 = __builtin_shufflevector(__ret_375, __ret_375, __lane_reverse_64_16); \
|
|
__ret_375; \
|
|
})
|
|
#endif
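/* vcreate_p64/vcreate_f64: reinterpret a 64-bit integer bit pattern as a
   one-element poly64/float64 vector. */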
#define vcreate_p64(__p0) __extension__ ({ \
poly64x1_t __ret; \
uint64_t __promote = __p0; \
__ret = __builtin_bit_cast(poly64x1_t, __promote); \
__ret; \
})
#define vcreate_f64(__p0) __extension__ ({ \
float64x1_t __ret; \
uint64_t __promote = __p0; \
__ret = __builtin_bit_cast(float64x1_t, __promote); \
__ret; \
})
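/* Conversions between floating-point and integer values, scalar and vector.
   The vcvts_/vcvtd_ forms operate on 32-/64-bit scalars; the vcvt_high_*
   forms narrow into, or widen from, the upper half of a 128-bit vector. */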
__ai __attribute__((target("neon"))) float32_t vcvts_f32_s32(int32_t __p0) {
float32_t __ret;
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vcvts_f32_s32(__p0));
return __ret;
}
__ai __attribute__((target("neon"))) float32_t vcvts_f32_u32(uint32_t __p0) {
float32_t __ret;
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vcvts_f32_u32(__p0));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_f64(float64x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_f32_f64(__builtin_bit_cast(int8x16_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_f64(float64x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_f32_f64(__builtin_bit_cast(int8x16_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvt_f32_f64(__builtin_bit_cast(int8x16_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64_t vcvtd_f64_s64(int64_t __p0) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vcvtd_f64_s64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64_t vcvtd_f64_u64(uint64_t __p0) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vcvtd_f64_u64(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvtq_f64_v(__builtin_bit_cast(int8x16_t, __p0), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvtq_f64_v(__builtin_bit_cast(int8x16_t, __rev0), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvtq_f64_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvtq_f64_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vcvt_f64_v(__builtin_bit_cast(int8x8_t, __p0), 19));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vcvt_f64_s64(int64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vcvt_f64_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vcvt_f64_f32(float32x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvt_f64_f32(__builtin_bit_cast(int8x8_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vcvt_f64_f32(float32x2_t __p0) {
|
|
float64x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvt_f64_f32(__builtin_bit_cast(int8x8_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvt_f64_f32(__builtin_bit_cast(int8x8_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = vcvt_f32_f16(vget_high_f16(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = vcvt_f64_f32(vget_high_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
|
|
float64x2_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
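/* The *_n_* conversions use a fixed-point format: the second argument is the
   number of fractional bits. */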
#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
float32_t __ret; \
uint32_t __s0 = __p0; \
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vcvts_n_f32_u32(__s0, __p1)); \
__ret; \
})
#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
float32_t __ret; \
int32_t __s0 = __p0; \
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vcvts_n_f32_s32(__s0, __p1)); \
__ret; \
})
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvtq_n_f64_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvtq_n_f64_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvtq_n_f64_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcvtq_n_f64_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vcvt_n_f64_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
|
|
__ret; \
|
|
})
|
|
#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vcvt_n_f64_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 3)); \
|
|
__ret; \
|
|
})
|
|
#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
|
|
float64_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vcvtd_n_f64_u64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
|
|
float64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vcvtd_n_f64_s64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvts_n_s32_f32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtq_n_s64_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtq_n_s64_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vcvt_n_s64_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 3)); \
|
|
__ret; \
|
|
})
|
|
#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
float64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtd_n_s64_f64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvts_n_u32_f32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtq_n_u64_v(__builtin_bit_cast(int8x16_t, __s0), __p1, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtq_n_u64_v(__builtin_bit_cast(int8x16_t, __rev0), __p1, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcvt_n_u64_v(__builtin_bit_cast(int8x8_t, __s0), __p1, 19)); \
|
|
__ret; \
|
|
})
|
|
#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
float64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtd_n_u64_f64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
__ai __attribute__((target("neon"))) int32_t vcvts_s32_f32(float32_t __p0) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvts_s32_f32(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64_t vcvtd_s64_f64(float64_t __p0) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtd_s64_f64(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtq_s64_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtq_s64_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vcvt_s64_f64(float64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vcvt_s64_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcvts_u32_f32(float32_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvts_u32_f32(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcvtd_u64_f64(float64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtd_u64_f64(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtq_u64_v(__builtin_bit_cast(int8x16_t, __p0), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtq_u64_v(__builtin_bit_cast(int8x16_t, __rev0), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcvt_u64_v(__builtin_bit_cast(int8x8_t, __p0), 19));
|
|
return __ret;
|
|
}
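/* Rounding-mode conversions to integer: vcvta* rounds to nearest with ties
   away from zero, vcvtm* toward minus infinity, vcvtn* to nearest with ties
   to even, and vcvtp* toward plus infinity. */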
__ai __attribute__((target("neon"))) int32_t vcvtas_s32_f32(float32_t __p0) {
int32_t __ret;
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtas_s32_f32(__p0));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtaq_s64_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtaq_s64_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vcvta_s64_f64(float64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vcvta_s64_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64_t vcvtad_s64_f64(float64_t __p0) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtad_s64_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcvtas_u32_f32(float32_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtas_u32_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtaq_u64_v(__builtin_bit_cast(int8x16_t, __p0), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtaq_u64_v(__builtin_bit_cast(int8x16_t, __rev0), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcvta_u64_v(__builtin_bit_cast(int8x8_t, __p0), 19));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcvtad_u64_f64(float64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtad_u64_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32_t vcvtms_s32_f32(float32_t __p0) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtms_s32_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtmq_s64_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtmq_s64_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vcvtm_s64_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64_t vcvtmd_s64_f64(float64_t __p0) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtmd_s64_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcvtms_u32_f32(float32_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtms_u32_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtmq_u64_v(__builtin_bit_cast(int8x16_t, __p0), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtmq_u64_v(__builtin_bit_cast(int8x16_t, __rev0), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcvtm_u64_v(__builtin_bit_cast(int8x8_t, __p0), 19));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcvtmd_u64_f64(float64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtmd_u64_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32_t vcvtns_s32_f32(float32_t __p0) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtns_s32_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtnq_s64_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtnq_s64_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vcvtn_s64_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64_t vcvtnd_s64_f64(float64_t __p0) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtnd_s64_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcvtns_u32_f32(float32_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtns_u32_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtnq_u64_v(__builtin_bit_cast(int8x16_t, __p0), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtnq_u64_v(__builtin_bit_cast(int8x16_t, __rev0), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcvtn_u64_v(__builtin_bit_cast(int8x8_t, __p0), 19));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcvtnd_u64_f64(float64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtnd_u64_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32_t vcvtps_s32_f32(float32_t __p0) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtps_s32_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtpq_s64_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vcvtpq_s64_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vcvtp_s64_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64_t vcvtpd_s64_f64(float64_t __p0) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtpd_s64_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vcvtps_u32_f32(float32_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtps_u32_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtpq_u64_v(__builtin_bit_cast(int8x16_t, __p0), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vcvtpq_u64_v(__builtin_bit_cast(int8x16_t, __rev0), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vcvtp_u64_v(__builtin_bit_cast(int8x8_t, __p0), 19));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vcvtpd_u64_f64(float64_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtpd_u64_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32_t vcvtxd_f32_f64(float64_t __p0) {
float32_t __ret;
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vcvtxd_f32_f64(__p0));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
float32x2_t __ret;
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvtx_f32_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
float32x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvtx_f32_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
__ai __attribute__((target("neon"))) float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
float32x2_t __ret;
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vcvtx_f32_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
float32x4_t __ret;
__ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
float32x4_t __ret;
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
__ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif
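/*
 * Illustrative usage (editor's sketch, not part of the generated header):
 * vcvtx_f32_f64 narrows a float64x2_t to a float32x2_t using round-to-odd,
 * and vcvtx_high_f32_f64 packs a second narrowed pair into the high half of
 * a float32x4_t. The helper name below is hypothetical.
 *
 *   static inline float32x4_t narrow_f64x4(float64x2_t lo, float64x2_t hi) {
 *     return vcvtx_high_f32_f64(vcvtx_f32_f64(lo), hi);
 *   }
 */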
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
__ret = __p0 / __p1;
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
__ret = __rev0 / __rev1;
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
float32x4_t __ret;
__ret = __p0 / __p1;
return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
float32x4_t __ret;
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
__ret = __rev0 / __rev1;
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
float64x1_t __ret;
__ret = __p0 / __p1;
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
float32x2_t __ret;
__ret = __p0 / __p1;
return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
float32x2_t __ret;
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
__ret = __rev0 / __rev1;
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
return __ret;
}
#endif
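/*
 * Illustrative usage (editor's sketch, not part of the generated header):
 * the vdiv* intrinsics above lower to plain element-wise division. The
 * helper name below is hypothetical.
 *
 *   static inline float32x4_t normalize4(float32x4_t v, float32x4_t len) {
 *     return vdivq_f32(v, len);   // v[i] / len[i] for each lane
 *   }
 */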
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8_t, __builtin_neon_vdupb_lane_i8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
|
|
poly8_t __ret; \
|
|
poly8x8_t __s0 = __p0; \
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(poly8_t, __builtin_neon_vdupb_lane_i8(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16_t, __builtin_neon_vduph_lane_i16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
|
|
poly16_t __ret; \
|
|
poly16x4_t __s0 = __p0; \
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(poly16_t, __builtin_neon_vduph_lane_i16(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vdupb_lane_i8(__builtin_bit_cast(int8x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8x8_t __s0 = __p0; \
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vdupb_lane_i8(__builtin_bit_cast(int8x8_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vdups_lane_i32(__builtin_bit_cast(int32x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32x2_t __s0 = __p0; \
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vdups_lane_i32(__builtin_bit_cast(int32x2_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vdupd_lane_i64(__builtin_bit_cast(int64x1_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vduph_lane_i16(__builtin_bit_cast(int16x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16x4_t __s0 = __p0; \
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vduph_lane_i16(__builtin_bit_cast(int16x4_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vdupb_lane_i8(__builtin_bit_cast(int8x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8x8_t __s0 = __p0; \
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vdupb_lane_i8(__builtin_bit_cast(int8x8_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
|
|
float64_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vdupd_lane_f64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vdups_lane_f32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vdups_lane_f32(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vdups_lane_i32(__builtin_bit_cast(int32x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vdups_lane_i32(__builtin_bit_cast(int32x2_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vdupd_lane_i64(__builtin_bit_cast(int64x1_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupb_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8_t, __builtin_neon_vdupb_lane_mf8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupb_lane_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8_t __ret; \
|
|
mfloat8x8_t __s0 = __p0; \
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(mfloat8_t, __builtin_neon_vdupb_lane_mf8(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vduph_lane_i16(__builtin_bit_cast(int16x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vduph_lane_i16(__builtin_bit_cast(int16x4_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vduph_lane_f16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x4_t __s0 = __p0; \
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vduph_lane_f16(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vdup_lane_p64(__p0_376, __p1_376) __extension__ ({ \
|
|
poly64x1_t __ret_376; \
|
|
poly64x1_t __s0_376 = __p0_376; \
|
|
__ret_376 = splat_lane_p64(__s0_376, __p1_376); \
|
|
__ret_376; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_p64(__p0_377, __p1_377) __extension__ ({ \
|
|
poly64x2_t __ret_377; \
|
|
poly64x1_t __s0_377 = __p0_377; \
|
|
__ret_377 = splatq_lane_p64(__s0_377, __p1_377); \
|
|
__ret_377; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_p64(__p0_378, __p1_378) __extension__ ({ \
|
|
poly64x2_t __ret_378; \
|
|
poly64x1_t __s0_378 = __p0_378; \
|
|
__ret_378 = __noswap_splatq_lane_p64(__s0_378, __p1_378); \
|
|
__ret_378 = __builtin_shufflevector(__ret_378, __ret_378, __lane_reverse_128_64); \
|
|
__ret_378; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_lane_f64(__p0_379, __p1_379) __extension__ ({ \
|
|
float64x2_t __ret_379; \
|
|
float64x1_t __s0_379 = __p0_379; \
|
|
__ret_379 = splatq_lane_f64(__s0_379, __p1_379); \
|
|
__ret_379; \
|
|
})
|
|
#else
|
|
#define vdupq_lane_f64(__p0_380, __p1_380) __extension__ ({ \
|
|
float64x2_t __ret_380; \
|
|
float64x1_t __s0_380 = __p0_380; \
|
|
__ret_380 = __noswap_splatq_lane_f64(__s0_380, __p1_380); \
|
|
__ret_380 = __builtin_shufflevector(__ret_380, __ret_380, __lane_reverse_128_64); \
|
|
__ret_380; \
|
|
})
|
|
#endif
|
|
|
|
#define vdup_lane_f64(__p0_381, __p1_381) __extension__ ({ \
|
|
float64x1_t __ret_381; \
|
|
float64x1_t __s0_381 = __p0_381; \
|
|
__ret_381 = splat_lane_f64(__s0_381, __p1_381); \
|
|
__ret_381; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
|
|
poly8_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly8_t, __builtin_neon_vdupb_laneq_i8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
|
|
poly8_t __ret; \
|
|
poly8x16_t __s0 = __p0; \
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(poly8_t, __builtin_neon_vdupb_laneq_i8(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
|
|
poly16_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly16_t, __builtin_neon_vduph_laneq_i16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
|
|
poly16_t __ret; \
|
|
poly16x8_t __s0 = __p0; \
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(poly16_t, __builtin_neon_vduph_laneq_i16(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vdupb_laneq_i8(__builtin_bit_cast(int8x16_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8x16_t __s0 = __p0; \
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vdupb_laneq_i8(__builtin_bit_cast(int8x16_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vdups_laneq_i32(__builtin_bit_cast(int32x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32x4_t __s0 = __p0; \
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vdups_laneq_i32(__builtin_bit_cast(int32x4_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vdupd_laneq_i64(__builtin_bit_cast(int64x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vdupd_laneq_i64(__builtin_bit_cast(int64x2_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vduph_laneq_i16(__builtin_bit_cast(int16x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16x8_t __s0 = __p0; \
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vduph_laneq_i16(__builtin_bit_cast(int16x8_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vdupb_laneq_i8(__builtin_bit_cast(int8x16_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8x16_t __s0 = __p0; \
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vdupb_laneq_i8(__builtin_bit_cast(int8x16_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
|
|
float64_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vdupd_laneq_f64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
|
|
float64_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vdupd_laneq_f64(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vdups_laneq_f32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vdups_laneq_f32(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vdups_laneq_i32(__builtin_bit_cast(int32x4_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vdups_laneq_i32(__builtin_bit_cast(int32x4_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vdupd_laneq_i64(__builtin_bit_cast(int64x2_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64x2_t __s0 = __p0; \
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vdupd_laneq_i64(__builtin_bit_cast(int64x2_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupb_laneq_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(mfloat8_t, __builtin_neon_vdupb_laneq_mf8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vdupb_laneq_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8_t __ret; \
|
|
mfloat8x16_t __s0 = __p0; \
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(mfloat8_t, __builtin_neon_vdupb_laneq_mf8(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vduph_laneq_i16(__builtin_bit_cast(int16x8_t, __s0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vduph_laneq_i16(__builtin_bit_cast(int16x8_t, __rev0), __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vduph_laneq_f16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
|
|
float16_t __ret; \
|
|
float16x8_t __s0 = __p0; \
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vduph_laneq_f16(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_p8(__p0_382, __p1_382) __extension__ ({ \
|
|
poly8x8_t __ret_382; \
|
|
poly8x16_t __s0_382 = __p0_382; \
|
|
__ret_382 = splat_laneq_p8(__s0_382, __p1_382); \
|
|
__ret_382; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_p8(__p0_383, __p1_383) __extension__ ({ \
|
|
poly8x8_t __ret_383; \
|
|
poly8x16_t __s0_383 = __p0_383; \
|
|
poly8x16_t __rev0_383; __rev0_383 = __builtin_shufflevector(__s0_383, __s0_383, __lane_reverse_128_8); \
|
|
__ret_383 = __noswap_splat_laneq_p8(__rev0_383, __p1_383); \
|
|
__ret_383 = __builtin_shufflevector(__ret_383, __ret_383, __lane_reverse_64_8); \
|
|
__ret_383; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_p64(__p0_384, __p1_384) __extension__ ({ \
|
|
poly64x1_t __ret_384; \
|
|
poly64x2_t __s0_384 = __p0_384; \
|
|
__ret_384 = splat_laneq_p64(__s0_384, __p1_384); \
|
|
__ret_384; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_p64(__p0_385, __p1_385) __extension__ ({ \
|
|
poly64x1_t __ret_385; \
|
|
poly64x2_t __s0_385 = __p0_385; \
|
|
poly64x2_t __rev0_385; __rev0_385 = __builtin_shufflevector(__s0_385, __s0_385, __lane_reverse_128_64); \
|
|
__ret_385 = __noswap_splat_laneq_p64(__rev0_385, __p1_385); \
|
|
__ret_385; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_p16(__p0_386, __p1_386) __extension__ ({ \
|
|
poly16x4_t __ret_386; \
|
|
poly16x8_t __s0_386 = __p0_386; \
|
|
__ret_386 = splat_laneq_p16(__s0_386, __p1_386); \
|
|
__ret_386; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_p16(__p0_387, __p1_387) __extension__ ({ \
|
|
poly16x4_t __ret_387; \
|
|
poly16x8_t __s0_387 = __p0_387; \
|
|
poly16x8_t __rev0_387; __rev0_387 = __builtin_shufflevector(__s0_387, __s0_387, __lane_reverse_128_16); \
|
|
__ret_387 = __noswap_splat_laneq_p16(__rev0_387, __p1_387); \
|
|
__ret_387 = __builtin_shufflevector(__ret_387, __ret_387, __lane_reverse_64_16); \
|
|
__ret_387; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_p8(__p0_388, __p1_388) __extension__ ({ \
|
|
poly8x16_t __ret_388; \
|
|
poly8x16_t __s0_388 = __p0_388; \
|
|
__ret_388 = splatq_laneq_p8(__s0_388, __p1_388); \
|
|
__ret_388; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_p8(__p0_389, __p1_389) __extension__ ({ \
|
|
poly8x16_t __ret_389; \
|
|
poly8x16_t __s0_389 = __p0_389; \
|
|
poly8x16_t __rev0_389; __rev0_389 = __builtin_shufflevector(__s0_389, __s0_389, __lane_reverse_128_8); \
|
|
__ret_389 = __noswap_splatq_laneq_p8(__rev0_389, __p1_389); \
|
|
__ret_389 = __builtin_shufflevector(__ret_389, __ret_389, __lane_reverse_128_8); \
|
|
__ret_389; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_p64(__p0_390, __p1_390) __extension__ ({ \
|
|
poly64x2_t __ret_390; \
|
|
poly64x2_t __s0_390 = __p0_390; \
|
|
__ret_390 = splatq_laneq_p64(__s0_390, __p1_390); \
|
|
__ret_390; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_p64(__p0_391, __p1_391) __extension__ ({ \
|
|
poly64x2_t __ret_391; \
|
|
poly64x2_t __s0_391 = __p0_391; \
|
|
poly64x2_t __rev0_391; __rev0_391 = __builtin_shufflevector(__s0_391, __s0_391, __lane_reverse_128_64); \
|
|
__ret_391 = __noswap_splatq_laneq_p64(__rev0_391, __p1_391); \
|
|
__ret_391 = __builtin_shufflevector(__ret_391, __ret_391, __lane_reverse_128_64); \
|
|
__ret_391; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_p16(__p0_392, __p1_392) __extension__ ({ \
|
|
poly16x8_t __ret_392; \
|
|
poly16x8_t __s0_392 = __p0_392; \
|
|
__ret_392 = splatq_laneq_p16(__s0_392, __p1_392); \
|
|
__ret_392; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_p16(__p0_393, __p1_393) __extension__ ({ \
|
|
poly16x8_t __ret_393; \
|
|
poly16x8_t __s0_393 = __p0_393; \
|
|
poly16x8_t __rev0_393; __rev0_393 = __builtin_shufflevector(__s0_393, __s0_393, __lane_reverse_128_16); \
|
|
__ret_393 = __noswap_splatq_laneq_p16(__rev0_393, __p1_393); \
|
|
__ret_393 = __builtin_shufflevector(__ret_393, __ret_393, __lane_reverse_128_16); \
|
|
__ret_393; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_u8(__p0_394, __p1_394) __extension__ ({ \
|
|
uint8x16_t __ret_394; \
|
|
uint8x16_t __s0_394 = __p0_394; \
|
|
__ret_394 = splatq_laneq_u8(__s0_394, __p1_394); \
|
|
__ret_394; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_u8(__p0_395, __p1_395) __extension__ ({ \
|
|
uint8x16_t __ret_395; \
|
|
uint8x16_t __s0_395 = __p0_395; \
|
|
uint8x16_t __rev0_395; __rev0_395 = __builtin_shufflevector(__s0_395, __s0_395, __lane_reverse_128_8); \
|
|
__ret_395 = __noswap_splatq_laneq_u8(__rev0_395, __p1_395); \
|
|
__ret_395 = __builtin_shufflevector(__ret_395, __ret_395, __lane_reverse_128_8); \
|
|
__ret_395; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_u32(__p0_396, __p1_396) __extension__ ({ \
|
|
uint32x4_t __ret_396; \
|
|
uint32x4_t __s0_396 = __p0_396; \
|
|
__ret_396 = splatq_laneq_u32(__s0_396, __p1_396); \
|
|
__ret_396; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_u32(__p0_397, __p1_397) __extension__ ({ \
|
|
uint32x4_t __ret_397; \
|
|
uint32x4_t __s0_397 = __p0_397; \
|
|
uint32x4_t __rev0_397; __rev0_397 = __builtin_shufflevector(__s0_397, __s0_397, __lane_reverse_128_32); \
|
|
__ret_397 = __noswap_splatq_laneq_u32(__rev0_397, __p1_397); \
|
|
__ret_397 = __builtin_shufflevector(__ret_397, __ret_397, __lane_reverse_128_32); \
|
|
__ret_397; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_u64(__p0_398, __p1_398) __extension__ ({ \
|
|
uint64x2_t __ret_398; \
|
|
uint64x2_t __s0_398 = __p0_398; \
|
|
__ret_398 = splatq_laneq_u64(__s0_398, __p1_398); \
|
|
__ret_398; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_u64(__p0_399, __p1_399) __extension__ ({ \
|
|
uint64x2_t __ret_399; \
|
|
uint64x2_t __s0_399 = __p0_399; \
|
|
uint64x2_t __rev0_399; __rev0_399 = __builtin_shufflevector(__s0_399, __s0_399, __lane_reverse_128_64); \
|
|
__ret_399 = __noswap_splatq_laneq_u64(__rev0_399, __p1_399); \
|
|
__ret_399 = __builtin_shufflevector(__ret_399, __ret_399, __lane_reverse_128_64); \
|
|
__ret_399; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_u16(__p0_400, __p1_400) __extension__ ({ \
|
|
uint16x8_t __ret_400; \
|
|
uint16x8_t __s0_400 = __p0_400; \
|
|
__ret_400 = splatq_laneq_u16(__s0_400, __p1_400); \
|
|
__ret_400; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_u16(__p0_401, __p1_401) __extension__ ({ \
|
|
uint16x8_t __ret_401; \
|
|
uint16x8_t __s0_401 = __p0_401; \
|
|
uint16x8_t __rev0_401; __rev0_401 = __builtin_shufflevector(__s0_401, __s0_401, __lane_reverse_128_16); \
|
|
__ret_401 = __noswap_splatq_laneq_u16(__rev0_401, __p1_401); \
|
|
__ret_401 = __builtin_shufflevector(__ret_401, __ret_401, __lane_reverse_128_16); \
|
|
__ret_401; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_s8(__p0_402, __p1_402) __extension__ ({ \
|
|
int8x16_t __ret_402; \
|
|
int8x16_t __s0_402 = __p0_402; \
|
|
__ret_402 = splatq_laneq_s8(__s0_402, __p1_402); \
|
|
__ret_402; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_s8(__p0_403, __p1_403) __extension__ ({ \
|
|
int8x16_t __ret_403; \
|
|
int8x16_t __s0_403 = __p0_403; \
|
|
int8x16_t __rev0_403; __rev0_403 = __builtin_shufflevector(__s0_403, __s0_403, __lane_reverse_128_8); \
|
|
__ret_403 = __noswap_splatq_laneq_s8(__rev0_403, __p1_403); \
|
|
__ret_403 = __builtin_shufflevector(__ret_403, __ret_403, __lane_reverse_128_8); \
|
|
__ret_403; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_f64(__p0_404, __p1_404) __extension__ ({ \
|
|
float64x2_t __ret_404; \
|
|
float64x2_t __s0_404 = __p0_404; \
|
|
__ret_404 = splatq_laneq_f64(__s0_404, __p1_404); \
|
|
__ret_404; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_f64(__p0_405, __p1_405) __extension__ ({ \
|
|
float64x2_t __ret_405; \
|
|
float64x2_t __s0_405 = __p0_405; \
|
|
float64x2_t __rev0_405; __rev0_405 = __builtin_shufflevector(__s0_405, __s0_405, __lane_reverse_128_64); \
|
|
__ret_405 = __noswap_splatq_laneq_f64(__rev0_405, __p1_405); \
|
|
__ret_405 = __builtin_shufflevector(__ret_405, __ret_405, __lane_reverse_128_64); \
|
|
__ret_405; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_f32(__p0_406, __p1_406) __extension__ ({ \
|
|
float32x4_t __ret_406; \
|
|
float32x4_t __s0_406 = __p0_406; \
|
|
__ret_406 = splatq_laneq_f32(__s0_406, __p1_406); \
|
|
__ret_406; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_f32(__p0_407, __p1_407) __extension__ ({ \
|
|
float32x4_t __ret_407; \
|
|
float32x4_t __s0_407 = __p0_407; \
|
|
float32x4_t __rev0_407; __rev0_407 = __builtin_shufflevector(__s0_407, __s0_407, __lane_reverse_128_32); \
|
|
__ret_407 = __noswap_splatq_laneq_f32(__rev0_407, __p1_407); \
|
|
__ret_407 = __builtin_shufflevector(__ret_407, __ret_407, __lane_reverse_128_32); \
|
|
__ret_407; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_f16(__p0_408, __p1_408) __extension__ ({ \
|
|
float16x8_t __ret_408; \
|
|
float16x8_t __s0_408 = __p0_408; \
|
|
__ret_408 = splatq_laneq_f16(__s0_408, __p1_408); \
|
|
__ret_408; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_f16(__p0_409, __p1_409) __extension__ ({ \
|
|
float16x8_t __ret_409; \
|
|
float16x8_t __s0_409 = __p0_409; \
|
|
float16x8_t __rev0_409; __rev0_409 = __builtin_shufflevector(__s0_409, __s0_409, __lane_reverse_128_16); \
|
|
__ret_409 = __noswap_splatq_laneq_f16(__rev0_409, __p1_409); \
|
|
__ret_409 = __builtin_shufflevector(__ret_409, __ret_409, __lane_reverse_128_16); \
|
|
__ret_409; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_s32(__p0_410, __p1_410) __extension__ ({ \
|
|
int32x4_t __ret_410; \
|
|
int32x4_t __s0_410 = __p0_410; \
|
|
__ret_410 = splatq_laneq_s32(__s0_410, __p1_410); \
|
|
__ret_410; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_s32(__p0_411, __p1_411) __extension__ ({ \
|
|
int32x4_t __ret_411; \
|
|
int32x4_t __s0_411 = __p0_411; \
|
|
int32x4_t __rev0_411; __rev0_411 = __builtin_shufflevector(__s0_411, __s0_411, __lane_reverse_128_32); \
|
|
__ret_411 = __noswap_splatq_laneq_s32(__rev0_411, __p1_411); \
|
|
__ret_411 = __builtin_shufflevector(__ret_411, __ret_411, __lane_reverse_128_32); \
|
|
__ret_411; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_s64(__p0_412, __p1_412) __extension__ ({ \
|
|
int64x2_t __ret_412; \
|
|
int64x2_t __s0_412 = __p0_412; \
|
|
__ret_412 = splatq_laneq_s64(__s0_412, __p1_412); \
|
|
__ret_412; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_s64(__p0_413, __p1_413) __extension__ ({ \
|
|
int64x2_t __ret_413; \
|
|
int64x2_t __s0_413 = __p0_413; \
|
|
int64x2_t __rev0_413; __rev0_413 = __builtin_shufflevector(__s0_413, __s0_413, __lane_reverse_128_64); \
|
|
__ret_413 = __noswap_splatq_laneq_s64(__rev0_413, __p1_413); \
|
|
__ret_413 = __builtin_shufflevector(__ret_413, __ret_413, __lane_reverse_128_64); \
|
|
__ret_413; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_mf8(__p0_414, __p1_414) __extension__ ({ \
|
|
mfloat8x16_t __ret_414; \
|
|
mfloat8x16_t __s0_414 = __p0_414; \
|
|
__ret_414 = splatq_laneq_mf8(__s0_414, __p1_414); \
|
|
__ret_414; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_mf8(__p0_415, __p1_415) __extension__ ({ \
|
|
mfloat8x16_t __ret_415; \
|
|
mfloat8x16_t __s0_415 = __p0_415; \
|
|
mfloat8x16_t __rev0_415; __rev0_415 = __builtin_shufflevector(__s0_415, __s0_415, __lane_reverse_128_8); \
|
|
__ret_415 = __noswap_splatq_laneq_mf8(__rev0_415, __p1_415); \
|
|
__ret_415 = __builtin_shufflevector(__ret_415, __ret_415, __lane_reverse_128_8); \
|
|
__ret_415; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdupq_laneq_s16(__p0_416, __p1_416) __extension__ ({ \
|
|
int16x8_t __ret_416; \
|
|
int16x8_t __s0_416 = __p0_416; \
|
|
__ret_416 = splatq_laneq_s16(__s0_416, __p1_416); \
|
|
__ret_416; \
|
|
})
|
|
#else
|
|
#define vdupq_laneq_s16(__p0_417, __p1_417) __extension__ ({ \
|
|
int16x8_t __ret_417; \
|
|
int16x8_t __s0_417 = __p0_417; \
|
|
int16x8_t __rev0_417; __rev0_417 = __builtin_shufflevector(__s0_417, __s0_417, __lane_reverse_128_16); \
|
|
__ret_417 = __noswap_splatq_laneq_s16(__rev0_417, __p1_417); \
|
|
__ret_417 = __builtin_shufflevector(__ret_417, __ret_417, __lane_reverse_128_16); \
|
|
__ret_417; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_u8(__p0_418, __p1_418) __extension__ ({ \
|
|
uint8x8_t __ret_418; \
|
|
uint8x16_t __s0_418 = __p0_418; \
|
|
__ret_418 = splat_laneq_u8(__s0_418, __p1_418); \
|
|
__ret_418; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_u8(__p0_419, __p1_419) __extension__ ({ \
|
|
uint8x8_t __ret_419; \
|
|
uint8x16_t __s0_419 = __p0_419; \
|
|
uint8x16_t __rev0_419; __rev0_419 = __builtin_shufflevector(__s0_419, __s0_419, __lane_reverse_128_8); \
|
|
__ret_419 = __noswap_splat_laneq_u8(__rev0_419, __p1_419); \
|
|
__ret_419 = __builtin_shufflevector(__ret_419, __ret_419, __lane_reverse_64_8); \
|
|
__ret_419; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_u32(__p0_420, __p1_420) __extension__ ({ \
|
|
uint32x2_t __ret_420; \
|
|
uint32x4_t __s0_420 = __p0_420; \
|
|
__ret_420 = splat_laneq_u32(__s0_420, __p1_420); \
|
|
__ret_420; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_u32(__p0_421, __p1_421) __extension__ ({ \
|
|
uint32x2_t __ret_421; \
|
|
uint32x4_t __s0_421 = __p0_421; \
|
|
uint32x4_t __rev0_421; __rev0_421 = __builtin_shufflevector(__s0_421, __s0_421, __lane_reverse_128_32); \
|
|
__ret_421 = __noswap_splat_laneq_u32(__rev0_421, __p1_421); \
|
|
__ret_421 = __builtin_shufflevector(__ret_421, __ret_421, __lane_reverse_64_32); \
|
|
__ret_421; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_u64(__p0_422, __p1_422) __extension__ ({ \
|
|
uint64x1_t __ret_422; \
|
|
uint64x2_t __s0_422 = __p0_422; \
|
|
__ret_422 = splat_laneq_u64(__s0_422, __p1_422); \
|
|
__ret_422; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_u64(__p0_423, __p1_423) __extension__ ({ \
|
|
uint64x1_t __ret_423; \
|
|
uint64x2_t __s0_423 = __p0_423; \
|
|
uint64x2_t __rev0_423; __rev0_423 = __builtin_shufflevector(__s0_423, __s0_423, __lane_reverse_128_64); \
|
|
__ret_423 = __noswap_splat_laneq_u64(__rev0_423, __p1_423); \
|
|
__ret_423; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_u16(__p0_424, __p1_424) __extension__ ({ \
|
|
uint16x4_t __ret_424; \
|
|
uint16x8_t __s0_424 = __p0_424; \
|
|
__ret_424 = splat_laneq_u16(__s0_424, __p1_424); \
|
|
__ret_424; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_u16(__p0_425, __p1_425) __extension__ ({ \
|
|
uint16x4_t __ret_425; \
|
|
uint16x8_t __s0_425 = __p0_425; \
|
|
uint16x8_t __rev0_425; __rev0_425 = __builtin_shufflevector(__s0_425, __s0_425, __lane_reverse_128_16); \
|
|
__ret_425 = __noswap_splat_laneq_u16(__rev0_425, __p1_425); \
|
|
__ret_425 = __builtin_shufflevector(__ret_425, __ret_425, __lane_reverse_64_16); \
|
|
__ret_425; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_s8(__p0_426, __p1_426) __extension__ ({ \
|
|
int8x8_t __ret_426; \
|
|
int8x16_t __s0_426 = __p0_426; \
|
|
__ret_426 = splat_laneq_s8(__s0_426, __p1_426); \
|
|
__ret_426; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_s8(__p0_427, __p1_427) __extension__ ({ \
|
|
int8x8_t __ret_427; \
|
|
int8x16_t __s0_427 = __p0_427; \
|
|
int8x16_t __rev0_427; __rev0_427 = __builtin_shufflevector(__s0_427, __s0_427, __lane_reverse_128_8); \
|
|
__ret_427 = __noswap_splat_laneq_s8(__rev0_427, __p1_427); \
|
|
__ret_427 = __builtin_shufflevector(__ret_427, __ret_427, __lane_reverse_64_8); \
|
|
__ret_427; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_f64(__p0_428, __p1_428) __extension__ ({ \
|
|
float64x1_t __ret_428; \
|
|
float64x2_t __s0_428 = __p0_428; \
|
|
__ret_428 = splat_laneq_f64(__s0_428, __p1_428); \
|
|
__ret_428; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_f64(__p0_429, __p1_429) __extension__ ({ \
|
|
float64x1_t __ret_429; \
|
|
float64x2_t __s0_429 = __p0_429; \
|
|
float64x2_t __rev0_429; __rev0_429 = __builtin_shufflevector(__s0_429, __s0_429, __lane_reverse_128_64); \
|
|
__ret_429 = __noswap_splat_laneq_f64(__rev0_429, __p1_429); \
|
|
__ret_429; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_f32(__p0_430, __p1_430) __extension__ ({ \
|
|
float32x2_t __ret_430; \
|
|
float32x4_t __s0_430 = __p0_430; \
|
|
__ret_430 = splat_laneq_f32(__s0_430, __p1_430); \
|
|
__ret_430; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_f32(__p0_431, __p1_431) __extension__ ({ \
|
|
float32x2_t __ret_431; \
|
|
float32x4_t __s0_431 = __p0_431; \
|
|
float32x4_t __rev0_431; __rev0_431 = __builtin_shufflevector(__s0_431, __s0_431, __lane_reverse_128_32); \
|
|
__ret_431 = __noswap_splat_laneq_f32(__rev0_431, __p1_431); \
|
|
__ret_431 = __builtin_shufflevector(__ret_431, __ret_431, __lane_reverse_64_32); \
|
|
__ret_431; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_f16(__p0_432, __p1_432) __extension__ ({ \
|
|
float16x4_t __ret_432; \
|
|
float16x8_t __s0_432 = __p0_432; \
|
|
__ret_432 = splat_laneq_f16(__s0_432, __p1_432); \
|
|
__ret_432; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_f16(__p0_433, __p1_433) __extension__ ({ \
|
|
float16x4_t __ret_433; \
|
|
float16x8_t __s0_433 = __p0_433; \
|
|
float16x8_t __rev0_433; __rev0_433 = __builtin_shufflevector(__s0_433, __s0_433, __lane_reverse_128_16); \
|
|
__ret_433 = __noswap_splat_laneq_f16(__rev0_433, __p1_433); \
|
|
__ret_433 = __builtin_shufflevector(__ret_433, __ret_433, __lane_reverse_64_16); \
|
|
__ret_433; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_s32(__p0_434, __p1_434) __extension__ ({ \
|
|
int32x2_t __ret_434; \
|
|
int32x4_t __s0_434 = __p0_434; \
|
|
__ret_434 = splat_laneq_s32(__s0_434, __p1_434); \
|
|
__ret_434; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_s32(__p0_435, __p1_435) __extension__ ({ \
|
|
int32x2_t __ret_435; \
|
|
int32x4_t __s0_435 = __p0_435; \
|
|
int32x4_t __rev0_435; __rev0_435 = __builtin_shufflevector(__s0_435, __s0_435, __lane_reverse_128_32); \
|
|
__ret_435 = __noswap_splat_laneq_s32(__rev0_435, __p1_435); \
|
|
__ret_435 = __builtin_shufflevector(__ret_435, __ret_435, __lane_reverse_64_32); \
|
|
__ret_435; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_s64(__p0_436, __p1_436) __extension__ ({ \
|
|
int64x1_t __ret_436; \
|
|
int64x2_t __s0_436 = __p0_436; \
|
|
__ret_436 = splat_laneq_s64(__s0_436, __p1_436); \
|
|
__ret_436; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_s64(__p0_437, __p1_437) __extension__ ({ \
|
|
int64x1_t __ret_437; \
|
|
int64x2_t __s0_437 = __p0_437; \
|
|
int64x2_t __rev0_437; __rev0_437 = __builtin_shufflevector(__s0_437, __s0_437, __lane_reverse_128_64); \
|
|
__ret_437 = __noswap_splat_laneq_s64(__rev0_437, __p1_437); \
|
|
__ret_437; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_mf8(__p0_438, __p1_438) __extension__ ({ \
|
|
mfloat8x8_t __ret_438; \
|
|
mfloat8x16_t __s0_438 = __p0_438; \
|
|
__ret_438 = splat_laneq_mf8(__s0_438, __p1_438); \
|
|
__ret_438; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_mf8(__p0_439, __p1_439) __extension__ ({ \
|
|
mfloat8x8_t __ret_439; \
|
|
mfloat8x16_t __s0_439 = __p0_439; \
|
|
mfloat8x16_t __rev0_439; __rev0_439 = __builtin_shufflevector(__s0_439, __s0_439, __lane_reverse_128_8); \
|
|
__ret_439 = __noswap_splat_laneq_mf8(__rev0_439, __p1_439); \
|
|
__ret_439 = __builtin_shufflevector(__ret_439, __ret_439, __lane_reverse_64_8); \
|
|
__ret_439; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdup_laneq_s16(__p0_440, __p1_440) __extension__ ({ \
|
|
int16x4_t __ret_440; \
|
|
int16x8_t __s0_440 = __p0_440; \
|
|
__ret_440 = splat_laneq_s16(__s0_440, __p1_440); \
|
|
__ret_440; \
|
|
})
|
|
#else
|
|
#define vdup_laneq_s16(__p0_441, __p1_441) __extension__ ({ \
|
|
int16x4_t __ret_441; \
|
|
int16x8_t __s0_441 = __p0_441; \
|
|
int16x8_t __rev0_441; __rev0_441 = __builtin_shufflevector(__s0_441, __s0_441, __lane_reverse_128_16); \
|
|
__ret_441 = __noswap_splat_laneq_s16(__rev0_441, __p1_441); \
|
|
__ret_441 = __builtin_shufflevector(__ret_441, __ret_441, __lane_reverse_64_16); \
|
|
__ret_441; \
|
|
})
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) poly64x1_t vdup_n_p64(poly64_t __p0) {
poly64x1_t __ret;
__ret = (poly64x1_t) {__p0};
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly64x2_t vdupq_n_p64(poly64_t __p0) {
poly64x2_t __ret;
__ret = (poly64x2_t) {__p0, __p0};
return __ret;
}
#else
__ai __attribute__((target("neon"))) poly64x2_t vdupq_n_p64(poly64_t __p0) {
poly64x2_t __ret;
__ret = (poly64x2_t) {__p0, __p0};
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vdupq_n_f64(float64_t __p0) {
float64x2_t __ret;
__ret = (float64x2_t) {__p0, __p0};
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vdupq_n_f64(float64_t __p0) {
float64x2_t __ret;
__ret = (float64x2_t) {__p0, __p0};
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vdup_n_f64(float64_t __p0) {
float64x1_t __ret;
__ret = (float64x1_t) {__p0};
return __ret;
}
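/*
 * Illustrative usage (editor's sketch, not part of the generated header):
 * vdupq_n_f64 broadcasts a scalar into every lane of a float64x2_t. The
 * helper name below is hypothetical.
 *
 *   static inline float64x2_t scale2(float64x2_t v, float64_t s) {
 *     return vmulq_f64(v, vdupq_n_f64(s));   // multiply both lanes by s
 *   }
 */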
#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
poly64x1_t __ret; \
poly64x1_t __s0 = __p0; \
poly64x1_t __s1 = __p1; \
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 6)); \
__ret; \
})
#ifdef __LITTLE_ENDIAN__
#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
poly64x2_t __ret; \
poly64x2_t __s0 = __p0; \
poly64x2_t __s1 = __p1; \
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 38)); \
__ret; \
})
#else
#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
poly64x2_t __ret; \
poly64x2_t __s0 = __p0; \
poly64x2_t __s1 = __p1; \
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 38)); \
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
__ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
float64x2_t __ret; \
float64x2_t __s0 = __p0; \
float64x2_t __s1 = __p1; \
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 42)); \
__ret; \
})
#else
#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
float64x2_t __ret; \
float64x2_t __s0 = __p0; \
float64x2_t __s1 = __p1; \
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vextq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 42)); \
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
__ret; \
})
#endif

#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
float64x1_t __ret; \
float64x1_t __s0 = __p0; \
float64x1_t __s1 = __p1; \
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vext_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 10)); \
__ret; \
})
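/*
 * Illustrative usage (editor's sketch, not part of the generated header):
 * vextq_f64(a, b, n) concatenates a and b and extracts two consecutive lanes
 * starting at lane n, so n == 1 pairs the high lane of a with the low lane
 * of b.
 *
 *   float64x2_t rotated = vextq_f64(a, b, 1);   // {a[1], b[0]}
 */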
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vfmaq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vfmaq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
__ai __attribute__((target("neon"))) float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vfmaq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 42));
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vfma_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 10));
return __ret;
}
#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
float64_t __ret; \
float64_t __s0 = __p0; \
float64_t __s1 = __p1; \
float64x1_t __s2 = __p2; \
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vfmad_lane_f64(__s0, __s1, __s2, __p3)); \
__ret; \
})
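/*
 * Illustrative usage (editor's sketch, not part of the generated header):
 * vfmaq_f64(acc, x, y) computes acc + x * y with a single rounding (fused
 * multiply-add), and vfmad_lane_f64 does the same for a scalar against one
 * lane of a vector. The helper name below is hypothetical.
 *
 *   static inline float64x2_t axpy2(float64x2_t acc, float64x2_t x, float64_t a) {
 *     return vfmaq_f64(acc, x, vdupq_n_f64(a));   // acc + a * x, per lane
 *   }
 */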
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
float32_t __s1 = __p1; \
|
|
float32x2_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vfmas_lane_f32(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
float32_t __s1 = __p1; \
|
|
float32x2_t __s2 = __p2; \
|
|
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vfmas_lane_f32(__s0, __s1, __rev2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32_t __ret; \
|
|
float32_t __s0 = __p0; \
|
|
float32_t __s1 = __p1; \
|
|
float32x2_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vfmas_lane_f32(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x1_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vfmaq_lane_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 42)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x1_t __s2 = __p2; \
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vfmaq_lane_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x8_t, __s2), __p3, 42)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x1_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vfmaq_lane_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 42)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x2_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmaq_lane_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 41)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x2_t __s2 = __p2; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmaq_lane_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), __p3, 41)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x2_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmaq_lane_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 41)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float64x1_t __ret; \
float64x1_t __s0 = __p0; \
float64x1_t __s1 = __p1; \
float64x1_t __s2 = __p2; \
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vfma_lane_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 10)); \
__ret; \
})
#ifdef __LITTLE_ENDIAN__
#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
float32x2_t __ret; \
float32x2_t __s0 = __p0; \
float32x2_t __s1 = __p1; \
float32x2_t __s2 = __p2; \
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfma_lane_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 9)); \
__ret; \
})
#else
#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
float32x2_t __ret; \
float32x2_t __s0 = __p0; \
float32x2_t __s1 = __p1; \
float32x2_t __s2 = __p2; \
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_32); \
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfma_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), __p3, 9)); \
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
__ret; \
})
#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
float32x2_t __ret; \
float32x2_t __s0 = __p0; \
float32x2_t __s1 = __p1; \
float32x2_t __s2 = __p2; \
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfma_lane_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x8_t, __s2), __p3, 9)); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
float64_t __ret; \
float64_t __s0 = __p0; \
float64_t __s1 = __p1; \
float64x2_t __s2 = __p2; \
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vfmad_laneq_f64(__s0, __s1, __s2, __p3)); \
__ret; \
})
#else
#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
float64_t __ret; \
float64_t __s0 = __p0; \
float64_t __s1 = __p1; \
float64x2_t __s2 = __p2; \
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_64); \
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vfmad_laneq_f64(__s0, __s1, __rev2, __p3)); \
__ret; \
})
#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
float64_t __ret; \
float64_t __s0 = __p0; \
float64_t __s1 = __p1; \
float64x2_t __s2 = __p2; \
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vfmad_laneq_f64(__s0, __s1, __s2, __p3)); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
float32_t __ret; \
float32_t __s0 = __p0; \
float32_t __s1 = __p1; \
float32x4_t __s2 = __p2; \
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vfmas_laneq_f32(__s0, __s1, __s2, __p3)); \
__ret; \
})
#else
#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
float32_t __ret; \
float32_t __s0 = __p0; \
float32_t __s1 = __p1; \
float32x4_t __s2 = __p2; \
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_32); \
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vfmas_laneq_f32(__s0, __s1, __rev2, __p3)); \
__ret; \
})
#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
float32_t __ret; \
float32_t __s0 = __p0; \
float32_t __s1 = __p1; \
float32x4_t __s2 = __p2; \
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vfmas_laneq_f32(__s0, __s1, __s2, __p3)); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x2_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vfmaq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 42)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x2_t __s2 = __p2; \
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vfmaq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), __p3, 42)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x2_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vfmaq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 42)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x4_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmaq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 41)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x4_t __s2 = __p2; \
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmaq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), __p3, 41)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32x4_t __ret; \
|
|
float32x4_t __s0 = __p0; \
|
|
float32x4_t __s1 = __p1; \
|
|
float32x4_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vfmaq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 41)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
float64x1_t __s1 = __p1; \
|
|
float64x2_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vfma_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 10)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
float64x1_t __s1 = __p1; \
|
|
float64x2_t __s2 = __p2; \
|
|
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vfma_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x16_t, __rev2), __p3, 10)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
float64x1_t __s1 = __p1; \
|
|
float64x2_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vfma_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 10)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __s1 = __p1; \
|
|
float32x4_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfma_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 9)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __s1 = __p1; \
|
|
float32x4_t __s2 = __p2; \
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfma_laneq_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), __p3, 9)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
float32x2_t __ret; \
|
|
float32x2_t __s0 = __p0; \
|
|
float32x2_t __s1 = __p1; \
|
|
float32x4_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vfma_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 9)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
|
|
float64x2_t __ret;
|
|
__ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
|
|
float64x1_t __ret;
|
|
__ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2});
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
__ret = vfmaq_f64(__p0, -__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
|
|
float64x1_t __ret;
|
|
__ret = vfma_f64(__p0, -__p1, __p2);
|
|
return __ret;
|
|
}
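/*
 * As the definitions above show, the vfms* family is implemented by negating
 * the second multiplicand and reusing the corresponding vfma* form, so
 * vfms_f64(a, b, c) computes a + (-b) * c per lane; the same negate-and-reuse
 * scheme is applied to the lane and laneq variants that follow.
 */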
|
|
#define vfmsd_lane_f64(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \
|
|
float64_t __ret_442; \
|
|
float64_t __s0_442 = __p0_442; \
|
|
float64_t __s1_442 = __p1_442; \
|
|
float64x1_t __s2_442 = __p2_442; \
|
|
__ret_442 = vfmad_lane_f64(__s0_442, -__s1_442, __s2_442, __p3_442); \
|
|
__ret_442; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmss_lane_f32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \
|
|
float32_t __ret_443; \
|
|
float32_t __s0_443 = __p0_443; \
|
|
float32_t __s1_443 = __p1_443; \
|
|
float32x2_t __s2_443 = __p2_443; \
|
|
__ret_443 = vfmas_lane_f32(__s0_443, -__s1_443, __s2_443, __p3_443); \
|
|
__ret_443; \
|
|
})
|
|
#else
|
|
#define vfmss_lane_f32(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \
|
|
float32_t __ret_444; \
|
|
float32_t __s0_444 = __p0_444; \
|
|
float32_t __s1_444 = __p1_444; \
|
|
float32x2_t __s2_444 = __p2_444; \
|
|
float32x2_t __rev2_444; __rev2_444 = __builtin_shufflevector(__s2_444, __s2_444, __lane_reverse_64_32); \
|
|
__ret_444 = __noswap_vfmas_lane_f32(__s0_444, -__s1_444, __rev2_444, __p3_444); \
|
|
__ret_444; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmsq_lane_f64(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \
|
|
float64x2_t __ret_445; \
|
|
float64x2_t __s0_445 = __p0_445; \
|
|
float64x2_t __s1_445 = __p1_445; \
|
|
float64x1_t __s2_445 = __p2_445; \
|
|
__ret_445 = vfmaq_lane_f64(__s0_445, -__s1_445, __s2_445, __p3_445); \
|
|
__ret_445; \
|
|
})
|
|
#else
|
|
#define vfmsq_lane_f64(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \
|
|
float64x2_t __ret_446; \
|
|
float64x2_t __s0_446 = __p0_446; \
|
|
float64x2_t __s1_446 = __p1_446; \
|
|
float64x1_t __s2_446 = __p2_446; \
|
|
float64x2_t __rev0_446; __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, __lane_reverse_128_64); \
|
|
float64x2_t __rev1_446; __rev1_446 = __builtin_shufflevector(__s1_446, __s1_446, __lane_reverse_128_64); \
|
|
__ret_446 = __noswap_vfmaq_lane_f64(__rev0_446, -__rev1_446, __s2_446, __p3_446); \
|
|
__ret_446 = __builtin_shufflevector(__ret_446, __ret_446, __lane_reverse_128_64); \
|
|
__ret_446; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmsq_lane_f32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \
|
|
float32x4_t __ret_447; \
|
|
float32x4_t __s0_447 = __p0_447; \
|
|
float32x4_t __s1_447 = __p1_447; \
|
|
float32x2_t __s2_447 = __p2_447; \
|
|
__ret_447 = vfmaq_lane_f32(__s0_447, -__s1_447, __s2_447, __p3_447); \
|
|
__ret_447; \
|
|
})
|
|
#else
|
|
#define vfmsq_lane_f32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \
|
|
float32x4_t __ret_448; \
|
|
float32x4_t __s0_448 = __p0_448; \
|
|
float32x4_t __s1_448 = __p1_448; \
|
|
float32x2_t __s2_448 = __p2_448; \
|
|
float32x4_t __rev0_448; __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_448; __rev1_448 = __builtin_shufflevector(__s1_448, __s1_448, __lane_reverse_128_32); \
|
|
float32x2_t __rev2_448; __rev2_448 = __builtin_shufflevector(__s2_448, __s2_448, __lane_reverse_64_32); \
|
|
__ret_448 = __noswap_vfmaq_lane_f32(__rev0_448, -__rev1_448, __rev2_448, __p3_448); \
|
|
__ret_448 = __builtin_shufflevector(__ret_448, __ret_448, __lane_reverse_128_32); \
|
|
__ret_448; \
|
|
})
|
|
#endif
|
|
|
|
#define vfms_lane_f64(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \
|
|
float64x1_t __ret_449; \
|
|
float64x1_t __s0_449 = __p0_449; \
|
|
float64x1_t __s1_449 = __p1_449; \
|
|
float64x1_t __s2_449 = __p2_449; \
|
|
__ret_449 = vfma_lane_f64(__s0_449, -__s1_449, __s2_449, __p3_449); \
|
|
__ret_449; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfms_lane_f32(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \
|
|
float32x2_t __ret_450; \
|
|
float32x2_t __s0_450 = __p0_450; \
|
|
float32x2_t __s1_450 = __p1_450; \
|
|
float32x2_t __s2_450 = __p2_450; \
|
|
__ret_450 = vfma_lane_f32(__s0_450, -__s1_450, __s2_450, __p3_450); \
|
|
__ret_450; \
|
|
})
|
|
#else
|
|
#define vfms_lane_f32(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \
|
|
float32x2_t __ret_451; \
|
|
float32x2_t __s0_451 = __p0_451; \
|
|
float32x2_t __s1_451 = __p1_451; \
|
|
float32x2_t __s2_451 = __p2_451; \
|
|
float32x2_t __rev0_451; __rev0_451 = __builtin_shufflevector(__s0_451, __s0_451, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_451; __rev1_451 = __builtin_shufflevector(__s1_451, __s1_451, __lane_reverse_64_32); \
|
|
float32x2_t __rev2_451; __rev2_451 = __builtin_shufflevector(__s2_451, __s2_451, __lane_reverse_64_32); \
|
|
__ret_451 = __noswap_vfma_lane_f32(__rev0_451, -__rev1_451, __rev2_451, __p3_451); \
|
|
__ret_451 = __builtin_shufflevector(__ret_451, __ret_451, __lane_reverse_64_32); \
|
|
__ret_451; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmsd_laneq_f64(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \
|
|
float64_t __ret_452; \
|
|
float64_t __s0_452 = __p0_452; \
|
|
float64_t __s1_452 = __p1_452; \
|
|
float64x2_t __s2_452 = __p2_452; \
|
|
__ret_452 = vfmad_laneq_f64(__s0_452, -__s1_452, __s2_452, __p3_452); \
|
|
__ret_452; \
|
|
})
|
|
#else
|
|
#define vfmsd_laneq_f64(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \
|
|
float64_t __ret_453; \
|
|
float64_t __s0_453 = __p0_453; \
|
|
float64_t __s1_453 = __p1_453; \
|
|
float64x2_t __s2_453 = __p2_453; \
|
|
float64x2_t __rev2_453; __rev2_453 = __builtin_shufflevector(__s2_453, __s2_453, __lane_reverse_128_64); \
|
|
__ret_453 = __noswap_vfmad_laneq_f64(__s0_453, -__s1_453, __rev2_453, __p3_453); \
|
|
__ret_453; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmss_laneq_f32(__p0_454, __p1_454, __p2_454, __p3_454) __extension__ ({ \
|
|
float32_t __ret_454; \
|
|
float32_t __s0_454 = __p0_454; \
|
|
float32_t __s1_454 = __p1_454; \
|
|
float32x4_t __s2_454 = __p2_454; \
|
|
__ret_454 = vfmas_laneq_f32(__s0_454, -__s1_454, __s2_454, __p3_454); \
|
|
__ret_454; \
|
|
})
|
|
#else
|
|
#define vfmss_laneq_f32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \
|
|
float32_t __ret_455; \
|
|
float32_t __s0_455 = __p0_455; \
|
|
float32_t __s1_455 = __p1_455; \
|
|
float32x4_t __s2_455 = __p2_455; \
|
|
float32x4_t __rev2_455; __rev2_455 = __builtin_shufflevector(__s2_455, __s2_455, __lane_reverse_128_32); \
|
|
__ret_455 = __noswap_vfmas_laneq_f32(__s0_455, -__s1_455, __rev2_455, __p3_455); \
|
|
__ret_455; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmsq_laneq_f64(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \
|
|
float64x2_t __ret_456; \
|
|
float64x2_t __s0_456 = __p0_456; \
|
|
float64x2_t __s1_456 = __p1_456; \
|
|
float64x2_t __s2_456 = __p2_456; \
|
|
__ret_456 = vfmaq_laneq_f64(__s0_456, -__s1_456, __s2_456, __p3_456); \
|
|
__ret_456; \
|
|
})
|
|
#else
|
|
#define vfmsq_laneq_f64(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \
|
|
float64x2_t __ret_457; \
|
|
float64x2_t __s0_457 = __p0_457; \
|
|
float64x2_t __s1_457 = __p1_457; \
|
|
float64x2_t __s2_457 = __p2_457; \
|
|
float64x2_t __rev0_457; __rev0_457 = __builtin_shufflevector(__s0_457, __s0_457, __lane_reverse_128_64); \
|
|
float64x2_t __rev1_457; __rev1_457 = __builtin_shufflevector(__s1_457, __s1_457, __lane_reverse_128_64); \
|
|
float64x2_t __rev2_457; __rev2_457 = __builtin_shufflevector(__s2_457, __s2_457, __lane_reverse_128_64); \
|
|
__ret_457 = __noswap_vfmaq_laneq_f64(__rev0_457, -__rev1_457, __rev2_457, __p3_457); \
|
|
__ret_457 = __builtin_shufflevector(__ret_457, __ret_457, __lane_reverse_128_64); \
|
|
__ret_457; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfmsq_laneq_f32(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \
|
|
float32x4_t __ret_458; \
|
|
float32x4_t __s0_458 = __p0_458; \
|
|
float32x4_t __s1_458 = __p1_458; \
|
|
float32x4_t __s2_458 = __p2_458; \
|
|
__ret_458 = vfmaq_laneq_f32(__s0_458, -__s1_458, __s2_458, __p3_458); \
|
|
__ret_458; \
|
|
})
|
|
#else
|
|
#define vfmsq_laneq_f32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \
|
|
float32x4_t __ret_459; \
|
|
float32x4_t __s0_459 = __p0_459; \
|
|
float32x4_t __s1_459 = __p1_459; \
|
|
float32x4_t __s2_459 = __p2_459; \
|
|
float32x4_t __rev0_459; __rev0_459 = __builtin_shufflevector(__s0_459, __s0_459, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_459; __rev1_459 = __builtin_shufflevector(__s1_459, __s1_459, __lane_reverse_128_32); \
|
|
float32x4_t __rev2_459; __rev2_459 = __builtin_shufflevector(__s2_459, __s2_459, __lane_reverse_128_32); \
|
|
__ret_459 = __noswap_vfmaq_laneq_f32(__rev0_459, -__rev1_459, __rev2_459, __p3_459); \
|
|
__ret_459 = __builtin_shufflevector(__ret_459, __ret_459, __lane_reverse_128_32); \
|
|
__ret_459; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfms_laneq_f64(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \
|
|
float64x1_t __ret_460; \
|
|
float64x1_t __s0_460 = __p0_460; \
|
|
float64x1_t __s1_460 = __p1_460; \
|
|
float64x2_t __s2_460 = __p2_460; \
|
|
__ret_460 = vfma_laneq_f64(__s0_460, -__s1_460, __s2_460, __p3_460); \
|
|
__ret_460; \
|
|
})
|
|
#else
|
|
#define vfms_laneq_f64(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \
|
|
float64x1_t __ret_461; \
|
|
float64x1_t __s0_461 = __p0_461; \
|
|
float64x1_t __s1_461 = __p1_461; \
|
|
float64x2_t __s2_461 = __p2_461; \
|
|
float64x2_t __rev2_461; __rev2_461 = __builtin_shufflevector(__s2_461, __s2_461, __lane_reverse_128_64); \
|
|
__ret_461 = __noswap_vfma_laneq_f64(__s0_461, -__s1_461, __rev2_461, __p3_461); \
|
|
__ret_461; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vfms_laneq_f32(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \
|
|
float32x2_t __ret_462; \
|
|
float32x2_t __s0_462 = __p0_462; \
|
|
float32x2_t __s1_462 = __p1_462; \
|
|
float32x4_t __s2_462 = __p2_462; \
|
|
__ret_462 = vfma_laneq_f32(__s0_462, -__s1_462, __s2_462, __p3_462); \
|
|
__ret_462; \
|
|
})
|
|
#else
|
|
#define vfms_laneq_f32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \
|
|
float32x2_t __ret_463; \
|
|
float32x2_t __s0_463 = __p0_463; \
|
|
float32x2_t __s1_463 = __p1_463; \
|
|
float32x4_t __s2_463 = __p2_463; \
|
|
float32x2_t __rev0_463; __rev0_463 = __builtin_shufflevector(__s0_463, __s0_463, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_463; __rev1_463 = __builtin_shufflevector(__s1_463, __s1_463, __lane_reverse_64_32); \
|
|
float32x4_t __rev2_463; __rev2_463 = __builtin_shufflevector(__s2_463, __s2_463, __lane_reverse_128_32); \
|
|
__ret_463 = __noswap_vfma_laneq_f32(__rev0_463, -__rev1_463, __rev2_463, __p3_463); \
|
|
__ret_463 = __builtin_shufflevector(__ret_463, __ret_463, __lane_reverse_64_32); \
|
|
__ret_463; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
|
|
float64x2_t __ret;
|
|
__ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
|
|
float32x4_t __ret;
|
|
__ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
|
|
float64x1_t __ret;
|
|
__ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2});
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
|
|
float32x2_t __ret;
|
|
__ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x1_t vget_high_p64(poly64x2_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x1_t vget_high_p64(poly64x2_t __p0) {
|
|
poly64x1_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 1);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x1_t vget_high_f64(float64x2_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x1_t vget_high_f64(float64x2_t __p0) {
|
|
float64x1_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 1);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#define vget_lane_p64(__p0, __p1) __extension__ ({ \
|
|
poly64_t __ret; \
|
|
poly64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64_t, __builtin_neon_vget_lane_i64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
|
|
poly64_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64_t, __builtin_neon_vgetq_lane_i64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
|
|
poly64_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(poly64_t, __builtin_neon_vgetq_lane_i64(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
|
|
poly64_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(poly64_t, __builtin_neon_vgetq_lane_i64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
|
|
float64_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vgetq_lane_f64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
|
|
float64_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vgetq_lane_f64(__rev0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
|
|
float64_t __ret; \
|
|
float64x2_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vgetq_lane_f64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vget_lane_f64(__p0, __p1) __extension__ ({ \
|
|
float64_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vget_lane_f64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x1_t vget_low_p64(poly64x2_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x1_t vget_low_p64(poly64x2_t __p0) {
|
|
poly64x1_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x1_t vget_low_f64(float64x2_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p0, 0);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x1_t vget_low_f64(float64x2_t __p0) {
|
|
float64x1_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev0, 0);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#define vld1_p64(__p0) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_vld1_v(__p0, 6)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p64(__p0) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vld1q_v(__p0, 38)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p64(__p0) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vld1q_v(__p0, 38)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_f64(__p0) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vld1q_v(__p0, 42)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_f64(__p0) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vld1q_v(__p0, 42)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vld1q_v(__p0, 44)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vld1q_v(__p0, 44)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_f64(__p0) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vld1_v(__p0, 10)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vld1_v(__p0, 12)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vld1_v(__p0, 12)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_dup_p64(__p0) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_vld1_dup_v(__p0, 6)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_p64(__p0) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vld1q_dup_v(__p0, 38)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_p64(__p0) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vld1q_dup_v(__p0, 38)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_f64(__p0) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vld1q_dup_v(__p0, 42)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_f64(__p0) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vld1q_dup_v(__p0, 42)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vld1q_dup_v(__p0, 44)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vld1q_dup_v(__p0, 44)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_dup_f64(__p0) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vld1_dup_v(__p0, 10)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vld1_dup_v(__p0, 12)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vld1_dup_v(__p0, 12)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
poly64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 6)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 38)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s1 = __p1; \
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 38)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 42)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 42)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 44)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __ret; \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vld1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 44)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 10)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 12)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8_t __ret; \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vld1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 12)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_p64_x2(__p0) __extension__ ({ \
|
|
poly64x1x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p64_x2(__p0) __extension__ ({ \
|
|
poly64x2x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p64_x2(__p0) __extension__ ({ \
|
|
poly64x2x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_f64_x2(__p0) __extension__ ({ \
|
|
float64x2x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_f64_x2(__p0) __extension__ ({ \
|
|
float64x2x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_mf8_x2(__p0) __extension__ ({ \
|
|
mfloat8x16x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_mf8_x2(__p0) __extension__ ({ \
|
|
mfloat8x16x2_t __ret; \
|
|
__builtin_neon_vld1q_x2_v(&__ret, __p0, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_f64_x2(__p0) __extension__ ({ \
|
|
float64x1x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_mf8_x2(__p0) __extension__ ({ \
|
|
mfloat8x8x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_mf8_x2(__p0) __extension__ ({ \
|
|
mfloat8x8x2_t __ret; \
|
|
__builtin_neon_vld1_x2_v(&__ret, __p0, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_p64_x3(__p0) __extension__ ({ \
|
|
poly64x1x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p64_x3(__p0) __extension__ ({ \
|
|
poly64x2x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p64_x3(__p0) __extension__ ({ \
|
|
poly64x2x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_f64_x3(__p0) __extension__ ({ \
|
|
float64x2x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_f64_x3(__p0) __extension__ ({ \
|
|
float64x2x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_mf8_x3(__p0) __extension__ ({ \
|
|
mfloat8x16x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_mf8_x3(__p0) __extension__ ({ \
|
|
mfloat8x16x3_t __ret; \
|
|
__builtin_neon_vld1q_x3_v(&__ret, __p0, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_f64_x3(__p0) __extension__ ({ \
|
|
float64x1x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_mf8_x3(__p0) __extension__ ({ \
|
|
mfloat8x8x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_mf8_x3(__p0) __extension__ ({ \
|
|
mfloat8x8x3_t __ret; \
|
|
__builtin_neon_vld1_x3_v(&__ret, __p0, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_p64_x4(__p0) __extension__ ({ \
|
|
poly64x1x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_p64_x4(__p0) __extension__ ({ \
|
|
poly64x2x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_p64_x4(__p0) __extension__ ({ \
|
|
poly64x2x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_f64_x4(__p0) __extension__ ({ \
|
|
float64x2x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_f64_x4(__p0) __extension__ ({ \
|
|
float64x2x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1q_mf8_x4(__p0) __extension__ ({ \
|
|
mfloat8x16x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1q_mf8_x4(__p0) __extension__ ({ \
|
|
mfloat8x16x4_t __ret; \
|
|
__builtin_neon_vld1q_x4_v(&__ret, __p0, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld1_f64_x4(__p0) __extension__ ({ \
|
|
float64x1x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld1_mf8_x4(__p0) __extension__ ({ \
|
|
mfloat8x8x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld1_mf8_x4(__p0) __extension__ ({ \
|
|
mfloat8x8x4_t __ret; \
|
|
__builtin_neon_vld1_x4_v(&__ret, __p0, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld2_p64(__p0) __extension__ ({ \
|
|
poly64x1x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_p64(__p0) __extension__ ({ \
|
|
poly64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_p64(__p0) __extension__ ({ \
|
|
poly64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_u64(__p0) __extension__ ({ \
|
|
uint64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_u64(__p0) __extension__ ({ \
|
|
uint64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_f64(__p0) __extension__ ({ \
|
|
float64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_f64(__p0) __extension__ ({ \
|
|
float64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_s64(__p0) __extension__ ({ \
|
|
int64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_s64(__p0) __extension__ ({ \
|
|
int64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_v(&__ret, __p0, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld2_f64(__p0) __extension__ ({ \
|
|
float64x1x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 10); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x2_t __ret; \
|
|
__builtin_neon_vld2_v(&__ret, __p0, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld2_dup_p64(__p0) __extension__ ({ \
|
|
poly64x1x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_p64(__p0) __extension__ ({ \
|
|
poly64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_p64(__p0) __extension__ ({ \
|
|
poly64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_f64(__p0) __extension__ ({ \
|
|
float64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_f64(__p0) __extension__ ({ \
|
|
float64x2x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x2_t __ret; \
|
|
__builtin_neon_vld2q_dup_v(&__ret, __p0, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld2_dup_f64(__p0) __extension__ ({ \
|
|
float64x1x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x2_t __ret; \
|
|
__builtin_neon_vld2_dup_v(&__ret, __p0, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1x2_t __ret; \
|
|
poly64x1x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x2_t __ret; \
|
|
poly8x16x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x2_t __ret; \
|
|
poly8x16x2_t __s1 = __p1; \
|
|
poly8x16x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  poly64x2x2_t __ret; \
  poly64x2x2_t __s1 = __p1; \
  __builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 38); \
  __ret; \
})
#else
#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  poly64x2x2_t __ret; \
  poly64x2x2_t __s1 = __p1; \
  poly64x2x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
  __builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 38); \
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  uint8x16x2_t __ret; \
  uint8x16x2_t __s1 = __p1; \
  __builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 48); \
  __ret; \
})
#else
#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  uint8x16x2_t __ret; \
  uint8x16x2_t __s1 = __p1; \
  uint8x16x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
  __builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 48); \
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  uint64x2x2_t __ret; \
  uint64x2x2_t __s1 = __p1; \
  __builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 51); \
  __ret; \
})
#else
#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  uint64x2x2_t __ret; \
  uint64x2x2_t __s1 = __p1; \
  uint64x2x2_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
  __builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 51); \
  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x2_t __ret; \
|
|
int8x16x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x2_t __ret; \
|
|
int8x16x2_t __s1 = __p1; \
|
|
int8x16x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x2_t __ret; \
|
|
float64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x2_t __ret; \
|
|
float64x2x2_t __s1 = __p1; \
|
|
float64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x2_t __ret; \
|
|
int64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x2_t __ret; \
|
|
int64x2x2_t __s1 = __p1; \
|
|
int64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x2_t __ret; \
|
|
mfloat8x16x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x2_t __ret; \
|
|
mfloat8x16x2_t __s1 = __p1; \
|
|
mfloat8x16x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__builtin_neon_vld2q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  uint64x1x2_t __ret; \
  uint64x1x2_t __s1 = __p1; \
  __builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 19); \
  __ret; \
})
#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  float64x1x2_t __ret; \
  float64x1x2_t __s1 = __p1; \
  __builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 10); \
  __ret; \
})
#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  int64x1x2_t __ret; \
  int64x1x2_t __s1 = __p1; \
  __builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 3); \
  __ret; \
})
#ifdef __LITTLE_ENDIAN__
|
|
#define vld2_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x2_t __ret; \
|
|
mfloat8x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld2_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x2_t __ret; \
|
|
mfloat8x8x2_t __s1 = __p1; \
|
|
mfloat8x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__builtin_neon_vld2_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld3_p64(__p0) __extension__ ({ \
|
|
poly64x1x3_t __ret; \
|
|
__builtin_neon_vld3_v(&__ret, __p0, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_p64(__p0) __extension__ ({ \
|
|
poly64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_p64(__p0) __extension__ ({ \
|
|
poly64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_u64(__p0) __extension__ ({ \
|
|
uint64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_u64(__p0) __extension__ ({ \
|
|
uint64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_f64(__p0) __extension__ ({ \
|
|
float64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_f64(__p0) __extension__ ({ \
|
|
float64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_s64(__p0) __extension__ ({ \
|
|
int64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_s64(__p0) __extension__ ({ \
|
|
int64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x3_t __ret; \
|
|
__builtin_neon_vld3q_v(&__ret, __p0, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld3_f64(__p0) __extension__ ({ \
|
|
float64x1x3_t __ret; \
|
|
__builtin_neon_vld3_v(&__ret, __p0, 10); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x3_t __ret; \
|
|
__builtin_neon_vld3_v(&__ret, __p0, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x3_t __ret; \
|
|
__builtin_neon_vld3_v(&__ret, __p0, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld3_dup_p64(__p0) __extension__ ({ \
|
|
poly64x1x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_p64(__p0) __extension__ ({ \
|
|
poly64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_p64(__p0) __extension__ ({ \
|
|
poly64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_f64(__p0) __extension__ ({ \
|
|
float64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_f64(__p0) __extension__ ({ \
|
|
float64x2x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x3_t __ret; \
|
|
__builtin_neon_vld3q_dup_v(&__ret, __p0, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld3_dup_f64(__p0) __extension__ ({ \
|
|
float64x1x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x3_t __ret; \
|
|
__builtin_neon_vld3_dup_v(&__ret, __p0, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1x3_t __ret; \
|
|
poly64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x3_t __ret; \
|
|
poly8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x3_t __ret; \
|
|
poly8x16x3_t __s1 = __p1; \
|
|
poly8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2x3_t __ret; \
|
|
poly64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2x3_t __ret; \
|
|
poly64x2x3_t __s1 = __p1; \
|
|
poly64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16x3_t __ret; \
|
|
uint8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 48); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16x3_t __ret; \
|
|
uint8x16x3_t __s1 = __p1; \
|
|
uint8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 48); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2x3_t __ret; \
|
|
uint64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2x3_t __ret; \
|
|
uint64x2x3_t __s1 = __p1; \
|
|
uint64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x3_t __ret; \
|
|
int8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x3_t __ret; \
|
|
int8x16x3_t __s1 = __p1; \
|
|
int8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x3_t __ret; \
|
|
float64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x3_t __ret; \
|
|
float64x2x3_t __s1 = __p1; \
|
|
float64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x3_t __ret; \
|
|
int64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x3_t __ret; \
|
|
int64x2x3_t __s1 = __p1; \
|
|
int64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x3_t __ret; \
|
|
mfloat8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x3_t __ret; \
|
|
mfloat8x16x3_t __s1 = __p1; \
|
|
mfloat8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vld3q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  uint64x1x3_t __ret; \
  uint64x1x3_t __s1 = __p1; \
  __builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 19); \
  __ret; \
})
#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  float64x1x3_t __ret; \
  float64x1x3_t __s1 = __p1; \
  __builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 10); \
  __ret; \
})
#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  int64x1x3_t __ret; \
  int64x1x3_t __s1 = __p1; \
  __builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 3); \
  __ret; \
})
#ifdef __LITTLE_ENDIAN__
|
|
#define vld3_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x3_t __ret; \
|
|
mfloat8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld3_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x3_t __ret; \
|
|
mfloat8x8x3_t __s1 = __p1; \
|
|
mfloat8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vld3_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld4_p64(__p0) __extension__ ({ \
|
|
poly64x1x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_p64(__p0) __extension__ ({ \
|
|
poly64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_p64(__p0) __extension__ ({ \
|
|
poly64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_u64(__p0) __extension__ ({ \
|
|
uint64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_u64(__p0) __extension__ ({ \
|
|
uint64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_f64(__p0) __extension__ ({ \
|
|
float64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_f64(__p0) __extension__ ({ \
|
|
float64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_s64(__p0) __extension__ ({ \
|
|
int64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_s64(__p0) __extension__ ({ \
|
|
int64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_v(&__ret, __p0, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld4_f64(__p0) __extension__ ({ \
|
|
float64x1x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 10); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x4_t __ret; \
|
|
__builtin_neon_vld4_v(&__ret, __p0, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld4_dup_p64(__p0) __extension__ ({ \
|
|
poly64x1x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_p64(__p0) __extension__ ({ \
|
|
poly64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_p64(__p0) __extension__ ({ \
|
|
poly64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_f64(__p0) __extension__ ({ \
|
|
float64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_f64(__p0) __extension__ ({ \
|
|
float64x2x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x16x4_t __ret; \
|
|
__builtin_neon_vld4q_dup_v(&__ret, __p0, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld4_dup_f64(__p0) __extension__ ({ \
|
|
float64x1x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_dup_mf8(__p0) __extension__ ({ \
|
|
mfloat8x8x4_t __ret; \
|
|
__builtin_neon_vld4_dup_v(&__ret, __p0, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1x4_t __ret; \
|
|
poly64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 6); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x4_t __ret; \
|
|
poly8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 36); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x4_t __ret; \
|
|
poly8x16x4_t __s1 = __p1; \
|
|
poly8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 36); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2x4_t __ret; \
|
|
poly64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 38); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2x4_t __ret; \
|
|
poly64x2x4_t __s1 = __p1; \
|
|
poly64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 38); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16x4_t __ret; \
|
|
uint8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 48); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16x4_t __ret; \
|
|
uint8x16x4_t __s1 = __p1; \
|
|
uint8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 48); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2x4_t __ret; \
|
|
uint64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 51); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2x4_t __ret; \
|
|
uint64x2x4_t __s1 = __p1; \
|
|
uint64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 51); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x4_t __ret; \
|
|
int8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 32); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x4_t __ret; \
|
|
int8x16x4_t __s1 = __p1; \
|
|
int8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 32); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x4_t __ret; \
|
|
float64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 42); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x4_t __ret; \
|
|
float64x2x4_t __s1 = __p1; \
|
|
float64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 42); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x4_t __ret; \
|
|
int64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 35); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x4_t __ret; \
|
|
int64x2x4_t __s1 = __p1; \
|
|
int64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 35); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_64); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_64); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_64); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x4_t __ret; \
|
|
mfloat8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 44); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x4_t __ret; \
|
|
mfloat8x16x4_t __s1 = __p1; \
|
|
mfloat8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vld4q_lane_v(&__ret, __p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 44); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_128_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_128_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_128_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_128_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  uint64x1x4_t __ret; \
  uint64x1x4_t __s1 = __p1; \
  __builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 19); \
  __ret; \
})
#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  float64x1x4_t __ret; \
  float64x1x4_t __s1 = __p1; \
  __builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 10); \
  __ret; \
})
#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  int64x1x4_t __ret; \
  int64x1x4_t __s1 = __p1; \
  __builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 3); \
  __ret; \
})
#ifdef __LITTLE_ENDIAN__
|
|
#define vld4_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x4_t __ret; \
|
|
mfloat8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 12); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vld4_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x4_t __ret; \
|
|
mfloat8x8x4_t __s1 = __p1; \
|
|
mfloat8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vld4_lane_v(&__ret, __p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 12); \
|
|
\
|
|
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], __lane_reverse_64_8); \
|
|
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], __lane_reverse_64_8); \
|
|
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], __lane_reverse_64_8); \
|
|
__ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], __lane_reverse_64_8); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vldrq_p128(__p0) __extension__ ({ \
  poly128_t __ret; \
  __ret = __builtin_bit_cast(poly128_t, __builtin_neon_vldrq_p128(__p0)); \
  __ret; \
})
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
  float64x2_t __ret;
  __ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
  float64x2_t __ret;
  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
  float64x1_t __ret;
  __ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vmax_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 10));
  return __ret;
}
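
/* The vmaxv and vmaxnmv families below are across-lane reductions: they
 * return the maximum over all lanes of the source vector. The "nm" variants
 * follow the IEEE maxNum rule, preferring a numeric operand over a quiet NaN.
 */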
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64_t vmaxnmvq_f64(float64x2_t __p0) {
  float64_t __ret;
  __ret = __builtin_bit_cast(float64_t, __builtin_neon_vmaxnmvq_f64(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float64_t vmaxnmvq_f64(float64x2_t __p0) {
  float64_t __ret;
  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(float64_t, __builtin_neon_vmaxnmvq_f64(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32_t vmaxnmvq_f32(float32x4_t __p0) {
  float32_t __ret;
  __ret = __builtin_bit_cast(float32_t, __builtin_neon_vmaxnmvq_f32(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32_t vmaxnmvq_f32(float32x4_t __p0) {
  float32_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32_t, __builtin_neon_vmaxnmvq_f32(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32_t vmaxnmv_f32(float32x2_t __p0) {
  float32_t __ret;
  __ret = __builtin_bit_cast(float32_t, __builtin_neon_vmaxnmv_f32(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32_t vmaxnmv_f32(float32x2_t __p0) {
  float32_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32_t, __builtin_neon_vmaxnmv_f32(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8_t vmaxvq_u8(uint8x16_t __p0) {
  uint8_t __ret;
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vmaxvq_u8(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8_t vmaxvq_u8(uint8x16_t __p0) {
  uint8_t __ret;
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vmaxvq_u8(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32_t vmaxvq_u32(uint32x4_t __p0) {
  uint32_t __ret;
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vmaxvq_u32(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32_t vmaxvq_u32(uint32x4_t __p0) {
  uint32_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vmaxvq_u32(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16_t vmaxvq_u16(uint16x8_t __p0) {
  uint16_t __ret;
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vmaxvq_u16(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16_t vmaxvq_u16(uint16x8_t __p0) {
  uint16_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vmaxvq_u16(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8_t vmaxvq_s8(int8x16_t __p0) {
  int8_t __ret;
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vmaxvq_s8(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8_t vmaxvq_s8(int8x16_t __p0) {
  int8_t __ret;
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vmaxvq_s8(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64_t vmaxvq_f64(float64x2_t __p0) {
  float64_t __ret;
  __ret = __builtin_bit_cast(float64_t, __builtin_neon_vmaxvq_f64(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float64_t vmaxvq_f64(float64x2_t __p0) {
  float64_t __ret;
  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(float64_t, __builtin_neon_vmaxvq_f64(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32_t vmaxvq_f32(float32x4_t __p0) {
  float32_t __ret;
  __ret = __builtin_bit_cast(float32_t, __builtin_neon_vmaxvq_f32(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32_t vmaxvq_f32(float32x4_t __p0) {
  float32_t __ret;
  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(float32_t, __builtin_neon_vmaxvq_f32(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32_t vmaxvq_s32(int32x4_t __p0) {
  int32_t __ret;
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vmaxvq_s32(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32_t vmaxvq_s32(int32x4_t __p0) {
  int32_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vmaxvq_s32(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16_t vmaxvq_s16(int16x8_t __p0) {
  int16_t __ret;
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vmaxvq_s16(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16_t vmaxvq_s16(int16x8_t __p0) {
  int16_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vmaxvq_s16(__rev0));
  return __ret;
}
#endif

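/* Illustrative sketch (assumption: not part of the generated header): the
 * q-suffixed reductions above take a full 128-bit vector, so a common use is
 * collapsing one loaded block to a scalar. vld1q_u8 is assumed to be provided
 * earlier in this header; the helper name is hypothetical. */
__ai __attribute__((target("neon"))) uint8_t __example_max_byte_of_block(const uint8_t *__p) {
  /* Load 16 bytes and return the largest one via vmaxvq_u8 (defined above). */
  uint8x16_t __block = vld1q_u8(__p);
  return vmaxvq_u8(__block);
}
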
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8_t vmaxv_u8(uint8x8_t __p0) {
  uint8_t __ret;
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vmaxv_u8(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8_t vmaxv_u8(uint8x8_t __p0) {
  uint8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vmaxv_u8(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32_t vmaxv_u32(uint32x2_t __p0) {
  uint32_t __ret;
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vmaxv_u32(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32_t vmaxv_u32(uint32x2_t __p0) {
  uint32_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vmaxv_u32(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16_t vmaxv_u16(uint16x4_t __p0) {
  uint16_t __ret;
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vmaxv_u16(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16_t vmaxv_u16(uint16x4_t __p0) {
  uint16_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vmaxv_u16(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8_t vmaxv_s8(int8x8_t __p0) {
  int8_t __ret;
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vmaxv_s8(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8_t vmaxv_s8(int8x8_t __p0) {
  int8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vmaxv_s8(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32_t vmaxv_f32(float32x2_t __p0) {
  float32_t __ret;
  __ret = __builtin_bit_cast(float32_t, __builtin_neon_vmaxv_f32(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32_t vmaxv_f32(float32x2_t __p0) {
  float32_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(float32_t, __builtin_neon_vmaxv_f32(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32_t vmaxv_s32(int32x2_t __p0) {
  int32_t __ret;
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vmaxv_s32(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32_t vmaxv_s32(int32x2_t __p0) {
  int32_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vmaxv_s32(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16_t vmaxv_s16(int16x4_t __p0) {
  int16_t __ret;
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vmaxv_s16(__p0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16_t vmaxv_s16(int16x4_t __p0) {
  int16_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vmaxv_s16(__rev0));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
  float64x2_t __ret;
  __ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
  float64x2_t __ret;
  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
  float64x1_t __ret;
  __ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vmin_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 10));
  return __ret;
}

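/* Illustrative sketch (not upstream content): vminq_f64 composes with
 * vmaxq_f64 (assumed to be defined earlier in this header) into an
 * elementwise clamp. The helper name is hypothetical. */
__ai __attribute__((target("neon"))) float64x2_t __example_clamp_f64(float64x2_t __x, float64x2_t __lo, float64x2_t __hi) {
  /* max(lo, min(x, hi)) clamps each lane of __x into [__lo, __hi]. */
  return vmaxq_f64(__lo, vminq_f64(__x, __hi));
}
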
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64_t vminnmvq_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vminnmvq_f64(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64_t vminnmvq_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vminnmvq_f64(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32_t vminnmvq_f32(float32x4_t __p0) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vminnmvq_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32_t vminnmvq_f32(float32x4_t __p0) {
|
|
float32_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vminnmvq_f32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32_t vminnmv_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vminnmv_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32_t vminnmv_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vminnmv_f32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8_t vminvq_u8(uint8x16_t __p0) {
|
|
uint8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vminvq_u8(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8_t vminvq_u8(uint8x16_t __p0) {
|
|
uint8_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vminvq_u8(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32_t vminvq_u32(uint32x4_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vminvq_u32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32_t vminvq_u32(uint32x4_t __p0) {
|
|
uint32_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vminvq_u32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16_t vminvq_u16(uint16x8_t __p0) {
|
|
uint16_t __ret;
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vminvq_u16(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16_t vminvq_u16(uint16x8_t __p0) {
|
|
uint16_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vminvq_u16(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8_t vminvq_s8(int8x16_t __p0) {
|
|
int8_t __ret;
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vminvq_s8(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8_t vminvq_s8(int8x16_t __p0) {
|
|
int8_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vminvq_s8(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64_t vminvq_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vminvq_f64(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64_t vminvq_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vminvq_f64(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32_t vminvq_f32(float32x4_t __p0) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vminvq_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32_t vminvq_f32(float32x4_t __p0) {
|
|
float32_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vminvq_f32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32_t vminvq_s32(int32x4_t __p0) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vminvq_s32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32_t vminvq_s32(int32x4_t __p0) {
|
|
int32_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vminvq_s32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16_t vminvq_s16(int16x8_t __p0) {
|
|
int16_t __ret;
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vminvq_s16(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16_t vminvq_s16(int16x8_t __p0) {
|
|
int16_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vminvq_s16(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8_t vminv_u8(uint8x8_t __p0) {
|
|
uint8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vminv_u8(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8_t vminv_u8(uint8x8_t __p0) {
|
|
uint8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vminv_u8(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32_t vminv_u32(uint32x2_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vminv_u32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32_t vminv_u32(uint32x2_t __p0) {
|
|
uint32_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vminv_u32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16_t vminv_u16(uint16x4_t __p0) {
|
|
uint16_t __ret;
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vminv_u16(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16_t vminv_u16(uint16x4_t __p0) {
|
|
uint16_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vminv_u16(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8_t vminv_s8(int8x8_t __p0) {
|
|
int8_t __ret;
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vminv_s8(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8_t vminv_s8(int8x8_t __p0) {
|
|
int8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vminv_s8(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32_t vminv_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vminv_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32_t vminv_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vminv_f32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32_t vminv_s32(int32x2_t __p0) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vminv_s32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32_t vminv_s32(int32x2_t __p0) {
|
|
int32_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vminv_s32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16_t vminv_s16(int16x4_t __p0) {
|
|
int16_t __ret;
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vminv_s16(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16_t vminv_s16(int16x4_t __p0) {
|
|
int16_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vminv_s16(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  float64x2_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  float64x2_t __ret;
  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
  __ret = __rev0 + __rev1 * __rev2;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  float64x1_t __ret;
  __ret = __p0 + __p1 * __p2;
  return __ret;
}

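/* Illustrative sketch (not upstream content): vmlaq_f64 lowers to the plain
 * expression __p0 + __p1 * __p2, so whether it becomes a fused FMLA depends on
 * the compiler's floating-point contraction setting; vfmaq_f64 is the
 * explicitly fused alternative. The helper name is hypothetical. */
__ai __attribute__((target("neon"))) float64x2_t __example_axpy_f64(float64x2_t __acc, float64x2_t __a, float64x2_t __x) {
  /* __acc + __a * __x, one step of an unrolled multiply-accumulate loop. */
  return vmlaq_f64(__acc, __a, __x);
}
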
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaq_laneq_u32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \
|
|
uint32x4_t __ret_464; \
|
|
uint32x4_t __s0_464 = __p0_464; \
|
|
uint32x4_t __s1_464 = __p1_464; \
|
|
uint32x4_t __s2_464 = __p2_464; \
|
|
__ret_464 = __s0_464 + __s1_464 * splatq_laneq_u32(__s2_464, __p3_464); \
|
|
__ret_464; \
|
|
})
|
|
#else
|
|
#define vmlaq_laneq_u32(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \
|
|
uint32x4_t __ret_465; \
|
|
uint32x4_t __s0_465 = __p0_465; \
|
|
uint32x4_t __s1_465 = __p1_465; \
|
|
uint32x4_t __s2_465 = __p2_465; \
|
|
uint32x4_t __rev0_465; __rev0_465 = __builtin_shufflevector(__s0_465, __s0_465, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1_465; __rev1_465 = __builtin_shufflevector(__s1_465, __s1_465, __lane_reverse_128_32); \
|
|
uint32x4_t __rev2_465; __rev2_465 = __builtin_shufflevector(__s2_465, __s2_465, __lane_reverse_128_32); \
|
|
__ret_465 = __rev0_465 + __rev1_465 * __noswap_splatq_laneq_u32(__rev2_465, __p3_465); \
|
|
__ret_465 = __builtin_shufflevector(__ret_465, __ret_465, __lane_reverse_128_32); \
|
|
__ret_465; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaq_laneq_u16(__p0_466, __p1_466, __p2_466, __p3_466) __extension__ ({ \
|
|
uint16x8_t __ret_466; \
|
|
uint16x8_t __s0_466 = __p0_466; \
|
|
uint16x8_t __s1_466 = __p1_466; \
|
|
uint16x8_t __s2_466 = __p2_466; \
|
|
__ret_466 = __s0_466 + __s1_466 * splatq_laneq_u16(__s2_466, __p3_466); \
|
|
__ret_466; \
|
|
})
|
|
#else
|
|
#define vmlaq_laneq_u16(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \
|
|
uint16x8_t __ret_467; \
|
|
uint16x8_t __s0_467 = __p0_467; \
|
|
uint16x8_t __s1_467 = __p1_467; \
|
|
uint16x8_t __s2_467 = __p2_467; \
|
|
uint16x8_t __rev0_467; __rev0_467 = __builtin_shufflevector(__s0_467, __s0_467, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1_467; __rev1_467 = __builtin_shufflevector(__s1_467, __s1_467, __lane_reverse_128_16); \
|
|
uint16x8_t __rev2_467; __rev2_467 = __builtin_shufflevector(__s2_467, __s2_467, __lane_reverse_128_16); \
|
|
__ret_467 = __rev0_467 + __rev1_467 * __noswap_splatq_laneq_u16(__rev2_467, __p3_467); \
|
|
__ret_467 = __builtin_shufflevector(__ret_467, __ret_467, __lane_reverse_128_16); \
|
|
__ret_467; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaq_laneq_f32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \
|
|
float32x4_t __ret_468; \
|
|
float32x4_t __s0_468 = __p0_468; \
|
|
float32x4_t __s1_468 = __p1_468; \
|
|
float32x4_t __s2_468 = __p2_468; \
|
|
__ret_468 = __s0_468 + __s1_468 * splatq_laneq_f32(__s2_468, __p3_468); \
|
|
__ret_468; \
|
|
})
|
|
#else
|
|
#define vmlaq_laneq_f32(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \
|
|
float32x4_t __ret_469; \
|
|
float32x4_t __s0_469 = __p0_469; \
|
|
float32x4_t __s1_469 = __p1_469; \
|
|
float32x4_t __s2_469 = __p2_469; \
|
|
float32x4_t __rev0_469; __rev0_469 = __builtin_shufflevector(__s0_469, __s0_469, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_469; __rev1_469 = __builtin_shufflevector(__s1_469, __s1_469, __lane_reverse_128_32); \
|
|
float32x4_t __rev2_469; __rev2_469 = __builtin_shufflevector(__s2_469, __s2_469, __lane_reverse_128_32); \
|
|
__ret_469 = __rev0_469 + __rev1_469 * __noswap_splatq_laneq_f32(__rev2_469, __p3_469); \
|
|
__ret_469 = __builtin_shufflevector(__ret_469, __ret_469, __lane_reverse_128_32); \
|
|
__ret_469; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaq_laneq_s32(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \
|
|
int32x4_t __ret_470; \
|
|
int32x4_t __s0_470 = __p0_470; \
|
|
int32x4_t __s1_470 = __p1_470; \
|
|
int32x4_t __s2_470 = __p2_470; \
|
|
__ret_470 = __s0_470 + __s1_470 * splatq_laneq_s32(__s2_470, __p3_470); \
|
|
__ret_470; \
|
|
})
|
|
#else
|
|
#define vmlaq_laneq_s32(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \
|
|
int32x4_t __ret_471; \
|
|
int32x4_t __s0_471 = __p0_471; \
|
|
int32x4_t __s1_471 = __p1_471; \
|
|
int32x4_t __s2_471 = __p2_471; \
|
|
int32x4_t __rev0_471; __rev0_471 = __builtin_shufflevector(__s0_471, __s0_471, __lane_reverse_128_32); \
|
|
int32x4_t __rev1_471; __rev1_471 = __builtin_shufflevector(__s1_471, __s1_471, __lane_reverse_128_32); \
|
|
int32x4_t __rev2_471; __rev2_471 = __builtin_shufflevector(__s2_471, __s2_471, __lane_reverse_128_32); \
|
|
__ret_471 = __rev0_471 + __rev1_471 * __noswap_splatq_laneq_s32(__rev2_471, __p3_471); \
|
|
__ret_471 = __builtin_shufflevector(__ret_471, __ret_471, __lane_reverse_128_32); \
|
|
__ret_471; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlaq_laneq_s16(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \
|
|
int16x8_t __ret_472; \
|
|
int16x8_t __s0_472 = __p0_472; \
|
|
int16x8_t __s1_472 = __p1_472; \
|
|
int16x8_t __s2_472 = __p2_472; \
|
|
__ret_472 = __s0_472 + __s1_472 * splatq_laneq_s16(__s2_472, __p3_472); \
|
|
__ret_472; \
|
|
})
|
|
#else
|
|
#define vmlaq_laneq_s16(__p0_473, __p1_473, __p2_473, __p3_473) __extension__ ({ \
|
|
int16x8_t __ret_473; \
|
|
int16x8_t __s0_473 = __p0_473; \
|
|
int16x8_t __s1_473 = __p1_473; \
|
|
int16x8_t __s2_473 = __p2_473; \
|
|
int16x8_t __rev0_473; __rev0_473 = __builtin_shufflevector(__s0_473, __s0_473, __lane_reverse_128_16); \
|
|
int16x8_t __rev1_473; __rev1_473 = __builtin_shufflevector(__s1_473, __s1_473, __lane_reverse_128_16); \
|
|
int16x8_t __rev2_473; __rev2_473 = __builtin_shufflevector(__s2_473, __s2_473, __lane_reverse_128_16); \
|
|
__ret_473 = __rev0_473 + __rev1_473 * __noswap_splatq_laneq_s16(__rev2_473, __p3_473); \
|
|
__ret_473 = __builtin_shufflevector(__ret_473, __ret_473, __lane_reverse_128_16); \
|
|
__ret_473; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmla_laneq_u32(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \
|
|
uint32x2_t __ret_474; \
|
|
uint32x2_t __s0_474 = __p0_474; \
|
|
uint32x2_t __s1_474 = __p1_474; \
|
|
uint32x4_t __s2_474 = __p2_474; \
|
|
__ret_474 = __s0_474 + __s1_474 * splat_laneq_u32(__s2_474, __p3_474); \
|
|
__ret_474; \
|
|
})
|
|
#else
|
|
#define vmla_laneq_u32(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \
|
|
uint32x2_t __ret_475; \
|
|
uint32x2_t __s0_475 = __p0_475; \
|
|
uint32x2_t __s1_475 = __p1_475; \
|
|
uint32x4_t __s2_475 = __p2_475; \
|
|
uint32x2_t __rev0_475; __rev0_475 = __builtin_shufflevector(__s0_475, __s0_475, __lane_reverse_64_32); \
|
|
uint32x2_t __rev1_475; __rev1_475 = __builtin_shufflevector(__s1_475, __s1_475, __lane_reverse_64_32); \
|
|
uint32x4_t __rev2_475; __rev2_475 = __builtin_shufflevector(__s2_475, __s2_475, __lane_reverse_128_32); \
|
|
__ret_475 = __rev0_475 + __rev1_475 * __noswap_splat_laneq_u32(__rev2_475, __p3_475); \
|
|
__ret_475 = __builtin_shufflevector(__ret_475, __ret_475, __lane_reverse_64_32); \
|
|
__ret_475; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmla_laneq_u16(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \
|
|
uint16x4_t __ret_476; \
|
|
uint16x4_t __s0_476 = __p0_476; \
|
|
uint16x4_t __s1_476 = __p1_476; \
|
|
uint16x8_t __s2_476 = __p2_476; \
|
|
__ret_476 = __s0_476 + __s1_476 * splat_laneq_u16(__s2_476, __p3_476); \
|
|
__ret_476; \
|
|
})
|
|
#else
|
|
#define vmla_laneq_u16(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \
|
|
uint16x4_t __ret_477; \
|
|
uint16x4_t __s0_477 = __p0_477; \
|
|
uint16x4_t __s1_477 = __p1_477; \
|
|
uint16x8_t __s2_477 = __p2_477; \
|
|
uint16x4_t __rev0_477; __rev0_477 = __builtin_shufflevector(__s0_477, __s0_477, __lane_reverse_64_16); \
|
|
uint16x4_t __rev1_477; __rev1_477 = __builtin_shufflevector(__s1_477, __s1_477, __lane_reverse_64_16); \
|
|
uint16x8_t __rev2_477; __rev2_477 = __builtin_shufflevector(__s2_477, __s2_477, __lane_reverse_128_16); \
|
|
__ret_477 = __rev0_477 + __rev1_477 * __noswap_splat_laneq_u16(__rev2_477, __p3_477); \
|
|
__ret_477 = __builtin_shufflevector(__ret_477, __ret_477, __lane_reverse_64_16); \
|
|
__ret_477; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmla_laneq_f32(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \
|
|
float32x2_t __ret_478; \
|
|
float32x2_t __s0_478 = __p0_478; \
|
|
float32x2_t __s1_478 = __p1_478; \
|
|
float32x4_t __s2_478 = __p2_478; \
|
|
__ret_478 = __s0_478 + __s1_478 * splat_laneq_f32(__s2_478, __p3_478); \
|
|
__ret_478; \
|
|
})
|
|
#else
|
|
#define vmla_laneq_f32(__p0_479, __p1_479, __p2_479, __p3_479) __extension__ ({ \
|
|
float32x2_t __ret_479; \
|
|
float32x2_t __s0_479 = __p0_479; \
|
|
float32x2_t __s1_479 = __p1_479; \
|
|
float32x4_t __s2_479 = __p2_479; \
|
|
float32x2_t __rev0_479; __rev0_479 = __builtin_shufflevector(__s0_479, __s0_479, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_479; __rev1_479 = __builtin_shufflevector(__s1_479, __s1_479, __lane_reverse_64_32); \
|
|
float32x4_t __rev2_479; __rev2_479 = __builtin_shufflevector(__s2_479, __s2_479, __lane_reverse_128_32); \
|
|
__ret_479 = __rev0_479 + __rev1_479 * __noswap_splat_laneq_f32(__rev2_479, __p3_479); \
|
|
__ret_479 = __builtin_shufflevector(__ret_479, __ret_479, __lane_reverse_64_32); \
|
|
__ret_479; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmla_laneq_s32(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \
|
|
int32x2_t __ret_480; \
|
|
int32x2_t __s0_480 = __p0_480; \
|
|
int32x2_t __s1_480 = __p1_480; \
|
|
int32x4_t __s2_480 = __p2_480; \
|
|
__ret_480 = __s0_480 + __s1_480 * splat_laneq_s32(__s2_480, __p3_480); \
|
|
__ret_480; \
|
|
})
|
|
#else
|
|
#define vmla_laneq_s32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \
|
|
int32x2_t __ret_481; \
|
|
int32x2_t __s0_481 = __p0_481; \
|
|
int32x2_t __s1_481 = __p1_481; \
|
|
int32x4_t __s2_481 = __p2_481; \
|
|
int32x2_t __rev0_481; __rev0_481 = __builtin_shufflevector(__s0_481, __s0_481, __lane_reverse_64_32); \
|
|
int32x2_t __rev1_481; __rev1_481 = __builtin_shufflevector(__s1_481, __s1_481, __lane_reverse_64_32); \
|
|
int32x4_t __rev2_481; __rev2_481 = __builtin_shufflevector(__s2_481, __s2_481, __lane_reverse_128_32); \
|
|
__ret_481 = __rev0_481 + __rev1_481 * __noswap_splat_laneq_s32(__rev2_481, __p3_481); \
|
|
__ret_481 = __builtin_shufflevector(__ret_481, __ret_481, __lane_reverse_64_32); \
|
|
__ret_481; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmla_laneq_s16(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \
|
|
int16x4_t __ret_482; \
|
|
int16x4_t __s0_482 = __p0_482; \
|
|
int16x4_t __s1_482 = __p1_482; \
|
|
int16x8_t __s2_482 = __p2_482; \
|
|
__ret_482 = __s0_482 + __s1_482 * splat_laneq_s16(__s2_482, __p3_482); \
|
|
__ret_482; \
|
|
})
|
|
#else
|
|
#define vmla_laneq_s16(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \
|
|
int16x4_t __ret_483; \
|
|
int16x4_t __s0_483 = __p0_483; \
|
|
int16x4_t __s1_483 = __p1_483; \
|
|
int16x8_t __s2_483 = __p2_483; \
|
|
int16x4_t __rev0_483; __rev0_483 = __builtin_shufflevector(__s0_483, __s0_483, __lane_reverse_64_16); \
|
|
int16x4_t __rev1_483; __rev1_483 = __builtin_shufflevector(__s1_483, __s1_483, __lane_reverse_64_16); \
|
|
int16x8_t __rev2_483; __rev2_483 = __builtin_shufflevector(__s2_483, __s2_483, __lane_reverse_128_16); \
|
|
__ret_483 = __rev0_483 + __rev1_483 * __noswap_splat_laneq_s16(__rev2_483, __p3_483); \
|
|
__ret_483 = __builtin_shufflevector(__ret_483, __ret_483, __lane_reverse_64_16); \
|
|
__ret_483; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_high_lane_u32(__p0_484, __p1_484, __p2_484, __p3_484) __extension__ ({ \
|
|
uint64x2_t __ret_484; \
|
|
uint64x2_t __s0_484 = __p0_484; \
|
|
uint32x4_t __s1_484 = __p1_484; \
|
|
uint32x2_t __s2_484 = __p2_484; \
|
|
__ret_484 = __s0_484 + vmull_u32(vget_high_u32(__s1_484), splat_lane_u32(__s2_484, __p3_484)); \
|
|
__ret_484; \
|
|
})
|
|
#else
|
|
#define vmlal_high_lane_u32(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \
|
|
uint64x2_t __ret_485; \
|
|
uint64x2_t __s0_485 = __p0_485; \
|
|
uint32x4_t __s1_485 = __p1_485; \
|
|
uint32x2_t __s2_485 = __p2_485; \
|
|
uint64x2_t __rev0_485; __rev0_485 = __builtin_shufflevector(__s0_485, __s0_485, __lane_reverse_128_64); \
|
|
uint32x4_t __rev1_485; __rev1_485 = __builtin_shufflevector(__s1_485, __s1_485, __lane_reverse_128_32); \
|
|
uint32x2_t __rev2_485; __rev2_485 = __builtin_shufflevector(__s2_485, __s2_485, __lane_reverse_64_32); \
|
|
__ret_485 = __rev0_485 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_485), __noswap_splat_lane_u32(__rev2_485, __p3_485)); \
|
|
__ret_485 = __builtin_shufflevector(__ret_485, __ret_485, __lane_reverse_128_64); \
|
|
__ret_485; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_high_lane_u16(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \
|
|
uint32x4_t __ret_486; \
|
|
uint32x4_t __s0_486 = __p0_486; \
|
|
uint16x8_t __s1_486 = __p1_486; \
|
|
uint16x4_t __s2_486 = __p2_486; \
|
|
__ret_486 = __s0_486 + vmull_u16(vget_high_u16(__s1_486), splat_lane_u16(__s2_486, __p3_486)); \
|
|
__ret_486; \
|
|
})
|
|
#else
|
|
#define vmlal_high_lane_u16(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \
|
|
uint32x4_t __ret_487; \
|
|
uint32x4_t __s0_487 = __p0_487; \
|
|
uint16x8_t __s1_487 = __p1_487; \
|
|
uint16x4_t __s2_487 = __p2_487; \
|
|
uint32x4_t __rev0_487; __rev0_487 = __builtin_shufflevector(__s0_487, __s0_487, __lane_reverse_128_32); \
|
|
uint16x8_t __rev1_487; __rev1_487 = __builtin_shufflevector(__s1_487, __s1_487, __lane_reverse_128_16); \
|
|
uint16x4_t __rev2_487; __rev2_487 = __builtin_shufflevector(__s2_487, __s2_487, __lane_reverse_64_16); \
|
|
__ret_487 = __rev0_487 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_487), __noswap_splat_lane_u16(__rev2_487, __p3_487)); \
|
|
__ret_487 = __builtin_shufflevector(__ret_487, __ret_487, __lane_reverse_128_32); \
|
|
__ret_487; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_high_lane_s32(__p0_488, __p1_488, __p2_488, __p3_488) __extension__ ({ \
|
|
int64x2_t __ret_488; \
|
|
int64x2_t __s0_488 = __p0_488; \
|
|
int32x4_t __s1_488 = __p1_488; \
|
|
int32x2_t __s2_488 = __p2_488; \
|
|
__ret_488 = __s0_488 + vmull_s32(vget_high_s32(__s1_488), splat_lane_s32(__s2_488, __p3_488)); \
|
|
__ret_488; \
|
|
})
|
|
#else
|
|
#define vmlal_high_lane_s32(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \
|
|
int64x2_t __ret_489; \
|
|
int64x2_t __s0_489 = __p0_489; \
|
|
int32x4_t __s1_489 = __p1_489; \
|
|
int32x2_t __s2_489 = __p2_489; \
|
|
int64x2_t __rev0_489; __rev0_489 = __builtin_shufflevector(__s0_489, __s0_489, __lane_reverse_128_64); \
|
|
int32x4_t __rev1_489; __rev1_489 = __builtin_shufflevector(__s1_489, __s1_489, __lane_reverse_128_32); \
|
|
int32x2_t __rev2_489; __rev2_489 = __builtin_shufflevector(__s2_489, __s2_489, __lane_reverse_64_32); \
|
|
__ret_489 = __rev0_489 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_489), __noswap_splat_lane_s32(__rev2_489, __p3_489)); \
|
|
__ret_489 = __builtin_shufflevector(__ret_489, __ret_489, __lane_reverse_128_64); \
|
|
__ret_489; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_high_lane_s16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \
|
|
int32x4_t __ret_490; \
|
|
int32x4_t __s0_490 = __p0_490; \
|
|
int16x8_t __s1_490 = __p1_490; \
|
|
int16x4_t __s2_490 = __p2_490; \
|
|
__ret_490 = __s0_490 + vmull_s16(vget_high_s16(__s1_490), splat_lane_s16(__s2_490, __p3_490)); \
|
|
__ret_490; \
|
|
})
|
|
#else
|
|
#define vmlal_high_lane_s16(__p0_491, __p1_491, __p2_491, __p3_491) __extension__ ({ \
|
|
int32x4_t __ret_491; \
|
|
int32x4_t __s0_491 = __p0_491; \
|
|
int16x8_t __s1_491 = __p1_491; \
|
|
int16x4_t __s2_491 = __p2_491; \
|
|
int32x4_t __rev0_491; __rev0_491 = __builtin_shufflevector(__s0_491, __s0_491, __lane_reverse_128_32); \
|
|
int16x8_t __rev1_491; __rev1_491 = __builtin_shufflevector(__s1_491, __s1_491, __lane_reverse_128_16); \
|
|
int16x4_t __rev2_491; __rev2_491 = __builtin_shufflevector(__s2_491, __s2_491, __lane_reverse_64_16); \
|
|
__ret_491 = __rev0_491 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_491), __noswap_splat_lane_s16(__rev2_491, __p3_491)); \
|
|
__ret_491 = __builtin_shufflevector(__ret_491, __ret_491, __lane_reverse_128_32); \
|
|
__ret_491; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_high_laneq_u32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \
|
|
uint64x2_t __ret_492; \
|
|
uint64x2_t __s0_492 = __p0_492; \
|
|
uint32x4_t __s1_492 = __p1_492; \
|
|
uint32x4_t __s2_492 = __p2_492; \
|
|
__ret_492 = __s0_492 + vmull_u32(vget_high_u32(__s1_492), splat_laneq_u32(__s2_492, __p3_492)); \
|
|
__ret_492; \
|
|
})
|
|
#else
|
|
#define vmlal_high_laneq_u32(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
|
|
uint64x2_t __ret_493; \
|
|
uint64x2_t __s0_493 = __p0_493; \
|
|
uint32x4_t __s1_493 = __p1_493; \
|
|
uint32x4_t __s2_493 = __p2_493; \
|
|
uint64x2_t __rev0_493; __rev0_493 = __builtin_shufflevector(__s0_493, __s0_493, __lane_reverse_128_64); \
|
|
uint32x4_t __rev1_493; __rev1_493 = __builtin_shufflevector(__s1_493, __s1_493, __lane_reverse_128_32); \
|
|
uint32x4_t __rev2_493; __rev2_493 = __builtin_shufflevector(__s2_493, __s2_493, __lane_reverse_128_32); \
|
|
__ret_493 = __rev0_493 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_493), __noswap_splat_laneq_u32(__rev2_493, __p3_493)); \
|
|
__ret_493 = __builtin_shufflevector(__ret_493, __ret_493, __lane_reverse_128_64); \
|
|
__ret_493; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_high_laneq_u16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
|
|
uint32x4_t __ret_494; \
|
|
uint32x4_t __s0_494 = __p0_494; \
|
|
uint16x8_t __s1_494 = __p1_494; \
|
|
uint16x8_t __s2_494 = __p2_494; \
|
|
__ret_494 = __s0_494 + vmull_u16(vget_high_u16(__s1_494), splat_laneq_u16(__s2_494, __p3_494)); \
|
|
__ret_494; \
|
|
})
|
|
#else
|
|
#define vmlal_high_laneq_u16(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
|
|
uint32x4_t __ret_495; \
|
|
uint32x4_t __s0_495 = __p0_495; \
|
|
uint16x8_t __s1_495 = __p1_495; \
|
|
uint16x8_t __s2_495 = __p2_495; \
|
|
uint32x4_t __rev0_495; __rev0_495 = __builtin_shufflevector(__s0_495, __s0_495, __lane_reverse_128_32); \
|
|
uint16x8_t __rev1_495; __rev1_495 = __builtin_shufflevector(__s1_495, __s1_495, __lane_reverse_128_16); \
|
|
uint16x8_t __rev2_495; __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, __lane_reverse_128_16); \
|
|
__ret_495 = __rev0_495 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_495), __noswap_splat_laneq_u16(__rev2_495, __p3_495)); \
|
|
__ret_495 = __builtin_shufflevector(__ret_495, __ret_495, __lane_reverse_128_32); \
|
|
__ret_495; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_high_laneq_s32(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
|
|
int64x2_t __ret_496; \
|
|
int64x2_t __s0_496 = __p0_496; \
|
|
int32x4_t __s1_496 = __p1_496; \
|
|
int32x4_t __s2_496 = __p2_496; \
|
|
__ret_496 = __s0_496 + vmull_s32(vget_high_s32(__s1_496), splat_laneq_s32(__s2_496, __p3_496)); \
|
|
__ret_496; \
|
|
})
|
|
#else
|
|
#define vmlal_high_laneq_s32(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
|
|
int64x2_t __ret_497; \
|
|
int64x2_t __s0_497 = __p0_497; \
|
|
int32x4_t __s1_497 = __p1_497; \
|
|
int32x4_t __s2_497 = __p2_497; \
|
|
int64x2_t __rev0_497; __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, __lane_reverse_128_64); \
|
|
int32x4_t __rev1_497; __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, __lane_reverse_128_32); \
|
|
int32x4_t __rev2_497; __rev2_497 = __builtin_shufflevector(__s2_497, __s2_497, __lane_reverse_128_32); \
|
|
__ret_497 = __rev0_497 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_497), __noswap_splat_laneq_s32(__rev2_497, __p3_497)); \
|
|
__ret_497 = __builtin_shufflevector(__ret_497, __ret_497, __lane_reverse_128_64); \
|
|
__ret_497; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_high_laneq_s16(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
|
|
int32x4_t __ret_498; \
|
|
int32x4_t __s0_498 = __p0_498; \
|
|
int16x8_t __s1_498 = __p1_498; \
|
|
int16x8_t __s2_498 = __p2_498; \
|
|
__ret_498 = __s0_498 + vmull_s16(vget_high_s16(__s1_498), splat_laneq_s16(__s2_498, __p3_498)); \
|
|
__ret_498; \
|
|
})
|
|
#else
|
|
#define vmlal_high_laneq_s16(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
|
|
int32x4_t __ret_499; \
|
|
int32x4_t __s0_499 = __p0_499; \
|
|
int16x8_t __s1_499 = __p1_499; \
|
|
int16x8_t __s2_499 = __p2_499; \
|
|
int32x4_t __rev0_499; __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, __lane_reverse_128_32); \
|
|
int16x8_t __rev1_499; __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, __lane_reverse_128_16); \
|
|
int16x8_t __rev2_499; __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, __lane_reverse_128_16); \
|
|
__ret_499 = __rev0_499 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_499), __noswap_splat_laneq_s16(__rev2_499, __p3_499)); \
|
|
__ret_499 = __builtin_shufflevector(__ret_499, __ret_499, __lane_reverse_128_32); \
|
|
__ret_499; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_laneq_u32(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
|
|
uint64x2_t __ret_500; \
|
|
uint64x2_t __s0_500 = __p0_500; \
|
|
uint32x2_t __s1_500 = __p1_500; \
|
|
uint32x4_t __s2_500 = __p2_500; \
|
|
__ret_500 = __s0_500 + vmull_u32(__s1_500, splat_laneq_u32(__s2_500, __p3_500)); \
|
|
__ret_500; \
|
|
})
|
|
#else
|
|
#define vmlal_laneq_u32(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
|
|
uint64x2_t __ret_501; \
|
|
uint64x2_t __s0_501 = __p0_501; \
|
|
uint32x2_t __s1_501 = __p1_501; \
|
|
uint32x4_t __s2_501 = __p2_501; \
|
|
uint64x2_t __rev0_501; __rev0_501 = __builtin_shufflevector(__s0_501, __s0_501, __lane_reverse_128_64); \
|
|
uint32x2_t __rev1_501; __rev1_501 = __builtin_shufflevector(__s1_501, __s1_501, __lane_reverse_64_32); \
|
|
uint32x4_t __rev2_501; __rev2_501 = __builtin_shufflevector(__s2_501, __s2_501, __lane_reverse_128_32); \
|
|
__ret_501 = __rev0_501 + __noswap_vmull_u32(__rev1_501, __noswap_splat_laneq_u32(__rev2_501, __p3_501)); \
|
|
__ret_501 = __builtin_shufflevector(__ret_501, __ret_501, __lane_reverse_128_64); \
|
|
__ret_501; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_laneq_u16(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
|
|
uint32x4_t __ret_502; \
|
|
uint32x4_t __s0_502 = __p0_502; \
|
|
uint16x4_t __s1_502 = __p1_502; \
|
|
uint16x8_t __s2_502 = __p2_502; \
|
|
__ret_502 = __s0_502 + vmull_u16(__s1_502, splat_laneq_u16(__s2_502, __p3_502)); \
|
|
__ret_502; \
|
|
})
|
|
#else
|
|
#define vmlal_laneq_u16(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
|
|
uint32x4_t __ret_503; \
|
|
uint32x4_t __s0_503 = __p0_503; \
|
|
uint16x4_t __s1_503 = __p1_503; \
|
|
uint16x8_t __s2_503 = __p2_503; \
|
|
uint32x4_t __rev0_503; __rev0_503 = __builtin_shufflevector(__s0_503, __s0_503, __lane_reverse_128_32); \
|
|
uint16x4_t __rev1_503; __rev1_503 = __builtin_shufflevector(__s1_503, __s1_503, __lane_reverse_64_16); \
|
|
uint16x8_t __rev2_503; __rev2_503 = __builtin_shufflevector(__s2_503, __s2_503, __lane_reverse_128_16); \
|
|
__ret_503 = __rev0_503 + __noswap_vmull_u16(__rev1_503, __noswap_splat_laneq_u16(__rev2_503, __p3_503)); \
|
|
__ret_503 = __builtin_shufflevector(__ret_503, __ret_503, __lane_reverse_128_32); \
|
|
__ret_503; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_laneq_s32(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
|
|
int64x2_t __ret_504; \
|
|
int64x2_t __s0_504 = __p0_504; \
|
|
int32x2_t __s1_504 = __p1_504; \
|
|
int32x4_t __s2_504 = __p2_504; \
|
|
__ret_504 = __s0_504 + vmull_s32(__s1_504, splat_laneq_s32(__s2_504, __p3_504)); \
|
|
__ret_504; \
|
|
})
|
|
#else
|
|
#define vmlal_laneq_s32(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
|
|
int64x2_t __ret_505; \
|
|
int64x2_t __s0_505 = __p0_505; \
|
|
int32x2_t __s1_505 = __p1_505; \
|
|
int32x4_t __s2_505 = __p2_505; \
|
|
int64x2_t __rev0_505; __rev0_505 = __builtin_shufflevector(__s0_505, __s0_505, __lane_reverse_128_64); \
|
|
int32x2_t __rev1_505; __rev1_505 = __builtin_shufflevector(__s1_505, __s1_505, __lane_reverse_64_32); \
|
|
int32x4_t __rev2_505; __rev2_505 = __builtin_shufflevector(__s2_505, __s2_505, __lane_reverse_128_32); \
|
|
__ret_505 = __rev0_505 + __noswap_vmull_s32(__rev1_505, __noswap_splat_laneq_s32(__rev2_505, __p3_505)); \
|
|
__ret_505 = __builtin_shufflevector(__ret_505, __ret_505, __lane_reverse_128_64); \
|
|
__ret_505; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_laneq_s16(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
|
|
int32x4_t __ret_506; \
|
|
int32x4_t __s0_506 = __p0_506; \
|
|
int16x4_t __s1_506 = __p1_506; \
|
|
int16x8_t __s2_506 = __p2_506; \
|
|
__ret_506 = __s0_506 + vmull_s16(__s1_506, splat_laneq_s16(__s2_506, __p3_506)); \
|
|
__ret_506; \
|
|
})
|
|
#else
|
|
#define vmlal_laneq_s16(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
|
|
int32x4_t __ret_507; \
|
|
int32x4_t __s0_507 = __p0_507; \
|
|
int16x4_t __s1_507 = __p1_507; \
|
|
int16x8_t __s2_507 = __p2_507; \
|
|
int32x4_t __rev0_507; __rev0_507 = __builtin_shufflevector(__s0_507, __s0_507, __lane_reverse_128_32); \
|
|
int16x4_t __rev1_507; __rev1_507 = __builtin_shufflevector(__s1_507, __s1_507, __lane_reverse_64_16); \
|
|
int16x8_t __rev2_507; __rev2_507 = __builtin_shufflevector(__s2_507, __s2_507, __lane_reverse_128_16); \
|
|
__ret_507 = __rev0_507 + __noswap_vmull_s16(__rev1_507, __noswap_splat_laneq_s16(__rev2_507, __p3_507)); \
|
|
__ret_507 = __builtin_shufflevector(__ret_507, __ret_507, __lane_reverse_128_32); \
|
|
__ret_507; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __rev0 - __rev1 * __rev2;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
|
|
float64x1_t __ret;
|
|
__ret = __p0 - __p1 * __p2;
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsq_laneq_u32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
|
|
uint32x4_t __ret_508; \
|
|
uint32x4_t __s0_508 = __p0_508; \
|
|
uint32x4_t __s1_508 = __p1_508; \
|
|
uint32x4_t __s2_508 = __p2_508; \
|
|
__ret_508 = __s0_508 - __s1_508 * splatq_laneq_u32(__s2_508, __p3_508); \
|
|
__ret_508; \
|
|
})
|
|
#else
|
|
#define vmlsq_laneq_u32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
|
|
uint32x4_t __ret_509; \
|
|
uint32x4_t __s0_509 = __p0_509; \
|
|
uint32x4_t __s1_509 = __p1_509; \
|
|
uint32x4_t __s2_509 = __p2_509; \
|
|
uint32x4_t __rev0_509; __rev0_509 = __builtin_shufflevector(__s0_509, __s0_509, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1_509; __rev1_509 = __builtin_shufflevector(__s1_509, __s1_509, __lane_reverse_128_32); \
|
|
uint32x4_t __rev2_509; __rev2_509 = __builtin_shufflevector(__s2_509, __s2_509, __lane_reverse_128_32); \
|
|
__ret_509 = __rev0_509 - __rev1_509 * __noswap_splatq_laneq_u32(__rev2_509, __p3_509); \
|
|
__ret_509 = __builtin_shufflevector(__ret_509, __ret_509, __lane_reverse_128_32); \
|
|
__ret_509; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsq_laneq_u16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
|
|
uint16x8_t __ret_510; \
|
|
uint16x8_t __s0_510 = __p0_510; \
|
|
uint16x8_t __s1_510 = __p1_510; \
|
|
uint16x8_t __s2_510 = __p2_510; \
|
|
__ret_510 = __s0_510 - __s1_510 * splatq_laneq_u16(__s2_510, __p3_510); \
|
|
__ret_510; \
|
|
})
|
|
#else
|
|
#define vmlsq_laneq_u16(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \
|
|
uint16x8_t __ret_511; \
|
|
uint16x8_t __s0_511 = __p0_511; \
|
|
uint16x8_t __s1_511 = __p1_511; \
|
|
uint16x8_t __s2_511 = __p2_511; \
|
|
uint16x8_t __rev0_511; __rev0_511 = __builtin_shufflevector(__s0_511, __s0_511, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1_511; __rev1_511 = __builtin_shufflevector(__s1_511, __s1_511, __lane_reverse_128_16); \
|
|
uint16x8_t __rev2_511; __rev2_511 = __builtin_shufflevector(__s2_511, __s2_511, __lane_reverse_128_16); \
|
|
__ret_511 = __rev0_511 - __rev1_511 * __noswap_splatq_laneq_u16(__rev2_511, __p3_511); \
|
|
__ret_511 = __builtin_shufflevector(__ret_511, __ret_511, __lane_reverse_128_16); \
|
|
__ret_511; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsq_laneq_f32(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \
|
|
float32x4_t __ret_512; \
|
|
float32x4_t __s0_512 = __p0_512; \
|
|
float32x4_t __s1_512 = __p1_512; \
|
|
float32x4_t __s2_512 = __p2_512; \
|
|
__ret_512 = __s0_512 - __s1_512 * splatq_laneq_f32(__s2_512, __p3_512); \
|
|
__ret_512; \
|
|
})
|
|
#else
|
|
#define vmlsq_laneq_f32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \
|
|
float32x4_t __ret_513; \
|
|
float32x4_t __s0_513 = __p0_513; \
|
|
float32x4_t __s1_513 = __p1_513; \
|
|
float32x4_t __s2_513 = __p2_513; \
|
|
float32x4_t __rev0_513; __rev0_513 = __builtin_shufflevector(__s0_513, __s0_513, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_513; __rev1_513 = __builtin_shufflevector(__s1_513, __s1_513, __lane_reverse_128_32); \
|
|
float32x4_t __rev2_513; __rev2_513 = __builtin_shufflevector(__s2_513, __s2_513, __lane_reverse_128_32); \
|
|
__ret_513 = __rev0_513 - __rev1_513 * __noswap_splatq_laneq_f32(__rev2_513, __p3_513); \
|
|
__ret_513 = __builtin_shufflevector(__ret_513, __ret_513, __lane_reverse_128_32); \
|
|
__ret_513; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsq_laneq_s32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \
|
|
int32x4_t __ret_514; \
|
|
int32x4_t __s0_514 = __p0_514; \
|
|
int32x4_t __s1_514 = __p1_514; \
|
|
int32x4_t __s2_514 = __p2_514; \
|
|
__ret_514 = __s0_514 - __s1_514 * splatq_laneq_s32(__s2_514, __p3_514); \
|
|
__ret_514; \
|
|
})
|
|
#else
|
|
#define vmlsq_laneq_s32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \
|
|
int32x4_t __ret_515; \
|
|
int32x4_t __s0_515 = __p0_515; \
|
|
int32x4_t __s1_515 = __p1_515; \
|
|
int32x4_t __s2_515 = __p2_515; \
|
|
int32x4_t __rev0_515; __rev0_515 = __builtin_shufflevector(__s0_515, __s0_515, __lane_reverse_128_32); \
|
|
int32x4_t __rev1_515; __rev1_515 = __builtin_shufflevector(__s1_515, __s1_515, __lane_reverse_128_32); \
|
|
int32x4_t __rev2_515; __rev2_515 = __builtin_shufflevector(__s2_515, __s2_515, __lane_reverse_128_32); \
|
|
__ret_515 = __rev0_515 - __rev1_515 * __noswap_splatq_laneq_s32(__rev2_515, __p3_515); \
|
|
__ret_515 = __builtin_shufflevector(__ret_515, __ret_515, __lane_reverse_128_32); \
|
|
__ret_515; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsq_laneq_s16(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \
|
|
int16x8_t __ret_516; \
|
|
int16x8_t __s0_516 = __p0_516; \
|
|
int16x8_t __s1_516 = __p1_516; \
|
|
int16x8_t __s2_516 = __p2_516; \
|
|
__ret_516 = __s0_516 - __s1_516 * splatq_laneq_s16(__s2_516, __p3_516); \
|
|
__ret_516; \
|
|
})
|
|
#else
|
|
#define vmlsq_laneq_s16(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \
|
|
int16x8_t __ret_517; \
|
|
int16x8_t __s0_517 = __p0_517; \
|
|
int16x8_t __s1_517 = __p1_517; \
|
|
int16x8_t __s2_517 = __p2_517; \
|
|
int16x8_t __rev0_517; __rev0_517 = __builtin_shufflevector(__s0_517, __s0_517, __lane_reverse_128_16); \
|
|
int16x8_t __rev1_517; __rev1_517 = __builtin_shufflevector(__s1_517, __s1_517, __lane_reverse_128_16); \
|
|
int16x8_t __rev2_517; __rev2_517 = __builtin_shufflevector(__s2_517, __s2_517, __lane_reverse_128_16); \
|
|
__ret_517 = __rev0_517 - __rev1_517 * __noswap_splatq_laneq_s16(__rev2_517, __p3_517); \
|
|
__ret_517 = __builtin_shufflevector(__ret_517, __ret_517, __lane_reverse_128_16); \
|
|
__ret_517; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmls_laneq_u32(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \
|
|
uint32x2_t __ret_518; \
|
|
uint32x2_t __s0_518 = __p0_518; \
|
|
uint32x2_t __s1_518 = __p1_518; \
|
|
uint32x4_t __s2_518 = __p2_518; \
|
|
__ret_518 = __s0_518 - __s1_518 * splat_laneq_u32(__s2_518, __p3_518); \
|
|
__ret_518; \
|
|
})
|
|
#else
|
|
#define vmls_laneq_u32(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \
|
|
uint32x2_t __ret_519; \
|
|
uint32x2_t __s0_519 = __p0_519; \
|
|
uint32x2_t __s1_519 = __p1_519; \
|
|
uint32x4_t __s2_519 = __p2_519; \
|
|
uint32x2_t __rev0_519; __rev0_519 = __builtin_shufflevector(__s0_519, __s0_519, __lane_reverse_64_32); \
|
|
uint32x2_t __rev1_519; __rev1_519 = __builtin_shufflevector(__s1_519, __s1_519, __lane_reverse_64_32); \
|
|
uint32x4_t __rev2_519; __rev2_519 = __builtin_shufflevector(__s2_519, __s2_519, __lane_reverse_128_32); \
|
|
__ret_519 = __rev0_519 - __rev1_519 * __noswap_splat_laneq_u32(__rev2_519, __p3_519); \
|
|
__ret_519 = __builtin_shufflevector(__ret_519, __ret_519, __lane_reverse_64_32); \
|
|
__ret_519; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmls_laneq_u16(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \
|
|
uint16x4_t __ret_520; \
|
|
uint16x4_t __s0_520 = __p0_520; \
|
|
uint16x4_t __s1_520 = __p1_520; \
|
|
uint16x8_t __s2_520 = __p2_520; \
|
|
__ret_520 = __s0_520 - __s1_520 * splat_laneq_u16(__s2_520, __p3_520); \
|
|
__ret_520; \
|
|
})
|
|
#else
|
|
#define vmls_laneq_u16(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \
|
|
uint16x4_t __ret_521; \
|
|
uint16x4_t __s0_521 = __p0_521; \
|
|
uint16x4_t __s1_521 = __p1_521; \
|
|
uint16x8_t __s2_521 = __p2_521; \
|
|
uint16x4_t __rev0_521; __rev0_521 = __builtin_shufflevector(__s0_521, __s0_521, __lane_reverse_64_16); \
|
|
uint16x4_t __rev1_521; __rev1_521 = __builtin_shufflevector(__s1_521, __s1_521, __lane_reverse_64_16); \
|
|
uint16x8_t __rev2_521; __rev2_521 = __builtin_shufflevector(__s2_521, __s2_521, __lane_reverse_128_16); \
|
|
__ret_521 = __rev0_521 - __rev1_521 * __noswap_splat_laneq_u16(__rev2_521, __p3_521); \
|
|
__ret_521 = __builtin_shufflevector(__ret_521, __ret_521, __lane_reverse_64_16); \
|
|
__ret_521; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmls_laneq_f32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \
|
|
float32x2_t __ret_522; \
|
|
float32x2_t __s0_522 = __p0_522; \
|
|
float32x2_t __s1_522 = __p1_522; \
|
|
float32x4_t __s2_522 = __p2_522; \
|
|
__ret_522 = __s0_522 - __s1_522 * splat_laneq_f32(__s2_522, __p3_522); \
|
|
__ret_522; \
|
|
})
|
|
#else
|
|
#define vmls_laneq_f32(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \
|
|
float32x2_t __ret_523; \
|
|
float32x2_t __s0_523 = __p0_523; \
|
|
float32x2_t __s1_523 = __p1_523; \
|
|
float32x4_t __s2_523 = __p2_523; \
|
|
float32x2_t __rev0_523; __rev0_523 = __builtin_shufflevector(__s0_523, __s0_523, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_523; __rev1_523 = __builtin_shufflevector(__s1_523, __s1_523, __lane_reverse_64_32); \
|
|
float32x4_t __rev2_523; __rev2_523 = __builtin_shufflevector(__s2_523, __s2_523, __lane_reverse_128_32); \
|
|
__ret_523 = __rev0_523 - __rev1_523 * __noswap_splat_laneq_f32(__rev2_523, __p3_523); \
|
|
__ret_523 = __builtin_shufflevector(__ret_523, __ret_523, __lane_reverse_64_32); \
|
|
__ret_523; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmls_laneq_s32(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \
|
|
int32x2_t __ret_524; \
|
|
int32x2_t __s0_524 = __p0_524; \
|
|
int32x2_t __s1_524 = __p1_524; \
|
|
int32x4_t __s2_524 = __p2_524; \
|
|
__ret_524 = __s0_524 - __s1_524 * splat_laneq_s32(__s2_524, __p3_524); \
|
|
__ret_524; \
|
|
})
|
|
#else
|
|
#define vmls_laneq_s32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \
|
|
int32x2_t __ret_525; \
|
|
int32x2_t __s0_525 = __p0_525; \
|
|
int32x2_t __s1_525 = __p1_525; \
|
|
int32x4_t __s2_525 = __p2_525; \
|
|
int32x2_t __rev0_525; __rev0_525 = __builtin_shufflevector(__s0_525, __s0_525, __lane_reverse_64_32); \
|
|
int32x2_t __rev1_525; __rev1_525 = __builtin_shufflevector(__s1_525, __s1_525, __lane_reverse_64_32); \
|
|
int32x4_t __rev2_525; __rev2_525 = __builtin_shufflevector(__s2_525, __s2_525, __lane_reverse_128_32); \
|
|
__ret_525 = __rev0_525 - __rev1_525 * __noswap_splat_laneq_s32(__rev2_525, __p3_525); \
|
|
__ret_525 = __builtin_shufflevector(__ret_525, __ret_525, __lane_reverse_64_32); \
|
|
__ret_525; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmls_laneq_s16(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \
|
|
int16x4_t __ret_526; \
|
|
int16x4_t __s0_526 = __p0_526; \
|
|
int16x4_t __s1_526 = __p1_526; \
|
|
int16x8_t __s2_526 = __p2_526; \
|
|
__ret_526 = __s0_526 - __s1_526 * splat_laneq_s16(__s2_526, __p3_526); \
|
|
__ret_526; \
|
|
})
|
|
#else
|
|
#define vmls_laneq_s16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \
|
|
int16x4_t __ret_527; \
|
|
int16x4_t __s0_527 = __p0_527; \
|
|
int16x4_t __s1_527 = __p1_527; \
|
|
int16x8_t __s2_527 = __p2_527; \
|
|
int16x4_t __rev0_527; __rev0_527 = __builtin_shufflevector(__s0_527, __s0_527, __lane_reverse_64_16); \
|
|
int16x4_t __rev1_527; __rev1_527 = __builtin_shufflevector(__s1_527, __s1_527, __lane_reverse_64_16); \
|
|
int16x8_t __rev2_527; __rev2_527 = __builtin_shufflevector(__s2_527, __s2_527, __lane_reverse_128_16); \
|
|
__ret_527 = __rev0_527 - __rev1_527 * __noswap_splat_laneq_s16(__rev2_527, __p3_527); \
|
|
__ret_527 = __builtin_shufflevector(__ret_527, __ret_527, __lane_reverse_64_16); \
|
|
__ret_527; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_high_lane_u32(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \
|
|
uint64x2_t __ret_528; \
|
|
uint64x2_t __s0_528 = __p0_528; \
|
|
uint32x4_t __s1_528 = __p1_528; \
|
|
uint32x2_t __s2_528 = __p2_528; \
|
|
__ret_528 = __s0_528 - vmull_u32(vget_high_u32(__s1_528), splat_lane_u32(__s2_528, __p3_528)); \
|
|
__ret_528; \
|
|
})
|
|
#else
|
|
#define vmlsl_high_lane_u32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \
|
|
uint64x2_t __ret_529; \
|
|
uint64x2_t __s0_529 = __p0_529; \
|
|
uint32x4_t __s1_529 = __p1_529; \
|
|
uint32x2_t __s2_529 = __p2_529; \
|
|
uint64x2_t __rev0_529; __rev0_529 = __builtin_shufflevector(__s0_529, __s0_529, __lane_reverse_128_64); \
|
|
uint32x4_t __rev1_529; __rev1_529 = __builtin_shufflevector(__s1_529, __s1_529, __lane_reverse_128_32); \
|
|
uint32x2_t __rev2_529; __rev2_529 = __builtin_shufflevector(__s2_529, __s2_529, __lane_reverse_64_32); \
|
|
__ret_529 = __rev0_529 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_529), __noswap_splat_lane_u32(__rev2_529, __p3_529)); \
|
|
__ret_529 = __builtin_shufflevector(__ret_529, __ret_529, __lane_reverse_128_64); \
|
|
__ret_529; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_high_lane_u16(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \
|
|
uint32x4_t __ret_530; \
|
|
uint32x4_t __s0_530 = __p0_530; \
|
|
uint16x8_t __s1_530 = __p1_530; \
|
|
uint16x4_t __s2_530 = __p2_530; \
|
|
__ret_530 = __s0_530 - vmull_u16(vget_high_u16(__s1_530), splat_lane_u16(__s2_530, __p3_530)); \
|
|
__ret_530; \
|
|
})
|
|
#else
|
|
#define vmlsl_high_lane_u16(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \
|
|
uint32x4_t __ret_531; \
|
|
uint32x4_t __s0_531 = __p0_531; \
|
|
uint16x8_t __s1_531 = __p1_531; \
|
|
uint16x4_t __s2_531 = __p2_531; \
|
|
uint32x4_t __rev0_531; __rev0_531 = __builtin_shufflevector(__s0_531, __s0_531, __lane_reverse_128_32); \
|
|
uint16x8_t __rev1_531; __rev1_531 = __builtin_shufflevector(__s1_531, __s1_531, __lane_reverse_128_16); \
|
|
uint16x4_t __rev2_531; __rev2_531 = __builtin_shufflevector(__s2_531, __s2_531, __lane_reverse_64_16); \
|
|
__ret_531 = __rev0_531 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_531), __noswap_splat_lane_u16(__rev2_531, __p3_531)); \
|
|
__ret_531 = __builtin_shufflevector(__ret_531, __ret_531, __lane_reverse_128_32); \
|
|
__ret_531; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_high_lane_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \
|
|
int64x2_t __ret_532; \
|
|
int64x2_t __s0_532 = __p0_532; \
|
|
int32x4_t __s1_532 = __p1_532; \
|
|
int32x2_t __s2_532 = __p2_532; \
|
|
__ret_532 = __s0_532 - vmull_s32(vget_high_s32(__s1_532), splat_lane_s32(__s2_532, __p3_532)); \
|
|
__ret_532; \
|
|
})
|
|
#else
|
|
#define vmlsl_high_lane_s32(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \
|
|
int64x2_t __ret_533; \
|
|
int64x2_t __s0_533 = __p0_533; \
|
|
int32x4_t __s1_533 = __p1_533; \
|
|
int32x2_t __s2_533 = __p2_533; \
|
|
int64x2_t __rev0_533; __rev0_533 = __builtin_shufflevector(__s0_533, __s0_533, __lane_reverse_128_64); \
|
|
int32x4_t __rev1_533; __rev1_533 = __builtin_shufflevector(__s1_533, __s1_533, __lane_reverse_128_32); \
|
|
int32x2_t __rev2_533; __rev2_533 = __builtin_shufflevector(__s2_533, __s2_533, __lane_reverse_64_32); \
|
|
__ret_533 = __rev0_533 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_533), __noswap_splat_lane_s32(__rev2_533, __p3_533)); \
|
|
__ret_533 = __builtin_shufflevector(__ret_533, __ret_533, __lane_reverse_128_64); \
|
|
__ret_533; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_high_lane_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \
|
|
int32x4_t __ret_534; \
|
|
int32x4_t __s0_534 = __p0_534; \
|
|
int16x8_t __s1_534 = __p1_534; \
|
|
int16x4_t __s2_534 = __p2_534; \
|
|
__ret_534 = __s0_534 - vmull_s16(vget_high_s16(__s1_534), splat_lane_s16(__s2_534, __p3_534)); \
|
|
__ret_534; \
|
|
})
|
|
#else
|
|
#define vmlsl_high_lane_s16(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \
|
|
int32x4_t __ret_535; \
|
|
int32x4_t __s0_535 = __p0_535; \
|
|
int16x8_t __s1_535 = __p1_535; \
|
|
int16x4_t __s2_535 = __p2_535; \
|
|
int32x4_t __rev0_535; __rev0_535 = __builtin_shufflevector(__s0_535, __s0_535, __lane_reverse_128_32); \
|
|
int16x8_t __rev1_535; __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, __lane_reverse_128_16); \
|
|
int16x4_t __rev2_535; __rev2_535 = __builtin_shufflevector(__s2_535, __s2_535, __lane_reverse_64_16); \
|
|
__ret_535 = __rev0_535 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_535), __noswap_splat_lane_s16(__rev2_535, __p3_535)); \
|
|
__ret_535 = __builtin_shufflevector(__ret_535, __ret_535, __lane_reverse_128_32); \
|
|
__ret_535; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_high_laneq_u32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \
|
|
uint64x2_t __ret_536; \
|
|
uint64x2_t __s0_536 = __p0_536; \
|
|
uint32x4_t __s1_536 = __p1_536; \
|
|
uint32x4_t __s2_536 = __p2_536; \
|
|
__ret_536 = __s0_536 - vmull_u32(vget_high_u32(__s1_536), splat_laneq_u32(__s2_536, __p3_536)); \
|
|
__ret_536; \
|
|
})
|
|
#else
|
|
#define vmlsl_high_laneq_u32(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \
|
|
uint64x2_t __ret_537; \
|
|
uint64x2_t __s0_537 = __p0_537; \
|
|
uint32x4_t __s1_537 = __p1_537; \
|
|
uint32x4_t __s2_537 = __p2_537; \
|
|
uint64x2_t __rev0_537; __rev0_537 = __builtin_shufflevector(__s0_537, __s0_537, __lane_reverse_128_64); \
|
|
uint32x4_t __rev1_537; __rev1_537 = __builtin_shufflevector(__s1_537, __s1_537, __lane_reverse_128_32); \
|
|
uint32x4_t __rev2_537; __rev2_537 = __builtin_shufflevector(__s2_537, __s2_537, __lane_reverse_128_32); \
|
|
__ret_537 = __rev0_537 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_537), __noswap_splat_laneq_u32(__rev2_537, __p3_537)); \
|
|
__ret_537 = __builtin_shufflevector(__ret_537, __ret_537, __lane_reverse_128_64); \
|
|
__ret_537; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_high_laneq_u16(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \
|
|
uint32x4_t __ret_538; \
|
|
uint32x4_t __s0_538 = __p0_538; \
|
|
uint16x8_t __s1_538 = __p1_538; \
|
|
uint16x8_t __s2_538 = __p2_538; \
|
|
__ret_538 = __s0_538 - vmull_u16(vget_high_u16(__s1_538), splat_laneq_u16(__s2_538, __p3_538)); \
|
|
__ret_538; \
|
|
})
|
|
#else
|
|
#define vmlsl_high_laneq_u16(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \
|
|
uint32x4_t __ret_539; \
|
|
uint32x4_t __s0_539 = __p0_539; \
|
|
uint16x8_t __s1_539 = __p1_539; \
|
|
uint16x8_t __s2_539 = __p2_539; \
|
|
uint32x4_t __rev0_539; __rev0_539 = __builtin_shufflevector(__s0_539, __s0_539, __lane_reverse_128_32); \
|
|
uint16x8_t __rev1_539; __rev1_539 = __builtin_shufflevector(__s1_539, __s1_539, __lane_reverse_128_16); \
|
|
uint16x8_t __rev2_539; __rev2_539 = __builtin_shufflevector(__s2_539, __s2_539, __lane_reverse_128_16); \
|
|
__ret_539 = __rev0_539 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_539), __noswap_splat_laneq_u16(__rev2_539, __p3_539)); \
|
|
__ret_539 = __builtin_shufflevector(__ret_539, __ret_539, __lane_reverse_128_32); \
|
|
__ret_539; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_high_laneq_s32(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \
|
|
int64x2_t __ret_540; \
|
|
int64x2_t __s0_540 = __p0_540; \
|
|
int32x4_t __s1_540 = __p1_540; \
|
|
int32x4_t __s2_540 = __p2_540; \
|
|
__ret_540 = __s0_540 - vmull_s32(vget_high_s32(__s1_540), splat_laneq_s32(__s2_540, __p3_540)); \
|
|
__ret_540; \
|
|
})
|
|
#else
|
|
#define vmlsl_high_laneq_s32(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \
|
|
int64x2_t __ret_541; \
|
|
int64x2_t __s0_541 = __p0_541; \
|
|
int32x4_t __s1_541 = __p1_541; \
|
|
int32x4_t __s2_541 = __p2_541; \
|
|
int64x2_t __rev0_541; __rev0_541 = __builtin_shufflevector(__s0_541, __s0_541, __lane_reverse_128_64); \
|
|
int32x4_t __rev1_541; __rev1_541 = __builtin_shufflevector(__s1_541, __s1_541, __lane_reverse_128_32); \
|
|
int32x4_t __rev2_541; __rev2_541 = __builtin_shufflevector(__s2_541, __s2_541, __lane_reverse_128_32); \
|
|
__ret_541 = __rev0_541 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_541), __noswap_splat_laneq_s32(__rev2_541, __p3_541)); \
|
|
__ret_541 = __builtin_shufflevector(__ret_541, __ret_541, __lane_reverse_128_64); \
|
|
__ret_541; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_high_laneq_s16(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \
|
|
int32x4_t __ret_542; \
|
|
int32x4_t __s0_542 = __p0_542; \
|
|
int16x8_t __s1_542 = __p1_542; \
|
|
int16x8_t __s2_542 = __p2_542; \
|
|
__ret_542 = __s0_542 - vmull_s16(vget_high_s16(__s1_542), splat_laneq_s16(__s2_542, __p3_542)); \
|
|
__ret_542; \
|
|
})
|
|
#else
|
|
#define vmlsl_high_laneq_s16(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \
|
|
int32x4_t __ret_543; \
|
|
int32x4_t __s0_543 = __p0_543; \
|
|
int16x8_t __s1_543 = __p1_543; \
|
|
int16x8_t __s2_543 = __p2_543; \
|
|
int32x4_t __rev0_543; __rev0_543 = __builtin_shufflevector(__s0_543, __s0_543, __lane_reverse_128_32); \
|
|
int16x8_t __rev1_543; __rev1_543 = __builtin_shufflevector(__s1_543, __s1_543, __lane_reverse_128_16); \
|
|
int16x8_t __rev2_543; __rev2_543 = __builtin_shufflevector(__s2_543, __s2_543, __lane_reverse_128_16); \
|
|
__ret_543 = __rev0_543 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_543), __noswap_splat_laneq_s16(__rev2_543, __p3_543)); \
|
|
__ret_543 = __builtin_shufflevector(__ret_543, __ret_543, __lane_reverse_128_32); \
|
|
__ret_543; \
|
|
})
|
|
#endif
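/*
 * vmlsl_laneq_*: widening multiply-subtract where the per-lane multiplier is
 * taken from a 128-bit vector:
 * __ret = __p0 - vmull_*(__p1, splat_laneq_*(__p2, __p3)).
 */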
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_laneq_u32(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \
|
|
uint64x2_t __ret_544; \
|
|
uint64x2_t __s0_544 = __p0_544; \
|
|
uint32x2_t __s1_544 = __p1_544; \
|
|
uint32x4_t __s2_544 = __p2_544; \
|
|
__ret_544 = __s0_544 - vmull_u32(__s1_544, splat_laneq_u32(__s2_544, __p3_544)); \
|
|
__ret_544; \
|
|
})
|
|
#else
|
|
#define vmlsl_laneq_u32(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \
|
|
uint64x2_t __ret_545; \
|
|
uint64x2_t __s0_545 = __p0_545; \
|
|
uint32x2_t __s1_545 = __p1_545; \
|
|
uint32x4_t __s2_545 = __p2_545; \
|
|
uint64x2_t __rev0_545; __rev0_545 = __builtin_shufflevector(__s0_545, __s0_545, __lane_reverse_128_64); \
|
|
uint32x2_t __rev1_545; __rev1_545 = __builtin_shufflevector(__s1_545, __s1_545, __lane_reverse_64_32); \
|
|
uint32x4_t __rev2_545; __rev2_545 = __builtin_shufflevector(__s2_545, __s2_545, __lane_reverse_128_32); \
|
|
__ret_545 = __rev0_545 - __noswap_vmull_u32(__rev1_545, __noswap_splat_laneq_u32(__rev2_545, __p3_545)); \
|
|
__ret_545 = __builtin_shufflevector(__ret_545, __ret_545, __lane_reverse_128_64); \
|
|
__ret_545; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_laneq_u16(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \
|
|
uint32x4_t __ret_546; \
|
|
uint32x4_t __s0_546 = __p0_546; \
|
|
uint16x4_t __s1_546 = __p1_546; \
|
|
uint16x8_t __s2_546 = __p2_546; \
|
|
__ret_546 = __s0_546 - vmull_u16(__s1_546, splat_laneq_u16(__s2_546, __p3_546)); \
|
|
__ret_546; \
|
|
})
|
|
#else
|
|
#define vmlsl_laneq_u16(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \
|
|
uint32x4_t __ret_547; \
|
|
uint32x4_t __s0_547 = __p0_547; \
|
|
uint16x4_t __s1_547 = __p1_547; \
|
|
uint16x8_t __s2_547 = __p2_547; \
|
|
uint32x4_t __rev0_547; __rev0_547 = __builtin_shufflevector(__s0_547, __s0_547, __lane_reverse_128_32); \
|
|
uint16x4_t __rev1_547; __rev1_547 = __builtin_shufflevector(__s1_547, __s1_547, __lane_reverse_64_16); \
|
|
uint16x8_t __rev2_547; __rev2_547 = __builtin_shufflevector(__s2_547, __s2_547, __lane_reverse_128_16); \
|
|
__ret_547 = __rev0_547 - __noswap_vmull_u16(__rev1_547, __noswap_splat_laneq_u16(__rev2_547, __p3_547)); \
|
|
__ret_547 = __builtin_shufflevector(__ret_547, __ret_547, __lane_reverse_128_32); \
|
|
__ret_547; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \
|
|
int64x2_t __ret_548; \
|
|
int64x2_t __s0_548 = __p0_548; \
|
|
int32x2_t __s1_548 = __p1_548; \
|
|
int32x4_t __s2_548 = __p2_548; \
|
|
__ret_548 = __s0_548 - vmull_s32(__s1_548, splat_laneq_s32(__s2_548, __p3_548)); \
|
|
__ret_548; \
|
|
})
|
|
#else
|
|
#define vmlsl_laneq_s32(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \
|
|
int64x2_t __ret_549; \
|
|
int64x2_t __s0_549 = __p0_549; \
|
|
int32x2_t __s1_549 = __p1_549; \
|
|
int32x4_t __s2_549 = __p2_549; \
|
|
int64x2_t __rev0_549; __rev0_549 = __builtin_shufflevector(__s0_549, __s0_549, __lane_reverse_128_64); \
|
|
int32x2_t __rev1_549; __rev1_549 = __builtin_shufflevector(__s1_549, __s1_549, __lane_reverse_64_32); \
|
|
int32x4_t __rev2_549; __rev2_549 = __builtin_shufflevector(__s2_549, __s2_549, __lane_reverse_128_32); \
|
|
__ret_549 = __rev0_549 - __noswap_vmull_s32(__rev1_549, __noswap_splat_laneq_s32(__rev2_549, __p3_549)); \
|
|
__ret_549 = __builtin_shufflevector(__ret_549, __ret_549, __lane_reverse_128_64); \
|
|
__ret_549; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlsl_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \
|
|
int32x4_t __ret_550; \
|
|
int32x4_t __s0_550 = __p0_550; \
|
|
int16x4_t __s1_550 = __p1_550; \
|
|
int16x8_t __s2_550 = __p2_550; \
|
|
__ret_550 = __s0_550 - vmull_s16(__s1_550, splat_laneq_s16(__s2_550, __p3_550)); \
|
|
__ret_550; \
|
|
})
|
|
#else
|
|
#define vmlsl_laneq_s16(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \
|
|
int32x4_t __ret_551; \
|
|
int32x4_t __s0_551 = __p0_551; \
|
|
int16x4_t __s1_551 = __p1_551; \
|
|
int16x8_t __s2_551 = __p2_551; \
|
|
int32x4_t __rev0_551; __rev0_551 = __builtin_shufflevector(__s0_551, __s0_551, __lane_reverse_128_32); \
|
|
int16x4_t __rev1_551; __rev1_551 = __builtin_shufflevector(__s1_551, __s1_551, __lane_reverse_64_16); \
|
|
int16x8_t __rev2_551; __rev2_551 = __builtin_shufflevector(__s2_551, __s2_551, __lane_reverse_128_16); \
|
|
__ret_551 = __rev0_551 - __noswap_vmull_s16(__rev1_551, __noswap_splat_laneq_s16(__rev2_551, __p3_551)); \
|
|
__ret_551 = __builtin_shufflevector(__ret_551, __ret_551, __lane_reverse_128_32); \
|
|
__ret_551; \
|
|
})
|
|
#endif
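/*
 * vmov_n_*: broadcast a scalar into every lane of the destination vector.
 * The big-endian vmovq_n_* variants build the vector literal and then
 * shuffle it into the reversed in-register lane order used by this header.
 */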
__ai __attribute__((target("neon"))) poly64x1_t vmov_n_p64(poly64_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = (poly64x1_t) {__p0};
|
|
return __ret;
|
|
}
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x2_t vmovq_n_p64(poly64_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = (poly64x2_t) {__p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x2_t vmovq_n_p64(poly64_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = (poly64x2_t) {__p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vmovq_n_f64(float64_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = (float64x2_t) {__p0, __p0};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vmovq_n_f64(float64_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = (float64x2_t) {__p0, __p0};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
__ai __attribute__((target("neon"))) float64x1_t vmov_n_f64(float64_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = (float64x1_t) {__p0};
|
|
return __ret;
|
|
}
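/*
 * vmovl_high_*: widen the upper half of a 128-bit vector to the next wider
 * element type, implemented as vget_high_* followed by vshll_n_* with a
 * shift amount of 0.  The big-endian branches also define __noswap_ variants
 * for use by other intrinsics.
 */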
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmovl_high_u8(uint8x16_t __p0_552) {
|
|
uint16x8_t __ret_552;
|
|
uint8x8_t __a1_552 = vget_high_u8(__p0_552);
|
|
__ret_552 = __builtin_bit_cast(uint16x8_t, vshll_n_u8(__a1_552, 0));
|
|
return __ret_552;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmovl_high_u8(uint8x16_t __p0_553) {
|
|
uint16x8_t __ret_553;
|
|
uint8x16_t __rev0_553; __rev0_553 = __builtin_shufflevector(__p0_553, __p0_553, __lane_reverse_128_8);
|
|
uint8x8_t __a1_553 = __noswap_vget_high_u8(__rev0_553);
|
|
__ret_553 = __builtin_bit_cast(uint16x8_t, __noswap_vshll_n_u8(__a1_553, 0));
|
|
__ret_553 = __builtin_shufflevector(__ret_553, __ret_553, __lane_reverse_128_16);
|
|
return __ret_553;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_554) {
|
|
uint16x8_t __ret_554;
|
|
uint8x8_t __a1_554 = __noswap_vget_high_u8(__p0_554);
|
|
__ret_554 = __builtin_bit_cast(uint16x8_t, __noswap_vshll_n_u8(__a1_554, 0));
|
|
return __ret_554;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmovl_high_u32(uint32x4_t __p0_555) {
|
|
uint64x2_t __ret_555;
|
|
uint32x2_t __a1_555 = vget_high_u32(__p0_555);
|
|
__ret_555 = __builtin_bit_cast(uint64x2_t, vshll_n_u32(__a1_555, 0));
|
|
return __ret_555;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmovl_high_u32(uint32x4_t __p0_556) {
|
|
uint64x2_t __ret_556;
|
|
uint32x4_t __rev0_556; __rev0_556 = __builtin_shufflevector(__p0_556, __p0_556, __lane_reverse_128_32);
|
|
uint32x2_t __a1_556 = __noswap_vget_high_u32(__rev0_556);
|
|
__ret_556 = __builtin_bit_cast(uint64x2_t, __noswap_vshll_n_u32(__a1_556, 0));
|
|
__ret_556 = __builtin_shufflevector(__ret_556, __ret_556, __lane_reverse_128_64);
|
|
return __ret_556;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_557) {
|
|
uint64x2_t __ret_557;
|
|
uint32x2_t __a1_557 = __noswap_vget_high_u32(__p0_557);
|
|
__ret_557 = __builtin_bit_cast(uint64x2_t, __noswap_vshll_n_u32(__a1_557, 0));
|
|
return __ret_557;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmovl_high_u16(uint16x8_t __p0_558) {
|
|
uint32x4_t __ret_558;
|
|
uint16x4_t __a1_558 = vget_high_u16(__p0_558);
|
|
__ret_558 = __builtin_bit_cast(uint32x4_t, vshll_n_u16(__a1_558, 0));
|
|
return __ret_558;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmovl_high_u16(uint16x8_t __p0_559) {
|
|
uint32x4_t __ret_559;
|
|
uint16x8_t __rev0_559; __rev0_559 = __builtin_shufflevector(__p0_559, __p0_559, __lane_reverse_128_16);
|
|
uint16x4_t __a1_559 = __noswap_vget_high_u16(__rev0_559);
|
|
__ret_559 = __builtin_bit_cast(uint32x4_t, __noswap_vshll_n_u16(__a1_559, 0));
|
|
__ret_559 = __builtin_shufflevector(__ret_559, __ret_559, __lane_reverse_128_32);
|
|
return __ret_559;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_560) {
|
|
uint32x4_t __ret_560;
|
|
uint16x4_t __a1_560 = __noswap_vget_high_u16(__p0_560);
|
|
__ret_560 = __builtin_bit_cast(uint32x4_t, __noswap_vshll_n_u16(__a1_560, 0));
|
|
return __ret_560;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmovl_high_s8(int8x16_t __p0_561) {
|
|
int16x8_t __ret_561;
|
|
int8x8_t __a1_561 = vget_high_s8(__p0_561);
|
|
__ret_561 = __builtin_bit_cast(int16x8_t, vshll_n_s8(__a1_561, 0));
|
|
return __ret_561;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmovl_high_s8(int8x16_t __p0_562) {
|
|
int16x8_t __ret_562;
|
|
int8x16_t __rev0_562; __rev0_562 = __builtin_shufflevector(__p0_562, __p0_562, __lane_reverse_128_8);
|
|
int8x8_t __a1_562 = __noswap_vget_high_s8(__rev0_562);
|
|
__ret_562 = __builtin_bit_cast(int16x8_t, __noswap_vshll_n_s8(__a1_562, 0));
|
|
__ret_562 = __builtin_shufflevector(__ret_562, __ret_562, __lane_reverse_128_16);
|
|
return __ret_562;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_563) {
|
|
int16x8_t __ret_563;
|
|
int8x8_t __a1_563 = __noswap_vget_high_s8(__p0_563);
|
|
__ret_563 = __builtin_bit_cast(int16x8_t, __noswap_vshll_n_s8(__a1_563, 0));
|
|
return __ret_563;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vmovl_high_s32(int32x4_t __p0_564) {
|
|
int64x2_t __ret_564;
|
|
int32x2_t __a1_564 = vget_high_s32(__p0_564);
|
|
__ret_564 = __builtin_bit_cast(int64x2_t, vshll_n_s32(__a1_564, 0));
|
|
return __ret_564;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vmovl_high_s32(int32x4_t __p0_565) {
|
|
int64x2_t __ret_565;
|
|
int32x4_t __rev0_565; __rev0_565 = __builtin_shufflevector(__p0_565, __p0_565, __lane_reverse_128_32);
|
|
int32x2_t __a1_565 = __noswap_vget_high_s32(__rev0_565);
|
|
__ret_565 = __builtin_bit_cast(int64x2_t, __noswap_vshll_n_s32(__a1_565, 0));
|
|
__ret_565 = __builtin_shufflevector(__ret_565, __ret_565, __lane_reverse_128_64);
|
|
return __ret_565;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_566) {
|
|
int64x2_t __ret_566;
|
|
int32x2_t __a1_566 = __noswap_vget_high_s32(__p0_566);
|
|
__ret_566 = __builtin_bit_cast(int64x2_t, __noswap_vshll_n_s32(__a1_566, 0));
|
|
return __ret_566;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmovl_high_s16(int16x8_t __p0_567) {
|
|
int32x4_t __ret_567;
|
|
int16x4_t __a1_567 = vget_high_s16(__p0_567);
|
|
__ret_567 = __builtin_bit_cast(int32x4_t, vshll_n_s16(__a1_567, 0));
|
|
return __ret_567;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmovl_high_s16(int16x8_t __p0_568) {
|
|
int32x4_t __ret_568;
|
|
int16x8_t __rev0_568; __rev0_568 = __builtin_shufflevector(__p0_568, __p0_568, __lane_reverse_128_16);
|
|
int16x4_t __a1_568 = __noswap_vget_high_s16(__rev0_568);
|
|
__ret_568 = __builtin_bit_cast(int32x4_t, __noswap_vshll_n_s16(__a1_568, 0));
|
|
__ret_568 = __builtin_shufflevector(__ret_568, __ret_568, __lane_reverse_128_32);
|
|
return __ret_568;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_569) {
|
|
int32x4_t __ret_569;
|
|
int16x4_t __a1_569 = __noswap_vget_high_s16(__p0_569);
|
|
__ret_569 = __builtin_bit_cast(int32x4_t, __noswap_vshll_n_s16(__a1_569, 0));
|
|
return __ret_569;
|
|
}
|
|
#endif
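/*
 * vmovn_high_*: narrow the 128-bit vector __p1 and place the result in the
 * upper half of the destination, keeping __p0 as the lower half
 * (vcombine_*(__p0, vmovn_*(__p1))).
 */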
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = vcombine_u16(__p0, vmovn_u32(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = vcombine_u32(__p0, vmovn_u64(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = vcombine_u8(__p0, vmovn_u16(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = vcombine_s16(__p0, vmovn_s32(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = vcombine_s32(__p0, vmovn_s64(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = vcombine_s8(__p0, vmovn_s16(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __rev0 * __rev1;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
__ai __attribute__((target("neon"))) float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
float64x1_t __ret;
|
|
__ret = __p0 * __p1;
|
|
return __ret;
|
|
}
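/*
 * vmuld_lane_f64 / vmuls_lane_f32: multiply a scalar by one lane of a 64-bit
 * vector.  The big-endian vmuls_lane_f32 variant reverses only the vector
 * operand, since the result is a scalar.
 */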
#define vmuld_lane_f64(__p0_570, __p1_570, __p2_570) __extension__ ({ \
|
|
float64_t __ret_570; \
|
|
float64_t __s0_570 = __p0_570; \
|
|
float64x1_t __s1_570 = __p1_570; \
|
|
__ret_570 = __s0_570 * vget_lane_f64(__s1_570, __p2_570); \
|
|
__ret_570; \
|
|
})
#ifdef __LITTLE_ENDIAN__
|
|
#define vmuls_lane_f32(__p0_571, __p1_571, __p2_571) __extension__ ({ \
|
|
float32_t __ret_571; \
|
|
float32_t __s0_571 = __p0_571; \
|
|
float32x2_t __s1_571 = __p1_571; \
|
|
__ret_571 = __s0_571 * vget_lane_f32(__s1_571, __p2_571); \
|
|
__ret_571; \
|
|
})
|
|
#else
|
|
#define vmuls_lane_f32(__p0_572, __p1_572, __p2_572) __extension__ ({ \
|
|
float32_t __ret_572; \
|
|
float32_t __s0_572 = __p0_572; \
|
|
float32x2_t __s1_572 = __p1_572; \
|
|
float32x2_t __rev1_572; __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, __lane_reverse_64_32); \
|
|
__ret_572 = __s0_572 * __noswap_vget_lane_f32(__rev1_572, __p2_572); \
|
|
__ret_572; \
|
|
})
|
|
#endif
#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
float64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vmul_lane_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 10)); \
|
|
__ret; \
|
|
})
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_lane_f64(__p0_573, __p1_573, __p2_573) __extension__ ({ \
|
|
float64x2_t __ret_573; \
|
|
float64x2_t __s0_573 = __p0_573; \
|
|
float64x1_t __s1_573 = __p1_573; \
|
|
__ret_573 = __s0_573 * splatq_lane_f64(__s1_573, __p2_573); \
|
|
__ret_573; \
|
|
})
|
|
#else
|
|
#define vmulq_lane_f64(__p0_574, __p1_574, __p2_574) __extension__ ({ \
|
|
float64x2_t __ret_574; \
|
|
float64x2_t __s0_574 = __p0_574; \
|
|
float64x1_t __s1_574 = __p1_574; \
|
|
float64x2_t __rev0_574; __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, __lane_reverse_128_64); \
|
|
__ret_574 = __rev0_574 * __noswap_splatq_lane_f64(__s1_574, __p2_574); \
|
|
__ret_574 = __builtin_shufflevector(__ret_574, __ret_574, __lane_reverse_128_64); \
|
|
__ret_574; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmuld_laneq_f64(__p0_575, __p1_575, __p2_575) __extension__ ({ \
|
|
float64_t __ret_575; \
|
|
float64_t __s0_575 = __p0_575; \
|
|
float64x2_t __s1_575 = __p1_575; \
|
|
__ret_575 = __s0_575 * vgetq_lane_f64(__s1_575, __p2_575); \
|
|
__ret_575; \
|
|
})
|
|
#else
|
|
#define vmuld_laneq_f64(__p0_576, __p1_576, __p2_576) __extension__ ({ \
|
|
float64_t __ret_576; \
|
|
float64_t __s0_576 = __p0_576; \
|
|
float64x2_t __s1_576 = __p1_576; \
|
|
float64x2_t __rev1_576; __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, __lane_reverse_128_64); \
|
|
__ret_576 = __s0_576 * __noswap_vgetq_lane_f64(__rev1_576, __p2_576); \
|
|
__ret_576; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmuls_laneq_f32(__p0_577, __p1_577, __p2_577) __extension__ ({ \
|
|
float32_t __ret_577; \
|
|
float32_t __s0_577 = __p0_577; \
|
|
float32x4_t __s1_577 = __p1_577; \
|
|
__ret_577 = __s0_577 * vgetq_lane_f32(__s1_577, __p2_577); \
|
|
__ret_577; \
|
|
})
|
|
#else
|
|
#define vmuls_laneq_f32(__p0_578, __p1_578, __p2_578) __extension__ ({ \
|
|
float32_t __ret_578; \
|
|
float32_t __s0_578 = __p0_578; \
|
|
float32x4_t __s1_578 = __p1_578; \
|
|
float32x4_t __rev1_578; __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, __lane_reverse_128_32); \
|
|
__ret_578 = __s0_578 * __noswap_vgetq_lane_f32(__rev1_578, __p2_578); \
|
|
__ret_578; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vmul_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 10)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x1_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vmul_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 10)); \
|
|
__ret; \
|
|
})
|
|
#endif
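/*
 * vmulq_laneq_* / vmul_laneq_*: element-wise multiply by a value broadcast
 * from one lane of a 128-bit vector via splatq_laneq_* / splat_laneq_*.
 */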
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_laneq_u32(__p0_579, __p1_579, __p2_579) __extension__ ({ \
|
|
uint32x4_t __ret_579; \
|
|
uint32x4_t __s0_579 = __p0_579; \
|
|
uint32x4_t __s1_579 = __p1_579; \
|
|
__ret_579 = __s0_579 * splatq_laneq_u32(__s1_579, __p2_579); \
|
|
__ret_579; \
|
|
})
|
|
#else
|
|
#define vmulq_laneq_u32(__p0_580, __p1_580, __p2_580) __extension__ ({ \
|
|
uint32x4_t __ret_580; \
|
|
uint32x4_t __s0_580 = __p0_580; \
|
|
uint32x4_t __s1_580 = __p1_580; \
|
|
uint32x4_t __rev0_580; __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1_580; __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, __lane_reverse_128_32); \
|
|
__ret_580 = __rev0_580 * __noswap_splatq_laneq_u32(__rev1_580, __p2_580); \
|
|
__ret_580 = __builtin_shufflevector(__ret_580, __ret_580, __lane_reverse_128_32); \
|
|
__ret_580; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_laneq_u16(__p0_581, __p1_581, __p2_581) __extension__ ({ \
|
|
uint16x8_t __ret_581; \
|
|
uint16x8_t __s0_581 = __p0_581; \
|
|
uint16x8_t __s1_581 = __p1_581; \
|
|
__ret_581 = __s0_581 * splatq_laneq_u16(__s1_581, __p2_581); \
|
|
__ret_581; \
|
|
})
|
|
#else
|
|
#define vmulq_laneq_u16(__p0_582, __p1_582, __p2_582) __extension__ ({ \
|
|
uint16x8_t __ret_582; \
|
|
uint16x8_t __s0_582 = __p0_582; \
|
|
uint16x8_t __s1_582 = __p1_582; \
|
|
uint16x8_t __rev0_582; __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1_582; __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, __lane_reverse_128_16); \
|
|
__ret_582 = __rev0_582 * __noswap_splatq_laneq_u16(__rev1_582, __p2_582); \
|
|
__ret_582 = __builtin_shufflevector(__ret_582, __ret_582, __lane_reverse_128_16); \
|
|
__ret_582; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_laneq_f64(__p0_583, __p1_583, __p2_583) __extension__ ({ \
|
|
float64x2_t __ret_583; \
|
|
float64x2_t __s0_583 = __p0_583; \
|
|
float64x2_t __s1_583 = __p1_583; \
|
|
__ret_583 = __s0_583 * splatq_laneq_f64(__s1_583, __p2_583); \
|
|
__ret_583; \
|
|
})
|
|
#else
|
|
#define vmulq_laneq_f64(__p0_584, __p1_584, __p2_584) __extension__ ({ \
|
|
float64x2_t __ret_584; \
|
|
float64x2_t __s0_584 = __p0_584; \
|
|
float64x2_t __s1_584 = __p1_584; \
|
|
float64x2_t __rev0_584; __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, __lane_reverse_128_64); \
|
|
float64x2_t __rev1_584; __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, __lane_reverse_128_64); \
|
|
__ret_584 = __rev0_584 * __noswap_splatq_laneq_f64(__rev1_584, __p2_584); \
|
|
__ret_584 = __builtin_shufflevector(__ret_584, __ret_584, __lane_reverse_128_64); \
|
|
__ret_584; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_laneq_f32(__p0_585, __p1_585, __p2_585) __extension__ ({ \
|
|
float32x4_t __ret_585; \
|
|
float32x4_t __s0_585 = __p0_585; \
|
|
float32x4_t __s1_585 = __p1_585; \
|
|
__ret_585 = __s0_585 * splatq_laneq_f32(__s1_585, __p2_585); \
|
|
__ret_585; \
|
|
})
|
|
#else
|
|
#define vmulq_laneq_f32(__p0_586, __p1_586, __p2_586) __extension__ ({ \
|
|
float32x4_t __ret_586; \
|
|
float32x4_t __s0_586 = __p0_586; \
|
|
float32x4_t __s1_586 = __p1_586; \
|
|
float32x4_t __rev0_586; __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_586; __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, __lane_reverse_128_32); \
|
|
__ret_586 = __rev0_586 * __noswap_splatq_laneq_f32(__rev1_586, __p2_586); \
|
|
__ret_586 = __builtin_shufflevector(__ret_586, __ret_586, __lane_reverse_128_32); \
|
|
__ret_586; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_laneq_s32(__p0_587, __p1_587, __p2_587) __extension__ ({ \
|
|
int32x4_t __ret_587; \
|
|
int32x4_t __s0_587 = __p0_587; \
|
|
int32x4_t __s1_587 = __p1_587; \
|
|
__ret_587 = __s0_587 * splatq_laneq_s32(__s1_587, __p2_587); \
|
|
__ret_587; \
|
|
})
|
|
#else
|
|
#define vmulq_laneq_s32(__p0_588, __p1_588, __p2_588) __extension__ ({ \
|
|
int32x4_t __ret_588; \
|
|
int32x4_t __s0_588 = __p0_588; \
|
|
int32x4_t __s1_588 = __p1_588; \
|
|
int32x4_t __rev0_588; __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, __lane_reverse_128_32); \
|
|
int32x4_t __rev1_588; __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, __lane_reverse_128_32); \
|
|
__ret_588 = __rev0_588 * __noswap_splatq_laneq_s32(__rev1_588, __p2_588); \
|
|
__ret_588 = __builtin_shufflevector(__ret_588, __ret_588, __lane_reverse_128_32); \
|
|
__ret_588; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_laneq_s16(__p0_589, __p1_589, __p2_589) __extension__ ({ \
|
|
int16x8_t __ret_589; \
|
|
int16x8_t __s0_589 = __p0_589; \
|
|
int16x8_t __s1_589 = __p1_589; \
|
|
__ret_589 = __s0_589 * splatq_laneq_s16(__s1_589, __p2_589); \
|
|
__ret_589; \
|
|
})
|
|
#else
|
|
#define vmulq_laneq_s16(__p0_590, __p1_590, __p2_590) __extension__ ({ \
|
|
int16x8_t __ret_590; \
|
|
int16x8_t __s0_590 = __p0_590; \
|
|
int16x8_t __s1_590 = __p1_590; \
|
|
int16x8_t __rev0_590; __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, __lane_reverse_128_16); \
|
|
int16x8_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, __lane_reverse_128_16); \
|
|
__ret_590 = __rev0_590 * __noswap_splatq_laneq_s16(__rev1_590, __p2_590); \
|
|
__ret_590 = __builtin_shufflevector(__ret_590, __ret_590, __lane_reverse_128_16); \
|
|
__ret_590; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_laneq_u32(__p0_591, __p1_591, __p2_591) __extension__ ({ \
|
|
uint32x2_t __ret_591; \
|
|
uint32x2_t __s0_591 = __p0_591; \
|
|
uint32x4_t __s1_591 = __p1_591; \
|
|
__ret_591 = __s0_591 * splat_laneq_u32(__s1_591, __p2_591); \
|
|
__ret_591; \
|
|
})
|
|
#else
|
|
#define vmul_laneq_u32(__p0_592, __p1_592, __p2_592) __extension__ ({ \
|
|
uint32x2_t __ret_592; \
|
|
uint32x2_t __s0_592 = __p0_592; \
|
|
uint32x4_t __s1_592 = __p1_592; \
|
|
uint32x2_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, __lane_reverse_64_32); \
|
|
uint32x4_t __rev1_592; __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, __lane_reverse_128_32); \
|
|
__ret_592 = __rev0_592 * __noswap_splat_laneq_u32(__rev1_592, __p2_592); \
|
|
__ret_592 = __builtin_shufflevector(__ret_592, __ret_592, __lane_reverse_64_32); \
|
|
__ret_592; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_laneq_u16(__p0_593, __p1_593, __p2_593) __extension__ ({ \
|
|
uint16x4_t __ret_593; \
|
|
uint16x4_t __s0_593 = __p0_593; \
|
|
uint16x8_t __s1_593 = __p1_593; \
|
|
__ret_593 = __s0_593 * splat_laneq_u16(__s1_593, __p2_593); \
|
|
__ret_593; \
|
|
})
|
|
#else
|
|
#define vmul_laneq_u16(__p0_594, __p1_594, __p2_594) __extension__ ({ \
|
|
uint16x4_t __ret_594; \
|
|
uint16x4_t __s0_594 = __p0_594; \
|
|
uint16x8_t __s1_594 = __p1_594; \
|
|
uint16x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, __lane_reverse_64_16); \
|
|
uint16x8_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, __lane_reverse_128_16); \
|
|
__ret_594 = __rev0_594 * __noswap_splat_laneq_u16(__rev1_594, __p2_594); \
|
|
__ret_594 = __builtin_shufflevector(__ret_594, __ret_594, __lane_reverse_64_16); \
|
|
__ret_594; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_laneq_f32(__p0_595, __p1_595, __p2_595) __extension__ ({ \
|
|
float32x2_t __ret_595; \
|
|
float32x2_t __s0_595 = __p0_595; \
|
|
float32x4_t __s1_595 = __p1_595; \
|
|
__ret_595 = __s0_595 * splat_laneq_f32(__s1_595, __p2_595); \
|
|
__ret_595; \
|
|
})
|
|
#else
|
|
#define vmul_laneq_f32(__p0_596, __p1_596, __p2_596) __extension__ ({ \
|
|
float32x2_t __ret_596; \
|
|
float32x2_t __s0_596 = __p0_596; \
|
|
float32x4_t __s1_596 = __p1_596; \
|
|
float32x2_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, __lane_reverse_64_32); \
|
|
float32x4_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, __lane_reverse_128_32); \
|
|
__ret_596 = __rev0_596 * __noswap_splat_laneq_f32(__rev1_596, __p2_596); \
|
|
__ret_596 = __builtin_shufflevector(__ret_596, __ret_596, __lane_reverse_64_32); \
|
|
__ret_596; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_laneq_s32(__p0_597, __p1_597, __p2_597) __extension__ ({ \
|
|
int32x2_t __ret_597; \
|
|
int32x2_t __s0_597 = __p0_597; \
|
|
int32x4_t __s1_597 = __p1_597; \
|
|
__ret_597 = __s0_597 * splat_laneq_s32(__s1_597, __p2_597); \
|
|
__ret_597; \
|
|
})
|
|
#else
|
|
#define vmul_laneq_s32(__p0_598, __p1_598, __p2_598) __extension__ ({ \
|
|
int32x2_t __ret_598; \
|
|
int32x2_t __s0_598 = __p0_598; \
|
|
int32x4_t __s1_598 = __p1_598; \
|
|
int32x2_t __rev0_598; __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, __lane_reverse_64_32); \
|
|
int32x4_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, __lane_reverse_128_32); \
|
|
__ret_598 = __rev0_598 * __noswap_splat_laneq_s32(__rev1_598, __p2_598); \
|
|
__ret_598 = __builtin_shufflevector(__ret_598, __ret_598, __lane_reverse_64_32); \
|
|
__ret_598; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_laneq_s16(__p0_599, __p1_599, __p2_599) __extension__ ({ \
|
|
int16x4_t __ret_599; \
|
|
int16x4_t __s0_599 = __p0_599; \
|
|
int16x8_t __s1_599 = __p1_599; \
|
|
__ret_599 = __s0_599 * splat_laneq_s16(__s1_599, __p2_599); \
|
|
__ret_599; \
|
|
})
|
|
#else
|
|
#define vmul_laneq_s16(__p0_600, __p1_600, __p2_600) __extension__ ({ \
|
|
int16x4_t __ret_600; \
|
|
int16x4_t __s0_600 = __p0_600; \
|
|
int16x8_t __s1_600 = __p1_600; \
|
|
int16x4_t __rev0_600; __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, __lane_reverse_64_16); \
|
|
int16x8_t __rev1_600; __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, __lane_reverse_128_16); \
|
|
__ret_600 = __rev0_600 * __noswap_splat_laneq_s16(__rev1_600, __p2_600); \
|
|
__ret_600 = __builtin_shufflevector(__ret_600, __ret_600, __lane_reverse_64_16); \
|
|
__ret_600; \
|
|
})
|
|
#endif
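/*
 * vmul_n_f64 / vmulq_n_f64: multiply every lane by a scalar; the quad form
 * expands the scalar into a two-element vector literal.
 */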
__ai __attribute__((target("neon"))) float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vmul_n_f64(__p0, __p1));
|
|
return __ret;
|
|
}
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __p0 * (float64x2_t) {__p1, __p1};
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __rev0 * (float64x2_t) {__p1, __p1};
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
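/*
 * vmull_high_*: widening multiply of the upper halves of two 128-bit
 * vectors; e.g. vmull_high_u16(__p0, __p1) computes
 * vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1)).
 */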
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly16x8_t __ret;
|
|
__ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly16x8_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int16x8_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int64x2_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int32x4_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
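/*
 * vmull_high_lane_* / vmull_high_laneq_*: widening multiply of the upper half
 * of the first operand by a single lane of the second operand (a 64-bit
 * vector for _lane, a 128-bit vector for _laneq).
 */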
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_high_lane_u32(__p0_601, __p1_601, __p2_601) __extension__ ({ \
|
|
uint64x2_t __ret_601; \
|
|
uint32x4_t __s0_601 = __p0_601; \
|
|
uint32x2_t __s1_601 = __p1_601; \
|
|
__ret_601 = vmull_u32(vget_high_u32(__s0_601), splat_lane_u32(__s1_601, __p2_601)); \
|
|
__ret_601; \
|
|
})
|
|
#else
|
|
#define vmull_high_lane_u32(__p0_602, __p1_602, __p2_602) __extension__ ({ \
|
|
uint64x2_t __ret_602; \
|
|
uint32x4_t __s0_602 = __p0_602; \
|
|
uint32x2_t __s1_602 = __p1_602; \
|
|
uint32x4_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, __lane_reverse_128_32); \
|
|
uint32x2_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, __lane_reverse_64_32); \
|
|
__ret_602 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_602), __noswap_splat_lane_u32(__rev1_602, __p2_602)); \
|
|
__ret_602 = __builtin_shufflevector(__ret_602, __ret_602, __lane_reverse_128_64); \
|
|
__ret_602; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_high_lane_u16(__p0_603, __p1_603, __p2_603) __extension__ ({ \
|
|
uint32x4_t __ret_603; \
|
|
uint16x8_t __s0_603 = __p0_603; \
|
|
uint16x4_t __s1_603 = __p1_603; \
|
|
__ret_603 = vmull_u16(vget_high_u16(__s0_603), splat_lane_u16(__s1_603, __p2_603)); \
|
|
__ret_603; \
|
|
})
|
|
#else
|
|
#define vmull_high_lane_u16(__p0_604, __p1_604, __p2_604) __extension__ ({ \
|
|
uint32x4_t __ret_604; \
|
|
uint16x8_t __s0_604 = __p0_604; \
|
|
uint16x4_t __s1_604 = __p1_604; \
|
|
uint16x8_t __rev0_604; __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, __lane_reverse_128_16); \
|
|
uint16x4_t __rev1_604; __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, __lane_reverse_64_16); \
|
|
__ret_604 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_604), __noswap_splat_lane_u16(__rev1_604, __p2_604)); \
|
|
__ret_604 = __builtin_shufflevector(__ret_604, __ret_604, __lane_reverse_128_32); \
|
|
__ret_604; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_high_lane_s32(__p0_605, __p1_605, __p2_605) __extension__ ({ \
|
|
int64x2_t __ret_605; \
|
|
int32x4_t __s0_605 = __p0_605; \
|
|
int32x2_t __s1_605 = __p1_605; \
|
|
__ret_605 = vmull_s32(vget_high_s32(__s0_605), splat_lane_s32(__s1_605, __p2_605)); \
|
|
__ret_605; \
|
|
})
|
|
#else
|
|
#define vmull_high_lane_s32(__p0_606, __p1_606, __p2_606) __extension__ ({ \
|
|
int64x2_t __ret_606; \
|
|
int32x4_t __s0_606 = __p0_606; \
|
|
int32x2_t __s1_606 = __p1_606; \
|
|
int32x4_t __rev0_606; __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, __lane_reverse_128_32); \
|
|
int32x2_t __rev1_606; __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, __lane_reverse_64_32); \
|
|
__ret_606 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_606), __noswap_splat_lane_s32(__rev1_606, __p2_606)); \
|
|
__ret_606 = __builtin_shufflevector(__ret_606, __ret_606, __lane_reverse_128_64); \
|
|
__ret_606; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_high_lane_s16(__p0_607, __p1_607, __p2_607) __extension__ ({ \
|
|
int32x4_t __ret_607; \
|
|
int16x8_t __s0_607 = __p0_607; \
|
|
int16x4_t __s1_607 = __p1_607; \
|
|
__ret_607 = vmull_s16(vget_high_s16(__s0_607), splat_lane_s16(__s1_607, __p2_607)); \
|
|
__ret_607; \
|
|
})
|
|
#else
|
|
#define vmull_high_lane_s16(__p0_608, __p1_608, __p2_608) __extension__ ({ \
|
|
int32x4_t __ret_608; \
|
|
int16x8_t __s0_608 = __p0_608; \
|
|
int16x4_t __s1_608 = __p1_608; \
|
|
int16x8_t __rev0_608; __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, __lane_reverse_128_16); \
|
|
int16x4_t __rev1_608; __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, __lane_reverse_64_16); \
|
|
__ret_608 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_608), __noswap_splat_lane_s16(__rev1_608, __p2_608)); \
|
|
__ret_608 = __builtin_shufflevector(__ret_608, __ret_608, __lane_reverse_128_32); \
|
|
__ret_608; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_high_laneq_u32(__p0_609, __p1_609, __p2_609) __extension__ ({ \
|
|
uint64x2_t __ret_609; \
|
|
uint32x4_t __s0_609 = __p0_609; \
|
|
uint32x4_t __s1_609 = __p1_609; \
|
|
__ret_609 = vmull_u32(vget_high_u32(__s0_609), splat_laneq_u32(__s1_609, __p2_609)); \
|
|
__ret_609; \
|
|
})
|
|
#else
|
|
#define vmull_high_laneq_u32(__p0_610, __p1_610, __p2_610) __extension__ ({ \
|
|
uint64x2_t __ret_610; \
|
|
uint32x4_t __s0_610 = __p0_610; \
|
|
uint32x4_t __s1_610 = __p1_610; \
|
|
uint32x4_t __rev0_610; __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, __lane_reverse_128_32); \
|
|
uint32x4_t __rev1_610; __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, __lane_reverse_128_32); \
|
|
__ret_610 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_610), __noswap_splat_laneq_u32(__rev1_610, __p2_610)); \
|
|
__ret_610 = __builtin_shufflevector(__ret_610, __ret_610, __lane_reverse_128_64); \
|
|
__ret_610; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_high_laneq_u16(__p0_611, __p1_611, __p2_611) __extension__ ({ \
|
|
uint32x4_t __ret_611; \
|
|
uint16x8_t __s0_611 = __p0_611; \
|
|
uint16x8_t __s1_611 = __p1_611; \
|
|
__ret_611 = vmull_u16(vget_high_u16(__s0_611), splat_laneq_u16(__s1_611, __p2_611)); \
|
|
__ret_611; \
|
|
})
|
|
#else
|
|
#define vmull_high_laneq_u16(__p0_612, __p1_612, __p2_612) __extension__ ({ \
|
|
uint32x4_t __ret_612; \
|
|
uint16x8_t __s0_612 = __p0_612; \
|
|
uint16x8_t __s1_612 = __p1_612; \
|
|
uint16x8_t __rev0_612; __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, __lane_reverse_128_16); \
|
|
uint16x8_t __rev1_612; __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, __lane_reverse_128_16); \
|
|
__ret_612 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_612), __noswap_splat_laneq_u16(__rev1_612, __p2_612)); \
|
|
__ret_612 = __builtin_shufflevector(__ret_612, __ret_612, __lane_reverse_128_32); \
|
|
__ret_612; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_high_laneq_s32(__p0_613, __p1_613, __p2_613) __extension__ ({ \
|
|
int64x2_t __ret_613; \
|
|
int32x4_t __s0_613 = __p0_613; \
|
|
int32x4_t __s1_613 = __p1_613; \
|
|
__ret_613 = vmull_s32(vget_high_s32(__s0_613), splat_laneq_s32(__s1_613, __p2_613)); \
|
|
__ret_613; \
|
|
})
|
|
#else
|
|
#define vmull_high_laneq_s32(__p0_614, __p1_614, __p2_614) __extension__ ({ \
|
|
int64x2_t __ret_614; \
|
|
int32x4_t __s0_614 = __p0_614; \
|
|
int32x4_t __s1_614 = __p1_614; \
|
|
int32x4_t __rev0_614; __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, __lane_reverse_128_32); \
|
|
int32x4_t __rev1_614; __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, __lane_reverse_128_32); \
|
|
__ret_614 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_614), __noswap_splat_laneq_s32(__rev1_614, __p2_614)); \
|
|
__ret_614 = __builtin_shufflevector(__ret_614, __ret_614, __lane_reverse_128_64); \
|
|
__ret_614; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_high_laneq_s16(__p0_615, __p1_615, __p2_615) __extension__ ({ \
|
|
int32x4_t __ret_615; \
|
|
int16x8_t __s0_615 = __p0_615; \
|
|
int16x8_t __s1_615 = __p1_615; \
|
|
__ret_615 = vmull_s16(vget_high_s16(__s0_615), splat_laneq_s16(__s1_615, __p2_615)); \
|
|
__ret_615; \
|
|
})
|
|
#else
|
|
#define vmull_high_laneq_s16(__p0_616, __p1_616, __p2_616) __extension__ ({ \
|
|
int32x4_t __ret_616; \
|
|
int16x8_t __s0_616 = __p0_616; \
|
|
int16x8_t __s1_616 = __p1_616; \
|
|
int16x8_t __rev0_616; __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, __lane_reverse_128_16); \
|
|
int16x8_t __rev1_616; __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, __lane_reverse_128_16); \
|
|
__ret_616 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_616), __noswap_splat_laneq_s16(__rev1_616, __p2_616)); \
|
|
__ret_616 = __builtin_shufflevector(__ret_616, __ret_616, __lane_reverse_128_32); \
|
|
__ret_616; \
|
|
})
|
|
#endif
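/*
 * vmull_high_n_*: widening multiply of the upper half of a 128-bit vector by
 * a scalar, via vmull_n_*(vget_high_*(__p0), __p1).
 */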
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = vmull_n_u32(vget_high_u32(__p0), __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = vmull_n_u16(vget_high_u16(__p0), __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = vmull_n_s32(vget_high_s32(__p0), __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
|
|
int64x2_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = vmull_n_s16(vget_high_s16(__p0), __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
|
|
int32x4_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
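/*
 * vmull_laneq_*: widening multiply of a 64-bit vector by one lane of a
 * 128-bit vector: vmull_*(__p0, splat_laneq_*(__p1, __p2)).
 */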
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_laneq_u32(__p0_617, __p1_617, __p2_617) __extension__ ({ \
|
|
uint64x2_t __ret_617; \
|
|
uint32x2_t __s0_617 = __p0_617; \
|
|
uint32x4_t __s1_617 = __p1_617; \
|
|
__ret_617 = vmull_u32(__s0_617, splat_laneq_u32(__s1_617, __p2_617)); \
|
|
__ret_617; \
|
|
})
|
|
#else
|
|
#define vmull_laneq_u32(__p0_618, __p1_618, __p2_618) __extension__ ({ \
|
|
uint64x2_t __ret_618; \
|
|
uint32x2_t __s0_618 = __p0_618; \
|
|
uint32x4_t __s1_618 = __p1_618; \
|
|
uint32x2_t __rev0_618; __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, __lane_reverse_64_32); \
|
|
uint32x4_t __rev1_618; __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, __lane_reverse_128_32); \
|
|
__ret_618 = __noswap_vmull_u32(__rev0_618, __noswap_splat_laneq_u32(__rev1_618, __p2_618)); \
|
|
__ret_618 = __builtin_shufflevector(__ret_618, __ret_618, __lane_reverse_128_64); \
|
|
__ret_618; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_laneq_u16(__p0_619, __p1_619, __p2_619) __extension__ ({ \
|
|
uint32x4_t __ret_619; \
|
|
uint16x4_t __s0_619 = __p0_619; \
|
|
uint16x8_t __s1_619 = __p1_619; \
|
|
__ret_619 = vmull_u16(__s0_619, splat_laneq_u16(__s1_619, __p2_619)); \
|
|
__ret_619; \
|
|
})
|
|
#else
|
|
#define vmull_laneq_u16(__p0_620, __p1_620, __p2_620) __extension__ ({ \
|
|
uint32x4_t __ret_620; \
|
|
uint16x4_t __s0_620 = __p0_620; \
|
|
uint16x8_t __s1_620 = __p1_620; \
|
|
uint16x4_t __rev0_620; __rev0_620 = __builtin_shufflevector(__s0_620, __s0_620, __lane_reverse_64_16); \
|
|
uint16x8_t __rev1_620; __rev1_620 = __builtin_shufflevector(__s1_620, __s1_620, __lane_reverse_128_16); \
|
|
__ret_620 = __noswap_vmull_u16(__rev0_620, __noswap_splat_laneq_u16(__rev1_620, __p2_620)); \
|
|
__ret_620 = __builtin_shufflevector(__ret_620, __ret_620, __lane_reverse_128_32); \
|
|
__ret_620; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_laneq_s32(__p0_621, __p1_621, __p2_621) __extension__ ({ \
|
|
int64x2_t __ret_621; \
|
|
int32x2_t __s0_621 = __p0_621; \
|
|
int32x4_t __s1_621 = __p1_621; \
|
|
__ret_621 = vmull_s32(__s0_621, splat_laneq_s32(__s1_621, __p2_621)); \
|
|
__ret_621; \
|
|
})
|
|
#else
|
|
#define vmull_laneq_s32(__p0_622, __p1_622, __p2_622) __extension__ ({ \
|
|
int64x2_t __ret_622; \
|
|
int32x2_t __s0_622 = __p0_622; \
|
|
int32x4_t __s1_622 = __p1_622; \
|
|
int32x2_t __rev0_622; __rev0_622 = __builtin_shufflevector(__s0_622, __s0_622, __lane_reverse_64_32); \
|
|
int32x4_t __rev1_622; __rev1_622 = __builtin_shufflevector(__s1_622, __s1_622, __lane_reverse_128_32); \
|
|
__ret_622 = __noswap_vmull_s32(__rev0_622, __noswap_splat_laneq_s32(__rev1_622, __p2_622)); \
|
|
__ret_622 = __builtin_shufflevector(__ret_622, __ret_622, __lane_reverse_128_64); \
|
|
__ret_622; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmull_laneq_s16(__p0_623, __p1_623, __p2_623) __extension__ ({ \
|
|
int32x4_t __ret_623; \
|
|
int16x4_t __s0_623 = __p0_623; \
|
|
int16x8_t __s1_623 = __p1_623; \
|
|
__ret_623 = vmull_s16(__s0_623, splat_laneq_s16(__s1_623, __p2_623)); \
|
|
__ret_623; \
|
|
})
|
|
#else
|
|
#define vmull_laneq_s16(__p0_624, __p1_624, __p2_624) __extension__ ({ \
|
|
int32x4_t __ret_624; \
|
|
int16x4_t __s0_624 = __p0_624; \
|
|
int16x8_t __s1_624 = __p1_624; \
|
|
int16x4_t __rev0_624; __rev0_624 = __builtin_shufflevector(__s0_624, __s0_624, __lane_reverse_64_16); \
|
|
int16x8_t __rev1_624; __rev1_624 = __builtin_shufflevector(__s1_624, __s1_624, __lane_reverse_128_16); \
|
|
__ret_624 = __noswap_vmull_s16(__rev0_624, __noswap_splat_laneq_s16(__rev1_624, __p2_624)); \
|
|
__ret_624 = __builtin_shufflevector(__ret_624, __ret_624, __lane_reverse_128_32); \
|
|
__ret_624; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vmulxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vmulxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vmulxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmulxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmulxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vmulxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vmulx_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 10));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vmulx_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vmulx_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vmulx_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#endif
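/*
 * Illustrative usage sketch (not part of the generated header): vmulxq_f32 is an
 * element-wise multiply with FMULX semantics, so 0.0 * infinity yields +/-2.0
 * instead of NaN. The helper name below is hypothetical.
 */
static inline __attribute__((target("neon"))) float32x4_t example_mulx_f32(float32x4_t a, float32x4_t b) {
  return vmulxq_f32(a, b);
}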
|
|
|
|
__ai __attribute__((target("neon"))) float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vmulxd_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vmulxs_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#define vmulxd_lane_f64(__p0_625, __p1_625, __p2_625) __extension__ ({ \
|
|
float64_t __ret_625; \
|
|
float64_t __s0_625 = __p0_625; \
|
|
float64x1_t __s1_625 = __p1_625; \
|
|
__ret_625 = vmulxd_f64(__s0_625, vget_lane_f64(__s1_625, __p2_625)); \
|
|
__ret_625; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxs_lane_f32(__p0_626, __p1_626, __p2_626) __extension__ ({ \
|
|
float32_t __ret_626; \
|
|
float32_t __s0_626 = __p0_626; \
|
|
float32x2_t __s1_626 = __p1_626; \
|
|
__ret_626 = vmulxs_f32(__s0_626, vget_lane_f32(__s1_626, __p2_626)); \
|
|
__ret_626; \
|
|
})
|
|
#else
|
|
#define vmulxs_lane_f32(__p0_627, __p1_627, __p2_627) __extension__ ({ \
|
|
float32_t __ret_627; \
|
|
float32_t __s0_627 = __p0_627; \
|
|
float32x2_t __s1_627 = __p1_627; \
|
|
float32x2_t __rev1_627; __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, __lane_reverse_64_32); \
|
|
__ret_627 = vmulxs_f32(__s0_627, __noswap_vget_lane_f32(__rev1_627, __p2_627)); \
|
|
__ret_627; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxq_lane_f64(__p0_628, __p1_628, __p2_628) __extension__ ({ \
|
|
float64x2_t __ret_628; \
|
|
float64x2_t __s0_628 = __p0_628; \
|
|
float64x1_t __s1_628 = __p1_628; \
|
|
__ret_628 = vmulxq_f64(__s0_628, splatq_lane_f64(__s1_628, __p2_628)); \
|
|
__ret_628; \
|
|
})
|
|
#else
|
|
#define vmulxq_lane_f64(__p0_629, __p1_629, __p2_629) __extension__ ({ \
|
|
float64x2_t __ret_629; \
|
|
float64x2_t __s0_629 = __p0_629; \
|
|
float64x1_t __s1_629 = __p1_629; \
|
|
float64x2_t __rev0_629; __rev0_629 = __builtin_shufflevector(__s0_629, __s0_629, __lane_reverse_128_64); \
|
|
__ret_629 = __noswap_vmulxq_f64(__rev0_629, __noswap_splatq_lane_f64(__s1_629, __p2_629)); \
|
|
__ret_629 = __builtin_shufflevector(__ret_629, __ret_629, __lane_reverse_128_64); \
|
|
__ret_629; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxq_lane_f32(__p0_630, __p1_630, __p2_630) __extension__ ({ \
|
|
float32x4_t __ret_630; \
|
|
float32x4_t __s0_630 = __p0_630; \
|
|
float32x2_t __s1_630 = __p1_630; \
|
|
__ret_630 = vmulxq_f32(__s0_630, splatq_lane_f32(__s1_630, __p2_630)); \
|
|
__ret_630; \
|
|
})
|
|
#else
|
|
#define vmulxq_lane_f32(__p0_631, __p1_631, __p2_631) __extension__ ({ \
|
|
float32x4_t __ret_631; \
|
|
float32x4_t __s0_631 = __p0_631; \
|
|
float32x2_t __s1_631 = __p1_631; \
|
|
float32x4_t __rev0_631; __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, __lane_reverse_128_32); \
|
|
float32x2_t __rev1_631; __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, __lane_reverse_64_32); \
|
|
__ret_631 = __noswap_vmulxq_f32(__rev0_631, __noswap_splatq_lane_f32(__rev1_631, __p2_631)); \
|
|
__ret_631 = __builtin_shufflevector(__ret_631, __ret_631, __lane_reverse_128_32); \
|
|
__ret_631; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulx_lane_f32(__p0_632, __p1_632, __p2_632) __extension__ ({ \
|
|
float32x2_t __ret_632; \
|
|
float32x2_t __s0_632 = __p0_632; \
|
|
float32x2_t __s1_632 = __p1_632; \
|
|
__ret_632 = vmulx_f32(__s0_632, splat_lane_f32(__s1_632, __p2_632)); \
|
|
__ret_632; \
|
|
})
|
|
#else
|
|
#define vmulx_lane_f32(__p0_633, __p1_633, __p2_633) __extension__ ({ \
|
|
float32x2_t __ret_633; \
|
|
float32x2_t __s0_633 = __p0_633; \
|
|
float32x2_t __s1_633 = __p1_633; \
|
|
float32x2_t __rev0_633; __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, __lane_reverse_64_32); \
|
|
float32x2_t __rev1_633; __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, __lane_reverse_64_32); \
|
|
__ret_633 = __noswap_vmulx_f32(__rev0_633, __noswap_splat_lane_f32(__rev1_633, __p2_633)); \
|
|
__ret_633 = __builtin_shufflevector(__ret_633, __ret_633, __lane_reverse_64_32); \
|
|
__ret_633; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxd_laneq_f64(__p0_634, __p1_634, __p2_634) __extension__ ({ \
|
|
float64_t __ret_634; \
|
|
float64_t __s0_634 = __p0_634; \
|
|
float64x2_t __s1_634 = __p1_634; \
|
|
__ret_634 = vmulxd_f64(__s0_634, vgetq_lane_f64(__s1_634, __p2_634)); \
|
|
__ret_634; \
|
|
})
|
|
#else
|
|
#define vmulxd_laneq_f64(__p0_635, __p1_635, __p2_635) __extension__ ({ \
|
|
float64_t __ret_635; \
|
|
float64_t __s0_635 = __p0_635; \
|
|
float64x2_t __s1_635 = __p1_635; \
|
|
float64x2_t __rev1_635; __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, __lane_reverse_128_64); \
|
|
__ret_635 = vmulxd_f64(__s0_635, __noswap_vgetq_lane_f64(__rev1_635, __p2_635)); \
|
|
__ret_635; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxs_laneq_f32(__p0_636, __p1_636, __p2_636) __extension__ ({ \
|
|
float32_t __ret_636; \
|
|
float32_t __s0_636 = __p0_636; \
|
|
float32x4_t __s1_636 = __p1_636; \
|
|
__ret_636 = vmulxs_f32(__s0_636, vgetq_lane_f32(__s1_636, __p2_636)); \
|
|
__ret_636; \
|
|
})
|
|
#else
|
|
#define vmulxs_laneq_f32(__p0_637, __p1_637, __p2_637) __extension__ ({ \
|
|
float32_t __ret_637; \
|
|
float32_t __s0_637 = __p0_637; \
|
|
float32x4_t __s1_637 = __p1_637; \
|
|
float32x4_t __rev1_637; __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, __lane_reverse_128_32); \
|
|
__ret_637 = vmulxs_f32(__s0_637, __noswap_vgetq_lane_f32(__rev1_637, __p2_637)); \
|
|
__ret_637; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxq_laneq_f64(__p0_638, __p1_638, __p2_638) __extension__ ({ \
|
|
float64x2_t __ret_638; \
|
|
float64x2_t __s0_638 = __p0_638; \
|
|
float64x2_t __s1_638 = __p1_638; \
|
|
__ret_638 = vmulxq_f64(__s0_638, splatq_laneq_f64(__s1_638, __p2_638)); \
|
|
__ret_638; \
|
|
})
|
|
#else
|
|
#define vmulxq_laneq_f64(__p0_639, __p1_639, __p2_639) __extension__ ({ \
|
|
float64x2_t __ret_639; \
|
|
float64x2_t __s0_639 = __p0_639; \
|
|
float64x2_t __s1_639 = __p1_639; \
|
|
float64x2_t __rev0_639; __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, __lane_reverse_128_64); \
|
|
float64x2_t __rev1_639; __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, __lane_reverse_128_64); \
|
|
__ret_639 = __noswap_vmulxq_f64(__rev0_639, __noswap_splatq_laneq_f64(__rev1_639, __p2_639)); \
|
|
__ret_639 = __builtin_shufflevector(__ret_639, __ret_639, __lane_reverse_128_64); \
|
|
__ret_639; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulxq_laneq_f32(__p0_640, __p1_640, __p2_640) __extension__ ({ \
|
|
float32x4_t __ret_640; \
|
|
float32x4_t __s0_640 = __p0_640; \
|
|
float32x4_t __s1_640 = __p1_640; \
|
|
__ret_640 = vmulxq_f32(__s0_640, splatq_laneq_f32(__s1_640, __p2_640)); \
|
|
__ret_640; \
|
|
})
|
|
#else
|
|
#define vmulxq_laneq_f32(__p0_641, __p1_641, __p2_641) __extension__ ({ \
|
|
float32x4_t __ret_641; \
|
|
float32x4_t __s0_641 = __p0_641; \
|
|
float32x4_t __s1_641 = __p1_641; \
|
|
float32x4_t __rev0_641; __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, __lane_reverse_128_32); \
|
|
float32x4_t __rev1_641; __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, __lane_reverse_128_32); \
|
|
__ret_641 = __noswap_vmulxq_f32(__rev0_641, __noswap_splatq_laneq_f32(__rev1_641, __p2_641)); \
|
|
__ret_641 = __builtin_shufflevector(__ret_641, __ret_641, __lane_reverse_128_32); \
|
|
__ret_641; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulx_laneq_f32(__p0_642, __p1_642, __p2_642) __extension__ ({ \
|
|
float32x2_t __ret_642; \
|
|
float32x2_t __s0_642 = __p0_642; \
|
|
float32x4_t __s1_642 = __p1_642; \
|
|
__ret_642 = vmulx_f32(__s0_642, splat_laneq_f32(__s1_642, __p2_642)); \
|
|
__ret_642; \
|
|
})
|
|
#else
|
|
#define vmulx_laneq_f32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
|
|
float32x2_t __ret_643; \
|
|
float32x2_t __s0_643 = __p0_643; \
|
|
float32x4_t __s1_643 = __p1_643; \
|
|
float32x2_t __rev0_643; __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, __lane_reverse_64_32); \
|
|
float32x4_t __rev1_643; __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, __lane_reverse_128_32); \
|
|
__ret_643 = __noswap_vmulx_f32(__rev0_643, __noswap_splat_laneq_f32(__rev1_643, __p2_643)); \
|
|
__ret_643 = __builtin_shufflevector(__ret_643, __ret_643, __lane_reverse_64_32); \
|
|
__ret_643; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vnegq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vnegq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vnegq_s64(int64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vnegq_s64(int64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = -__rev0;
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vneg_f64(float64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vneg_s64(int64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = -__p0;
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64_t vnegd_s64(int64_t __p0) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vnegd_s64(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vpaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
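/*
 * Illustrative usage sketch (not part of the generated header): vpaddq_s32 adds
 * adjacent lane pairs, drawing the first two result lanes from its first operand
 * and the last two from its second, i.e. { a[0]+a[1], a[2]+a[3], b[0]+b[1], b[2]+b[3] }.
 * The helper name below is hypothetical.
 */
static inline __attribute__((target("neon"))) int32x4_t example_pairwise_sum_s32(int32x4_t a, int32x4_t b) {
  return vpaddq_s32(a, b);
}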
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64_t vpaddd_u64(uint64x2_t __p0) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vpaddd_u64(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64_t vpaddd_u64(uint64x2_t __p0) {
|
|
uint64_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vpaddd_u64(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64_t vpaddd_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vpaddd_f64(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64_t vpaddd_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vpaddd_f64(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64_t vpaddd_s64(int64x2_t __p0) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vpaddd_s64(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64_t vpaddd_s64(int64x2_t __p0) {
|
|
int64_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vpaddd_s64(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
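/*
 * Illustrative usage sketch (not part of the generated header): vpaddd_s64 reduces
 * an int64x2_t to a scalar by adding its two lanes. The helper name below is
 * hypothetical.
 */
static inline __attribute__((target("neon"))) int64_t example_horizontal_sum_s64(int64x2_t v) {
  return vpaddd_s64(v); /* v[0] + v[1] */
}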
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32_t vpadds_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vpadds_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32_t vpadds_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vpadds_f32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vpmaxq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64_t vpmaxqd_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vpmaxqd_f64(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64_t vpmaxqd_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vpmaxqd_f64(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32_t vpmaxs_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vpmaxs_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32_t vpmaxs_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vpmaxs_f32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vpmaxnmq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vpmaxnmq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vpmaxnmq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vpmaxnmq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vpmaxnm_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vpmaxnm_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64_t vpmaxnmqd_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vpmaxnmqd_f64(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64_t vpmaxnmqd_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vpmaxnmqd_f64(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32_t vpmaxnms_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vpmaxnms_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32_t vpmaxnms_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vpmaxnms_f32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
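/*
 * Illustrative usage sketch (not part of the generated header): vpmaxnm_f32 is a
 * pairwise maximum with maxNum semantics, so a quiet NaN lane loses to a numeric
 * lane instead of propagating; the result is { maxNum(a[0], a[1]), maxNum(b[0], b[1]) }.
 * The helper name below is hypothetical.
 */
static inline __attribute__((target("neon"))) float32x2_t example_pairwise_maxnm_f32(float32x2_t a, float32x2_t b) {
  return vpmaxnm_f32(a, b);
}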
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vpminq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64_t vpminqd_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vpminqd_f64(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64_t vpminqd_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vpminqd_f64(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32_t vpmins_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vpmins_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32_t vpmins_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vpmins_f32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vpminnmq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vpminnmq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vpminnmq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vpminnmq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vpminnm_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vpminnm_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64_t vpminnmqd_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vpminnmqd_f64(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64_t vpminnmqd_f64(float64x2_t __p0) {
|
|
float64_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vpminnmqd_f64(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32_t vpminnms_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vpminnms_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32_t vpminnms_f32(float32x2_t __p0) {
|
|
float32_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vpminnms_f32(__rev0));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqabsq_s64(int64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqabsq_v(__builtin_bit_cast(int8x16_t, __p0), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqabsq_s64(int64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqabsq_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64x1_t vqabs_s64(int64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vqabs_v(__builtin_bit_cast(int8x8_t, __p0), 3));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8_t vqabsb_s8(int8_t __p0) {
|
|
int8_t __ret;
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vqabsb_s8(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32_t vqabss_s32(int32_t __p0) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqabss_s32(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64_t vqabsd_s64(int64_t __p0) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqabsd_s64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16_t vqabsh_s16(int16_t __p0) {
|
|
int16_t __ret;
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vqabsh_s16(__p0));
|
|
return __ret;
|
|
}
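/*
 * Illustrative usage sketch (not part of the generated header): the scalar
 * saturating-absolute-value intrinsics clamp instead of wrapping, e.g.
 * vqabsb_s8(-128) returns 127 rather than overflowing back to -128. The helper
 * name below is hypothetical.
 */
static inline __attribute__((target("neon"))) int8_t example_saturating_abs_s8(int8_t x) {
  return vqabsb_s8(x);
}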
|
|
__ai __attribute__((target("neon"))) uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
|
|
uint8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqaddb_u8(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqadds_u32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vqaddd_u64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
|
|
uint16_t __ret;
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqaddh_u16(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
|
|
int8_t __ret;
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vqaddb_s8(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqadds_s32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqaddd_s64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
|
|
int16_t __ret;
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vqaddh_s16(__p0, __p1));
|
|
return __ret;
|
|
}
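/*
 * Illustrative usage sketch (not part of the generated header): the scalar
 * saturating-add intrinsics clamp to the type's range, e.g. vqaddb_s8(120, 20)
 * returns 127 instead of wrapping. The helper name below is hypothetical.
 */
static inline __attribute__((target("neon"))) int8_t example_saturating_add_s8(int8_t a, int8_t b) {
  return vqaddb_s8(a, b);
}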
|
|
__ai __attribute__((target("neon"))) int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmlals_s32(__p0, __p1, __p2));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2));
|
|
return __ret;
|
|
}
|
|
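/*
 * The vector intrinsics below are emitted in pairs: on little-endian targets
 * the underlying intrinsic is called directly, while the big-endian variant
 * reverses the lanes of each vector operand, calls the __noswap_ helper, and
 * reverses the lanes of the result before returning it.
 */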
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int64x2_t __ret;
  __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int32x4_t __ret;
  __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vqdmlal_high_lane_s32(__p0_644, __p1_644, __p2_644, __p3_644) __extension__ ({ \
  int64x2_t __ret_644; \
  int64x2_t __s0_644 = __p0_644; \
  int32x4_t __s1_644 = __p1_644; \
  int32x2_t __s2_644 = __p2_644; \
  __ret_644 = vqdmlal_s32(__s0_644, vget_high_s32(__s1_644), splat_lane_s32(__s2_644, __p3_644)); \
  __ret_644; \
})
#else
#define vqdmlal_high_lane_s32(__p0_645, __p1_645, __p2_645, __p3_645) __extension__ ({ \
  int64x2_t __ret_645; \
  int64x2_t __s0_645 = __p0_645; \
  int32x4_t __s1_645 = __p1_645; \
  int32x2_t __s2_645 = __p2_645; \
  int64x2_t __rev0_645; __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, __lane_reverse_128_64); \
  int32x4_t __rev1_645; __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, __lane_reverse_128_32); \
  int32x2_t __rev2_645; __rev2_645 = __builtin_shufflevector(__s2_645, __s2_645, __lane_reverse_64_32); \
  __ret_645 = __noswap_vqdmlal_s32(__rev0_645, __noswap_vget_high_s32(__rev1_645), __noswap_splat_lane_s32(__rev2_645, __p3_645)); \
  __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, __lane_reverse_128_64); \
  __ret_645; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vqdmlal_high_lane_s16(__p0_646, __p1_646, __p2_646, __p3_646) __extension__ ({ \
  int32x4_t __ret_646; \
  int32x4_t __s0_646 = __p0_646; \
  int16x8_t __s1_646 = __p1_646; \
  int16x4_t __s2_646 = __p2_646; \
  __ret_646 = vqdmlal_s16(__s0_646, vget_high_s16(__s1_646), splat_lane_s16(__s2_646, __p3_646)); \
  __ret_646; \
})
#else
#define vqdmlal_high_lane_s16(__p0_647, __p1_647, __p2_647, __p3_647) __extension__ ({ \
  int32x4_t __ret_647; \
  int32x4_t __s0_647 = __p0_647; \
  int16x8_t __s1_647 = __p1_647; \
  int16x4_t __s2_647 = __p2_647; \
  int32x4_t __rev0_647; __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, __lane_reverse_128_32); \
  int16x8_t __rev1_647; __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, __lane_reverse_128_16); \
  int16x4_t __rev2_647; __rev2_647 = __builtin_shufflevector(__s2_647, __s2_647, __lane_reverse_64_16); \
  __ret_647 = __noswap_vqdmlal_s16(__rev0_647, __noswap_vget_high_s16(__rev1_647), __noswap_splat_lane_s16(__rev2_647, __p3_647)); \
  __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, __lane_reverse_128_32); \
  __ret_647; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlal_high_laneq_s32(__p0_648, __p1_648, __p2_648, __p3_648) __extension__ ({ \
|
|
int64x2_t __ret_648; \
|
|
int64x2_t __s0_648 = __p0_648; \
|
|
int32x4_t __s1_648 = __p1_648; \
|
|
int32x4_t __s2_648 = __p2_648; \
|
|
__ret_648 = vqdmlal_s32(__s0_648, vget_high_s32(__s1_648), splat_laneq_s32(__s2_648, __p3_648)); \
|
|
__ret_648; \
|
|
})
|
|
#else
|
|
#define vqdmlal_high_laneq_s32(__p0_649, __p1_649, __p2_649, __p3_649) __extension__ ({ \
|
|
int64x2_t __ret_649; \
|
|
int64x2_t __s0_649 = __p0_649; \
|
|
int32x4_t __s1_649 = __p1_649; \
|
|
int32x4_t __s2_649 = __p2_649; \
|
|
int64x2_t __rev0_649; __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, __lane_reverse_128_64); \
|
|
int32x4_t __rev1_649; __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, __lane_reverse_128_32); \
|
|
int32x4_t __rev2_649; __rev2_649 = __builtin_shufflevector(__s2_649, __s2_649, __lane_reverse_128_32); \
|
|
__ret_649 = __noswap_vqdmlal_s32(__rev0_649, __noswap_vget_high_s32(__rev1_649), __noswap_splat_laneq_s32(__rev2_649, __p3_649)); \
|
|
__ret_649 = __builtin_shufflevector(__ret_649, __ret_649, __lane_reverse_128_64); \
|
|
__ret_649; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlal_high_laneq_s16(__p0_650, __p1_650, __p2_650, __p3_650) __extension__ ({ \
|
|
int32x4_t __ret_650; \
|
|
int32x4_t __s0_650 = __p0_650; \
|
|
int16x8_t __s1_650 = __p1_650; \
|
|
int16x8_t __s2_650 = __p2_650; \
|
|
__ret_650 = vqdmlal_s16(__s0_650, vget_high_s16(__s1_650), splat_laneq_s16(__s2_650, __p3_650)); \
|
|
__ret_650; \
|
|
})
|
|
#else
|
|
#define vqdmlal_high_laneq_s16(__p0_651, __p1_651, __p2_651, __p3_651) __extension__ ({ \
|
|
int32x4_t __ret_651; \
|
|
int32x4_t __s0_651 = __p0_651; \
|
|
int16x8_t __s1_651 = __p1_651; \
|
|
int16x8_t __s2_651 = __p2_651; \
|
|
int32x4_t __rev0_651; __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, __lane_reverse_128_32); \
|
|
int16x8_t __rev1_651; __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, __lane_reverse_128_16); \
|
|
int16x8_t __rev2_651; __rev2_651 = __builtin_shufflevector(__s2_651, __s2_651, __lane_reverse_128_16); \
|
|
__ret_651 = __noswap_vqdmlal_s16(__rev0_651, __noswap_vget_high_s16(__rev1_651), __noswap_splat_laneq_s16(__rev2_651, __p3_651)); \
|
|
__ret_651 = __builtin_shufflevector(__ret_651, __ret_651, __lane_reverse_128_32); \
|
|
__ret_651; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int32_t __s1 = __p1; \
|
|
int32x2_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int32_t __s1 = __p1; \
|
|
int32x2_t __s2 = __p2; \
|
|
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int16_t __s1 = __p1; \
|
|
int16x4_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int16_t __s1 = __p1; \
|
|
int16x4_t __s2 = __p2; \
|
|
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int32_t __s1 = __p1; \
|
|
int32x4_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int32_t __s1 = __p1; \
|
|
int32x4_t __s2 = __p2; \
|
|
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int16_t __s1 = __p1; \
|
|
int16x8_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int16_t __s1 = __p1; \
|
|
int16x8_t __s2 = __p2; \
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlal_laneq_s32(__p0_652, __p1_652, __p2_652, __p3_652) __extension__ ({ \
|
|
int64x2_t __ret_652; \
|
|
int64x2_t __s0_652 = __p0_652; \
|
|
int32x2_t __s1_652 = __p1_652; \
|
|
int32x4_t __s2_652 = __p2_652; \
|
|
__ret_652 = vqdmlal_s32(__s0_652, __s1_652, splat_laneq_s32(__s2_652, __p3_652)); \
|
|
__ret_652; \
|
|
})
|
|
#else
|
|
#define vqdmlal_laneq_s32(__p0_653, __p1_653, __p2_653, __p3_653) __extension__ ({ \
|
|
int64x2_t __ret_653; \
|
|
int64x2_t __s0_653 = __p0_653; \
|
|
int32x2_t __s1_653 = __p1_653; \
|
|
int32x4_t __s2_653 = __p2_653; \
|
|
int64x2_t __rev0_653; __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, __lane_reverse_128_64); \
|
|
int32x2_t __rev1_653; __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, __lane_reverse_64_32); \
|
|
int32x4_t __rev2_653; __rev2_653 = __builtin_shufflevector(__s2_653, __s2_653, __lane_reverse_128_32); \
|
|
__ret_653 = __noswap_vqdmlal_s32(__rev0_653, __rev1_653, __noswap_splat_laneq_s32(__rev2_653, __p3_653)); \
|
|
__ret_653 = __builtin_shufflevector(__ret_653, __ret_653, __lane_reverse_128_64); \
|
|
__ret_653; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlal_laneq_s16(__p0_654, __p1_654, __p2_654, __p3_654) __extension__ ({ \
|
|
int32x4_t __ret_654; \
|
|
int32x4_t __s0_654 = __p0_654; \
|
|
int16x4_t __s1_654 = __p1_654; \
|
|
int16x8_t __s2_654 = __p2_654; \
|
|
__ret_654 = vqdmlal_s16(__s0_654, __s1_654, splat_laneq_s16(__s2_654, __p3_654)); \
|
|
__ret_654; \
|
|
})
|
|
#else
|
|
#define vqdmlal_laneq_s16(__p0_655, __p1_655, __p2_655, __p3_655) __extension__ ({ \
|
|
int32x4_t __ret_655; \
|
|
int32x4_t __s0_655 = __p0_655; \
|
|
int16x4_t __s1_655 = __p1_655; \
|
|
int16x8_t __s2_655 = __p2_655; \
|
|
int32x4_t __rev0_655; __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, __lane_reverse_128_32); \
|
|
int16x4_t __rev1_655; __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, __lane_reverse_64_16); \
|
|
int16x8_t __rev2_655; __rev2_655 = __builtin_shufflevector(__s2_655, __s2_655, __lane_reverse_128_16); \
|
|
__ret_655 = __noswap_vqdmlal_s16(__rev0_655, __rev1_655, __noswap_splat_laneq_s16(__rev2_655, __p3_655)); \
|
|
__ret_655 = __builtin_shufflevector(__ret_655, __ret_655, __lane_reverse_128_32); \
|
|
__ret_655; \
|
|
})
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlsl_high_lane_s32(__p0_656, __p1_656, __p2_656, __p3_656) __extension__ ({ \
|
|
int64x2_t __ret_656; \
|
|
int64x2_t __s0_656 = __p0_656; \
|
|
int32x4_t __s1_656 = __p1_656; \
|
|
int32x2_t __s2_656 = __p2_656; \
|
|
__ret_656 = vqdmlsl_s32(__s0_656, vget_high_s32(__s1_656), splat_lane_s32(__s2_656, __p3_656)); \
|
|
__ret_656; \
|
|
})
|
|
#else
|
|
#define vqdmlsl_high_lane_s32(__p0_657, __p1_657, __p2_657, __p3_657) __extension__ ({ \
|
|
int64x2_t __ret_657; \
|
|
int64x2_t __s0_657 = __p0_657; \
|
|
int32x4_t __s1_657 = __p1_657; \
|
|
int32x2_t __s2_657 = __p2_657; \
|
|
int64x2_t __rev0_657; __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, __lane_reverse_128_64); \
|
|
int32x4_t __rev1_657; __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, __lane_reverse_128_32); \
|
|
int32x2_t __rev2_657; __rev2_657 = __builtin_shufflevector(__s2_657, __s2_657, __lane_reverse_64_32); \
|
|
__ret_657 = __noswap_vqdmlsl_s32(__rev0_657, __noswap_vget_high_s32(__rev1_657), __noswap_splat_lane_s32(__rev2_657, __p3_657)); \
|
|
__ret_657 = __builtin_shufflevector(__ret_657, __ret_657, __lane_reverse_128_64); \
|
|
__ret_657; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlsl_high_lane_s16(__p0_658, __p1_658, __p2_658, __p3_658) __extension__ ({ \
|
|
int32x4_t __ret_658; \
|
|
int32x4_t __s0_658 = __p0_658; \
|
|
int16x8_t __s1_658 = __p1_658; \
|
|
int16x4_t __s2_658 = __p2_658; \
|
|
__ret_658 = vqdmlsl_s16(__s0_658, vget_high_s16(__s1_658), splat_lane_s16(__s2_658, __p3_658)); \
|
|
__ret_658; \
|
|
})
|
|
#else
|
|
#define vqdmlsl_high_lane_s16(__p0_659, __p1_659, __p2_659, __p3_659) __extension__ ({ \
|
|
int32x4_t __ret_659; \
|
|
int32x4_t __s0_659 = __p0_659; \
|
|
int16x8_t __s1_659 = __p1_659; \
|
|
int16x4_t __s2_659 = __p2_659; \
|
|
int32x4_t __rev0_659; __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, __lane_reverse_128_32); \
|
|
int16x8_t __rev1_659; __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, __lane_reverse_128_16); \
|
|
int16x4_t __rev2_659; __rev2_659 = __builtin_shufflevector(__s2_659, __s2_659, __lane_reverse_64_16); \
|
|
__ret_659 = __noswap_vqdmlsl_s16(__rev0_659, __noswap_vget_high_s16(__rev1_659), __noswap_splat_lane_s16(__rev2_659, __p3_659)); \
|
|
__ret_659 = __builtin_shufflevector(__ret_659, __ret_659, __lane_reverse_128_32); \
|
|
__ret_659; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlsl_high_laneq_s32(__p0_660, __p1_660, __p2_660, __p3_660) __extension__ ({ \
|
|
int64x2_t __ret_660; \
|
|
int64x2_t __s0_660 = __p0_660; \
|
|
int32x4_t __s1_660 = __p1_660; \
|
|
int32x4_t __s2_660 = __p2_660; \
|
|
__ret_660 = vqdmlsl_s32(__s0_660, vget_high_s32(__s1_660), splat_laneq_s32(__s2_660, __p3_660)); \
|
|
__ret_660; \
|
|
})
|
|
#else
|
|
#define vqdmlsl_high_laneq_s32(__p0_661, __p1_661, __p2_661, __p3_661) __extension__ ({ \
|
|
int64x2_t __ret_661; \
|
|
int64x2_t __s0_661 = __p0_661; \
|
|
int32x4_t __s1_661 = __p1_661; \
|
|
int32x4_t __s2_661 = __p2_661; \
|
|
int64x2_t __rev0_661; __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, __lane_reverse_128_64); \
|
|
int32x4_t __rev1_661; __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, __lane_reverse_128_32); \
|
|
int32x4_t __rev2_661; __rev2_661 = __builtin_shufflevector(__s2_661, __s2_661, __lane_reverse_128_32); \
|
|
__ret_661 = __noswap_vqdmlsl_s32(__rev0_661, __noswap_vget_high_s32(__rev1_661), __noswap_splat_laneq_s32(__rev2_661, __p3_661)); \
|
|
__ret_661 = __builtin_shufflevector(__ret_661, __ret_661, __lane_reverse_128_64); \
|
|
__ret_661; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlsl_high_laneq_s16(__p0_662, __p1_662, __p2_662, __p3_662) __extension__ ({ \
|
|
int32x4_t __ret_662; \
|
|
int32x4_t __s0_662 = __p0_662; \
|
|
int16x8_t __s1_662 = __p1_662; \
|
|
int16x8_t __s2_662 = __p2_662; \
|
|
__ret_662 = vqdmlsl_s16(__s0_662, vget_high_s16(__s1_662), splat_laneq_s16(__s2_662, __p3_662)); \
|
|
__ret_662; \
|
|
})
|
|
#else
|
|
#define vqdmlsl_high_laneq_s16(__p0_663, __p1_663, __p2_663, __p3_663) __extension__ ({ \
|
|
int32x4_t __ret_663; \
|
|
int32x4_t __s0_663 = __p0_663; \
|
|
int16x8_t __s1_663 = __p1_663; \
|
|
int16x8_t __s2_663 = __p2_663; \
|
|
int32x4_t __rev0_663; __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, __lane_reverse_128_32); \
|
|
int16x8_t __rev1_663; __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, __lane_reverse_128_16); \
|
|
int16x8_t __rev2_663; __rev2_663 = __builtin_shufflevector(__s2_663, __s2_663, __lane_reverse_128_16); \
|
|
__ret_663 = __noswap_vqdmlsl_s16(__rev0_663, __noswap_vget_high_s16(__rev1_663), __noswap_splat_laneq_s16(__rev2_663, __p3_663)); \
|
|
__ret_663 = __builtin_shufflevector(__ret_663, __ret_663, __lane_reverse_128_32); \
|
|
__ret_663; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int32_t __s1 = __p1; \
|
|
int32x2_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int32_t __s1 = __p1; \
|
|
int32x2_t __s2 = __p2; \
|
|
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int16_t __s1 = __p1; \
|
|
int16x4_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int16_t __s1 = __p1; \
|
|
int16x4_t __s2 = __p2; \
|
|
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int32_t __s1 = __p1; \
|
|
int32x4_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int32_t __s1 = __p1; \
|
|
int32x4_t __s2 = __p2; \
|
|
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int16_t __s1 = __p1; \
|
|
int16x8_t __s2 = __p2; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
int16_t __s1 = __p1; \
|
|
int16x8_t __s2 = __p2; \
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlsl_laneq_s32(__p0_664, __p1_664, __p2_664, __p3_664) __extension__ ({ \
|
|
int64x2_t __ret_664; \
|
|
int64x2_t __s0_664 = __p0_664; \
|
|
int32x2_t __s1_664 = __p1_664; \
|
|
int32x4_t __s2_664 = __p2_664; \
|
|
__ret_664 = vqdmlsl_s32(__s0_664, __s1_664, splat_laneq_s32(__s2_664, __p3_664)); \
|
|
__ret_664; \
|
|
})
|
|
#else
|
|
#define vqdmlsl_laneq_s32(__p0_665, __p1_665, __p2_665, __p3_665) __extension__ ({ \
|
|
int64x2_t __ret_665; \
|
|
int64x2_t __s0_665 = __p0_665; \
|
|
int32x2_t __s1_665 = __p1_665; \
|
|
int32x4_t __s2_665 = __p2_665; \
|
|
int64x2_t __rev0_665; __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, __lane_reverse_128_64); \
|
|
int32x2_t __rev1_665; __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, __lane_reverse_64_32); \
|
|
int32x4_t __rev2_665; __rev2_665 = __builtin_shufflevector(__s2_665, __s2_665, __lane_reverse_128_32); \
|
|
__ret_665 = __noswap_vqdmlsl_s32(__rev0_665, __rev1_665, __noswap_splat_laneq_s32(__rev2_665, __p3_665)); \
|
|
__ret_665 = __builtin_shufflevector(__ret_665, __ret_665, __lane_reverse_128_64); \
|
|
__ret_665; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmlsl_laneq_s16(__p0_666, __p1_666, __p2_666, __p3_666) __extension__ ({ \
|
|
int32x4_t __ret_666; \
|
|
int32x4_t __s0_666 = __p0_666; \
|
|
int16x4_t __s1_666 = __p1_666; \
|
|
int16x8_t __s2_666 = __p2_666; \
|
|
__ret_666 = vqdmlsl_s16(__s0_666, __s1_666, splat_laneq_s16(__s2_666, __p3_666)); \
|
|
__ret_666; \
|
|
})
|
|
#else
|
|
#define vqdmlsl_laneq_s16(__p0_667, __p1_667, __p2_667, __p3_667) __extension__ ({ \
|
|
int32x4_t __ret_667; \
|
|
int32x4_t __s0_667 = __p0_667; \
|
|
int16x4_t __s1_667 = __p1_667; \
|
|
int16x8_t __s2_667 = __p2_667; \
|
|
int32x4_t __rev0_667; __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, __lane_reverse_128_32); \
|
|
int16x4_t __rev1_667; __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, __lane_reverse_64_16); \
|
|
int16x8_t __rev2_667; __rev2_667 = __builtin_shufflevector(__s2_667, __s2_667, __lane_reverse_128_16); \
|
|
__ret_667 = __noswap_vqdmlsl_s16(__rev0_667, __rev1_667, __noswap_splat_laneq_s16(__rev2_667, __p3_667)); \
|
|
__ret_667 = __builtin_shufflevector(__ret_667, __ret_667, __lane_reverse_128_32); \
|
|
__ret_667; \
|
|
})
|
|
#endif
|
|
|
|
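/*
 * Scalar saturating doubling multiply-high helpers. The vqdmulh*_lane_* and
 * vqdmulh*_laneq_* macros further below extract a single lane with
 * vget_lane_* / vgetq_lane_* and forward it to these functions.
 */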
__ai __attribute__((target("neon"))) int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmulhs_s32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
|
|
int16_t __ret;
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vqdmulhh_s16(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmulhq_lane_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmulhq_lane_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqdmulhq_lane_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqdmulhq_lane_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqdmulh_lane_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqdmulh_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqdmulh_lane_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqdmulh_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulhs_lane_s32(__p0_668, __p1_668, __p2_668) __extension__ ({ \
|
|
int32_t __ret_668; \
|
|
int32_t __s0_668 = __p0_668; \
|
|
int32x2_t __s1_668 = __p1_668; \
|
|
__ret_668 = vqdmulhs_s32(__s0_668, vget_lane_s32(__s1_668, __p2_668)); \
|
|
__ret_668; \
|
|
})
|
|
#else
|
|
#define vqdmulhs_lane_s32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
|
|
int32_t __ret_669; \
|
|
int32_t __s0_669 = __p0_669; \
|
|
int32x2_t __s1_669 = __p1_669; \
|
|
int32x2_t __rev1_669; __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, __lane_reverse_64_32); \
|
|
__ret_669 = vqdmulhs_s32(__s0_669, __noswap_vget_lane_s32(__rev1_669, __p2_669)); \
|
|
__ret_669; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulhh_lane_s16(__p0_670, __p1_670, __p2_670) __extension__ ({ \
|
|
int16_t __ret_670; \
|
|
int16_t __s0_670 = __p0_670; \
|
|
int16x4_t __s1_670 = __p1_670; \
|
|
__ret_670 = vqdmulhh_s16(__s0_670, vget_lane_s16(__s1_670, __p2_670)); \
|
|
__ret_670; \
|
|
})
|
|
#else
|
|
#define vqdmulhh_lane_s16(__p0_671, __p1_671, __p2_671) __extension__ ({ \
|
|
int16_t __ret_671; \
|
|
int16_t __s0_671 = __p0_671; \
|
|
int16x4_t __s1_671 = __p1_671; \
|
|
int16x4_t __rev1_671; __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, __lane_reverse_64_16); \
|
|
__ret_671 = vqdmulhh_s16(__s0_671, __noswap_vget_lane_s16(__rev1_671, __p2_671)); \
|
|
__ret_671; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulhs_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \
|
|
int32_t __ret_672; \
|
|
int32_t __s0_672 = __p0_672; \
|
|
int32x4_t __s1_672 = __p1_672; \
|
|
__ret_672 = vqdmulhs_s32(__s0_672, vgetq_lane_s32(__s1_672, __p2_672)); \
|
|
__ret_672; \
|
|
})
|
|
#else
|
|
#define vqdmulhs_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \
|
|
int32_t __ret_673; \
|
|
int32_t __s0_673 = __p0_673; \
|
|
int32x4_t __s1_673 = __p1_673; \
|
|
int32x4_t __rev1_673; __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, __lane_reverse_128_32); \
|
|
__ret_673 = vqdmulhs_s32(__s0_673, __noswap_vgetq_lane_s32(__rev1_673, __p2_673)); \
|
|
__ret_673; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulhh_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
|
|
int16_t __ret_674; \
|
|
int16_t __s0_674 = __p0_674; \
|
|
int16x8_t __s1_674 = __p1_674; \
|
|
__ret_674 = vqdmulhh_s16(__s0_674, vgetq_lane_s16(__s1_674, __p2_674)); \
|
|
__ret_674; \
|
|
})
|
|
#else
|
|
#define vqdmulhh_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \
|
|
int16_t __ret_675; \
|
|
int16_t __s0_675 = __p0_675; \
|
|
int16x8_t __s1_675 = __p1_675; \
|
|
int16x8_t __rev1_675; __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, __lane_reverse_128_16); \
|
|
__ret_675 = vqdmulhh_s16(__s0_675, __noswap_vgetq_lane_s16(__rev1_675, __p2_675)); \
|
|
__ret_675; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmulhq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqdmulhq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqdmulhq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqdmulhq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqdmulh_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqdmulh_laneq_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqdmulh_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqdmulh_laneq_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqdmulls_s32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqdmullh_s16(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int64x2_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int32x4_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmull_high_lane_s32(__p0_676, __p1_676, __p2_676) __extension__ ({ \
|
|
int64x2_t __ret_676; \
|
|
int32x4_t __s0_676 = __p0_676; \
|
|
int32x2_t __s1_676 = __p1_676; \
|
|
__ret_676 = vqdmull_s32(vget_high_s32(__s0_676), splat_lane_s32(__s1_676, __p2_676)); \
|
|
__ret_676; \
|
|
})
|
|
#else
|
|
#define vqdmull_high_lane_s32(__p0_677, __p1_677, __p2_677) __extension__ ({ \
|
|
int64x2_t __ret_677; \
|
|
int32x4_t __s0_677 = __p0_677; \
|
|
int32x2_t __s1_677 = __p1_677; \
|
|
int32x4_t __rev0_677; __rev0_677 = __builtin_shufflevector(__s0_677, __s0_677, __lane_reverse_128_32); \
|
|
int32x2_t __rev1_677; __rev1_677 = __builtin_shufflevector(__s1_677, __s1_677, __lane_reverse_64_32); \
|
|
__ret_677 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_677), __noswap_splat_lane_s32(__rev1_677, __p2_677)); \
|
|
__ret_677 = __builtin_shufflevector(__ret_677, __ret_677, __lane_reverse_128_64); \
|
|
__ret_677; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmull_high_lane_s16(__p0_678, __p1_678, __p2_678) __extension__ ({ \
|
|
int32x4_t __ret_678; \
|
|
int16x8_t __s0_678 = __p0_678; \
|
|
int16x4_t __s1_678 = __p1_678; \
|
|
__ret_678 = vqdmull_s16(vget_high_s16(__s0_678), splat_lane_s16(__s1_678, __p2_678)); \
|
|
__ret_678; \
|
|
})
|
|
#else
|
|
#define vqdmull_high_lane_s16(__p0_679, __p1_679, __p2_679) __extension__ ({ \
|
|
int32x4_t __ret_679; \
|
|
int16x8_t __s0_679 = __p0_679; \
|
|
int16x4_t __s1_679 = __p1_679; \
|
|
int16x8_t __rev0_679; __rev0_679 = __builtin_shufflevector(__s0_679, __s0_679, __lane_reverse_128_16); \
|
|
int16x4_t __rev1_679; __rev1_679 = __builtin_shufflevector(__s1_679, __s1_679, __lane_reverse_64_16); \
|
|
__ret_679 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_679), __noswap_splat_lane_s16(__rev1_679, __p2_679)); \
|
|
__ret_679 = __builtin_shufflevector(__ret_679, __ret_679, __lane_reverse_128_32); \
|
|
__ret_679; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmull_high_laneq_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \
|
|
int64x2_t __ret_680; \
|
|
int32x4_t __s0_680 = __p0_680; \
|
|
int32x4_t __s1_680 = __p1_680; \
|
|
__ret_680 = vqdmull_s32(vget_high_s32(__s0_680), splat_laneq_s32(__s1_680, __p2_680)); \
|
|
__ret_680; \
|
|
})
|
|
#else
|
|
#define vqdmull_high_laneq_s32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
|
|
int64x2_t __ret_681; \
|
|
int32x4_t __s0_681 = __p0_681; \
|
|
int32x4_t __s1_681 = __p1_681; \
|
|
int32x4_t __rev0_681; __rev0_681 = __builtin_shufflevector(__s0_681, __s0_681, __lane_reverse_128_32); \
|
|
int32x4_t __rev1_681; __rev1_681 = __builtin_shufflevector(__s1_681, __s1_681, __lane_reverse_128_32); \
|
|
__ret_681 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_681), __noswap_splat_laneq_s32(__rev1_681, __p2_681)); \
|
|
__ret_681 = __builtin_shufflevector(__ret_681, __ret_681, __lane_reverse_128_64); \
|
|
__ret_681; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmull_high_laneq_s16(__p0_682, __p1_682, __p2_682) __extension__ ({ \
|
|
int32x4_t __ret_682; \
|
|
int16x8_t __s0_682 = __p0_682; \
|
|
int16x8_t __s1_682 = __p1_682; \
|
|
__ret_682 = vqdmull_s16(vget_high_s16(__s0_682), splat_laneq_s16(__s1_682, __p2_682)); \
|
|
__ret_682; \
|
|
})
|
|
#else
|
|
#define vqdmull_high_laneq_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \
|
|
int32x4_t __ret_683; \
|
|
int16x8_t __s0_683 = __p0_683; \
|
|
int16x8_t __s1_683 = __p1_683; \
|
|
int16x8_t __rev0_683; __rev0_683 = __builtin_shufflevector(__s0_683, __s0_683, __lane_reverse_128_16); \
|
|
int16x8_t __rev1_683; __rev1_683 = __builtin_shufflevector(__s1_683, __s1_683, __lane_reverse_128_16); \
|
|
__ret_683 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_683), __noswap_splat_laneq_s16(__rev1_683, __p2_683)); \
|
|
__ret_683 = __builtin_shufflevector(__ret_683, __ret_683, __lane_reverse_128_32); \
|
|
__ret_683; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
|
|
int64x2_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
|
|
int32x4_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulls_lane_s32(__p0_684, __p1_684, __p2_684) __extension__ ({ \
|
|
int64_t __ret_684; \
|
|
int32_t __s0_684 = __p0_684; \
|
|
int32x2_t __s1_684 = __p1_684; \
|
|
__ret_684 = vqdmulls_s32(__s0_684, vget_lane_s32(__s1_684, __p2_684)); \
|
|
__ret_684; \
|
|
})
|
|
#else
|
|
#define vqdmulls_lane_s32(__p0_685, __p1_685, __p2_685) __extension__ ({ \
|
|
int64_t __ret_685; \
|
|
int32_t __s0_685 = __p0_685; \
|
|
int32x2_t __s1_685 = __p1_685; \
|
|
int32x2_t __rev1_685; __rev1_685 = __builtin_shufflevector(__s1_685, __s1_685, __lane_reverse_64_32); \
|
|
__ret_685 = vqdmulls_s32(__s0_685, __noswap_vget_lane_s32(__rev1_685, __p2_685)); \
|
|
__ret_685; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmullh_lane_s16(__p0_686, __p1_686, __p2_686) __extension__ ({ \
|
|
int32_t __ret_686; \
|
|
int16_t __s0_686 = __p0_686; \
|
|
int16x4_t __s1_686 = __p1_686; \
|
|
__ret_686 = vqdmullh_s16(__s0_686, vget_lane_s16(__s1_686, __p2_686)); \
|
|
__ret_686; \
|
|
})
|
|
#else
|
|
#define vqdmullh_lane_s16(__p0_687, __p1_687, __p2_687) __extension__ ({ \
|
|
int32_t __ret_687; \
|
|
int16_t __s0_687 = __p0_687; \
|
|
int16x4_t __s1_687 = __p1_687; \
|
|
int16x4_t __rev1_687; __rev1_687 = __builtin_shufflevector(__s1_687, __s1_687, __lane_reverse_64_16); \
|
|
__ret_687 = vqdmullh_s16(__s0_687, __noswap_vget_lane_s16(__rev1_687, __p2_687)); \
|
|
__ret_687; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmulls_laneq_s32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
|
|
int64_t __ret_688; \
|
|
int32_t __s0_688 = __p0_688; \
|
|
int32x4_t __s1_688 = __p1_688; \
|
|
__ret_688 = vqdmulls_s32(__s0_688, vgetq_lane_s32(__s1_688, __p2_688)); \
|
|
__ret_688; \
|
|
})
|
|
#else
|
|
#define vqdmulls_laneq_s32(__p0_689, __p1_689, __p2_689) __extension__ ({ \
|
|
int64_t __ret_689; \
|
|
int32_t __s0_689 = __p0_689; \
|
|
int32x4_t __s1_689 = __p1_689; \
|
|
int32x4_t __rev1_689; __rev1_689 = __builtin_shufflevector(__s1_689, __s1_689, __lane_reverse_128_32); \
|
|
__ret_689 = vqdmulls_s32(__s0_689, __noswap_vgetq_lane_s32(__rev1_689, __p2_689)); \
|
|
__ret_689; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmullh_laneq_s16(__p0_690, __p1_690, __p2_690) __extension__ ({ \
|
|
int32_t __ret_690; \
|
|
int16_t __s0_690 = __p0_690; \
|
|
int16x8_t __s1_690 = __p1_690; \
|
|
__ret_690 = vqdmullh_s16(__s0_690, vgetq_lane_s16(__s1_690, __p2_690)); \
|
|
__ret_690; \
|
|
})
|
|
#else
|
|
#define vqdmullh_laneq_s16(__p0_691, __p1_691, __p2_691) __extension__ ({ \
|
|
int32_t __ret_691; \
|
|
int16_t __s0_691 = __p0_691; \
|
|
int16x8_t __s1_691 = __p1_691; \
|
|
int16x8_t __rev1_691; __rev1_691 = __builtin_shufflevector(__s1_691, __s1_691, __lane_reverse_128_16); \
|
|
__ret_691 = vqdmullh_s16(__s0_691, __noswap_vgetq_lane_s16(__rev1_691, __p2_691)); \
|
|
__ret_691; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmull_laneq_s32(__p0_692, __p1_692, __p2_692) __extension__ ({ \
|
|
int64x2_t __ret_692; \
|
|
int32x2_t __s0_692 = __p0_692; \
|
|
int32x4_t __s1_692 = __p1_692; \
|
|
__ret_692 = vqdmull_s32(__s0_692, splat_laneq_s32(__s1_692, __p2_692)); \
|
|
__ret_692; \
|
|
})
|
|
#else
|
|
#define vqdmull_laneq_s32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
|
|
int64x2_t __ret_693; \
|
|
int32x2_t __s0_693 = __p0_693; \
|
|
int32x4_t __s1_693 = __p1_693; \
|
|
int32x2_t __rev0_693; __rev0_693 = __builtin_shufflevector(__s0_693, __s0_693, __lane_reverse_64_32); \
|
|
int32x4_t __rev1_693; __rev1_693 = __builtin_shufflevector(__s1_693, __s1_693, __lane_reverse_128_32); \
|
|
__ret_693 = __noswap_vqdmull_s32(__rev0_693, __noswap_splat_laneq_s32(__rev1_693, __p2_693)); \
|
|
__ret_693 = __builtin_shufflevector(__ret_693, __ret_693, __lane_reverse_128_64); \
|
|
__ret_693; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqdmull_laneq_s16(__p0_694, __p1_694, __p2_694) __extension__ ({ \
|
|
int32x4_t __ret_694; \
|
|
int16x4_t __s0_694 = __p0_694; \
|
|
int16x8_t __s1_694 = __p1_694; \
|
|
__ret_694 = vqdmull_s16(__s0_694, splat_laneq_s16(__s1_694, __p2_694)); \
|
|
__ret_694; \
|
|
})
|
|
#else
|
|
#define vqdmull_laneq_s16(__p0_695, __p1_695, __p2_695) __extension__ ({ \
|
|
int32x4_t __ret_695; \
|
|
int16x4_t __s0_695 = __p0_695; \
|
|
int16x8_t __s1_695 = __p1_695; \
|
|
int16x4_t __rev0_695; __rev0_695 = __builtin_shufflevector(__s0_695, __s0_695, __lane_reverse_64_16); \
|
|
int16x8_t __rev1_695; __rev1_695 = __builtin_shufflevector(__s1_695, __s1_695, __lane_reverse_128_16); \
|
|
__ret_695 = __noswap_vqdmull_s16(__rev0_695, __noswap_splat_laneq_s16(__rev1_695, __p2_695)); \
|
|
__ret_695 = __builtin_shufflevector(__ret_695, __ret_695, __lane_reverse_128_32); \
|
|
__ret_695; \
|
|
})
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) int16_t vqmovns_s32(int32_t __p0) {
|
|
int16_t __ret;
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vqmovns_s32(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32_t vqmovnd_s64(int64_t __p0) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqmovnd_s64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8_t vqmovnh_s16(int16_t __p0) {
|
|
int8_t __ret;
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vqmovnh_s16(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16_t vqmovns_u32(uint32_t __p0) {
|
|
uint16_t __ret;
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqmovns_u32(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vqmovnd_u64(uint64_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqmovnd_u64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8_t vqmovnh_u16(uint16_t __p0) {
|
|
uint8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqmovnh_u16(__p0));
|
|
return __ret;
|
|
}
|
|
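/*
 * The vqmovn_high_* intrinsics below saturate-narrow the second (wider)
 * argument and place it in the high half of the returned vector, keeping the
 * first argument as the low half. Illustrative use (variable names are only
 * examples):
 *
 *   uint16x4_t lo  = vqmovn_u32(acc_lo);
 *   uint16x8_t out = vqmovn_high_u32(lo, acc_hi);
 */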
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = vcombine_u16(__p0, vqmovn_u32(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = vcombine_u32(__p0, vqmovn_u64(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = vcombine_u8(__p0, vqmovn_u16(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = vcombine_s16(__p0, vqmovn_s32(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = vcombine_s32(__p0, vqmovn_s64(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = vcombine_s8(__p0, vqmovn_s16(__p1));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint16_t vqmovuns_s32(int32_t __p0) {
|
|
uint16_t __ret;
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqmovuns_s32(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vqmovund_s64(int64_t __p0) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqmovund_s64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8_t vqmovunh_s16(int16_t __p0) {
|
|
uint8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqmovunh_s16(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
  uint16x8_t __ret;
  __ret = vcombine_u16(__builtin_bit_cast(uint16x4_t, __p0), vqmovun_s32(__p1));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
  uint16x8_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __noswap_vcombine_u16(__builtin_bit_cast(uint16x4_t, __rev0), __noswap_vqmovun_s32(__rev1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
  uint32x4_t __ret;
  __ret = vcombine_u32(__builtin_bit_cast(uint32x2_t, __p0), vqmovun_s64(__p1));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
  uint32x4_t __ret;
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __noswap_vcombine_u32(__builtin_bit_cast(uint32x2_t, __rev0), __noswap_vqmovun_s64(__rev1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
  uint8x16_t __ret;
  __ret = vcombine_u8(__builtin_bit_cast(uint8x8_t, __p0), vqmovun_s16(__p1));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
  uint8x16_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __noswap_vcombine_u8(__builtin_bit_cast(uint8x8_t, __rev0), __noswap_vqmovun_s16(__rev1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

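/* vqneg* (saturating negate): negate with saturation, so the most negative
 * value maps to the most positive value instead of wrapping. */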
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vqnegq_s64(int64x2_t __p0) {
  int64x2_t __ret;
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqnegq_v(__builtin_bit_cast(int8x16_t, __p0), 35));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vqnegq_s64(int64x2_t __p0) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  __ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vqnegq_v(__builtin_bit_cast(int8x16_t, __rev0), 35));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) int64x1_t vqneg_s64(int64x1_t __p0) {
  int64x1_t __ret;
  __ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vqneg_v(__builtin_bit_cast(int8x8_t, __p0), 3));
  return __ret;
}
__ai __attribute__((target("neon"))) int8_t vqnegb_s8(int8_t __p0) {
  int8_t __ret;
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vqnegb_s8(__p0));
  return __ret;
}
__ai __attribute__((target("neon"))) int32_t vqnegs_s32(int32_t __p0) {
  int32_t __ret;
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vqnegs_s32(__p0));
  return __ret;
}
__ai __attribute__((target("neon"))) int64_t vqnegd_s64(int64_t __p0) {
  int64_t __ret;
  __ret = __builtin_bit_cast(int64_t, __builtin_neon_vqnegd_s64(__p0));
  return __ret;
}
__ai __attribute__((target("neon"))) int16_t vqnegh_s16(int16_t __p0) {
  int16_t __ret;
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vqnegh_s16(__p0));
  return __ret;
}

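/* vqrdmulh* (saturating rounding doubling multiply high): multiply, double,
 * round, and return the high half of the result with saturation. */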
__ai __attribute__((target("neon"))) int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
  int32_t __ret;
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vqrdmulhs_s32(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
  int16_t __ret;
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vqrdmulhh_s16(__p0, __p1));
  return __ret;
}

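/* The _lane/_laneq forms below multiply every element by one selected lane of
 * the second operand; _laneq selects the lane from a 128-bit vector. */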
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmulhq_lane_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmulhq_lane_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmulhq_lane_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmulhq_lane_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmulh_lane_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x2_t __s1 = __p1; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmulh_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmulh_lane_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x4_t __s1 = __p1; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmulh_lane_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulhs_lane_s32(__p0_696, __p1_696, __p2_696) __extension__ ({ \
|
|
int32_t __ret_696; \
|
|
int32_t __s0_696 = __p0_696; \
|
|
int32x2_t __s1_696 = __p1_696; \
|
|
__ret_696 = vqrdmulhs_s32(__s0_696, vget_lane_s32(__s1_696, __p2_696)); \
|
|
__ret_696; \
|
|
})
|
|
#else
|
|
#define vqrdmulhs_lane_s32(__p0_697, __p1_697, __p2_697) __extension__ ({ \
|
|
int32_t __ret_697; \
|
|
int32_t __s0_697 = __p0_697; \
|
|
int32x2_t __s1_697 = __p1_697; \
|
|
int32x2_t __rev1_697; __rev1_697 = __builtin_shufflevector(__s1_697, __s1_697, __lane_reverse_64_32); \
|
|
__ret_697 = vqrdmulhs_s32(__s0_697, __noswap_vget_lane_s32(__rev1_697, __p2_697)); \
|
|
__ret_697; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulhh_lane_s16(__p0_698, __p1_698, __p2_698) __extension__ ({ \
|
|
int16_t __ret_698; \
|
|
int16_t __s0_698 = __p0_698; \
|
|
int16x4_t __s1_698 = __p1_698; \
|
|
__ret_698 = vqrdmulhh_s16(__s0_698, vget_lane_s16(__s1_698, __p2_698)); \
|
|
__ret_698; \
|
|
})
|
|
#else
|
|
#define vqrdmulhh_lane_s16(__p0_699, __p1_699, __p2_699) __extension__ ({ \
|
|
int16_t __ret_699; \
|
|
int16_t __s0_699 = __p0_699; \
|
|
int16x4_t __s1_699 = __p1_699; \
|
|
int16x4_t __rev1_699; __rev1_699 = __builtin_shufflevector(__s1_699, __s1_699, __lane_reverse_64_16); \
|
|
__ret_699 = vqrdmulhh_s16(__s0_699, __noswap_vget_lane_s16(__rev1_699, __p2_699)); \
|
|
__ret_699; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulhs_laneq_s32(__p0_700, __p1_700, __p2_700) __extension__ ({ \
|
|
int32_t __ret_700; \
|
|
int32_t __s0_700 = __p0_700; \
|
|
int32x4_t __s1_700 = __p1_700; \
|
|
__ret_700 = vqrdmulhs_s32(__s0_700, vgetq_lane_s32(__s1_700, __p2_700)); \
|
|
__ret_700; \
|
|
})
|
|
#else
|
|
#define vqrdmulhs_laneq_s32(__p0_701, __p1_701, __p2_701) __extension__ ({ \
|
|
int32_t __ret_701; \
|
|
int32_t __s0_701 = __p0_701; \
|
|
int32x4_t __s1_701 = __p1_701; \
|
|
int32x4_t __rev1_701; __rev1_701 = __builtin_shufflevector(__s1_701, __s1_701, __lane_reverse_128_32); \
|
|
__ret_701 = vqrdmulhs_s32(__s0_701, __noswap_vgetq_lane_s32(__rev1_701, __p2_701)); \
|
|
__ret_701; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulhh_laneq_s16(__p0_702, __p1_702, __p2_702) __extension__ ({ \
|
|
int16_t __ret_702; \
|
|
int16_t __s0_702 = __p0_702; \
|
|
int16x8_t __s1_702 = __p1_702; \
|
|
__ret_702 = vqrdmulhh_s16(__s0_702, vgetq_lane_s16(__s1_702, __p2_702)); \
|
|
__ret_702; \
|
|
})
|
|
#else
|
|
#define vqrdmulhh_laneq_s16(__p0_703, __p1_703, __p2_703) __extension__ ({ \
|
|
int16_t __ret_703; \
|
|
int16_t __s0_703 = __p0_703; \
|
|
int16x8_t __s1_703 = __p1_703; \
|
|
int16x8_t __rev1_703; __rev1_703 = __builtin_shufflevector(__s1_703, __s1_703, __lane_reverse_128_16); \
|
|
__ret_703 = vqrdmulhh_s16(__s0_703, __noswap_vgetq_lane_s16(__rev1_703, __p2_703)); \
|
|
__ret_703; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmulhq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 34)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x4_t __ret; \
|
|
int32x4_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vqrdmulhq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 34)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmulhq_laneq_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 33)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x8_t __ret; \
|
|
int16x8_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_16); \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vqrdmulhq_laneq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 33)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmulh_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
|
|
int32x2_t __ret; \
|
|
int32x2_t __s0 = __p0; \
|
|
int32x4_t __s1 = __p1; \
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_32); \
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
|
|
__ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vqrdmulh_laneq_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmulh_laneq_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 1)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
|
|
int16x4_t __ret; \
|
|
int16x4_t __s0 = __p0; \
|
|
int16x8_t __s1 = __p1; \
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_64_16); \
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_16); \
|
|
__ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vqrdmulh_laneq_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 1)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
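/* vqrshl* scalar forms (saturating rounding shift left): shift left by a
 * signed, per-call shift amount with rounding and saturation; negative
 * amounts shift right. */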
__ai __attribute__((target("neon"))) uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) {
  uint8_t __ret;
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqrshlb_u8(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) {
  uint32_t __ret;
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqrshls_u32(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) {
  uint64_t __ret;
  __ret = __builtin_bit_cast(uint64_t, __builtin_neon_vqrshld_u64(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) {
  uint16_t __ret;
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqrshlh_u16(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
  int8_t __ret;
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vqrshlb_s8(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
  int32_t __ret;
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vqrshls_s32(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
  int64_t __ret;
  __ret = __builtin_bit_cast(int64_t, __builtin_neon_vqrshld_s64(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
  int16_t __ret;
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vqrshlh_s16(__p0, __p1));
  return __ret;
}

#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_high_n_u32(__p0_704, __p1_704, __p2_704) __extension__ ({ \
|
|
uint16x8_t __ret_704; \
|
|
uint16x4_t __s0_704 = __p0_704; \
|
|
uint32x4_t __s1_704 = __p1_704; \
|
|
__ret_704 = __builtin_bit_cast(uint16x8_t, vcombine_u16(__builtin_bit_cast(uint16x4_t, __s0_704), __builtin_bit_cast(uint16x4_t, vqrshrn_n_u32(__s1_704, __p2_704)))); \
|
|
__ret_704; \
|
|
})
|
|
#else
|
|
#define vqrshrn_high_n_u32(__p0_705, __p1_705, __p2_705) __extension__ ({ \
|
|
uint16x8_t __ret_705; \
|
|
uint16x4_t __s0_705 = __p0_705; \
|
|
uint32x4_t __s1_705 = __p1_705; \
|
|
uint16x4_t __rev0_705; __rev0_705 = __builtin_shufflevector(__s0_705, __s0_705, __lane_reverse_64_16); \
|
|
uint32x4_t __rev1_705; __rev1_705 = __builtin_shufflevector(__s1_705, __s1_705, __lane_reverse_128_32); \
|
|
__ret_705 = __builtin_bit_cast(uint16x8_t, __noswap_vcombine_u16(__builtin_bit_cast(uint16x4_t, __rev0_705), __builtin_bit_cast(uint16x4_t, __noswap_vqrshrn_n_u32(__rev1_705, __p2_705)))); \
|
|
__ret_705 = __builtin_shufflevector(__ret_705, __ret_705, __lane_reverse_128_16); \
|
|
__ret_705; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_high_n_u64(__p0_706, __p1_706, __p2_706) __extension__ ({ \
|
|
uint32x4_t __ret_706; \
|
|
uint32x2_t __s0_706 = __p0_706; \
|
|
uint64x2_t __s1_706 = __p1_706; \
|
|
__ret_706 = __builtin_bit_cast(uint32x4_t, vcombine_u32(__builtin_bit_cast(uint32x2_t, __s0_706), __builtin_bit_cast(uint32x2_t, vqrshrn_n_u64(__s1_706, __p2_706)))); \
|
|
__ret_706; \
|
|
})
|
|
#else
|
|
#define vqrshrn_high_n_u64(__p0_707, __p1_707, __p2_707) __extension__ ({ \
|
|
uint32x4_t __ret_707; \
|
|
uint32x2_t __s0_707 = __p0_707; \
|
|
uint64x2_t __s1_707 = __p1_707; \
|
|
uint32x2_t __rev0_707; __rev0_707 = __builtin_shufflevector(__s0_707, __s0_707, __lane_reverse_64_32); \
|
|
uint64x2_t __rev1_707; __rev1_707 = __builtin_shufflevector(__s1_707, __s1_707, __lane_reverse_128_64); \
|
|
__ret_707 = __builtin_bit_cast(uint32x4_t, __noswap_vcombine_u32(__builtin_bit_cast(uint32x2_t, __rev0_707), __builtin_bit_cast(uint32x2_t, __noswap_vqrshrn_n_u64(__rev1_707, __p2_707)))); \
|
|
__ret_707 = __builtin_shufflevector(__ret_707, __ret_707, __lane_reverse_128_32); \
|
|
__ret_707; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_high_n_u16(__p0_708, __p1_708, __p2_708) __extension__ ({ \
|
|
uint8x16_t __ret_708; \
|
|
uint8x8_t __s0_708 = __p0_708; \
|
|
uint16x8_t __s1_708 = __p1_708; \
|
|
__ret_708 = __builtin_bit_cast(uint8x16_t, vcombine_u8(__builtin_bit_cast(uint8x8_t, __s0_708), __builtin_bit_cast(uint8x8_t, vqrshrn_n_u16(__s1_708, __p2_708)))); \
|
|
__ret_708; \
|
|
})
|
|
#else
|
|
#define vqrshrn_high_n_u16(__p0_709, __p1_709, __p2_709) __extension__ ({ \
|
|
uint8x16_t __ret_709; \
|
|
uint8x8_t __s0_709 = __p0_709; \
|
|
uint16x8_t __s1_709 = __p1_709; \
|
|
uint8x8_t __rev0_709; __rev0_709 = __builtin_shufflevector(__s0_709, __s0_709, __lane_reverse_64_8); \
|
|
uint16x8_t __rev1_709; __rev1_709 = __builtin_shufflevector(__s1_709, __s1_709, __lane_reverse_128_16); \
|
|
__ret_709 = __builtin_bit_cast(uint8x16_t, __noswap_vcombine_u8(__builtin_bit_cast(uint8x8_t, __rev0_709), __builtin_bit_cast(uint8x8_t, __noswap_vqrshrn_n_u16(__rev1_709, __p2_709)))); \
|
|
__ret_709 = __builtin_shufflevector(__ret_709, __ret_709, __lane_reverse_128_8); \
|
|
__ret_709; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_high_n_s32(__p0_710, __p1_710, __p2_710) __extension__ ({ \
|
|
int16x8_t __ret_710; \
|
|
int16x4_t __s0_710 = __p0_710; \
|
|
int32x4_t __s1_710 = __p1_710; \
|
|
__ret_710 = __builtin_bit_cast(int16x8_t, vcombine_s16(__builtin_bit_cast(int16x4_t, __s0_710), __builtin_bit_cast(int16x4_t, vqrshrn_n_s32(__s1_710, __p2_710)))); \
|
|
__ret_710; \
|
|
})
|
|
#else
|
|
#define vqrshrn_high_n_s32(__p0_711, __p1_711, __p2_711) __extension__ ({ \
|
|
int16x8_t __ret_711; \
|
|
int16x4_t __s0_711 = __p0_711; \
|
|
int32x4_t __s1_711 = __p1_711; \
|
|
int16x4_t __rev0_711; __rev0_711 = __builtin_shufflevector(__s0_711, __s0_711, __lane_reverse_64_16); \
|
|
int32x4_t __rev1_711; __rev1_711 = __builtin_shufflevector(__s1_711, __s1_711, __lane_reverse_128_32); \
|
|
__ret_711 = __builtin_bit_cast(int16x8_t, __noswap_vcombine_s16(__builtin_bit_cast(int16x4_t, __rev0_711), __builtin_bit_cast(int16x4_t, __noswap_vqrshrn_n_s32(__rev1_711, __p2_711)))); \
|
|
__ret_711 = __builtin_shufflevector(__ret_711, __ret_711, __lane_reverse_128_16); \
|
|
__ret_711; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_high_n_s64(__p0_712, __p1_712, __p2_712) __extension__ ({ \
|
|
int32x4_t __ret_712; \
|
|
int32x2_t __s0_712 = __p0_712; \
|
|
int64x2_t __s1_712 = __p1_712; \
|
|
__ret_712 = __builtin_bit_cast(int32x4_t, vcombine_s32(__builtin_bit_cast(int32x2_t, __s0_712), __builtin_bit_cast(int32x2_t, vqrshrn_n_s64(__s1_712, __p2_712)))); \
|
|
__ret_712; \
|
|
})
|
|
#else
|
|
#define vqrshrn_high_n_s64(__p0_713, __p1_713, __p2_713) __extension__ ({ \
|
|
int32x4_t __ret_713; \
|
|
int32x2_t __s0_713 = __p0_713; \
|
|
int64x2_t __s1_713 = __p1_713; \
|
|
int32x2_t __rev0_713; __rev0_713 = __builtin_shufflevector(__s0_713, __s0_713, __lane_reverse_64_32); \
|
|
int64x2_t __rev1_713; __rev1_713 = __builtin_shufflevector(__s1_713, __s1_713, __lane_reverse_128_64); \
|
|
__ret_713 = __builtin_bit_cast(int32x4_t, __noswap_vcombine_s32(__builtin_bit_cast(int32x2_t, __rev0_713), __builtin_bit_cast(int32x2_t, __noswap_vqrshrn_n_s64(__rev1_713, __p2_713)))); \
|
|
__ret_713 = __builtin_shufflevector(__ret_713, __ret_713, __lane_reverse_128_32); \
|
|
__ret_713; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrn_high_n_s16(__p0_714, __p1_714, __p2_714) __extension__ ({ \
|
|
int8x16_t __ret_714; \
|
|
int8x8_t __s0_714 = __p0_714; \
|
|
int16x8_t __s1_714 = __p1_714; \
|
|
__ret_714 = __builtin_bit_cast(int8x16_t, vcombine_s8(__builtin_bit_cast(int8x8_t, __s0_714), __builtin_bit_cast(int8x8_t, vqrshrn_n_s16(__s1_714, __p2_714)))); \
|
|
__ret_714; \
|
|
})
|
|
#else
|
|
#define vqrshrn_high_n_s16(__p0_715, __p1_715, __p2_715) __extension__ ({ \
|
|
int8x16_t __ret_715; \
|
|
int8x8_t __s0_715 = __p0_715; \
|
|
int16x8_t __s1_715 = __p1_715; \
|
|
int8x8_t __rev0_715; __rev0_715 = __builtin_shufflevector(__s0_715, __s0_715, __lane_reverse_64_8); \
|
|
int16x8_t __rev1_715; __rev1_715 = __builtin_shufflevector(__s1_715, __s1_715, __lane_reverse_128_16); \
|
|
__ret_715 = __builtin_bit_cast(int8x16_t, __noswap_vcombine_s8(__builtin_bit_cast(int8x8_t, __rev0_715), __builtin_bit_cast(int8x8_t, __noswap_vqrshrn_n_s16(__rev1_715, __p2_715)))); \
|
|
__ret_715 = __builtin_shufflevector(__ret_715, __ret_715, __lane_reverse_128_8); \
|
|
__ret_715; \
|
|
})
|
|
#endif
|
|
|
|
#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  uint32_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqrshrns_n_u32(__s0, __p1)); \
  __ret; \
})
#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
  uint32_t __ret; \
  uint64_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqrshrnd_n_u64(__s0, __p1)); \
  __ret; \
})
#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
  uint8_t __ret; \
  uint16_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqrshrnh_n_u16(__s0, __p1)); \
  __ret; \
})
#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
  int16_t __ret; \
  int32_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vqrshrns_n_s32(__s0, __p1)); \
  __ret; \
})
#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
  int32_t __ret; \
  int64_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vqrshrnd_n_s64(__s0, __p1)); \
  __ret; \
})
#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
  int8_t __ret; \
  int16_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vqrshrnh_n_s16(__s0, __p1)); \
  __ret; \
})

#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrun_high_n_s32(__p0_716, __p1_716, __p2_716) __extension__ ({ \
|
|
int16x8_t __ret_716; \
|
|
int16x4_t __s0_716 = __p0_716; \
|
|
int32x4_t __s1_716 = __p1_716; \
|
|
__ret_716 = __builtin_bit_cast(int16x8_t, vcombine_s16(__builtin_bit_cast(int16x4_t, __s0_716), __builtin_bit_cast(int16x4_t, vqrshrun_n_s32(__s1_716, __p2_716)))); \
|
|
__ret_716; \
|
|
})
|
|
#else
|
|
#define vqrshrun_high_n_s32(__p0_717, __p1_717, __p2_717) __extension__ ({ \
|
|
int16x8_t __ret_717; \
|
|
int16x4_t __s0_717 = __p0_717; \
|
|
int32x4_t __s1_717 = __p1_717; \
|
|
int16x4_t __rev0_717; __rev0_717 = __builtin_shufflevector(__s0_717, __s0_717, __lane_reverse_64_16); \
|
|
int32x4_t __rev1_717; __rev1_717 = __builtin_shufflevector(__s1_717, __s1_717, __lane_reverse_128_32); \
|
|
__ret_717 = __builtin_bit_cast(int16x8_t, __noswap_vcombine_s16(__builtin_bit_cast(int16x4_t, __rev0_717), __builtin_bit_cast(int16x4_t, __noswap_vqrshrun_n_s32(__rev1_717, __p2_717)))); \
|
|
__ret_717 = __builtin_shufflevector(__ret_717, __ret_717, __lane_reverse_128_16); \
|
|
__ret_717; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrun_high_n_s64(__p0_718, __p1_718, __p2_718) __extension__ ({ \
|
|
int32x4_t __ret_718; \
|
|
int32x2_t __s0_718 = __p0_718; \
|
|
int64x2_t __s1_718 = __p1_718; \
|
|
__ret_718 = __builtin_bit_cast(int32x4_t, vcombine_s32(__builtin_bit_cast(int32x2_t, __s0_718), __builtin_bit_cast(int32x2_t, vqrshrun_n_s64(__s1_718, __p2_718)))); \
|
|
__ret_718; \
|
|
})
|
|
#else
|
|
#define vqrshrun_high_n_s64(__p0_719, __p1_719, __p2_719) __extension__ ({ \
|
|
int32x4_t __ret_719; \
|
|
int32x2_t __s0_719 = __p0_719; \
|
|
int64x2_t __s1_719 = __p1_719; \
|
|
int32x2_t __rev0_719; __rev0_719 = __builtin_shufflevector(__s0_719, __s0_719, __lane_reverse_64_32); \
|
|
int64x2_t __rev1_719; __rev1_719 = __builtin_shufflevector(__s1_719, __s1_719, __lane_reverse_128_64); \
|
|
__ret_719 = __builtin_bit_cast(int32x4_t, __noswap_vcombine_s32(__builtin_bit_cast(int32x2_t, __rev0_719), __builtin_bit_cast(int32x2_t, __noswap_vqrshrun_n_s64(__rev1_719, __p2_719)))); \
|
|
__ret_719 = __builtin_shufflevector(__ret_719, __ret_719, __lane_reverse_128_32); \
|
|
__ret_719; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrshrun_high_n_s16(__p0_720, __p1_720, __p2_720) __extension__ ({ \
|
|
int8x16_t __ret_720; \
|
|
int8x8_t __s0_720 = __p0_720; \
|
|
int16x8_t __s1_720 = __p1_720; \
|
|
__ret_720 = __builtin_bit_cast(int8x16_t, vcombine_s8(__builtin_bit_cast(int8x8_t, __s0_720), __builtin_bit_cast(int8x8_t, vqrshrun_n_s16(__s1_720, __p2_720)))); \
|
|
__ret_720; \
|
|
})
|
|
#else
|
|
#define vqrshrun_high_n_s16(__p0_721, __p1_721, __p2_721) __extension__ ({ \
|
|
int8x16_t __ret_721; \
|
|
int8x8_t __s0_721 = __p0_721; \
|
|
int16x8_t __s1_721 = __p1_721; \
|
|
int8x8_t __rev0_721; __rev0_721 = __builtin_shufflevector(__s0_721, __s0_721, __lane_reverse_64_8); \
|
|
int16x8_t __rev1_721; __rev1_721 = __builtin_shufflevector(__s1_721, __s1_721, __lane_reverse_128_16); \
|
|
__ret_721 = __builtin_bit_cast(int8x16_t, __noswap_vcombine_s8(__builtin_bit_cast(int8x8_t, __rev0_721), __builtin_bit_cast(int8x8_t, __noswap_vqrshrun_n_s16(__rev1_721, __p2_721)))); \
|
|
__ret_721 = __builtin_shufflevector(__ret_721, __ret_721, __lane_reverse_128_8); \
|
|
__ret_721; \
|
|
})
|
|
#endif
|
|
|
|
#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  int32_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqrshruns_n_s32(__s0, __p1)); \
  __ret; \
})
#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
  uint32_t __ret; \
  int64_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqrshrund_n_s64(__s0, __p1)); \
  __ret; \
})
#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
  uint8_t __ret; \
  int16_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqrshrunh_n_s16(__s0, __p1)); \
  __ret; \
})
__ai __attribute__((target("neon"))) uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) {
  uint8_t __ret;
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqshlb_u8(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) {
  uint32_t __ret;
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqshls_u32(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) {
  uint64_t __ret;
  __ret = __builtin_bit_cast(uint64_t, __builtin_neon_vqshld_u64(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) {
  uint16_t __ret;
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqshlh_u16(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
  int8_t __ret;
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vqshlb_s8(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
  int32_t __ret;
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vqshls_s32(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
  int64_t __ret;
  __ret = __builtin_bit_cast(int64_t, __builtin_neon_vqshld_s64(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
  int16_t __ret;
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vqshlh_s16(__p0, __p1));
  return __ret;
}
#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
|
|
uint8_t __ret; \
|
|
uint8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqshlb_n_u8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
|
|
uint32_t __ret; \
|
|
uint32_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqshls_n_u32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vqshld_n_u64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
|
|
uint16_t __ret; \
|
|
uint16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqshlh_n_u16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vqshlb_n_s8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqshls_n_s32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqshld_n_s64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vqshlh_n_s16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
|
|
int8_t __ret; \
|
|
int8_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int8_t, __builtin_neon_vqshlub_n_s8(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
|
|
int32_t __ret; \
|
|
int32_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqshlus_n_s32(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vqshlud_n_s64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
|
|
int16_t __ret; \
|
|
int16_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vqshluh_n_s16(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_high_n_u32(__p0_722, __p1_722, __p2_722) __extension__ ({ \
|
|
uint16x8_t __ret_722; \
|
|
uint16x4_t __s0_722 = __p0_722; \
|
|
uint32x4_t __s1_722 = __p1_722; \
|
|
__ret_722 = __builtin_bit_cast(uint16x8_t, vcombine_u16(__builtin_bit_cast(uint16x4_t, __s0_722), __builtin_bit_cast(uint16x4_t, vqshrn_n_u32(__s1_722, __p2_722)))); \
|
|
__ret_722; \
|
|
})
|
|
#else
|
|
#define vqshrn_high_n_u32(__p0_723, __p1_723, __p2_723) __extension__ ({ \
|
|
uint16x8_t __ret_723; \
|
|
uint16x4_t __s0_723 = __p0_723; \
|
|
uint32x4_t __s1_723 = __p1_723; \
|
|
uint16x4_t __rev0_723; __rev0_723 = __builtin_shufflevector(__s0_723, __s0_723, __lane_reverse_64_16); \
|
|
uint32x4_t __rev1_723; __rev1_723 = __builtin_shufflevector(__s1_723, __s1_723, __lane_reverse_128_32); \
|
|
__ret_723 = __builtin_bit_cast(uint16x8_t, __noswap_vcombine_u16(__builtin_bit_cast(uint16x4_t, __rev0_723), __builtin_bit_cast(uint16x4_t, __noswap_vqshrn_n_u32(__rev1_723, __p2_723)))); \
|
|
__ret_723 = __builtin_shufflevector(__ret_723, __ret_723, __lane_reverse_128_16); \
|
|
__ret_723; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_high_n_u64(__p0_724, __p1_724, __p2_724) __extension__ ({ \
|
|
uint32x4_t __ret_724; \
|
|
uint32x2_t __s0_724 = __p0_724; \
|
|
uint64x2_t __s1_724 = __p1_724; \
|
|
__ret_724 = __builtin_bit_cast(uint32x4_t, vcombine_u32(__builtin_bit_cast(uint32x2_t, __s0_724), __builtin_bit_cast(uint32x2_t, vqshrn_n_u64(__s1_724, __p2_724)))); \
|
|
__ret_724; \
|
|
})
|
|
#else
|
|
#define vqshrn_high_n_u64(__p0_725, __p1_725, __p2_725) __extension__ ({ \
|
|
uint32x4_t __ret_725; \
|
|
uint32x2_t __s0_725 = __p0_725; \
|
|
uint64x2_t __s1_725 = __p1_725; \
|
|
uint32x2_t __rev0_725; __rev0_725 = __builtin_shufflevector(__s0_725, __s0_725, __lane_reverse_64_32); \
|
|
uint64x2_t __rev1_725; __rev1_725 = __builtin_shufflevector(__s1_725, __s1_725, __lane_reverse_128_64); \
|
|
__ret_725 = __builtin_bit_cast(uint32x4_t, __noswap_vcombine_u32(__builtin_bit_cast(uint32x2_t, __rev0_725), __builtin_bit_cast(uint32x2_t, __noswap_vqshrn_n_u64(__rev1_725, __p2_725)))); \
|
|
__ret_725 = __builtin_shufflevector(__ret_725, __ret_725, __lane_reverse_128_32); \
|
|
__ret_725; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_high_n_u16(__p0_726, __p1_726, __p2_726) __extension__ ({ \
|
|
uint8x16_t __ret_726; \
|
|
uint8x8_t __s0_726 = __p0_726; \
|
|
uint16x8_t __s1_726 = __p1_726; \
|
|
__ret_726 = __builtin_bit_cast(uint8x16_t, vcombine_u8(__builtin_bit_cast(uint8x8_t, __s0_726), __builtin_bit_cast(uint8x8_t, vqshrn_n_u16(__s1_726, __p2_726)))); \
|
|
__ret_726; \
|
|
})
|
|
#else
|
|
#define vqshrn_high_n_u16(__p0_727, __p1_727, __p2_727) __extension__ ({ \
|
|
uint8x16_t __ret_727; \
|
|
uint8x8_t __s0_727 = __p0_727; \
|
|
uint16x8_t __s1_727 = __p1_727; \
|
|
uint8x8_t __rev0_727; __rev0_727 = __builtin_shufflevector(__s0_727, __s0_727, __lane_reverse_64_8); \
|
|
uint16x8_t __rev1_727; __rev1_727 = __builtin_shufflevector(__s1_727, __s1_727, __lane_reverse_128_16); \
|
|
__ret_727 = __builtin_bit_cast(uint8x16_t, __noswap_vcombine_u8(__builtin_bit_cast(uint8x8_t, __rev0_727), __builtin_bit_cast(uint8x8_t, __noswap_vqshrn_n_u16(__rev1_727, __p2_727)))); \
|
|
__ret_727 = __builtin_shufflevector(__ret_727, __ret_727, __lane_reverse_128_8); \
|
|
__ret_727; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_high_n_s32(__p0_728, __p1_728, __p2_728) __extension__ ({ \
|
|
int16x8_t __ret_728; \
|
|
int16x4_t __s0_728 = __p0_728; \
|
|
int32x4_t __s1_728 = __p1_728; \
|
|
__ret_728 = __builtin_bit_cast(int16x8_t, vcombine_s16(__builtin_bit_cast(int16x4_t, __s0_728), __builtin_bit_cast(int16x4_t, vqshrn_n_s32(__s1_728, __p2_728)))); \
|
|
__ret_728; \
|
|
})
|
|
#else
|
|
#define vqshrn_high_n_s32(__p0_729, __p1_729, __p2_729) __extension__ ({ \
|
|
int16x8_t __ret_729; \
|
|
int16x4_t __s0_729 = __p0_729; \
|
|
int32x4_t __s1_729 = __p1_729; \
|
|
int16x4_t __rev0_729; __rev0_729 = __builtin_shufflevector(__s0_729, __s0_729, __lane_reverse_64_16); \
|
|
int32x4_t __rev1_729; __rev1_729 = __builtin_shufflevector(__s1_729, __s1_729, __lane_reverse_128_32); \
|
|
__ret_729 = __builtin_bit_cast(int16x8_t, __noswap_vcombine_s16(__builtin_bit_cast(int16x4_t, __rev0_729), __builtin_bit_cast(int16x4_t, __noswap_vqshrn_n_s32(__rev1_729, __p2_729)))); \
|
|
__ret_729 = __builtin_shufflevector(__ret_729, __ret_729, __lane_reverse_128_16); \
|
|
__ret_729; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_high_n_s64(__p0_730, __p1_730, __p2_730) __extension__ ({ \
|
|
int32x4_t __ret_730; \
|
|
int32x2_t __s0_730 = __p0_730; \
|
|
int64x2_t __s1_730 = __p1_730; \
|
|
__ret_730 = __builtin_bit_cast(int32x4_t, vcombine_s32(__builtin_bit_cast(int32x2_t, __s0_730), __builtin_bit_cast(int32x2_t, vqshrn_n_s64(__s1_730, __p2_730)))); \
|
|
__ret_730; \
|
|
})
|
|
#else
|
|
#define vqshrn_high_n_s64(__p0_731, __p1_731, __p2_731) __extension__ ({ \
|
|
int32x4_t __ret_731; \
|
|
int32x2_t __s0_731 = __p0_731; \
|
|
int64x2_t __s1_731 = __p1_731; \
|
|
int32x2_t __rev0_731; __rev0_731 = __builtin_shufflevector(__s0_731, __s0_731, __lane_reverse_64_32); \
|
|
int64x2_t __rev1_731; __rev1_731 = __builtin_shufflevector(__s1_731, __s1_731, __lane_reverse_128_64); \
|
|
__ret_731 = __builtin_bit_cast(int32x4_t, __noswap_vcombine_s32(__builtin_bit_cast(int32x2_t, __rev0_731), __builtin_bit_cast(int32x2_t, __noswap_vqshrn_n_s64(__rev1_731, __p2_731)))); \
|
|
__ret_731 = __builtin_shufflevector(__ret_731, __ret_731, __lane_reverse_128_32); \
|
|
__ret_731; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrn_high_n_s16(__p0_732, __p1_732, __p2_732) __extension__ ({ \
|
|
int8x16_t __ret_732; \
|
|
int8x8_t __s0_732 = __p0_732; \
|
|
int16x8_t __s1_732 = __p1_732; \
|
|
__ret_732 = __builtin_bit_cast(int8x16_t, vcombine_s8(__builtin_bit_cast(int8x8_t, __s0_732), __builtin_bit_cast(int8x8_t, vqshrn_n_s16(__s1_732, __p2_732)))); \
|
|
__ret_732; \
|
|
})
|
|
#else
|
|
#define vqshrn_high_n_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
|
|
int8x16_t __ret_733; \
|
|
int8x8_t __s0_733 = __p0_733; \
|
|
int16x8_t __s1_733 = __p1_733; \
|
|
int8x8_t __rev0_733; __rev0_733 = __builtin_shufflevector(__s0_733, __s0_733, __lane_reverse_64_8); \
|
|
int16x8_t __rev1_733; __rev1_733 = __builtin_shufflevector(__s1_733, __s1_733, __lane_reverse_128_16); \
|
|
__ret_733 = __builtin_bit_cast(int8x16_t, __noswap_vcombine_s8(__builtin_bit_cast(int8x8_t, __rev0_733), __builtin_bit_cast(int8x8_t, __noswap_vqshrn_n_s16(__rev1_733, __p2_733)))); \
|
|
__ret_733 = __builtin_shufflevector(__ret_733, __ret_733, __lane_reverse_128_8); \
|
|
__ret_733; \
|
|
})
|
|
#endif
|
|
|
|
#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  uint32_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqshrns_n_u32(__s0, __p1)); \
  __ret; \
})
#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
  uint32_t __ret; \
  uint64_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqshrnd_n_u64(__s0, __p1)); \
  __ret; \
})
#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
  uint8_t __ret; \
  uint16_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqshrnh_n_u16(__s0, __p1)); \
  __ret; \
})
#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
  int16_t __ret; \
  int32_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vqshrns_n_s32(__s0, __p1)); \
  __ret; \
})
#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
  int32_t __ret; \
  int64_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vqshrnd_n_s64(__s0, __p1)); \
  __ret; \
})
#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
  int8_t __ret; \
  int16_t __s0 = __p0; \
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vqshrnh_n_s16(__s0, __p1)); \
  __ret; \
})

#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrun_high_n_s32(__p0_734, __p1_734, __p2_734) __extension__ ({ \
|
|
int16x8_t __ret_734; \
|
|
int16x4_t __s0_734 = __p0_734; \
|
|
int32x4_t __s1_734 = __p1_734; \
|
|
__ret_734 = __builtin_bit_cast(int16x8_t, vcombine_s16(__builtin_bit_cast(int16x4_t, __s0_734), __builtin_bit_cast(int16x4_t, vqshrun_n_s32(__s1_734, __p2_734)))); \
|
|
__ret_734; \
|
|
})
|
|
#else
|
|
#define vqshrun_high_n_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \
|
|
int16x8_t __ret_735; \
|
|
int16x4_t __s0_735 = __p0_735; \
|
|
int32x4_t __s1_735 = __p1_735; \
|
|
int16x4_t __rev0_735; __rev0_735 = __builtin_shufflevector(__s0_735, __s0_735, __lane_reverse_64_16); \
|
|
int32x4_t __rev1_735; __rev1_735 = __builtin_shufflevector(__s1_735, __s1_735, __lane_reverse_128_32); \
|
|
__ret_735 = __builtin_bit_cast(int16x8_t, __noswap_vcombine_s16(__builtin_bit_cast(int16x4_t, __rev0_735), __builtin_bit_cast(int16x4_t, __noswap_vqshrun_n_s32(__rev1_735, __p2_735)))); \
|
|
__ret_735 = __builtin_shufflevector(__ret_735, __ret_735, __lane_reverse_128_16); \
|
|
__ret_735; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrun_high_n_s64(__p0_736, __p1_736, __p2_736) __extension__ ({ \
|
|
int32x4_t __ret_736; \
|
|
int32x2_t __s0_736 = __p0_736; \
|
|
int64x2_t __s1_736 = __p1_736; \
|
|
__ret_736 = __builtin_bit_cast(int32x4_t, vcombine_s32(__builtin_bit_cast(int32x2_t, __s0_736), __builtin_bit_cast(int32x2_t, vqshrun_n_s64(__s1_736, __p2_736)))); \
|
|
__ret_736; \
|
|
})
|
|
#else
|
|
#define vqshrun_high_n_s64(__p0_737, __p1_737, __p2_737) __extension__ ({ \
|
|
int32x4_t __ret_737; \
|
|
int32x2_t __s0_737 = __p0_737; \
|
|
int64x2_t __s1_737 = __p1_737; \
|
|
int32x2_t __rev0_737; __rev0_737 = __builtin_shufflevector(__s0_737, __s0_737, __lane_reverse_64_32); \
|
|
int64x2_t __rev1_737; __rev1_737 = __builtin_shufflevector(__s1_737, __s1_737, __lane_reverse_128_64); \
|
|
__ret_737 = __builtin_bit_cast(int32x4_t, __noswap_vcombine_s32(__builtin_bit_cast(int32x2_t, __rev0_737), __builtin_bit_cast(int32x2_t, __noswap_vqshrun_n_s64(__rev1_737, __p2_737)))); \
|
|
__ret_737 = __builtin_shufflevector(__ret_737, __ret_737, __lane_reverse_128_32); \
|
|
__ret_737; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqshrun_high_n_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \
|
|
int8x16_t __ret_738; \
|
|
int8x8_t __s0_738 = __p0_738; \
|
|
int16x8_t __s1_738 = __p1_738; \
|
|
__ret_738 = __builtin_bit_cast(int8x16_t, vcombine_s8(__builtin_bit_cast(int8x8_t, __s0_738), __builtin_bit_cast(int8x8_t, vqshrun_n_s16(__s1_738, __p2_738)))); \
|
|
__ret_738; \
|
|
})
|
|
#else
|
|
#define vqshrun_high_n_s16(__p0_739, __p1_739, __p2_739) __extension__ ({ \
|
|
int8x16_t __ret_739; \
|
|
int8x8_t __s0_739 = __p0_739; \
|
|
int16x8_t __s1_739 = __p1_739; \
|
|
int8x8_t __rev0_739; __rev0_739 = __builtin_shufflevector(__s0_739, __s0_739, __lane_reverse_64_8); \
|
|
int16x8_t __rev1_739; __rev1_739 = __builtin_shufflevector(__s1_739, __s1_739, __lane_reverse_128_16); \
|
|
__ret_739 = __builtin_bit_cast(int8x16_t, __noswap_vcombine_s8(__builtin_bit_cast(int8x8_t, __rev0_739), __builtin_bit_cast(int8x8_t, __noswap_vqshrun_n_s16(__rev1_739, __p2_739)))); \
|
|
__ret_739 = __builtin_shufflevector(__ret_739, __ret_739, __lane_reverse_128_8); \
|
|
__ret_739; \
|
|
})
|
|
#endif
|
|
|
|
#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
  uint16_t __ret; \
  int32_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqshruns_n_s32(__s0, __p1)); \
  __ret; \
})
#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
  uint32_t __ret; \
  int64_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqshrund_n_s64(__s0, __p1)); \
  __ret; \
})
#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
  uint8_t __ret; \
  int16_t __s0 = __p0; \
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqshrunh_n_s16(__s0, __p1)); \
  __ret; \
})
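/* vqsub* scalar forms (saturating subtract). */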
__ai __attribute__((target("neon"))) uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
  uint8_t __ret;
  __ret = __builtin_bit_cast(uint8_t, __builtin_neon_vqsubb_u8(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
  uint32_t __ret;
  __ret = __builtin_bit_cast(uint32_t, __builtin_neon_vqsubs_u32(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
  uint64_t __ret;
  __ret = __builtin_bit_cast(uint64_t, __builtin_neon_vqsubd_u64(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
  uint16_t __ret;
  __ret = __builtin_bit_cast(uint16_t, __builtin_neon_vqsubh_u16(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
  int8_t __ret;
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vqsubb_s8(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
  int32_t __ret;
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vqsubs_s32(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
  int64_t __ret;
  __ret = __builtin_bit_cast(int64_t, __builtin_neon_vqsubd_s64(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
  int16_t __ret;
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vqsubh_s16(__p0, __p1));
  return __ret;
}

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbl1_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbl1_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbl1q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbl1q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbl1q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbl1q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbl1q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbl1q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbl1q_mf8(mfloat8x16_t __p0, uint8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbl1q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 44));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbl1q_mf8(mfloat8x16_t __p0, uint8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbl1q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 44));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbl1_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbl1_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbl1_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbl1_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbl1_mf8(mfloat8x16_t __p0, uint8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbl1_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbl1_mf8(mfloat8x16_t __p0, uint8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbl1_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbl2_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p1), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x16x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbl2_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbl2q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p1), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbl2q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev1), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbl2q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbl2q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbl2q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbl2q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbl2q_mf8(mfloat8x16x2_t __p0, uint8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbl2q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p1), 44));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbl2q_mf8(mfloat8x16x2_t __p0, uint8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbl2q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev1), 44));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbl2_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x16x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbl2_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbl2_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x16x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbl2_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbl2_mf8(mfloat8x16x2_t __p0, uint8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbl2_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x8_t, __p1), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbl2_mf8(mfloat8x16x2_t __p0, uint8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x16x2_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbl2_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x8_t, __rev1), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbl3_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p1), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x16x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbl3_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev1), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbl3q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p1), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbl3q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev1), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbl3q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbl3q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbl3q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbl3q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbl3q_mf8(mfloat8x16x3_t __p0, uint8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbl3q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p1), 44));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbl3q_mf8(mfloat8x16x3_t __p0, uint8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbl3q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev1), 44));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbl3_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x16x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbl3_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbl3_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x16x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbl3_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbl3_mf8(mfloat8x16x3_t __p0, uint8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbl3_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x8_t, __p1), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbl3_mf8(mfloat8x16x3_t __p0, uint8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x16x3_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbl3_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x8_t, __rev1), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbl4_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p0.val[3]), __builtin_bit_cast(int8x8_t, __p1), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x16x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbl4_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev0.val[3]), __builtin_bit_cast(int8x8_t, __rev1), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbl4q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p0.val[3]), __builtin_bit_cast(int8x16_t, __p1), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbl4q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev0.val[3]), __builtin_bit_cast(int8x16_t, __rev1), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbl4q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p0.val[3]), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbl4q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev0.val[3]), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbl4q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p0.val[3]), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbl4q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev0.val[3]), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbl4q_mf8(mfloat8x16x4_t __p0, uint8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbl4q_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p0.val[3]), __builtin_bit_cast(int8x16_t, __p1), 44));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbl4q_mf8(mfloat8x16x4_t __p0, uint8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbl4q_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev0.val[3]), __builtin_bit_cast(int8x16_t, __rev1), 44));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbl4_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p0.val[3]), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x16x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbl4_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev0.val[3]), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbl4_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p0.val[3]), __builtin_bit_cast(int8x8_t, __p1), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x16x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbl4_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev0.val[3]), __builtin_bit_cast(int8x8_t, __rev1), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbl4_mf8(mfloat8x16x4_t __p0, uint8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbl4_v(__builtin_bit_cast(int8x16_t, __p0.val[0]), __builtin_bit_cast(int8x16_t, __p0.val[1]), __builtin_bit_cast(int8x16_t, __p0.val[2]), __builtin_bit_cast(int8x16_t, __p0.val[3]), __builtin_bit_cast(int8x8_t, __p1), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbl4_mf8(mfloat8x16x4_t __p0, uint8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x16x4_t __rev0;
|
|
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], __lane_reverse_128_8);
|
|
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], __lane_reverse_128_8);
|
|
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], __lane_reverse_128_8);
|
|
__rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], __lane_reverse_128_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbl4_v(__builtin_bit_cast(int8x16_t, __rev0.val[0]), __builtin_bit_cast(int8x16_t, __rev0.val[1]), __builtin_bit_cast(int8x16_t, __rev0.val[2]), __builtin_bit_cast(int8x16_t, __rev0.val[3]), __builtin_bit_cast(int8x8_t, __rev1), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbx1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbx1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbx1q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbx1q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbx1q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbx1q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbx1q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbx1q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbx1q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1, uint8x16_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbx1q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 44));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbx1q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1, uint8x16_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbx1q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 44));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbx1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbx1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbx1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbx1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbx1_mf8(mfloat8x8_t __p0, mfloat8x16_t __p1, uint8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbx1_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x8_t, __p2), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbx1_mf8(mfloat8x8_t __p0, mfloat8x16_t __p1, uint8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbx1_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x8_t, __rev2), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbx2_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p2), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x16x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbx2_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev2), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbx2q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p2), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbx2q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev2), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbx2q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p2), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbx2q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev2), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbx2q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p2), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbx2q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev2), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbx2q_mf8(mfloat8x16_t __p0, mfloat8x16x2_t __p1, uint8x16_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbx2q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p2), 44));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbx2q_mf8(mfloat8x16_t __p0, mfloat8x16x2_t __p1, uint8x16_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbx2q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev2), 44));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbx2_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p2), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x16x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbx2_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev2), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbx2_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p2), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x16x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbx2_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev2), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbx2_mf8(mfloat8x8_t __p0, mfloat8x16x2_t __p1, uint8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbx2_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x8_t, __p2), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbx2_mf8(mfloat8x8_t __p0, mfloat8x16x2_t __p1, uint8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x16x2_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbx2_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev2), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbx3_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p2), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x16x3_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbx3_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev2), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbx3q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p2), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16x3_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbx3q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev2), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbx3q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p2), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16x3_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbx3q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev2), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbx3q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p2), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16x3_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbx3q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev2), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbx3q_mf8(mfloat8x16_t __p0, mfloat8x16x3_t __p1, uint8x16_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbx3q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p2), 44));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbx3q_mf8(mfloat8x16_t __p0, mfloat8x16x3_t __p1, uint8x16_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16x3_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbx3q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev2), 44));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
uint8x8_t __ret;
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbx3_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p2), 16));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
uint8x8_t __ret;
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
uint8x16x3_t __rev1;
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbx3_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev2), 16));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
int8x8_t __ret;
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbx3_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p2), 0));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
int8x8_t __ret;
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
int8x16x3_t __rev1;
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbx3_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev2), 0));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbx3_mf8(mfloat8x8_t __p0, mfloat8x16x3_t __p1, uint8x8_t __p2) {
mfloat8x8_t __ret;
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbx3_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x8_t, __p2), 12));
return __ret;
}
#else
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbx3_mf8(mfloat8x8_t __p0, mfloat8x16x3_t __p1, uint8x8_t __p2) {
mfloat8x8_t __ret;
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
mfloat8x16x3_t __rev1;
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbx3_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev2), 12));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
return __ret;
}
#endif

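/* The vqtbx4_* and vqtbx4q_* intrinsics below perform an extended table lookup across
 * four 128-bit table registers; lanes whose index is out of range keep the corresponding
 * value from the destination operand __p0. */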
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbx4_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p1.val[3]), __builtin_bit_cast(int8x8_t, __p2), 4));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x16x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vqtbx4_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __builtin_bit_cast(int8x8_t, __rev2), 4));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbx4q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p1.val[3]), __builtin_bit_cast(int8x16_t, __p2), 36));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vqtbx4q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __builtin_bit_cast(int8x16_t, __rev2), 36));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbx4q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p1.val[3]), __builtin_bit_cast(int8x16_t, __p2), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vqtbx4q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __builtin_bit_cast(int8x16_t, __rev2), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbx4q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p1.val[3]), __builtin_bit_cast(int8x16_t, __p2), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vqtbx4q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __builtin_bit_cast(int8x16_t, __rev2), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbx4q_mf8(mfloat8x16_t __p0, mfloat8x16x4_t __p1, uint8x16_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbx4q_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p1.val[3]), __builtin_bit_cast(int8x16_t, __p2), 44));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vqtbx4q_mf8(mfloat8x16_t __p0, mfloat8x16x4_t __p1, uint8x16_t __p2) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __builtin_neon_vqtbx4q_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __builtin_bit_cast(int8x16_t, __rev2), 44));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbx4_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p1.val[3]), __builtin_bit_cast(int8x8_t, __p2), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x16x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vqtbx4_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __builtin_bit_cast(int8x8_t, __rev2), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbx4_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p1.val[3]), __builtin_bit_cast(int8x8_t, __p2), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x16x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vqtbx4_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __builtin_bit_cast(int8x8_t, __rev2), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbx4_mf8(mfloat8x8_t __p0, mfloat8x16x4_t __p1, uint8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbx4_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x16_t, __p1.val[0]), __builtin_bit_cast(int8x16_t, __p1.val[1]), __builtin_bit_cast(int8x16_t, __p1.val[2]), __builtin_bit_cast(int8x16_t, __p1.val[3]), __builtin_bit_cast(int8x8_t, __p2), 12));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vqtbx4_mf8(mfloat8x8_t __p0, mfloat8x16x4_t __p1, uint8x8_t __p2) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x16x4_t __rev1;
|
|
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], __lane_reverse_128_8);
|
|
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], __lane_reverse_128_8);
|
|
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], __lane_reverse_128_8);
|
|
__rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], __lane_reverse_128_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __builtin_neon_vqtbx4_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __builtin_bit_cast(int8x8_t, __rev2), 12));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
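/* vraddhn_high_* combine a narrow vector __p0 (kept as the low half of the result) with
 * the rounded, narrowed high halves of the element-wise sums of __p1 and __p2, i.e.
 * vcombine(__p0, vraddhn(__p1, __p2)). */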
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
uint16x8_t __ret;
__ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
uint16x8_t __ret;
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
__ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int16x8_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int8x16_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
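/* The vrbit_* and vrbitq_* intrinsics reverse the order of the bits within each byte of
 * the operand. */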
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x8_t vrbit_p8(poly8x8_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vrbit_v(__builtin_bit_cast(int8x8_t, __p0), 4));
return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x8_t vrbit_p8(poly8x8_t __p0) {
poly8x8_t __ret;
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
__ret = __builtin_bit_cast(poly8x8_t, __builtin_neon_vrbit_v(__builtin_bit_cast(int8x8_t, __rev0), 4));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x16_t vrbitq_p8(poly8x16_t __p0) {
poly8x16_t __ret;
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vrbitq_v(__builtin_bit_cast(int8x16_t, __p0), 36));
return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x16_t vrbitq_p8(poly8x16_t __p0) {
poly8x16_t __ret;
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
__ret = __builtin_bit_cast(poly8x16_t, __builtin_neon_vrbitq_v(__builtin_bit_cast(int8x16_t, __rev0), 36));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vrbitq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vrbitq_v(__builtin_bit_cast(int8x16_t, __p0), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vrbitq_u8(uint8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vrbitq_v(__builtin_bit_cast(int8x16_t, __rev0), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vrbitq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vrbitq_v(__builtin_bit_cast(int8x16_t, __p0), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vrbitq_s8(int8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vrbitq_v(__builtin_bit_cast(int8x16_t, __rev0), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vrbit_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrbit_v(__builtin_bit_cast(int8x8_t, __p0), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vrbit_u8(uint8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vrbit_v(__builtin_bit_cast(int8x8_t, __rev0), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vrbit_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrbit_v(__builtin_bit_cast(int8x8_t, __p0), 0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vrbit_s8(int8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vrbit_v(__builtin_bit_cast(int8x8_t, __rev0), 0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
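/* Reciprocal helpers: vrecpe* return a reciprocal estimate, vrecps* the Newton-Raphson
 * reciprocal step (2.0 - a*b) used to refine that estimate, and vrecpx* the reciprocal
 * exponent operation. */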
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vrecpeq_f64(float64x2_t __p0) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrecpeq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vrecpeq_f64(float64x2_t __p0) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrecpeq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vrecpe_f64(float64x1_t __p0) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrecpe_v(__builtin_bit_cast(int8x8_t, __p0), 10));
return __ret;
}
__ai __attribute__((target("neon"))) float64_t vrecped_f64(float64_t __p0) {
float64_t __ret;
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vrecped_f64(__p0));
return __ret;
}
__ai __attribute__((target("neon"))) float32_t vrecpes_f32(float32_t __p0) {
float32_t __ret;
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vrecpes_f32(__p0));
return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrecpsq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __ret;
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrecpsq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
float64x1_t __ret;
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrecps_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 10));
return __ret;
}
__ai __attribute__((target("neon"))) float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
float64_t __ret;
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vrecpsd_f64(__p0, __p1));
return __ret;
}
__ai __attribute__((target("neon"))) float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
float32_t __ret;
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vrecpss_f32(__p0, __p1));
return __ret;
}
__ai __attribute__((target("neon"))) float64_t vrecpxd_f64(float64_t __p0) {
float64_t __ret;
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vrecpxd_f64(__p0));
return __ret;
}
__ai __attribute__((target("neon"))) float32_t vrecpxs_f32(float32_t __p0) {
float32_t __ret;
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vrecpxs_f32(__p0));
return __ret;
}
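/* The vreinterpret*_X_Y intrinsics below reinterpret the bits of a Y-typed vector as an
 * X-typed vector of the same overall size; no value conversion or lane reordering is
 * performed. */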
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_mf8(mfloat8x8_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
poly8x8_t __ret;
__ret = __builtin_bit_cast(poly8x8_t, __p0);
return __ret;
}
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_mf8(mfloat8x8_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
|
|
poly64x1_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_mf8(mfloat8x8_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_mf8(mfloat8x16_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_bit_cast(poly8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_mf8(mfloat8x16_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
|
|
poly128_t __ret;
|
|
__ret = __builtin_bit_cast(poly128_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_mf8(mfloat8x16_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_bit_cast(poly64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_mf8(mfloat8x16_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_bit_cast(poly16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_mf8(mfloat8x16_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_mf8(mfloat8x16_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_mf8(mfloat8x16_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_mf8(mfloat8x16_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_mf8(mfloat8x16_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_mf8(mfloat8x16_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_mf8(mfloat8x16_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_mf8(mfloat8x16_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_bit_cast(float16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_mf8(mfloat8x16_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_mf8(mfloat8x16_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_p8(poly8x16_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_p128(poly128_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_p64(poly64x2_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_p16(poly16x8_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_u8(uint8x16_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_u32(uint32x4_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_u64(uint64x2_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_u16(uint16x8_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_s8(int8x16_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_f64(float64x2_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_f32(float32x4_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_f16(float16x8_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_s32(int32x4_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_s64(int64x2_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vreinterpretq_mf8_s16(int16x8_t __p0) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x16_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_mf8(mfloat8x16_t __p0) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_mf8(mfloat8x8_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_mf8(mfloat8x8_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_mf8(mfloat8x8_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_mf8(mfloat8x8_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_mf8(mfloat8x8_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_bit_cast(int8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_mf8(mfloat8x8_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_mf8(mfloat8x8_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_mf8(mfloat8x8_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_bit_cast(float16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_mf8(mfloat8x8_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_bit_cast(int32x2_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_mf8(mfloat8x8_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_p8(poly8x8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_p64(poly64x1_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_p16(poly16x4_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_u8(uint8x8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_u32(uint32x2_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_u64(uint64x1_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_u16(uint16x4_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_s8(int8x8_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_f64(float64x1_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_f32(float32x2_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_f16(float16x4_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_s32(int32x2_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_s64(int64x1_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vreinterpret_mf8_s16(int16x4_t __p0) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_bit_cast(mfloat8x8_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_mf8(mfloat8x8_t __p0) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_bit_cast(int16x4_t, __p0);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vrshld_u64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vrshld_s64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vrshrd_n_u64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vrshrd_n_s64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_high_n_u32(__p0_740, __p1_740, __p2_740) __extension__ ({ \
|
|
uint16x8_t __ret_740; \
|
|
uint16x4_t __s0_740 = __p0_740; \
|
|
uint32x4_t __s1_740 = __p1_740; \
|
|
__ret_740 = __builtin_bit_cast(uint16x8_t, vcombine_u16(__builtin_bit_cast(uint16x4_t, __s0_740), __builtin_bit_cast(uint16x4_t, vrshrn_n_u32(__s1_740, __p2_740)))); \
|
|
__ret_740; \
|
|
})
|
|
#else
|
|
#define vrshrn_high_n_u32(__p0_741, __p1_741, __p2_741) __extension__ ({ \
|
|
uint16x8_t __ret_741; \
|
|
uint16x4_t __s0_741 = __p0_741; \
|
|
uint32x4_t __s1_741 = __p1_741; \
|
|
uint16x4_t __rev0_741; __rev0_741 = __builtin_shufflevector(__s0_741, __s0_741, __lane_reverse_64_16); \
|
|
uint32x4_t __rev1_741; __rev1_741 = __builtin_shufflevector(__s1_741, __s1_741, __lane_reverse_128_32); \
|
|
__ret_741 = __builtin_bit_cast(uint16x8_t, __noswap_vcombine_u16(__builtin_bit_cast(uint16x4_t, __rev0_741), __builtin_bit_cast(uint16x4_t, __noswap_vrshrn_n_u32(__rev1_741, __p2_741)))); \
|
|
__ret_741 = __builtin_shufflevector(__ret_741, __ret_741, __lane_reverse_128_16); \
|
|
__ret_741; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_high_n_u64(__p0_742, __p1_742, __p2_742) __extension__ ({ \
|
|
uint32x4_t __ret_742; \
|
|
uint32x2_t __s0_742 = __p0_742; \
|
|
uint64x2_t __s1_742 = __p1_742; \
|
|
__ret_742 = __builtin_bit_cast(uint32x4_t, vcombine_u32(__builtin_bit_cast(uint32x2_t, __s0_742), __builtin_bit_cast(uint32x2_t, vrshrn_n_u64(__s1_742, __p2_742)))); \
|
|
__ret_742; \
|
|
})
|
|
#else
|
|
#define vrshrn_high_n_u64(__p0_743, __p1_743, __p2_743) __extension__ ({ \
|
|
uint32x4_t __ret_743; \
|
|
uint32x2_t __s0_743 = __p0_743; \
|
|
uint64x2_t __s1_743 = __p1_743; \
|
|
uint32x2_t __rev0_743; __rev0_743 = __builtin_shufflevector(__s0_743, __s0_743, __lane_reverse_64_32); \
|
|
uint64x2_t __rev1_743; __rev1_743 = __builtin_shufflevector(__s1_743, __s1_743, __lane_reverse_128_64); \
|
|
__ret_743 = __builtin_bit_cast(uint32x4_t, __noswap_vcombine_u32(__builtin_bit_cast(uint32x2_t, __rev0_743), __builtin_bit_cast(uint32x2_t, __noswap_vrshrn_n_u64(__rev1_743, __p2_743)))); \
|
|
__ret_743 = __builtin_shufflevector(__ret_743, __ret_743, __lane_reverse_128_32); \
|
|
__ret_743; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_high_n_u16(__p0_744, __p1_744, __p2_744) __extension__ ({ \
|
|
uint8x16_t __ret_744; \
|
|
uint8x8_t __s0_744 = __p0_744; \
|
|
uint16x8_t __s1_744 = __p1_744; \
|
|
__ret_744 = __builtin_bit_cast(uint8x16_t, vcombine_u8(__builtin_bit_cast(uint8x8_t, __s0_744), __builtin_bit_cast(uint8x8_t, vrshrn_n_u16(__s1_744, __p2_744)))); \
|
|
__ret_744; \
|
|
})
|
|
#else
|
|
#define vrshrn_high_n_u16(__p0_745, __p1_745, __p2_745) __extension__ ({ \
|
|
uint8x16_t __ret_745; \
|
|
uint8x8_t __s0_745 = __p0_745; \
|
|
uint16x8_t __s1_745 = __p1_745; \
|
|
uint8x8_t __rev0_745; __rev0_745 = __builtin_shufflevector(__s0_745, __s0_745, __lane_reverse_64_8); \
|
|
uint16x8_t __rev1_745; __rev1_745 = __builtin_shufflevector(__s1_745, __s1_745, __lane_reverse_128_16); \
|
|
__ret_745 = __builtin_bit_cast(uint8x16_t, __noswap_vcombine_u8(__builtin_bit_cast(uint8x8_t, __rev0_745), __builtin_bit_cast(uint8x8_t, __noswap_vrshrn_n_u16(__rev1_745, __p2_745)))); \
|
|
__ret_745 = __builtin_shufflevector(__ret_745, __ret_745, __lane_reverse_128_8); \
|
|
__ret_745; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_high_n_s32(__p0_746, __p1_746, __p2_746) __extension__ ({ \
|
|
int16x8_t __ret_746; \
|
|
int16x4_t __s0_746 = __p0_746; \
|
|
int32x4_t __s1_746 = __p1_746; \
|
|
__ret_746 = __builtin_bit_cast(int16x8_t, vcombine_s16(__builtin_bit_cast(int16x4_t, __s0_746), __builtin_bit_cast(int16x4_t, vrshrn_n_s32(__s1_746, __p2_746)))); \
|
|
__ret_746; \
|
|
})
|
|
#else
|
|
#define vrshrn_high_n_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \
|
|
int16x8_t __ret_747; \
|
|
int16x4_t __s0_747 = __p0_747; \
|
|
int32x4_t __s1_747 = __p1_747; \
|
|
int16x4_t __rev0_747; __rev0_747 = __builtin_shufflevector(__s0_747, __s0_747, __lane_reverse_64_16); \
|
|
int32x4_t __rev1_747; __rev1_747 = __builtin_shufflevector(__s1_747, __s1_747, __lane_reverse_128_32); \
|
|
__ret_747 = __builtin_bit_cast(int16x8_t, __noswap_vcombine_s16(__builtin_bit_cast(int16x4_t, __rev0_747), __builtin_bit_cast(int16x4_t, __noswap_vrshrn_n_s32(__rev1_747, __p2_747)))); \
|
|
__ret_747 = __builtin_shufflevector(__ret_747, __ret_747, __lane_reverse_128_16); \
|
|
__ret_747; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_high_n_s64(__p0_748, __p1_748, __p2_748) __extension__ ({ \
|
|
int32x4_t __ret_748; \
|
|
int32x2_t __s0_748 = __p0_748; \
|
|
int64x2_t __s1_748 = __p1_748; \
|
|
__ret_748 = __builtin_bit_cast(int32x4_t, vcombine_s32(__builtin_bit_cast(int32x2_t, __s0_748), __builtin_bit_cast(int32x2_t, vrshrn_n_s64(__s1_748, __p2_748)))); \
|
|
__ret_748; \
|
|
})
|
|
#else
|
|
#define vrshrn_high_n_s64(__p0_749, __p1_749, __p2_749) __extension__ ({ \
|
|
int32x4_t __ret_749; \
|
|
int32x2_t __s0_749 = __p0_749; \
|
|
int64x2_t __s1_749 = __p1_749; \
|
|
int32x2_t __rev0_749; __rev0_749 = __builtin_shufflevector(__s0_749, __s0_749, __lane_reverse_64_32); \
|
|
int64x2_t __rev1_749; __rev1_749 = __builtin_shufflevector(__s1_749, __s1_749, __lane_reverse_128_64); \
|
|
__ret_749 = __builtin_bit_cast(int32x4_t, __noswap_vcombine_s32(__builtin_bit_cast(int32x2_t, __rev0_749), __builtin_bit_cast(int32x2_t, __noswap_vrshrn_n_s64(__rev1_749, __p2_749)))); \
|
|
__ret_749 = __builtin_shufflevector(__ret_749, __ret_749, __lane_reverse_128_32); \
|
|
__ret_749; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vrshrn_high_n_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \
|
|
int8x16_t __ret_750; \
|
|
int8x8_t __s0_750 = __p0_750; \
|
|
int16x8_t __s1_750 = __p1_750; \
|
|
__ret_750 = __builtin_bit_cast(int8x16_t, vcombine_s8(__builtin_bit_cast(int8x8_t, __s0_750), __builtin_bit_cast(int8x8_t, vrshrn_n_s16(__s1_750, __p2_750)))); \
|
|
__ret_750; \
|
|
})
|
|
#else
|
|
#define vrshrn_high_n_s16(__p0_751, __p1_751, __p2_751) __extension__ ({ \
|
|
int8x16_t __ret_751; \
|
|
int8x8_t __s0_751 = __p0_751; \
|
|
int16x8_t __s1_751 = __p1_751; \
|
|
int8x8_t __rev0_751; __rev0_751 = __builtin_shufflevector(__s0_751, __s0_751, __lane_reverse_64_8); \
|
|
int16x8_t __rev1_751; __rev1_751 = __builtin_shufflevector(__s1_751, __s1_751, __lane_reverse_128_16); \
|
|
__ret_751 = __builtin_bit_cast(int8x16_t, __noswap_vcombine_s8(__builtin_bit_cast(int8x8_t, __rev0_751), __builtin_bit_cast(int8x8_t, __noswap_vrshrn_n_s16(__rev1_751, __p2_751)))); \
|
|
__ret_751 = __builtin_shufflevector(__ret_751, __ret_751, __lane_reverse_128_8); \
|
|
__ret_751; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vrsqrteq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrsqrteq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vrsqrteq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrsqrteq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vrsqrte_f64(float64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrsqrte_v(__builtin_bit_cast(int8x8_t, __p0), 10));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64_t vrsqrted_f64(float64_t __p0) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vrsqrted_f64(__p0));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32_t vrsqrtes_f32(float32_t __p0) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vrsqrtes_f32(__p0));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrsqrtsq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrsqrtsq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrsqrts_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 10));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
|
|
float64_t __ret;
|
|
__ret = __builtin_bit_cast(float64_t, __builtin_neon_vrsqrtsd_f64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
|
|
float32_t __ret;
|
|
__ret = __builtin_bit_cast(float32_t, __builtin_neon_vrsqrtss_f32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
uint64_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int64_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint16x8_t __ret;
|
|
__ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint16x8_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int16x8_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int8x16_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
poly64_t __s0 = __p0; \
|
|
poly64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_vset_lane_i64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64_t __s0 = __p0; \
|
|
poly64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vsetq_lane_i64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64_t __s0 = __p0; \
|
|
poly64x2_t __s1 = __p1; \
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vsetq_lane_i64(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64_t __s0 = __p0; \
|
|
poly64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vsetq_lane_i64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vsetq_lane_f64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vsetq_lane_f64(__s0, __rev1, __p2)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64_t __s0 = __p0; \
|
|
float64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vsetq_lane_f64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64_t __s0 = __p0; \
|
|
float64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vset_lane_f64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
__ai __attribute__((target("neon"))) uint64_t vshld_u64(uint64_t __p0, int64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vshld_u64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64_t vshld_s64(int64_t __p0, int64_t __p1) {
|
|
int64_t __ret;
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vshld_s64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#define vshld_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vshld_n_u64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vshld_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vshld_n_s64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_high_n_u8(__p0_752, __p1_752) __extension__ ({ \
|
|
uint16x8_t __ret_752; \
|
|
uint8x16_t __s0_752 = __p0_752; \
|
|
__ret_752 = __builtin_bit_cast(uint16x8_t, vshll_n_u8(vget_high_u8(__s0_752), __p1_752)); \
|
|
__ret_752; \
|
|
})
|
|
#else
|
|
#define vshll_high_n_u8(__p0_753, __p1_753) __extension__ ({ \
|
|
uint16x8_t __ret_753; \
|
|
uint8x16_t __s0_753 = __p0_753; \
|
|
uint8x16_t __rev0_753; __rev0_753 = __builtin_shufflevector(__s0_753, __s0_753, __lane_reverse_128_8); \
|
|
__ret_753 = __builtin_bit_cast(uint16x8_t, __noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_753), __p1_753)); \
|
|
__ret_753 = __builtin_shufflevector(__ret_753, __ret_753, __lane_reverse_128_16); \
|
|
__ret_753; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_high_n_u32(__p0_754, __p1_754) __extension__ ({ \
|
|
uint64x2_t __ret_754; \
|
|
uint32x4_t __s0_754 = __p0_754; \
|
|
__ret_754 = __builtin_bit_cast(uint64x2_t, vshll_n_u32(vget_high_u32(__s0_754), __p1_754)); \
|
|
__ret_754; \
|
|
})
|
|
#else
|
|
#define vshll_high_n_u32(__p0_755, __p1_755) __extension__ ({ \
|
|
uint64x2_t __ret_755; \
|
|
uint32x4_t __s0_755 = __p0_755; \
|
|
uint32x4_t __rev0_755; __rev0_755 = __builtin_shufflevector(__s0_755, __s0_755, __lane_reverse_128_32); \
|
|
__ret_755 = __builtin_bit_cast(uint64x2_t, __noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_755), __p1_755)); \
|
|
__ret_755 = __builtin_shufflevector(__ret_755, __ret_755, __lane_reverse_128_64); \
|
|
__ret_755; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_high_n_u16(__p0_756, __p1_756) __extension__ ({ \
|
|
uint32x4_t __ret_756; \
|
|
uint16x8_t __s0_756 = __p0_756; \
|
|
__ret_756 = __builtin_bit_cast(uint32x4_t, vshll_n_u16(vget_high_u16(__s0_756), __p1_756)); \
|
|
__ret_756; \
|
|
})
|
|
#else
|
|
#define vshll_high_n_u16(__p0_757, __p1_757) __extension__ ({ \
|
|
uint32x4_t __ret_757; \
|
|
uint16x8_t __s0_757 = __p0_757; \
|
|
uint16x8_t __rev0_757; __rev0_757 = __builtin_shufflevector(__s0_757, __s0_757, __lane_reverse_128_16); \
|
|
__ret_757 = __builtin_bit_cast(uint32x4_t, __noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_757), __p1_757)); \
|
|
__ret_757 = __builtin_shufflevector(__ret_757, __ret_757, __lane_reverse_128_32); \
|
|
__ret_757; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_high_n_s8(__p0_758, __p1_758) __extension__ ({ \
|
|
int16x8_t __ret_758; \
|
|
int8x16_t __s0_758 = __p0_758; \
|
|
__ret_758 = __builtin_bit_cast(int16x8_t, vshll_n_s8(vget_high_s8(__s0_758), __p1_758)); \
|
|
__ret_758; \
|
|
})
|
|
#else
|
|
#define vshll_high_n_s8(__p0_759, __p1_759) __extension__ ({ \
|
|
int16x8_t __ret_759; \
|
|
int8x16_t __s0_759 = __p0_759; \
|
|
int8x16_t __rev0_759; __rev0_759 = __builtin_shufflevector(__s0_759, __s0_759, __lane_reverse_128_8); \
|
|
__ret_759 = __builtin_bit_cast(int16x8_t, __noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_759), __p1_759)); \
|
|
__ret_759 = __builtin_shufflevector(__ret_759, __ret_759, __lane_reverse_128_16); \
|
|
__ret_759; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_high_n_s32(__p0_760, __p1_760) __extension__ ({ \
|
|
int64x2_t __ret_760; \
|
|
int32x4_t __s0_760 = __p0_760; \
|
|
__ret_760 = __builtin_bit_cast(int64x2_t, vshll_n_s32(vget_high_s32(__s0_760), __p1_760)); \
|
|
__ret_760; \
|
|
})
|
|
#else
|
|
#define vshll_high_n_s32(__p0_761, __p1_761) __extension__ ({ \
|
|
int64x2_t __ret_761; \
|
|
int32x4_t __s0_761 = __p0_761; \
|
|
int32x4_t __rev0_761; __rev0_761 = __builtin_shufflevector(__s0_761, __s0_761, __lane_reverse_128_32); \
|
|
__ret_761 = __builtin_bit_cast(int64x2_t, __noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_761), __p1_761)); \
|
|
__ret_761 = __builtin_shufflevector(__ret_761, __ret_761, __lane_reverse_128_64); \
|
|
__ret_761; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshll_high_n_s16(__p0_762, __p1_762) __extension__ ({ \
|
|
int32x4_t __ret_762; \
|
|
int16x8_t __s0_762 = __p0_762; \
|
|
__ret_762 = __builtin_bit_cast(int32x4_t, vshll_n_s16(vget_high_s16(__s0_762), __p1_762)); \
|
|
__ret_762; \
|
|
})
|
|
#else
|
|
#define vshll_high_n_s16(__p0_763, __p1_763) __extension__ ({ \
|
|
int32x4_t __ret_763; \
|
|
int16x8_t __s0_763 = __p0_763; \
|
|
int16x8_t __rev0_763; __rev0_763 = __builtin_shufflevector(__s0_763, __s0_763, __lane_reverse_128_16); \
|
|
__ret_763 = __builtin_bit_cast(int32x4_t, __noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_763), __p1_763)); \
|
|
__ret_763 = __builtin_shufflevector(__ret_763, __ret_763, __lane_reverse_128_32); \
|
|
__ret_763; \
|
|
})
|
|
#endif
|
|
|
|
#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vshrd_n_u64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vshrd_n_s64(__s0, __p1)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_high_n_u32(__p0_764, __p1_764, __p2_764) __extension__ ({ \
|
|
uint16x8_t __ret_764; \
|
|
uint16x4_t __s0_764 = __p0_764; \
|
|
uint32x4_t __s1_764 = __p1_764; \
|
|
__ret_764 = __builtin_bit_cast(uint16x8_t, vcombine_u16(__builtin_bit_cast(uint16x4_t, __s0_764), __builtin_bit_cast(uint16x4_t, vshrn_n_u32(__s1_764, __p2_764)))); \
|
|
__ret_764; \
|
|
})
|
|
#else
|
|
#define vshrn_high_n_u32(__p0_765, __p1_765, __p2_765) __extension__ ({ \
|
|
uint16x8_t __ret_765; \
|
|
uint16x4_t __s0_765 = __p0_765; \
|
|
uint32x4_t __s1_765 = __p1_765; \
|
|
uint16x4_t __rev0_765; __rev0_765 = __builtin_shufflevector(__s0_765, __s0_765, __lane_reverse_64_16); \
|
|
uint32x4_t __rev1_765; __rev1_765 = __builtin_shufflevector(__s1_765, __s1_765, __lane_reverse_128_32); \
|
|
__ret_765 = __builtin_bit_cast(uint16x8_t, __noswap_vcombine_u16(__builtin_bit_cast(uint16x4_t, __rev0_765), __builtin_bit_cast(uint16x4_t, __noswap_vshrn_n_u32(__rev1_765, __p2_765)))); \
|
|
__ret_765 = __builtin_shufflevector(__ret_765, __ret_765, __lane_reverse_128_16); \
|
|
__ret_765; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_high_n_u64(__p0_766, __p1_766, __p2_766) __extension__ ({ \
|
|
uint32x4_t __ret_766; \
|
|
uint32x2_t __s0_766 = __p0_766; \
|
|
uint64x2_t __s1_766 = __p1_766; \
|
|
__ret_766 = __builtin_bit_cast(uint32x4_t, vcombine_u32(__builtin_bit_cast(uint32x2_t, __s0_766), __builtin_bit_cast(uint32x2_t, vshrn_n_u64(__s1_766, __p2_766)))); \
|
|
__ret_766; \
|
|
})
|
|
#else
|
|
#define vshrn_high_n_u64(__p0_767, __p1_767, __p2_767) __extension__ ({ \
|
|
uint32x4_t __ret_767; \
|
|
uint32x2_t __s0_767 = __p0_767; \
|
|
uint64x2_t __s1_767 = __p1_767; \
|
|
uint32x2_t __rev0_767; __rev0_767 = __builtin_shufflevector(__s0_767, __s0_767, __lane_reverse_64_32); \
|
|
uint64x2_t __rev1_767; __rev1_767 = __builtin_shufflevector(__s1_767, __s1_767, __lane_reverse_128_64); \
|
|
__ret_767 = __builtin_bit_cast(uint32x4_t, __noswap_vcombine_u32(__builtin_bit_cast(uint32x2_t, __rev0_767), __builtin_bit_cast(uint32x2_t, __noswap_vshrn_n_u64(__rev1_767, __p2_767)))); \
|
|
__ret_767 = __builtin_shufflevector(__ret_767, __ret_767, __lane_reverse_128_32); \
|
|
__ret_767; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_high_n_u16(__p0_768, __p1_768, __p2_768) __extension__ ({ \
|
|
uint8x16_t __ret_768; \
|
|
uint8x8_t __s0_768 = __p0_768; \
|
|
uint16x8_t __s1_768 = __p1_768; \
|
|
__ret_768 = __builtin_bit_cast(uint8x16_t, vcombine_u8(__builtin_bit_cast(uint8x8_t, __s0_768), __builtin_bit_cast(uint8x8_t, vshrn_n_u16(__s1_768, __p2_768)))); \
|
|
__ret_768; \
|
|
})
|
|
#else
|
|
#define vshrn_high_n_u16(__p0_769, __p1_769, __p2_769) __extension__ ({ \
|
|
uint8x16_t __ret_769; \
|
|
uint8x8_t __s0_769 = __p0_769; \
|
|
uint16x8_t __s1_769 = __p1_769; \
|
|
uint8x8_t __rev0_769; __rev0_769 = __builtin_shufflevector(__s0_769, __s0_769, __lane_reverse_64_8); \
|
|
uint16x8_t __rev1_769; __rev1_769 = __builtin_shufflevector(__s1_769, __s1_769, __lane_reverse_128_16); \
|
|
__ret_769 = __builtin_bit_cast(uint8x16_t, __noswap_vcombine_u8(__builtin_bit_cast(uint8x8_t, __rev0_769), __builtin_bit_cast(uint8x8_t, __noswap_vshrn_n_u16(__rev1_769, __p2_769)))); \
|
|
__ret_769 = __builtin_shufflevector(__ret_769, __ret_769, __lane_reverse_128_8); \
|
|
__ret_769; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_high_n_s32(__p0_770, __p1_770, __p2_770) __extension__ ({ \
|
|
int16x8_t __ret_770; \
|
|
int16x4_t __s0_770 = __p0_770; \
|
|
int32x4_t __s1_770 = __p1_770; \
|
|
__ret_770 = __builtin_bit_cast(int16x8_t, vcombine_s16(__builtin_bit_cast(int16x4_t, __s0_770), __builtin_bit_cast(int16x4_t, vshrn_n_s32(__s1_770, __p2_770)))); \
|
|
__ret_770; \
|
|
})
|
|
#else
|
|
#define vshrn_high_n_s32(__p0_771, __p1_771, __p2_771) __extension__ ({ \
|
|
int16x8_t __ret_771; \
|
|
int16x4_t __s0_771 = __p0_771; \
|
|
int32x4_t __s1_771 = __p1_771; \
|
|
int16x4_t __rev0_771; __rev0_771 = __builtin_shufflevector(__s0_771, __s0_771, __lane_reverse_64_16); \
|
|
int32x4_t __rev1_771; __rev1_771 = __builtin_shufflevector(__s1_771, __s1_771, __lane_reverse_128_32); \
|
|
__ret_771 = __builtin_bit_cast(int16x8_t, __noswap_vcombine_s16(__builtin_bit_cast(int16x4_t, __rev0_771), __builtin_bit_cast(int16x4_t, __noswap_vshrn_n_s32(__rev1_771, __p2_771)))); \
|
|
__ret_771 = __builtin_shufflevector(__ret_771, __ret_771, __lane_reverse_128_16); \
|
|
__ret_771; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_high_n_s64(__p0_772, __p1_772, __p2_772) __extension__ ({ \
|
|
int32x4_t __ret_772; \
|
|
int32x2_t __s0_772 = __p0_772; \
|
|
int64x2_t __s1_772 = __p1_772; \
|
|
__ret_772 = __builtin_bit_cast(int32x4_t, vcombine_s32(__builtin_bit_cast(int32x2_t, __s0_772), __builtin_bit_cast(int32x2_t, vshrn_n_s64(__s1_772, __p2_772)))); \
|
|
__ret_772; \
|
|
})
|
|
#else
|
|
#define vshrn_high_n_s64(__p0_773, __p1_773, __p2_773) __extension__ ({ \
|
|
int32x4_t __ret_773; \
|
|
int32x2_t __s0_773 = __p0_773; \
|
|
int64x2_t __s1_773 = __p1_773; \
|
|
int32x2_t __rev0_773; __rev0_773 = __builtin_shufflevector(__s0_773, __s0_773, __lane_reverse_64_32); \
|
|
int64x2_t __rev1_773; __rev1_773 = __builtin_shufflevector(__s1_773, __s1_773, __lane_reverse_128_64); \
|
|
__ret_773 = __builtin_bit_cast(int32x4_t, __noswap_vcombine_s32(__builtin_bit_cast(int32x2_t, __rev0_773), __builtin_bit_cast(int32x2_t, __noswap_vshrn_n_s64(__rev1_773, __p2_773)))); \
|
|
__ret_773 = __builtin_shufflevector(__ret_773, __ret_773, __lane_reverse_128_32); \
|
|
__ret_773; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vshrn_high_n_s16(__p0_774, __p1_774, __p2_774) __extension__ ({ \
|
|
int8x16_t __ret_774; \
|
|
int8x8_t __s0_774 = __p0_774; \
|
|
int16x8_t __s1_774 = __p1_774; \
|
|
__ret_774 = __builtin_bit_cast(int8x16_t, vcombine_s8(__builtin_bit_cast(int8x8_t, __s0_774), __builtin_bit_cast(int8x8_t, vshrn_n_s16(__s1_774, __p2_774)))); \
|
|
__ret_774; \
|
|
})
|
|
#else
|
|
#define vshrn_high_n_s16(__p0_775, __p1_775, __p2_775) __extension__ ({ \
|
|
int8x16_t __ret_775; \
|
|
int8x8_t __s0_775 = __p0_775; \
|
|
int16x8_t __s1_775 = __p1_775; \
|
|
int8x8_t __rev0_775; __rev0_775 = __builtin_shufflevector(__s0_775, __s0_775, __lane_reverse_64_8); \
|
|
int16x8_t __rev1_775; __rev1_775 = __builtin_shufflevector(__s1_775, __s1_775, __lane_reverse_128_16); \
|
|
__ret_775 = __builtin_bit_cast(int8x16_t, __noswap_vcombine_s8(__builtin_bit_cast(int8x8_t, __rev0_775), __builtin_bit_cast(int8x8_t, __noswap_vshrn_n_s16(__rev1_775, __p2_775)))); \
|
|
__ret_775 = __builtin_shufflevector(__ret_775, __ret_775, __lane_reverse_128_8); \
|
|
__ret_775; \
|
|
})
|
|
#endif
|
|
|
|
#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
uint64_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vslid_n_u64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int64_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vslid_n_s64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
poly64x1_t __s0 = __p0; \
|
|
poly64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_vsli_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 6)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
poly64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 38)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
poly64x2_t __s1 = __p1; \
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vsliq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 38)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) {
|
|
uint8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8_t, __builtin_neon_vsqaddb_u8(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) {
|
|
uint32_t __ret;
|
|
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vsqadds_u32(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) {
|
|
uint64_t __ret;
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vsqaddd_u64(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) {
|
|
uint16_t __ret;
|
|
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vsqaddh_u16(__p0, __p1));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vsqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vsqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 16));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(uint8x8_t, __builtin_neon_vsqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 16));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 18));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint32x2_t, __builtin_neon_vsqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 18));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) {
|
|
uint64x1_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vsqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 17));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint16x4_t, __builtin_neon_vsqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 17));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vsqrtq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vsqrtq_v(__builtin_bit_cast(int8x16_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vsqrtq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vsqrtq_v(__builtin_bit_cast(int8x16_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vsqrtq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vsqrtq_v(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vsqrtq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vsqrtq_v(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) float64x1_t vsqrt_f64(float64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vsqrt_v(__builtin_bit_cast(int8x8_t, __p0), 10));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vsqrt_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vsqrt_v(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vsqrt_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vsqrt_v(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
uint64_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vsrad_n_u64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int64_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vsrad_n_s64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64_t __ret; \
|
|
uint64_t __s0 = __p0; \
|
|
uint64_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vsrid_n_u64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64_t __ret; \
|
|
int64_t __s0 = __p0; \
|
|
int64_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vsrid_n_s64(__s0, __s1, __p2)); \
|
|
__ret; \
|
|
})
|
|
#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
poly64x1_t __s0 = __p0; \
|
|
poly64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_vsri_n_v(__builtin_bit_cast(int8x8_t, __s0), __builtin_bit_cast(int8x8_t, __s1), __p2, 6)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
poly64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 38)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s0 = __p0; \
|
|
poly64x2_t __s1 = __p1; \
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vsriq_n_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 38)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x1_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 38); \
|
|
})
|
|
#else
|
|
#define vst1q_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2_t __s1 = __p1; \
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 38); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 42); \
|
|
})
|
|
#else
|
|
#define vst1q_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __s1), 44); \
|
|
})
|
|
#else
|
|
#define vst1q_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_f64(__p0, __p1) __extension__ ({ \
|
|
float64x1_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 10); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __s1), 12); \
|
|
})
|
|
#else
|
|
#define vst1_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), 12); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 38); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __s1 = __p1; \
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 38); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 42); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 44); \
|
|
})
|
|
#else
|
|
#define vst1q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16_t __s1 = __p1; \
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x1_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 10); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 12); \
|
|
})
|
|
#else
|
|
#define vst1_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8_t __s1 = __p1; \
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1), __p2, 12); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_p64_x2(__p0, __p1) __extension__ ({ \
|
|
poly64x1x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
|
|
poly64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 38); \
|
|
})
|
|
#else
|
|
#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
|
|
poly64x2x2_t __s1 = __p1; \
|
|
poly64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 38); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
|
|
float64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 42); \
|
|
})
|
|
#else
|
|
#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
|
|
float64x2x2_t __s1 = __p1; \
|
|
float64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_mf8_x2(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 44); \
|
|
})
|
|
#else
|
|
#define vst1q_mf8_x2(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x2_t __s1 = __p1; \
|
|
mfloat8x16x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_x2_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_f64_x2(__p0, __p1) __extension__ ({ \
|
|
float64x1x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 10); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_mf8_x2(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 12); \
|
|
})
|
|
#else
|
|
#define vst1_mf8_x2(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8x2_t __s1 = __p1; \
|
|
mfloat8x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_x2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 12); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_p64_x3(__p0, __p1) __extension__ ({ \
|
|
poly64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
|
|
poly64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 38); \
|
|
})
|
|
#else
|
|
#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
|
|
poly64x2x3_t __s1 = __p1; \
|
|
poly64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 38); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
|
|
float64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 42); \
|
|
})
|
|
#else
|
|
#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
|
|
float64x2x3_t __s1 = __p1; \
|
|
float64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_mf8_x3(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 44); \
|
|
})
|
|
#else
|
|
#define vst1q_mf8_x3(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x3_t __s1 = __p1; \
|
|
mfloat8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_x3_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_f64_x3(__p0, __p1) __extension__ ({ \
|
|
float64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 10); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
#define vst1_mf8_x3(__p0, __p1) __extension__ ({ \
  mfloat8x8x3_t __s1 = __p1; \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 12); \
})
#else
#define vst1_mf8_x3(__p0, __p1) __extension__ ({ \
  mfloat8x8x3_t __s1 = __p1; \
  mfloat8x8x3_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
  __builtin_neon_vst1_x3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 12); \
})
#endif

#define vst1_p64_x4(__p0, __p1) __extension__ ({ \
  poly64x1x4_t __s1 = __p1; \
  __builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 6); \
})
#ifdef __LITTLE_ENDIAN__
#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
  poly64x2x4_t __s1 = __p1; \
  __builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 38); \
})
#else
#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
  poly64x2x4_t __s1 = __p1; \
  poly64x2x4_t __rev1; \
  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
  __builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 38); \
})
#endif

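/* Illustrative note: on big-endian targets the q-form macros above first reverse
 * the lane order of every source register with __builtin_shufflevector and then
 * call the same __builtin_neon_* store, so the bytes written to memory follow the
 * little-endian lane numbering the intrinsics are specified against. From the
 * caller's side the behaviour is identical on either endianness, e.g. (with __p
 * and __q as hypothetical names):
 *
 *   vst1q_p64_x4(__p, __q);   // __p: poly64_t *, __q: poly64x2x4_t
 */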
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
|
|
float64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 42); \
|
|
})
|
|
#else
|
|
#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
|
|
float64x2x4_t __s1 = __p1; \
|
|
float64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1q_mf8_x4(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 44); \
|
|
})
|
|
#else
|
|
#define vst1q_mf8_x4(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x4_t __s1 = __p1; \
|
|
mfloat8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst1q_x4_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst1_f64_x4(__p0, __p1) __extension__ ({ \
|
|
float64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 10); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst1_mf8_x4(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 12); \
|
|
})
|
|
#else
|
|
#define vst1_mf8_x4(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8x4_t __s1 = __p1; \
|
|
mfloat8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst1_x4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 12); \
|
|
})
|
|
#endif
|
|
|
|
#define vst2_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x1x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 38); \
|
|
})
|
|
#else
|
|
#define vst2q_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2x2_t __s1 = __p1; \
|
|
poly64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 38); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 51); \
|
|
})
|
|
#else
|
|
#define vst2q_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2x2_t __s1 = __p1; \
|
|
uint64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 51); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 42); \
|
|
})
|
|
#else
|
|
#define vst2q_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2x2_t __s1 = __p1; \
|
|
float64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 35); \
|
|
})
|
|
#else
|
|
#define vst2q_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2x2_t __s1 = __p1; \
|
|
int64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 35); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), 44); \
|
|
})
|
|
#else
|
|
#define vst2q_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x2_t __s1 = __p1; \
|
|
mfloat8x16x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__builtin_neon_vst2q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst2_f64(__p0, __p1) __extension__ ({ \
|
|
float64x1x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 10); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), 12); \
|
|
})
|
|
#else
|
|
#define vst2_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8x2_t __s1 = __p1; \
|
|
mfloat8x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__builtin_neon_vst2_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), 12); \
|
|
})
|
|
#endif
|
|
|
|
#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 36); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x2_t __s1 = __p1; \
|
|
poly8x16x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 36); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 38); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2x2_t __s1 = __p1; \
|
|
poly64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 38); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 48); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16x2_t __s1 = __p1; \
|
|
uint8x16x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 48); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 51); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2x2_t __s1 = __p1; \
|
|
uint64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 51); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 32); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x2_t __s1 = __p1; \
|
|
int8x16x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 32); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 42); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x2_t __s1 = __p1; \
|
|
float64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 35); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x2_t __s1 = __p1; \
|
|
int64x2x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 35); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __p2, 44); \
|
|
})
|
|
#else
|
|
#define vst2q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x2_t __s1 = __p1; \
|
|
mfloat8x16x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__builtin_neon_vst2q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __p2, 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  uint64x1x2_t __s1 = __p1; \
  __builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 19); \
})
#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  float64x1x2_t __s1 = __p1; \
  __builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 10); \
})
#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  int64x1x2_t __s1 = __p1; \
  __builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 3); \
})
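/* Illustrative usage sketch (hypothetical __src, __ptr and __pair names): the
 * vst2_lane_* forms store the selected lane from each of the two registers
 * consecutively, so the call below writes __pair.val[0][0] to __ptr[0] and
 * __pair.val[1][0] to __ptr[1].
 *
 *   float64x1x2_t __pair = vld2_f64(__src);  // __src must point at 2 float64_t
 *   vst2_lane_f64(__ptr, __pair, 0);         // lane index must be 0 for x1 vectors
 */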
#ifdef __LITTLE_ENDIAN__
|
|
#define vst2_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x2_t __s1 = __p1; \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __p2, 12); \
|
|
})
|
|
#else
|
|
#define vst2_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x2_t __s1 = __p1; \
|
|
mfloat8x8x2_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__builtin_neon_vst2_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __p2, 12); \
|
|
})
|
|
#endif
|
|
|
|
#define vst3_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 38); \
|
|
})
|
|
#else
|
|
#define vst3q_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2x3_t __s1 = __p1; \
|
|
poly64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 38); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 51); \
|
|
})
|
|
#else
|
|
#define vst3q_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2x3_t __s1 = __p1; \
|
|
uint64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 51); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 42); \
|
|
})
|
|
#else
|
|
#define vst3q_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2x3_t __s1 = __p1; \
|
|
float64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 35); \
|
|
})
|
|
#else
|
|
#define vst3q_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2x3_t __s1 = __p1; \
|
|
int64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 35); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), 44); \
|
|
})
|
|
#else
|
|
#define vst3q_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x3_t __s1 = __p1; \
|
|
mfloat8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vst3q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst3_f64(__p0, __p1) __extension__ ({ \
|
|
float64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 10); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), 12); \
|
|
})
|
|
#else
|
|
#define vst3_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8x3_t __s1 = __p1; \
|
|
mfloat8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vst3_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), 12); \
|
|
})
|
|
#endif
|
|
|
|
#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 36); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x3_t __s1 = __p1; \
|
|
poly8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 36); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 38); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2x3_t __s1 = __p1; \
|
|
poly64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 38); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 48); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16x3_t __s1 = __p1; \
|
|
uint8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 48); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 51); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2x3_t __s1 = __p1; \
|
|
uint64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 51); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 32); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x3_t __s1 = __p1; \
|
|
int8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 32); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 42); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x3_t __s1 = __p1; \
|
|
float64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 35); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x3_t __s1 = __p1; \
|
|
int64x2x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 35); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __p2, 44); \
|
|
})
|
|
#else
|
|
#define vst3q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x3_t __s1 = __p1; \
|
|
mfloat8x16x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__builtin_neon_vst3q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __p2, 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 19); \
|
|
})
|
|
#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 10); \
|
|
})
|
|
#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 3); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst3_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x3_t __s1 = __p1; \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __p2, 12); \
|
|
})
|
|
#else
|
|
#define vst3_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x3_t __s1 = __p1; \
|
|
mfloat8x8x3_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__builtin_neon_vst3_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __p2, 12); \
|
|
})
|
|
#endif
|
|
|
|
#define vst4_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 38); \
|
|
})
|
|
#else
|
|
#define vst4q_p64(__p0, __p1) __extension__ ({ \
|
|
poly64x2x4_t __s1 = __p1; \
|
|
poly64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 38); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 51); \
|
|
})
|
|
#else
|
|
#define vst4q_u64(__p0, __p1) __extension__ ({ \
|
|
uint64x2x4_t __s1 = __p1; \
|
|
uint64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 51); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 42); \
|
|
})
|
|
#else
|
|
#define vst4q_f64(__p0, __p1) __extension__ ({ \
|
|
float64x2x4_t __s1 = __p1; \
|
|
float64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 35); \
|
|
})
|
|
#else
|
|
#define vst4q_s64(__p0, __p1) __extension__ ({ \
|
|
int64x2x4_t __s1 = __p1; \
|
|
int64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 35); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), 44); \
|
|
})
|
|
#else
|
|
#define vst4q_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x16x4_t __s1 = __p1; \
|
|
mfloat8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst4q_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst4_f64(__p0, __p1) __extension__ ({ \
|
|
float64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 10); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), 12); \
|
|
})
|
|
#else
|
|
#define vst4_mf8(__p0, __p1) __extension__ ({ \
|
|
mfloat8x8x4_t __s1 = __p1; \
|
|
mfloat8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst4_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), 12); \
|
|
})
|
|
#endif
|
|
|
|
#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 36); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
|
|
poly8x16x4_t __s1 = __p1; \
|
|
poly8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 36); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 38); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2x4_t __s1 = __p1; \
|
|
poly64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 38); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 48); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
|
|
uint8x16x4_t __s1 = __p1; \
|
|
uint8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 48); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 51); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2x4_t __s1 = __p1; \
|
|
uint64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 51); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 32); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
|
|
int8x16x4_t __s1 = __p1; \
|
|
int8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 32); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 42); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2x4_t __s1 = __p1; \
|
|
float64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 42); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 35); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2x4_t __s1 = __p1; \
|
|
int64x2x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_64); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_64); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_64); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_64); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 35); \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __s1.val[0]), __builtin_bit_cast(int8x16_t, __s1.val[1]), __builtin_bit_cast(int8x16_t, __s1.val[2]), __builtin_bit_cast(int8x16_t, __s1.val[3]), __p2, 44); \
|
|
})
|
|
#else
|
|
#define vst4q_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x16x4_t __s1 = __p1; \
|
|
mfloat8x16x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_128_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_128_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_128_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_128_8); \
|
|
__builtin_neon_vst4q_lane_v(__p0, __builtin_bit_cast(int8x16_t, __rev1.val[0]), __builtin_bit_cast(int8x16_t, __rev1.val[1]), __builtin_bit_cast(int8x16_t, __rev1.val[2]), __builtin_bit_cast(int8x16_t, __rev1.val[3]), __p2, 44); \
|
|
})
|
|
#endif
|
|
|
|
#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 19); \
|
|
})
|
|
#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 10); \
|
|
})
|
|
#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 3); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vst4_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x4_t __s1 = __p1; \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __s1.val[0]), __builtin_bit_cast(int8x8_t, __s1.val[1]), __builtin_bit_cast(int8x8_t, __s1.val[2]), __builtin_bit_cast(int8x8_t, __s1.val[3]), __p2, 12); \
|
|
})
|
|
#else
|
|
#define vst4_lane_mf8(__p0, __p1, __p2) __extension__ ({ \
|
|
mfloat8x8x4_t __s1 = __p1; \
|
|
mfloat8x8x4_t __rev1; \
|
|
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], __lane_reverse_64_8); \
|
|
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], __lane_reverse_64_8); \
|
|
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], __lane_reverse_64_8); \
|
|
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], __lane_reverse_64_8); \
|
|
__builtin_neon_vst4_lane_v(__p0, __builtin_bit_cast(int8x8_t, __rev1.val[0]), __builtin_bit_cast(int8x8_t, __rev1.val[1]), __builtin_bit_cast(int8x8_t, __rev1.val[2]), __builtin_bit_cast(int8x8_t, __rev1.val[3]), __p2, 12); \
|
|
})
|
|
#endif
|
|
|
|
#define vstrq_p128(__p0, __p1) __extension__ ({ \
  poly128_t __s1 = __p1; \
  __builtin_neon_vstrq_p128(__p0, __s1); \
})
__ai __attribute__((target("neon"))) uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
  uint64_t __ret;
  __ret = __builtin_bit_cast(uint64_t, __builtin_neon_vsubd_u64(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
  int64_t __ret;
  __ret = __builtin_bit_cast(int64_t, __builtin_neon_vsubd_s64(__p0, __p1));
  return __ret;
}
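/* Illustrative usage sketch (hypothetical names): the scalar forms operate on a
 * single 64-bit element with the usual modulo-2^64 wraparound, e.g.
 *
 *   uint64_t __d = vsubd_u64(__a, __b);   // __d == __a - __b
 */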
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
  float64x2_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
  float64x2_t __ret;
  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __rev0 - __rev1;
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

__ai __attribute__((target("neon"))) float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
  float64x1_t __ret;
  __ret = __p0 - __p1;
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint16x8_t __ret;
  __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint16x8_t __ret;
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

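/* Illustrative note: vsubhn_high_u32 narrows (__p1 - __p2) to the high half of
 * each 32-bit difference and places the narrowed result in the upper half of the
 * return vector, keeping __p0 as the lower half, i.e. it is equivalent to
 * vcombine_u16(__p0, vsubhn_u32(__p1, __p2)) as in the little-endian body above.
 *
 *   uint16x8_t __r = vsubhn_high_u32(__lo, __a, __b);  // hypothetical names
 */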
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int16x8_t __ret;
  __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int16x8_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  int32x4_t __ret;
  __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  int32x4_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
  __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int8x16_t __ret;
  __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int8x16_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
  uint16x8_t __ret;
  __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
  uint16x8_t __ret;
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint64x2_t __ret;
  __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint64x2_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint32x4_t __ret;
  __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
  uint32x4_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
  int16x8_t __ret;
  __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
  int16x8_t __ret;
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
  int64x2_t __ret;
  __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
  int64x2_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
  int32x4_t __ret;
  __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
  int32x4_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
  uint16x8_t __ret;
  __ret = __p0 - vmovl_high_u8(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
  uint64x2_t __ret;
  __ret = __p0 - vmovl_high_u32(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
  uint64x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
  uint32x4_t __ret;
  __ret = __p0 - vmovl_high_u16(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
  int16x8_t __ret;
  __ret = __p0 - vmovl_high_s8(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
  int64x2_t __ret;
  __ret = __p0 - vmovl_high_s32(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
  int32x4_t __ret;
  __ret = __p0 - vmovl_high_s16(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

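/* The vtrn1_ and vtrn2_ families that follow transpose pairs of lanes: vtrn1
   interleaves the even-numbered lanes of the two operands and vtrn2 the
   odd-numbered ones, as the shuffle index lists (0, n, 2, n+2, ... and
   1, n+1, 3, n+3, ...) show. */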
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vtrn1q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vtrn1q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtrn1_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtrn1_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vtrn2q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vtrn2q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtrn2_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vtrn2_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
  uint64x1_t __ret;
  __ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
  return __ret;
}
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vtstq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("neon"))) uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
  uint64x1_t __ret;
  __ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
  return __ret;
}
__ai __attribute__((target("neon"))) uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
  uint64x1_t __ret;
  __ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vtst_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 19));
  return __ret;
}
__ai __attribute__((target("neon"))) uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
  uint64_t __ret;
  __ret = __builtin_bit_cast(uint64_t, __builtin_neon_vtstd_u64(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) uint64_t vtstd_s64(int64_t __p0, int64_t __p1) {
  uint64_t __ret;
  __ret = __builtin_bit_cast(uint64_t, __builtin_neon_vtstd_s64(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) {
  int8_t __ret;
  __ret = __builtin_bit_cast(int8_t, __builtin_neon_vuqaddb_s8(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) {
  int32_t __ret;
  __ret = __builtin_bit_cast(int32_t, __builtin_neon_vuqadds_s32(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) {
  int64_t __ret;
  __ret = __builtin_bit_cast(int64_t, __builtin_neon_vuqaddd_s64(__p0, __p1));
  return __ret;
}
__ai __attribute__((target("neon"))) int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) {
  int16_t __ret;
  __ret = __builtin_bit_cast(int16_t, __builtin_neon_vuqaddh_s16(__p0, __p1));
  return __ret;
}
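/* vuqadd is the signed saturating accumulate of an unsigned value (the AArch64
   SUQADD family): the scalar forms above and the vector forms below take a
   signed accumulator plus an unsigned addend and route through the
   corresponding __builtin_neon_vuqadd builtins. */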
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vuqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vuqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vuqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vuqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vuqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vuqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
  int16x8_t __ret;
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vuqaddq_v(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 33));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vuqaddq_v(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 33));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
  int8x8_t __ret;
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vuqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 0));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
  int8x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(int8x8_t, __builtin_neon_vuqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 0));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
  int32x2_t __ret;
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vuqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
  int32x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_bit_cast(int32x2_t, __builtin_neon_vuqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif
__ai __attribute__((target("neon"))) int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) {
|
|
int64x1_t __ret;
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vuqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 3));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
  int16x4_t __ret;
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vuqadd_v(__builtin_bit_cast(int8x8_t, __p0), __builtin_bit_cast(int8x8_t, __p1), 1));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
  int16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_bit_cast(int16x4_t, __builtin_neon_vuqadd_v(__builtin_bit_cast(int8x8_t, __rev0), __builtin_bit_cast(int8x8_t, __rev1), 1));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif
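/* A minimal usage sketch (hypothetical values): the vuqadd family above performs
 * a signed saturating add of an unsigned operand to a signed operand (SUQADD
 * semantics), saturating instead of wrapping on overflow, e.g.
 *
 *   int8x16_t  acc = vdupq_n_s8(120);
 *   uint8x16_t inc = vdupq_n_u8(20);
 *   int8x16_t  r   = vuqaddq_s8(acc, inc);  // every lane saturates to 127
 */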
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vuzp1q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vuzp1q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vuzp1_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vuzp1_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
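/* A minimal usage sketch (hypothetical lane values): vuzp1* returns the
 * even-indexed elements of the concatenation of its two arguments (UZP1), so,
 * writing vectors by lane,
 *
 *   vuzp1q_s32({0, 1, 2, 3}, {4, 5, 6, 7})  ==  {0, 2, 4, 6}
 */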
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vuzp2q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vuzp2q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vuzp2_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vuzp2_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
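/* A minimal usage sketch (hypothetical lane values): vuzp2* is the odd-index
 * counterpart of vuzp1* (UZP2), and the vzip1* family that follows interleaves
 * the low halves of its two arguments (ZIP1):
 *
 *   vuzp2q_s32({0, 1, 2, 3}, {4, 5, 6, 7})  ==  {1, 3, 5, 7}
 *   vzip1q_s32({0, 1, 2, 3}, {4, 5, 6, 7})  ==  {0, 4, 1, 5}
 */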
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
|
|
poly8x8_t __ret;
|
|
poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
|
|
poly16x4_t __ret;
|
|
poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
|
|
poly8x16_t __ret;
|
|
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
|
|
poly64x2_t __ret;
|
|
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
|
|
poly16x8_t __ret;
|
|
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vzip1q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vzip1q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __ret;
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
  float32x2_t __ret;
  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) mfloat8x8_t vzip1_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
  mfloat8x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) mfloat8x8_t vzip1_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
  mfloat8x8_t __ret;
  mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
  float16x8_t __ret;
  float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
  float16x4_t __ret;
  float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly8x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
  poly8x8_t __ret;
  poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
  poly16x4_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
  poly16x4_t __ret;
  poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
  poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  poly8x16_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  poly8x16_t __ret;
  poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  poly64x2_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  poly64x2_t __ret;
  poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  poly16x8_t __ret;
  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  poly16x8_t __ret;
  poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vzip2q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x16_t vzip2q_mf8(mfloat8x16_t __p0, mfloat8x16_t __p1) {
|
|
mfloat8x16_t __ret;
|
|
mfloat8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
mfloat8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 1, 3);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vzip2_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) mfloat8x8_t vzip2_mf8(mfloat8x8_t __p0, mfloat8x8_t __p1) {
|
|
mfloat8x8_t __ret;
|
|
mfloat8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
mfloat8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
|
|
float16x8_t __ret;
|
|
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
__ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
|
|
float16x4_t __ret;
|
|
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
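/* The vldap1(q)_lane_* and vstl1(q)_lane_* macros below use the same
 * big-endian handling as the intrinsics above: the vector operand is
 * lane-reversed with __builtin_shufflevector before the __builtin_neon_*
 * call, and any vector result is reversed back afterwards; the
 * single-element 64x1 forms need no reversal. */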
#define vldap1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1_t __ret; \
|
|
poly64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x1_t, __builtin_neon_vldap1_lane_p64(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 6)); \
|
|
__ret; \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vldap1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vldap1q_lane_p64(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 38)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vldap1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __ret; \
|
|
poly64x2_t __s1 = __p1; \
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(poly64x2_t, __builtin_neon_vldap1q_lane_p64(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 38)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vldap1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vldap1q_lane_u64(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vldap1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vldap1q_lane_u64(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vldap1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vldap1q_lane_f64(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 42)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vldap1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __ret; \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vldap1q_lane_f64(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 42)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vldap1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vldap1q_lane_s64(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 35)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vldap1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __ret; \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vldap1q_lane_s64(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 35)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
#define vldap1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1_t __ret; \
|
|
uint64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x1_t, __builtin_neon_vldap1_lane_u64(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 19)); \
|
|
__ret; \
|
|
})
|
|
#define vldap1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x1_t __ret; \
|
|
float64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vldap1_lane_f64(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 10)); \
|
|
__ret; \
|
|
})
|
|
#define vldap1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1_t __ret; \
|
|
int64x1_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(int64x1_t, __builtin_neon_vldap1_lane_s64(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 3)); \
|
|
__ret; \
|
|
})
|
|
#define vstl1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x1_t __s1 = __p1; \
|
|
__builtin_neon_vstl1_lane_p64(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 6); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vstl1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __s1 = __p1; \
|
|
__builtin_neon_vstl1q_lane_p64(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 38); \
|
|
})
|
|
#else
|
|
#define vstl1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
|
|
poly64x2_t __s1 = __p1; \
|
|
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vstl1q_lane_p64(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 38); \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vstl1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __s1 = __p1; \
|
|
__builtin_neon_vstl1q_lane_u64(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 51); \
|
|
})
|
|
#else
|
|
#define vstl1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vstl1q_lane_u64(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 51); \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vstl1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __s1 = __p1; \
|
|
__builtin_neon_vstl1q_lane_f64(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 42); \
|
|
})
|
|
#else
|
|
#define vstl1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x2_t __s1 = __p1; \
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vstl1q_lane_f64(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 42); \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vstl1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __s1 = __p1; \
|
|
__builtin_neon_vstl1q_lane_s64(__p0, __builtin_bit_cast(int8x16_t, __s1), __p2, 35); \
|
|
})
|
|
#else
|
|
#define vstl1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x2_t __s1 = __p1; \
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__builtin_neon_vstl1q_lane_s64(__p0, __builtin_bit_cast(int8x16_t, __rev1), __p2, 35); \
|
|
})
|
|
#endif
#define vstl1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x1_t __s1 = __p1; \
|
|
__builtin_neon_vstl1_lane_u64(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 19); \
|
|
})
|
|
#define vstl1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
|
|
float64x1_t __s1 = __p1; \
|
|
__builtin_neon_vstl1_lane_f64(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 10); \
|
|
})
|
|
#define vstl1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
|
|
int64x1_t __s1 = __p1; \
|
|
__builtin_neon_vstl1_lane_s64(__p0, __builtin_bit_cast(int8x8_t, __s1), __p2, 3); \
|
|
})
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vbcaxq_u8(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_vbcaxq_u8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vbcaxq_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vbcaxq_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vbcaxq_u64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vbcaxq_u64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vbcaxq_u16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_vbcaxq_u16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vbcaxq_s8(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_vbcaxq_s8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vbcaxq_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_vbcaxq_s32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vbcaxq_s64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_vbcaxq_s64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vbcaxq_s16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_vbcaxq_s16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_veor3q_u8(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 48));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
|
|
uint8x16_t __ret;
|
|
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(uint8x16_t, __builtin_neon_veor3q_u8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 48));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_veor3q_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_veor3q_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_veor3q_u64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_veor3q_u64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint16x8_t __ret;
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_veor3q_u16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 49));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(uint16x8_t, __builtin_neon_veor3q_u16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 49));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_veor3q_s8(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 32));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __builtin_bit_cast(int8x16_t, __builtin_neon_veor3q_s8(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 32));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_veor3q_s32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 34));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(int32x4_t, __builtin_neon_veor3q_s32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 34));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_veor3q_s64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 35));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(int64x2_t, __builtin_neon_veor3q_s64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 35));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_veor3q_s16(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 33));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __builtin_bit_cast(int16x8_t, __builtin_neon_veor3q_s16(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 33));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vrax1q_u64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vrax1q_u64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsha512hq_u64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsha512hq_u64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsha512h2q_u64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsha512h2q_u64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsha512su0q_u64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsha512su0q_u64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsha512su1q_u64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 51));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vsha512su1q_u64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 51));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vxarq_u64(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __p2, 51)); \
|
|
__ret; \
|
|
})
|
|
#else
|
|
#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \
|
|
uint64x2_t __ret; \
|
|
uint64x2_t __s0 = __p0; \
|
|
uint64x2_t __s1 = __p1; \
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_64); \
|
|
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_64); \
|
|
__ret = __builtin_bit_cast(uint64x2_t, __builtin_neon_vxarq_u64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __p2, 51)); \
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64); \
|
|
__ret; \
|
|
})
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3partw1q_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3partw1q_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3partw2q_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3partw2q_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3ss1q_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 50));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3ss1q_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 50));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  uint32x4_t __ret; \
  uint32x4_t __s0 = __p0; \
  uint32x4_t __s1 = __p1; \
  uint32x4_t __s2 = __p2; \
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3tt1aq_u32(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 50)); \
  __ret; \
})
#else
#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  uint32x4_t __ret; \
  uint32x4_t __s0 = __p0; \
  uint32x4_t __s1 = __p1; \
  uint32x4_t __s2 = __p2; \
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_32); \
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3tt1aq_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), __p3, 50)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  uint32x4_t __ret; \
  uint32x4_t __s0 = __p0; \
  uint32x4_t __s1 = __p1; \
  uint32x4_t __s2 = __p2; \
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3tt1bq_u32(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 50)); \
  __ret; \
})
#else
#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  uint32x4_t __ret; \
  uint32x4_t __s0 = __p0; \
  uint32x4_t __s1 = __p1; \
  uint32x4_t __s2 = __p2; \
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_32); \
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3tt1bq_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), __p3, 50)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  uint32x4_t __ret; \
  uint32x4_t __s0 = __p0; \
  uint32x4_t __s1 = __p1; \
  uint32x4_t __s2 = __p2; \
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3tt2aq_u32(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 50)); \
  __ret; \
})
#else
#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  uint32x4_t __ret; \
  uint32x4_t __s0 = __p0; \
  uint32x4_t __s1 = __p1; \
  uint32x4_t __s2 = __p2; \
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_32); \
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3tt2aq_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), __p3, 50)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  uint32x4_t __ret; \
  uint32x4_t __s0 = __p0; \
  uint32x4_t __s1 = __p1; \
  uint32x4_t __s2 = __p2; \
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3tt2bq_u32(__builtin_bit_cast(int8x16_t, __s0), __builtin_bit_cast(int8x16_t, __s1), __builtin_bit_cast(int8x16_t, __s2), __p3, 50)); \
  __ret; \
})
#else
#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  uint32x4_t __ret; \
  uint32x4_t __s0 = __p0; \
  uint32x4_t __s1 = __p1; \
  uint32x4_t __s2 = __p2; \
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, __lane_reverse_128_32); \
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, __lane_reverse_128_32); \
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, __lane_reverse_128_32); \
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm3tt2bq_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), __p3, 50)); \
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32); \
  __ret; \
})
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm4eq_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
  return __ret;
}
#else
__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm4eq_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm4ekeyq_u32(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 50));
  return __ret;
}
#else
__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __builtin_bit_cast(uint32x4_t, __builtin_neon_vsm4ekeyq_u32(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 50));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

__ai __attribute__((target("v8.1a,neon"))) int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.1a,neon"))) int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
|
|
int16_t __ret;
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlahs_lane_s32(__p0_776, __p1_776, __p2_776, __p3_776) __extension__ ({ \
|
|
int32_t __ret_776; \
|
|
int32_t __s0_776 = __p0_776; \
|
|
int32_t __s1_776 = __p1_776; \
|
|
int32x2_t __s2_776 = __p2_776; \
|
|
__ret_776 = vqrdmlahs_s32(__s0_776, __s1_776, vget_lane_s32(__s2_776, __p3_776)); \
|
|
__ret_776; \
|
|
})
|
|
#else
|
|
#define vqrdmlahs_lane_s32(__p0_777, __p1_777, __p2_777, __p3_777) __extension__ ({ \
|
|
int32_t __ret_777; \
|
|
int32_t __s0_777 = __p0_777; \
|
|
int32_t __s1_777 = __p1_777; \
|
|
int32x2_t __s2_777 = __p2_777; \
|
|
int32x2_t __rev2_777; __rev2_777 = __builtin_shufflevector(__s2_777, __s2_777, __lane_reverse_64_32); \
|
|
__ret_777 = vqrdmlahs_s32(__s0_777, __s1_777, __noswap_vget_lane_s32(__rev2_777, __p3_777)); \
|
|
__ret_777; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlahh_lane_s16(__p0_778, __p1_778, __p2_778, __p3_778) __extension__ ({ \
|
|
int16_t __ret_778; \
|
|
int16_t __s0_778 = __p0_778; \
|
|
int16_t __s1_778 = __p1_778; \
|
|
int16x4_t __s2_778 = __p2_778; \
|
|
__ret_778 = vqrdmlahh_s16(__s0_778, __s1_778, vget_lane_s16(__s2_778, __p3_778)); \
|
|
__ret_778; \
|
|
})
|
|
#else
|
|
#define vqrdmlahh_lane_s16(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \
|
|
int16_t __ret_779; \
|
|
int16_t __s0_779 = __p0_779; \
|
|
int16_t __s1_779 = __p1_779; \
|
|
int16x4_t __s2_779 = __p2_779; \
|
|
int16x4_t __rev2_779; __rev2_779 = __builtin_shufflevector(__s2_779, __s2_779, __lane_reverse_64_16); \
|
|
__ret_779 = vqrdmlahh_s16(__s0_779, __s1_779, __noswap_vget_lane_s16(__rev2_779, __p3_779)); \
|
|
__ret_779; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
#define vqrdmlahs_laneq_s32(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \
  int32_t __ret_780; \
  int32_t __s0_780 = __p0_780; \
  int32_t __s1_780 = __p1_780; \
  int32x4_t __s2_780 = __p2_780; \
  __ret_780 = vqrdmlahs_s32(__s0_780, __s1_780, vgetq_lane_s32(__s2_780, __p3_780)); \
  __ret_780; \
})
#else
#define vqrdmlahs_laneq_s32(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \
  int32_t __ret_781; \
  int32_t __s0_781 = __p0_781; \
  int32_t __s1_781 = __p1_781; \
  int32x4_t __s2_781 = __p2_781; \
  int32x4_t __rev2_781; __rev2_781 = __builtin_shufflevector(__s2_781, __s2_781, __lane_reverse_128_32); \
  __ret_781 = vqrdmlahs_s32(__s0_781, __s1_781, __noswap_vgetq_lane_s32(__rev2_781, __p3_781)); \
  __ret_781; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vqrdmlahh_laneq_s16(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \
  int16_t __ret_782; \
  int16_t __s0_782 = __p0_782; \
  int16_t __s1_782 = __p1_782; \
  int16x8_t __s2_782 = __p2_782; \
  __ret_782 = vqrdmlahh_s16(__s0_782, __s1_782, vgetq_lane_s16(__s2_782, __p3_782)); \
  __ret_782; \
})
#else
#define vqrdmlahh_laneq_s16(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \
  int16_t __ret_783; \
  int16_t __s0_783 = __p0_783; \
  int16_t __s1_783 = __p1_783; \
  int16x8_t __s2_783 = __p2_783; \
  int16x8_t __rev2_783; __rev2_783 = __builtin_shufflevector(__s2_783, __s2_783, __lane_reverse_128_16); \
  __ret_783 = vqrdmlahh_s16(__s0_783, __s1_783, __noswap_vgetq_lane_s16(__rev2_783, __p3_783)); \
  __ret_783; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vqrdmlahq_laneq_s32(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \
  int32x4_t __ret_784; \
  int32x4_t __s0_784 = __p0_784; \
  int32x4_t __s1_784 = __p1_784; \
  int32x4_t __s2_784 = __p2_784; \
  __ret_784 = vqrdmlahq_s32(__s0_784, __s1_784, splatq_laneq_s32(__s2_784, __p3_784)); \
  __ret_784; \
})
#else
#define vqrdmlahq_laneq_s32(__p0_785, __p1_785, __p2_785, __p3_785) __extension__ ({ \
  int32x4_t __ret_785; \
  int32x4_t __s0_785 = __p0_785; \
  int32x4_t __s1_785 = __p1_785; \
  int32x4_t __s2_785 = __p2_785; \
  int32x4_t __rev0_785; __rev0_785 = __builtin_shufflevector(__s0_785, __s0_785, __lane_reverse_128_32); \
  int32x4_t __rev1_785; __rev1_785 = __builtin_shufflevector(__s1_785, __s1_785, __lane_reverse_128_32); \
  int32x4_t __rev2_785; __rev2_785 = __builtin_shufflevector(__s2_785, __s2_785, __lane_reverse_128_32); \
  __ret_785 = __noswap_vqrdmlahq_s32(__rev0_785, __rev1_785, __noswap_splatq_laneq_s32(__rev2_785, __p3_785)); \
  __ret_785 = __builtin_shufflevector(__ret_785, __ret_785, __lane_reverse_128_32); \
  __ret_785; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vqrdmlahq_laneq_s16(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \
  int16x8_t __ret_786; \
  int16x8_t __s0_786 = __p0_786; \
  int16x8_t __s1_786 = __p1_786; \
  int16x8_t __s2_786 = __p2_786; \
  __ret_786 = vqrdmlahq_s16(__s0_786, __s1_786, splatq_laneq_s16(__s2_786, __p3_786)); \
  __ret_786; \
})
#else
#define vqrdmlahq_laneq_s16(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \
  int16x8_t __ret_787; \
  int16x8_t __s0_787 = __p0_787; \
  int16x8_t __s1_787 = __p1_787; \
  int16x8_t __s2_787 = __p2_787; \
  int16x8_t __rev0_787; __rev0_787 = __builtin_shufflevector(__s0_787, __s0_787, __lane_reverse_128_16); \
  int16x8_t __rev1_787; __rev1_787 = __builtin_shufflevector(__s1_787, __s1_787, __lane_reverse_128_16); \
  int16x8_t __rev2_787; __rev2_787 = __builtin_shufflevector(__s2_787, __s2_787, __lane_reverse_128_16); \
  __ret_787 = __noswap_vqrdmlahq_s16(__rev0_787, __rev1_787, __noswap_splatq_laneq_s16(__rev2_787, __p3_787)); \
  __ret_787 = __builtin_shufflevector(__ret_787, __ret_787, __lane_reverse_128_16); \
  __ret_787; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlah_laneq_s32(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \
|
|
int32x2_t __ret_788; \
|
|
int32x2_t __s0_788 = __p0_788; \
|
|
int32x2_t __s1_788 = __p1_788; \
|
|
int32x4_t __s2_788 = __p2_788; \
|
|
__ret_788 = vqrdmlah_s32(__s0_788, __s1_788, splat_laneq_s32(__s2_788, __p3_788)); \
|
|
__ret_788; \
|
|
})
|
|
#else
|
|
#define vqrdmlah_laneq_s32(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \
|
|
int32x2_t __ret_789; \
|
|
int32x2_t __s0_789 = __p0_789; \
|
|
int32x2_t __s1_789 = __p1_789; \
|
|
int32x4_t __s2_789 = __p2_789; \
|
|
int32x2_t __rev0_789; __rev0_789 = __builtin_shufflevector(__s0_789, __s0_789, __lane_reverse_64_32); \
|
|
int32x2_t __rev1_789; __rev1_789 = __builtin_shufflevector(__s1_789, __s1_789, __lane_reverse_64_32); \
|
|
int32x4_t __rev2_789; __rev2_789 = __builtin_shufflevector(__s2_789, __s2_789, __lane_reverse_128_32); \
|
|
__ret_789 = __noswap_vqrdmlah_s32(__rev0_789, __rev1_789, __noswap_splat_laneq_s32(__rev2_789, __p3_789)); \
|
|
__ret_789 = __builtin_shufflevector(__ret_789, __ret_789, __lane_reverse_64_32); \
|
|
__ret_789; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlah_laneq_s16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \
|
|
int16x4_t __ret_790; \
|
|
int16x4_t __s0_790 = __p0_790; \
|
|
int16x4_t __s1_790 = __p1_790; \
|
|
int16x8_t __s2_790 = __p2_790; \
|
|
__ret_790 = vqrdmlah_s16(__s0_790, __s1_790, splat_laneq_s16(__s2_790, __p3_790)); \
|
|
__ret_790; \
|
|
})
|
|
#else
|
|
#define vqrdmlah_laneq_s16(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \
|
|
int16x4_t __ret_791; \
|
|
int16x4_t __s0_791 = __p0_791; \
|
|
int16x4_t __s1_791 = __p1_791; \
|
|
int16x8_t __s2_791 = __p2_791; \
|
|
int16x4_t __rev0_791; __rev0_791 = __builtin_shufflevector(__s0_791, __s0_791, __lane_reverse_64_16); \
|
|
int16x4_t __rev1_791; __rev1_791 = __builtin_shufflevector(__s1_791, __s1_791, __lane_reverse_64_16); \
|
|
int16x8_t __rev2_791; __rev2_791 = __builtin_shufflevector(__s2_791, __s2_791, __lane_reverse_128_16); \
|
|
__ret_791 = __noswap_vqrdmlah_s16(__rev0_791, __rev1_791, __noswap_splat_laneq_s16(__rev2_791, __p3_791)); \
|
|
__ret_791 = __builtin_shufflevector(__ret_791, __ret_791, __lane_reverse_64_16); \
|
|
__ret_791; \
|
|
})
|
|
#endif
|
|
|
|
__ai __attribute__((target("v8.1a,neon"))) int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
|
|
int32_t __ret;
|
|
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2));
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("v8.1a,neon"))) int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
|
|
int16_t __ret;
|
|
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlshs_lane_s32(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \
|
|
int32_t __ret_792; \
|
|
int32_t __s0_792 = __p0_792; \
|
|
int32_t __s1_792 = __p1_792; \
|
|
int32x2_t __s2_792 = __p2_792; \
|
|
__ret_792 = vqrdmlshs_s32(__s0_792, __s1_792, vget_lane_s32(__s2_792, __p3_792)); \
|
|
__ret_792; \
|
|
})
|
|
#else
|
|
#define vqrdmlshs_lane_s32(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \
|
|
int32_t __ret_793; \
|
|
int32_t __s0_793 = __p0_793; \
|
|
int32_t __s1_793 = __p1_793; \
|
|
int32x2_t __s2_793 = __p2_793; \
|
|
int32x2_t __rev2_793; __rev2_793 = __builtin_shufflevector(__s2_793, __s2_793, __lane_reverse_64_32); \
|
|
__ret_793 = vqrdmlshs_s32(__s0_793, __s1_793, __noswap_vget_lane_s32(__rev2_793, __p3_793)); \
|
|
__ret_793; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlshh_lane_s16(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \
|
|
int16_t __ret_794; \
|
|
int16_t __s0_794 = __p0_794; \
|
|
int16_t __s1_794 = __p1_794; \
|
|
int16x4_t __s2_794 = __p2_794; \
|
|
__ret_794 = vqrdmlshh_s16(__s0_794, __s1_794, vget_lane_s16(__s2_794, __p3_794)); \
|
|
__ret_794; \
|
|
})
|
|
#else
|
|
#define vqrdmlshh_lane_s16(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \
|
|
int16_t __ret_795; \
|
|
int16_t __s0_795 = __p0_795; \
|
|
int16_t __s1_795 = __p1_795; \
|
|
int16x4_t __s2_795 = __p2_795; \
|
|
int16x4_t __rev2_795; __rev2_795 = __builtin_shufflevector(__s2_795, __s2_795, __lane_reverse_64_16); \
|
|
__ret_795 = vqrdmlshh_s16(__s0_795, __s1_795, __noswap_vget_lane_s16(__rev2_795, __p3_795)); \
|
|
__ret_795; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlshs_laneq_s32(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \
|
|
int32_t __ret_796; \
|
|
int32_t __s0_796 = __p0_796; \
|
|
int32_t __s1_796 = __p1_796; \
|
|
int32x4_t __s2_796 = __p2_796; \
|
|
__ret_796 = vqrdmlshs_s32(__s0_796, __s1_796, vgetq_lane_s32(__s2_796, __p3_796)); \
|
|
__ret_796; \
|
|
})
|
|
#else
|
|
#define vqrdmlshs_laneq_s32(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \
|
|
int32_t __ret_797; \
|
|
int32_t __s0_797 = __p0_797; \
|
|
int32_t __s1_797 = __p1_797; \
|
|
int32x4_t __s2_797 = __p2_797; \
|
|
int32x4_t __rev2_797; __rev2_797 = __builtin_shufflevector(__s2_797, __s2_797, __lane_reverse_128_32); \
|
|
__ret_797 = vqrdmlshs_s32(__s0_797, __s1_797, __noswap_vgetq_lane_s32(__rev2_797, __p3_797)); \
|
|
__ret_797; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlshh_laneq_s16(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \
|
|
int16_t __ret_798; \
|
|
int16_t __s0_798 = __p0_798; \
|
|
int16_t __s1_798 = __p1_798; \
|
|
int16x8_t __s2_798 = __p2_798; \
|
|
__ret_798 = vqrdmlshh_s16(__s0_798, __s1_798, vgetq_lane_s16(__s2_798, __p3_798)); \
|
|
__ret_798; \
|
|
})
|
|
#else
|
|
#define vqrdmlshh_laneq_s16(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \
|
|
int16_t __ret_799; \
|
|
int16_t __s0_799 = __p0_799; \
|
|
int16_t __s1_799 = __p1_799; \
|
|
int16x8_t __s2_799 = __p2_799; \
|
|
int16x8_t __rev2_799; __rev2_799 = __builtin_shufflevector(__s2_799, __s2_799, __lane_reverse_128_16); \
|
|
__ret_799 = vqrdmlshh_s16(__s0_799, __s1_799, __noswap_vgetq_lane_s16(__rev2_799, __p3_799)); \
|
|
__ret_799; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlshq_laneq_s32(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \
|
|
int32x4_t __ret_800; \
|
|
int32x4_t __s0_800 = __p0_800; \
|
|
int32x4_t __s1_800 = __p1_800; \
|
|
int32x4_t __s2_800 = __p2_800; \
|
|
__ret_800 = vqrdmlshq_s32(__s0_800, __s1_800, splatq_laneq_s32(__s2_800, __p3_800)); \
|
|
__ret_800; \
|
|
})
|
|
#else
|
|
#define vqrdmlshq_laneq_s32(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \
|
|
int32x4_t __ret_801; \
|
|
int32x4_t __s0_801 = __p0_801; \
|
|
int32x4_t __s1_801 = __p1_801; \
|
|
int32x4_t __s2_801 = __p2_801; \
|
|
int32x4_t __rev0_801; __rev0_801 = __builtin_shufflevector(__s0_801, __s0_801, __lane_reverse_128_32); \
|
|
int32x4_t __rev1_801; __rev1_801 = __builtin_shufflevector(__s1_801, __s1_801, __lane_reverse_128_32); \
|
|
int32x4_t __rev2_801; __rev2_801 = __builtin_shufflevector(__s2_801, __s2_801, __lane_reverse_128_32); \
|
|
__ret_801 = __noswap_vqrdmlshq_s32(__rev0_801, __rev1_801, __noswap_splatq_laneq_s32(__rev2_801, __p3_801)); \
|
|
__ret_801 = __builtin_shufflevector(__ret_801, __ret_801, __lane_reverse_128_32); \
|
|
__ret_801; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlshq_laneq_s16(__p0_802, __p1_802, __p2_802, __p3_802) __extension__ ({ \
|
|
int16x8_t __ret_802; \
|
|
int16x8_t __s0_802 = __p0_802; \
|
|
int16x8_t __s1_802 = __p1_802; \
|
|
int16x8_t __s2_802 = __p2_802; \
|
|
__ret_802 = vqrdmlshq_s16(__s0_802, __s1_802, splatq_laneq_s16(__s2_802, __p3_802)); \
|
|
__ret_802; \
|
|
})
|
|
#else
|
|
#define vqrdmlshq_laneq_s16(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \
|
|
int16x8_t __ret_803; \
|
|
int16x8_t __s0_803 = __p0_803; \
|
|
int16x8_t __s1_803 = __p1_803; \
|
|
int16x8_t __s2_803 = __p2_803; \
|
|
int16x8_t __rev0_803; __rev0_803 = __builtin_shufflevector(__s0_803, __s0_803, __lane_reverse_128_16); \
|
|
int16x8_t __rev1_803; __rev1_803 = __builtin_shufflevector(__s1_803, __s1_803, __lane_reverse_128_16); \
|
|
int16x8_t __rev2_803; __rev2_803 = __builtin_shufflevector(__s2_803, __s2_803, __lane_reverse_128_16); \
|
|
__ret_803 = __noswap_vqrdmlshq_s16(__rev0_803, __rev1_803, __noswap_splatq_laneq_s16(__rev2_803, __p3_803)); \
|
|
__ret_803 = __builtin_shufflevector(__ret_803, __ret_803, __lane_reverse_128_16); \
|
|
__ret_803; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlsh_laneq_s32(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \
|
|
int32x2_t __ret_804; \
|
|
int32x2_t __s0_804 = __p0_804; \
|
|
int32x2_t __s1_804 = __p1_804; \
|
|
int32x4_t __s2_804 = __p2_804; \
|
|
__ret_804 = vqrdmlsh_s32(__s0_804, __s1_804, splat_laneq_s32(__s2_804, __p3_804)); \
|
|
__ret_804; \
|
|
})
|
|
#else
|
|
#define vqrdmlsh_laneq_s32(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \
|
|
int32x2_t __ret_805; \
|
|
int32x2_t __s0_805 = __p0_805; \
|
|
int32x2_t __s1_805 = __p1_805; \
|
|
int32x4_t __s2_805 = __p2_805; \
|
|
int32x2_t __rev0_805; __rev0_805 = __builtin_shufflevector(__s0_805, __s0_805, __lane_reverse_64_32); \
|
|
int32x2_t __rev1_805; __rev1_805 = __builtin_shufflevector(__s1_805, __s1_805, __lane_reverse_64_32); \
|
|
int32x4_t __rev2_805; __rev2_805 = __builtin_shufflevector(__s2_805, __s2_805, __lane_reverse_128_32); \
|
|
__ret_805 = __noswap_vqrdmlsh_s32(__rev0_805, __rev1_805, __noswap_splat_laneq_s32(__rev2_805, __p3_805)); \
|
|
__ret_805 = __builtin_shufflevector(__ret_805, __ret_805, __lane_reverse_64_32); \
|
|
__ret_805; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vqrdmlsh_laneq_s16(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \
|
|
int16x4_t __ret_806; \
|
|
int16x4_t __s0_806 = __p0_806; \
|
|
int16x4_t __s1_806 = __p1_806; \
|
|
int16x8_t __s2_806 = __p2_806; \
|
|
__ret_806 = vqrdmlsh_s16(__s0_806, __s1_806, splat_laneq_s16(__s2_806, __p3_806)); \
|
|
__ret_806; \
|
|
})
|
|
#else
|
|
#define vqrdmlsh_laneq_s16(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \
|
|
int16x4_t __ret_807; \
|
|
int16x4_t __s0_807 = __p0_807; \
|
|
int16x4_t __s1_807 = __p1_807; \
|
|
int16x8_t __s2_807 = __p2_807; \
|
|
int16x4_t __rev0_807; __rev0_807 = __builtin_shufflevector(__s0_807, __s0_807, __lane_reverse_64_16); \
|
|
int16x4_t __rev1_807; __rev1_807 = __builtin_shufflevector(__s1_807, __s1_807, __lane_reverse_64_16); \
|
|
int16x8_t __rev2_807; __rev2_807 = __builtin_shufflevector(__s2_807, __s2_807, __lane_reverse_128_16); \
|
|
__ret_807 = __noswap_vqrdmlsh_s16(__rev0_807, __rev1_807, __noswap_splat_laneq_s16(__rev2_807, __p3_807)); \
|
|
__ret_807 = __builtin_shufflevector(__ret_807, __ret_807, __lane_reverse_64_16); \
|
|
__ret_807; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcaddq_rot270_f64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcaddq_rot270_f64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcaddq_rot90_f64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcaddq_rot90_f64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcmlaq_f64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcmlaq_f64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcmlaq_rot180_f64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcmlaq_rot180_f64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcmlaq_rot270_f64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcmlaq_rot270_f64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcmlaq_rot90_f64(__builtin_bit_cast(int8x16_t, __p0), __builtin_bit_cast(int8x16_t, __p1), __builtin_bit_cast(int8x16_t, __p2), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
|
|
float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vcmlaq_rot90_f64(__builtin_bit_cast(int8x16_t, __rev0), __builtin_bit_cast(int8x16_t, __rev1), __builtin_bit_cast(int8x16_t, __rev2), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrnd32xq_f32(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrnd32xq_f32(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32x_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnd32x_f32(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32x_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnd32x_f32(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrnd32xq_f64(__builtin_bit_cast(int8x16_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrnd32xq_f64(__builtin_bit_cast(int8x16_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd32x_f64(float64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrnd32x_f64(__builtin_bit_cast(int8x8_t, __p0), 10));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrnd32zq_f32(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrnd32zq_f32(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32z_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnd32z_f32(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32z_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnd32z_f32(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrnd32zq_f64(__builtin_bit_cast(int8x16_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrnd32zq_f64(__builtin_bit_cast(int8x16_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd32z_f64(float64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrnd32z_f64(__builtin_bit_cast(int8x8_t, __p0), 10));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrnd64xq_f32(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrnd64xq_f32(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64x_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnd64x_f32(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64x_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnd64x_f32(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrnd64xq_f64(__builtin_bit_cast(int8x16_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrnd64xq_f64(__builtin_bit_cast(int8x16_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd64x_f64(float64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrnd64x_f64(__builtin_bit_cast(int8x8_t, __p0), 10));
|
|
return __ret;
|
|
}
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrnd64zq_f32(__builtin_bit_cast(int8x16_t, __p0), 41));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) {
|
|
float32x4_t __ret;
|
|
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
__ret = __builtin_bit_cast(float32x4_t, __builtin_neon_vrnd64zq_f32(__builtin_bit_cast(int8x16_t, __rev0), 41));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64z_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnd64z_f32(__builtin_bit_cast(int8x8_t, __p0), 9));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64z_f32(float32x2_t __p0) {
|
|
float32x2_t __ret;
|
|
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(float32x2_t, __builtin_neon_vrnd64z_f32(__builtin_bit_cast(int8x8_t, __rev0), 9));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrnd64zq_f64(__builtin_bit_cast(int8x16_t, __p0), 42));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) {
|
|
float64x2_t __ret;
|
|
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
__ret = __builtin_bit_cast(float64x2_t, __builtin_neon_vrnd64zq_f64(__builtin_bit_cast(int8x16_t, __rev0), 42));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd64z_f64(float64x1_t __p0) {
|
|
float64x1_t __ret;
|
|
__ret = __builtin_bit_cast(float64x1_t, __builtin_neon_vrnd64z_f64(__builtin_bit_cast(int8x8_t, __p0), 10));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vbfdotq_lane_f32(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \
|
|
float32x4_t __ret_808; \
|
|
float32x4_t __s0_808 = __p0_808; \
|
|
bfloat16x8_t __s1_808 = __p1_808; \
|
|
bfloat16x4_t __s2_808 = __p2_808; \
|
|
__ret_808 = vbfdotq_f32(__s0_808, __s1_808, __builtin_bit_cast(bfloat16x8_t, splatq_lane_f32(__builtin_bit_cast(float32x2_t, __s2_808), __p3_808))); \
|
|
__ret_808; \
|
|
})
|
|
#else
|
|
#define vbfdotq_lane_f32(__p0_809, __p1_809, __p2_809, __p3_809) __extension__ ({ \
|
|
float32x4_t __ret_809; \
|
|
float32x4_t __s0_809 = __p0_809; \
|
|
bfloat16x8_t __s1_809 = __p1_809; \
|
|
bfloat16x4_t __s2_809 = __p2_809; \
|
|
float32x4_t __rev0_809; __rev0_809 = __builtin_shufflevector(__s0_809, __s0_809, __lane_reverse_128_32); \
|
|
bfloat16x8_t __rev1_809; __rev1_809 = __builtin_shufflevector(__s1_809, __s1_809, __lane_reverse_128_16); \
|
|
bfloat16x4_t __rev2_809; __rev2_809 = __builtin_shufflevector(__s2_809, __s2_809, __lane_reverse_64_16); \
|
|
__ret_809 = __noswap_vbfdotq_f32(__rev0_809, __rev1_809, __builtin_bit_cast(bfloat16x8_t, __noswap_splatq_lane_f32(__builtin_bit_cast(float32x2_t, __rev2_809), __p3_809))); \
|
|
__ret_809 = __builtin_shufflevector(__ret_809, __ret_809, __lane_reverse_128_32); \
|
|
__ret_809; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vbfdot_lane_f32(__p0_810, __p1_810, __p2_810, __p3_810) __extension__ ({ \
|
|
float32x2_t __ret_810; \
|
|
float32x2_t __s0_810 = __p0_810; \
|
|
bfloat16x4_t __s1_810 = __p1_810; \
|
|
bfloat16x4_t __s2_810 = __p2_810; \
|
|
__ret_810 = vbfdot_f32(__s0_810, __s1_810, __builtin_bit_cast(bfloat16x4_t, splat_lane_f32(__builtin_bit_cast(float32x2_t, __s2_810), __p3_810))); \
|
|
__ret_810; \
|
|
})
|
|
#else
|
|
#define vbfdot_lane_f32(__p0_811, __p1_811, __p2_811, __p3_811) __extension__ ({ \
|
|
float32x2_t __ret_811; \
|
|
float32x2_t __s0_811 = __p0_811; \
|
|
bfloat16x4_t __s1_811 = __p1_811; \
|
|
bfloat16x4_t __s2_811 = __p2_811; \
|
|
float32x2_t __rev0_811; __rev0_811 = __builtin_shufflevector(__s0_811, __s0_811, __lane_reverse_64_32); \
|
|
bfloat16x4_t __rev1_811; __rev1_811 = __builtin_shufflevector(__s1_811, __s1_811, __lane_reverse_64_16); \
|
|
bfloat16x4_t __rev2_811; __rev2_811 = __builtin_shufflevector(__s2_811, __s2_811, __lane_reverse_64_16); \
|
|
__ret_811 = __noswap_vbfdot_f32(__rev0_811, __rev1_811, __builtin_bit_cast(bfloat16x4_t, __noswap_splat_lane_f32(__builtin_bit_cast(float32x2_t, __rev2_811), __p3_811))); \
|
|
__ret_811 = __builtin_shufflevector(__ret_811, __ret_811, __lane_reverse_64_32); \
|
|
__ret_811; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vbfdotq_laneq_f32(__p0_812, __p1_812, __p2_812, __p3_812) __extension__ ({ \
|
|
float32x4_t __ret_812; \
|
|
float32x4_t __s0_812 = __p0_812; \
|
|
bfloat16x8_t __s1_812 = __p1_812; \
|
|
bfloat16x8_t __s2_812 = __p2_812; \
|
|
__ret_812 = vbfdotq_f32(__s0_812, __s1_812, __builtin_bit_cast(bfloat16x8_t, splatq_laneq_f32(__builtin_bit_cast(float32x4_t, __s2_812), __p3_812))); \
|
|
__ret_812; \
|
|
})
|
|
#else
|
|
#define vbfdotq_laneq_f32(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \
|
|
float32x4_t __ret_813; \
|
|
float32x4_t __s0_813 = __p0_813; \
|
|
bfloat16x8_t __s1_813 = __p1_813; \
|
|
bfloat16x8_t __s2_813 = __p2_813; \
|
|
float32x4_t __rev0_813; __rev0_813 = __builtin_shufflevector(__s0_813, __s0_813, __lane_reverse_128_32); \
|
|
bfloat16x8_t __rev1_813; __rev1_813 = __builtin_shufflevector(__s1_813, __s1_813, __lane_reverse_128_16); \
|
|
bfloat16x8_t __rev2_813; __rev2_813 = __builtin_shufflevector(__s2_813, __s2_813, __lane_reverse_128_16); \
|
|
__ret_813 = __noswap_vbfdotq_f32(__rev0_813, __rev1_813, __builtin_bit_cast(bfloat16x8_t, __noswap_splatq_laneq_f32(__builtin_bit_cast(float32x4_t, __rev2_813), __p3_813))); \
|
|
__ret_813 = __builtin_shufflevector(__ret_813, __ret_813, __lane_reverse_128_32); \
|
|
__ret_813; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vbfdot_laneq_f32(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \
|
|
float32x2_t __ret_814; \
|
|
float32x2_t __s0_814 = __p0_814; \
|
|
bfloat16x4_t __s1_814 = __p1_814; \
|
|
bfloat16x8_t __s2_814 = __p2_814; \
|
|
__ret_814 = vbfdot_f32(__s0_814, __s1_814, __builtin_bit_cast(bfloat16x4_t, splat_laneq_f32(__builtin_bit_cast(float32x4_t, __s2_814), __p3_814))); \
|
|
__ret_814; \
|
|
})
|
|
#else
|
|
#define vbfdot_laneq_f32(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \
|
|
float32x2_t __ret_815; \
|
|
float32x2_t __s0_815 = __p0_815; \
|
|
bfloat16x4_t __s1_815 = __p1_815; \
|
|
bfloat16x8_t __s2_815 = __p2_815; \
|
|
float32x2_t __rev0_815; __rev0_815 = __builtin_shufflevector(__s0_815, __s0_815, __lane_reverse_64_32); \
|
|
bfloat16x4_t __rev1_815; __rev1_815 = __builtin_shufflevector(__s1_815, __s1_815, __lane_reverse_64_16); \
|
|
bfloat16x8_t __rev2_815; __rev2_815 = __builtin_shufflevector(__s2_815, __s2_815, __lane_reverse_128_16); \
|
|
__ret_815 = __noswap_vbfdot_f32(__rev0_815, __rev1_815, __builtin_bit_cast(bfloat16x4_t, __noswap_splat_laneq_f32(__builtin_bit_cast(float32x4_t, __rev2_815), __p3_815))); \
|
|
__ret_815 = __builtin_shufflevector(__ret_815, __ret_815, __lane_reverse_64_32); \
|
|
__ret_815; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vbfmlalbq_lane_f32(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \
|
|
float32x4_t __ret_816; \
|
|
float32x4_t __s0_816 = __p0_816; \
|
|
bfloat16x8_t __s1_816 = __p1_816; \
|
|
bfloat16x4_t __s2_816 = __p2_816; \
|
|
__ret_816 = vbfmlalbq_f32(__s0_816, __s1_816, (bfloat16x8_t) {vget_lane_bf16(__s2_816, __p3_816), vget_lane_bf16(__s2_816, __p3_816), vget_lane_bf16(__s2_816, __p3_816), vget_lane_bf16(__s2_816, __p3_816), vget_lane_bf16(__s2_816, __p3_816), vget_lane_bf16(__s2_816, __p3_816), vget_lane_bf16(__s2_816, __p3_816), vget_lane_bf16(__s2_816, __p3_816)}); \
|
|
__ret_816; \
|
|
})
|
|
#else
|
|
#define vbfmlalbq_lane_f32(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ ({ \
|
|
float32x4_t __ret_817; \
|
|
float32x4_t __s0_817 = __p0_817; \
|
|
bfloat16x8_t __s1_817 = __p1_817; \
|
|
bfloat16x4_t __s2_817 = __p2_817; \
|
|
float32x4_t __rev0_817; __rev0_817 = __builtin_shufflevector(__s0_817, __s0_817, __lane_reverse_128_32); \
|
|
bfloat16x8_t __rev1_817; __rev1_817 = __builtin_shufflevector(__s1_817, __s1_817, __lane_reverse_128_16); \
|
|
bfloat16x4_t __rev2_817; __rev2_817 = __builtin_shufflevector(__s2_817, __s2_817, __lane_reverse_64_16); \
|
|
__ret_817 = __noswap_vbfmlalbq_f32(__rev0_817, __rev1_817, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_817, __p3_817), __noswap_vget_lane_bf16(__rev2_817, __p3_817), __noswap_vget_lane_bf16(__rev2_817, __p3_817), __noswap_vget_lane_bf16(__rev2_817, __p3_817), __noswap_vget_lane_bf16(__rev2_817, __p3_817), __noswap_vget_lane_bf16(__rev2_817, __p3_817), __noswap_vget_lane_bf16(__rev2_817, __p3_817), __noswap_vget_lane_bf16(__rev2_817, __p3_817)}); \
|
|
__ret_817 = __builtin_shufflevector(__ret_817, __ret_817, __lane_reverse_128_32); \
|
|
__ret_817; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vbfmlalbq_laneq_f32(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \
|
|
float32x4_t __ret_818; \
|
|
float32x4_t __s0_818 = __p0_818; \
|
|
bfloat16x8_t __s1_818 = __p1_818; \
|
|
bfloat16x8_t __s2_818 = __p2_818; \
|
|
__ret_818 = vbfmlalbq_f32(__s0_818, __s1_818, (bfloat16x8_t) {vgetq_lane_bf16(__s2_818, __p3_818), vgetq_lane_bf16(__s2_818, __p3_818), vgetq_lane_bf16(__s2_818, __p3_818), vgetq_lane_bf16(__s2_818, __p3_818), vgetq_lane_bf16(__s2_818, __p3_818), vgetq_lane_bf16(__s2_818, __p3_818), vgetq_lane_bf16(__s2_818, __p3_818), vgetq_lane_bf16(__s2_818, __p3_818)}); \
|
|
__ret_818; \
|
|
})
|
|
#else
|
|
#define vbfmlalbq_laneq_f32(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \
|
|
float32x4_t __ret_819; \
|
|
float32x4_t __s0_819 = __p0_819; \
|
|
bfloat16x8_t __s1_819 = __p1_819; \
|
|
bfloat16x8_t __s2_819 = __p2_819; \
|
|
float32x4_t __rev0_819; __rev0_819 = __builtin_shufflevector(__s0_819, __s0_819, __lane_reverse_128_32); \
|
|
bfloat16x8_t __rev1_819; __rev1_819 = __builtin_shufflevector(__s1_819, __s1_819, __lane_reverse_128_16); \
|
|
bfloat16x8_t __rev2_819; __rev2_819 = __builtin_shufflevector(__s2_819, __s2_819, __lane_reverse_128_16); \
|
|
__ret_819 = __noswap_vbfmlalbq_f32(__rev0_819, __rev1_819, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_819, __p3_819), __noswap_vgetq_lane_bf16(__rev2_819, __p3_819), __noswap_vgetq_lane_bf16(__rev2_819, __p3_819), __noswap_vgetq_lane_bf16(__rev2_819, __p3_819), __noswap_vgetq_lane_bf16(__rev2_819, __p3_819), __noswap_vgetq_lane_bf16(__rev2_819, __p3_819), __noswap_vgetq_lane_bf16(__rev2_819, __p3_819), __noswap_vgetq_lane_bf16(__rev2_819, __p3_819)}); \
|
|
__ret_819 = __builtin_shufflevector(__ret_819, __ret_819, __lane_reverse_128_32); \
|
|
__ret_819; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vbfmlaltq_lane_f32(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \
|
|
float32x4_t __ret_820; \
|
|
float32x4_t __s0_820 = __p0_820; \
|
|
bfloat16x8_t __s1_820 = __p1_820; \
|
|
bfloat16x4_t __s2_820 = __p2_820; \
|
|
__ret_820 = vbfmlaltq_f32(__s0_820, __s1_820, (bfloat16x8_t) {vget_lane_bf16(__s2_820, __p3_820), vget_lane_bf16(__s2_820, __p3_820), vget_lane_bf16(__s2_820, __p3_820), vget_lane_bf16(__s2_820, __p3_820), vget_lane_bf16(__s2_820, __p3_820), vget_lane_bf16(__s2_820, __p3_820), vget_lane_bf16(__s2_820, __p3_820), vget_lane_bf16(__s2_820, __p3_820)}); \
|
|
__ret_820; \
|
|
})
|
|
#else
|
|
#define vbfmlaltq_lane_f32(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \
|
|
float32x4_t __ret_821; \
|
|
float32x4_t __s0_821 = __p0_821; \
|
|
bfloat16x8_t __s1_821 = __p1_821; \
|
|
bfloat16x4_t __s2_821 = __p2_821; \
|
|
float32x4_t __rev0_821; __rev0_821 = __builtin_shufflevector(__s0_821, __s0_821, __lane_reverse_128_32); \
|
|
bfloat16x8_t __rev1_821; __rev1_821 = __builtin_shufflevector(__s1_821, __s1_821, __lane_reverse_128_16); \
|
|
bfloat16x4_t __rev2_821; __rev2_821 = __builtin_shufflevector(__s2_821, __s2_821, __lane_reverse_64_16); \
|
|
__ret_821 = __noswap_vbfmlaltq_f32(__rev0_821, __rev1_821, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_821, __p3_821), __noswap_vget_lane_bf16(__rev2_821, __p3_821), __noswap_vget_lane_bf16(__rev2_821, __p3_821), __noswap_vget_lane_bf16(__rev2_821, __p3_821), __noswap_vget_lane_bf16(__rev2_821, __p3_821), __noswap_vget_lane_bf16(__rev2_821, __p3_821), __noswap_vget_lane_bf16(__rev2_821, __p3_821), __noswap_vget_lane_bf16(__rev2_821, __p3_821)}); \
|
|
__ret_821 = __builtin_shufflevector(__ret_821, __ret_821, __lane_reverse_128_32); \
|
|
__ret_821; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vbfmlaltq_laneq_f32(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \
|
|
float32x4_t __ret_822; \
|
|
float32x4_t __s0_822 = __p0_822; \
|
|
bfloat16x8_t __s1_822 = __p1_822; \
|
|
bfloat16x8_t __s2_822 = __p2_822; \
|
|
__ret_822 = vbfmlaltq_f32(__s0_822, __s1_822, (bfloat16x8_t) {vgetq_lane_bf16(__s2_822, __p3_822), vgetq_lane_bf16(__s2_822, __p3_822), vgetq_lane_bf16(__s2_822, __p3_822), vgetq_lane_bf16(__s2_822, __p3_822), vgetq_lane_bf16(__s2_822, __p3_822), vgetq_lane_bf16(__s2_822, __p3_822), vgetq_lane_bf16(__s2_822, __p3_822), vgetq_lane_bf16(__s2_822, __p3_822)}); \
|
|
__ret_822; \
|
|
})
|
|
#else
|
|
#define vbfmlaltq_laneq_f32(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \
|
|
float32x4_t __ret_823; \
|
|
float32x4_t __s0_823 = __p0_823; \
|
|
bfloat16x8_t __s1_823 = __p1_823; \
|
|
bfloat16x8_t __s2_823 = __p2_823; \
|
|
float32x4_t __rev0_823; __rev0_823 = __builtin_shufflevector(__s0_823, __s0_823, __lane_reverse_128_32); \
|
|
bfloat16x8_t __rev1_823; __rev1_823 = __builtin_shufflevector(__s1_823, __s1_823, __lane_reverse_128_16); \
|
|
bfloat16x8_t __rev2_823; __rev2_823 = __builtin_shufflevector(__s2_823, __s2_823, __lane_reverse_128_16); \
|
|
__ret_823 = __noswap_vbfmlaltq_f32(__rev0_823, __rev1_823, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_823, __p3_823), __noswap_vgetq_lane_bf16(__rev2_823, __p3_823), __noswap_vgetq_lane_bf16(__rev2_823, __p3_823), __noswap_vgetq_lane_bf16(__rev2_823, __p3_823), __noswap_vgetq_lane_bf16(__rev2_823, __p3_823), __noswap_vgetq_lane_bf16(__rev2_823, __p3_823), __noswap_vgetq_lane_bf16(__rev2_823, __p3_823), __noswap_vgetq_lane_bf16(__rev2_823, __p3_823)}); \
|
|
__ret_823 = __builtin_shufflevector(__ret_823, __ret_823, __lane_reverse_128_32); \
|
|
__ret_823; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("bf16,neon"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_824) {
|
|
float32x4_t __ret_824;
|
|
__ret_824 = __builtin_bit_cast(float32x4_t, vshll_n_u16(__builtin_bit_cast(uint16x4_t, __p0_824), 16));
|
|
return __ret_824;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("bf16,neon"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_825) {
|
|
float32x4_t __ret_825;
|
|
bfloat16x4_t __rev0_825; __rev0_825 = __builtin_shufflevector(__p0_825, __p0_825, __lane_reverse_64_16);
|
|
__ret_825 = __builtin_bit_cast(float32x4_t, __noswap_vshll_n_u16(__builtin_bit_cast(uint16x4_t, __rev0_825), 16));
|
|
__ret_825 = __builtin_shufflevector(__ret_825, __ret_825, __lane_reverse_128_32);
|
|
return __ret_825;
|
|
}
|
|
__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_826) {
|
|
float32x4_t __ret_826;
|
|
__ret_826 = __builtin_bit_cast(float32x4_t, __noswap_vshll_n_u16(__builtin_bit_cast(uint16x4_t, __p0_826), 16));
|
|
return __ret_826;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = vcvt_f32_bf16(vget_high_bf16(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
__ret = vcvt_f32_bf16(vget_low_bf16(__p0));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
|
|
float32x4_t __ret;
|
|
bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
__ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdotq_lane_u32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \
|
|
uint32x4_t __ret_827; \
|
|
uint32x4_t __s0_827 = __p0_827; \
|
|
uint8x16_t __s1_827 = __p1_827; \
|
|
uint8x8_t __s2_827 = __p2_827; \
|
|
__ret_827 = vdotq_u32(__s0_827, __s1_827, __builtin_bit_cast(uint8x16_t, splatq_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_827), __p3_827))); \
|
|
__ret_827; \
|
|
})
|
|
#else
|
|
#define vdotq_lane_u32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \
|
|
uint32x4_t __ret_828; \
|
|
uint32x4_t __s0_828 = __p0_828; \
|
|
uint8x16_t __s1_828 = __p1_828; \
|
|
uint8x8_t __s2_828 = __p2_828; \
|
|
uint32x4_t __rev0_828; __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, __lane_reverse_128_32); \
|
|
uint8x16_t __rev1_828; __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, __lane_reverse_128_8); \
|
|
uint8x8_t __rev2_828; __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, __lane_reverse_64_8); \
|
|
__ret_828 = __noswap_vdotq_u32(__rev0_828, __rev1_828, __builtin_bit_cast(uint8x16_t, __noswap_splatq_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_828), __p3_828))); \
|
|
__ret_828 = __builtin_shufflevector(__ret_828, __ret_828, __lane_reverse_128_32); \
|
|
__ret_828; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdotq_lane_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \
|
|
int32x4_t __ret_829; \
|
|
int32x4_t __s0_829 = __p0_829; \
|
|
int8x16_t __s1_829 = __p1_829; \
|
|
int8x8_t __s2_829 = __p2_829; \
|
|
__ret_829 = vdotq_s32(__s0_829, __s1_829, __builtin_bit_cast(int8x16_t, splatq_lane_s32(__builtin_bit_cast(int32x2_t, __s2_829), __p3_829))); \
|
|
__ret_829; \
|
|
})
|
|
#else
|
|
#define vdotq_lane_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \
|
|
int32x4_t __ret_830; \
|
|
int32x4_t __s0_830 = __p0_830; \
|
|
int8x16_t __s1_830 = __p1_830; \
|
|
int8x8_t __s2_830 = __p2_830; \
|
|
int32x4_t __rev0_830; __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, __lane_reverse_128_32); \
|
|
int8x16_t __rev1_830; __rev1_830 = __builtin_shufflevector(__s1_830, __s1_830, __lane_reverse_128_8); \
|
|
int8x8_t __rev2_830; __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, __lane_reverse_64_8); \
|
|
__ret_830 = __noswap_vdotq_s32(__rev0_830, __rev1_830, __builtin_bit_cast(int8x16_t, __noswap_splatq_lane_s32(__builtin_bit_cast(int32x2_t, __rev2_830), __p3_830))); \
|
|
__ret_830 = __builtin_shufflevector(__ret_830, __ret_830, __lane_reverse_128_32); \
|
|
__ret_830; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdot_lane_u32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \
|
|
uint32x2_t __ret_831; \
|
|
uint32x2_t __s0_831 = __p0_831; \
|
|
uint8x8_t __s1_831 = __p1_831; \
|
|
uint8x8_t __s2_831 = __p2_831; \
|
|
__ret_831 = vdot_u32(__s0_831, __s1_831, __builtin_bit_cast(uint8x8_t, splat_lane_u32(__builtin_bit_cast(uint32x2_t, __s2_831), __p3_831))); \
|
|
__ret_831; \
|
|
})
|
|
#else
|
|
#define vdot_lane_u32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \
|
|
uint32x2_t __ret_832; \
|
|
uint32x2_t __s0_832 = __p0_832; \
|
|
uint8x8_t __s1_832 = __p1_832; \
|
|
uint8x8_t __s2_832 = __p2_832; \
|
|
uint32x2_t __rev0_832; __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, __lane_reverse_64_32); \
|
|
uint8x8_t __rev1_832; __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, __lane_reverse_64_8); \
|
|
uint8x8_t __rev2_832; __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, __lane_reverse_64_8); \
|
|
__ret_832 = __noswap_vdot_u32(__rev0_832, __rev1_832, __builtin_bit_cast(uint8x8_t, __noswap_splat_lane_u32(__builtin_bit_cast(uint32x2_t, __rev2_832), __p3_832))); \
|
|
__ret_832 = __builtin_shufflevector(__ret_832, __ret_832, __lane_reverse_64_32); \
|
|
__ret_832; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vdot_lane_s32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \
|
|
int32x2_t __ret_833; \
|
|
int32x2_t __s0_833 = __p0_833; \
|
|
int8x8_t __s1_833 = __p1_833; \
|
|
int8x8_t __s2_833 = __p2_833; \
|
|
__ret_833 = vdot_s32(__s0_833, __s1_833, __builtin_bit_cast(int8x8_t, splat_lane_s32(__builtin_bit_cast(int32x2_t, __s2_833), __p3_833))); \
|
|
__ret_833; \
|
|
})
|
|
#else
|
|
#define vdot_lane_s32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \
|
|
int32x2_t __ret_834; \
|
|
int32x2_t __s0_834 = __p0_834; \
|
|
int8x8_t __s1_834 = __p1_834; \
|
|
int8x8_t __s2_834 = __p2_834; \
|
|
int32x2_t __rev0_834; __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, __lane_reverse_64_32); \
|
|
int8x8_t __rev1_834; __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, __lane_reverse_64_8); \
|
|
int8x8_t __rev2_834; __rev2_834 = __builtin_shufflevector(__s2_834, __s2_834, __lane_reverse_64_8); \
|
|
__ret_834 = __noswap_vdot_s32(__rev0_834, __rev1_834, __builtin_bit_cast(int8x8_t, __noswap_splat_lane_s32(__builtin_bit_cast(int32x2_t, __rev2_834), __p3_834))); \
|
|
__ret_834 = __builtin_shufflevector(__ret_834, __ret_834, __lane_reverse_64_32); \
|
|
__ret_834; \
|
|
})
|
|
#endif
|
|
|
|
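/*
 * vmulq_lane_f16 / vmul_lane_f16: multiply every half-precision lane of the
 * first operand by the selected lane of the float16x4_t second operand.
 * Illustrative use: r = vmul_lane_f16(a, b, 3) computes a[i] * b[3] per lane.
 */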
#ifdef __LITTLE_ENDIAN__
|
|
#define vmulq_lane_f16(__p0_835, __p1_835, __p2_835) __extension__ ({ \
|
|
float16x8_t __ret_835; \
|
|
float16x8_t __s0_835 = __p0_835; \
|
|
float16x4_t __s1_835 = __p1_835; \
|
|
__ret_835 = __s0_835 * splatq_lane_f16(__s1_835, __p2_835); \
|
|
__ret_835; \
|
|
})
|
|
#else
|
|
#define vmulq_lane_f16(__p0_836, __p1_836, __p2_836) __extension__ ({ \
|
|
float16x8_t __ret_836; \
|
|
float16x8_t __s0_836 = __p0_836; \
|
|
float16x4_t __s1_836 = __p1_836; \
|
|
float16x8_t __rev0_836; __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, __lane_reverse_128_16); \
|
|
float16x4_t __rev1_836; __rev1_836 = __builtin_shufflevector(__s1_836, __s1_836, __lane_reverse_64_16); \
|
|
__ret_836 = __rev0_836 * __noswap_splatq_lane_f16(__rev1_836, __p2_836); \
|
|
__ret_836 = __builtin_shufflevector(__ret_836, __ret_836, __lane_reverse_128_16); \
|
|
__ret_836; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmul_lane_f16(__p0_837, __p1_837, __p2_837) __extension__ ({ \
|
|
float16x4_t __ret_837; \
|
|
float16x4_t __s0_837 = __p0_837; \
|
|
float16x4_t __s1_837 = __p1_837; \
|
|
__ret_837 = __s0_837 * splat_lane_f16(__s1_837, __p2_837); \
|
|
__ret_837; \
|
|
})
|
|
#else
|
|
#define vmul_lane_f16(__p0_838, __p1_838, __p2_838) __extension__ ({ \
|
|
float16x4_t __ret_838; \
|
|
float16x4_t __s0_838 = __p0_838; \
|
|
float16x4_t __s1_838 = __p1_838; \
|
|
float16x4_t __rev0_838; __rev0_838 = __builtin_shufflevector(__s0_838, __s0_838, __lane_reverse_64_16); \
|
|
float16x4_t __rev1_838; __rev1_838 = __builtin_shufflevector(__s1_838, __s1_838, __lane_reverse_64_16); \
|
|
__ret_838 = __rev0_838 * __noswap_splat_lane_f16(__rev1_838, __p2_838); \
|
|
__ret_838 = __builtin_shufflevector(__ret_838, __ret_838, __lane_reverse_64_16); \
|
|
__ret_838; \
|
|
})
|
|
#endif
|
|
|
|
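/*
 * vsudot*_lane_s32 and vusdot*_lane_s32: mixed-signedness dot-product
 * accumulate. vusdot treats the full-width second operand as unsigned and the
 * broadcast lane group as signed; vsudot is the opposite pairing and is
 * expressed below by swapping the operands passed to vusdot.
 */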
#ifdef __LITTLE_ENDIAN__
|
|
#define vsudotq_lane_s32(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \
|
|
int32x4_t __ret_839; \
|
|
int32x4_t __s0_839 = __p0_839; \
|
|
int8x16_t __s1_839 = __p1_839; \
|
|
uint8x8_t __s2_839 = __p2_839; \
|
|
__ret_839 = vusdotq_s32(__s0_839, __builtin_bit_cast(uint8x16_t, splatq_lane_s32(__builtin_bit_cast(int32x2_t, __s2_839), __p3_839)), __s1_839); \
|
|
__ret_839; \
|
|
})
|
|
#else
|
|
#define vsudotq_lane_s32(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \
|
|
int32x4_t __ret_840; \
|
|
int32x4_t __s0_840 = __p0_840; \
|
|
int8x16_t __s1_840 = __p1_840; \
|
|
uint8x8_t __s2_840 = __p2_840; \
|
|
int32x4_t __rev0_840; __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, __lane_reverse_128_32); \
|
|
int8x16_t __rev1_840; __rev1_840 = __builtin_shufflevector(__s1_840, __s1_840, __lane_reverse_128_8); \
|
|
uint8x8_t __rev2_840; __rev2_840 = __builtin_shufflevector(__s2_840, __s2_840, __lane_reverse_64_8); \
|
|
__ret_840 = __noswap_vusdotq_s32(__rev0_840, __builtin_bit_cast(uint8x16_t, __noswap_splatq_lane_s32(__builtin_bit_cast(int32x2_t, __rev2_840), __p3_840)), __rev1_840); \
|
|
__ret_840 = __builtin_shufflevector(__ret_840, __ret_840, __lane_reverse_128_32); \
|
|
__ret_840; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vsudot_lane_s32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
|
|
int32x2_t __ret_841; \
|
|
int32x2_t __s0_841 = __p0_841; \
|
|
int8x8_t __s1_841 = __p1_841; \
|
|
uint8x8_t __s2_841 = __p2_841; \
|
|
__ret_841 = vusdot_s32(__s0_841, __builtin_bit_cast(uint8x8_t, splat_lane_s32(__builtin_bit_cast(int32x2_t, __s2_841), __p3_841)), __s1_841); \
|
|
__ret_841; \
|
|
})
|
|
#else
|
|
#define vsudot_lane_s32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
|
|
int32x2_t __ret_842; \
|
|
int32x2_t __s0_842 = __p0_842; \
|
|
int8x8_t __s1_842 = __p1_842; \
|
|
uint8x8_t __s2_842 = __p2_842; \
|
|
int32x2_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, __lane_reverse_64_32); \
|
|
int8x8_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, __lane_reverse_64_8); \
|
|
uint8x8_t __rev2_842; __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, __lane_reverse_64_8); \
|
|
__ret_842 = __noswap_vusdot_s32(__rev0_842, __builtin_bit_cast(uint8x8_t, __noswap_splat_lane_s32(__builtin_bit_cast(int32x2_t, __rev2_842), __p3_842)), __rev1_842); \
|
|
__ret_842 = __builtin_shufflevector(__ret_842, __ret_842, __lane_reverse_64_32); \
|
|
__ret_842; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vusdotq_lane_s32(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
|
|
int32x4_t __ret_843; \
|
|
int32x4_t __s0_843 = __p0_843; \
|
|
uint8x16_t __s1_843 = __p1_843; \
|
|
int8x8_t __s2_843 = __p2_843; \
|
|
__ret_843 = vusdotq_s32(__s0_843, __s1_843, __builtin_bit_cast(int8x16_t, splatq_lane_s32(__builtin_bit_cast(int32x2_t, __s2_843), __p3_843))); \
|
|
__ret_843; \
|
|
})
|
|
#else
|
|
#define vusdotq_lane_s32(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
|
|
int32x4_t __ret_844; \
|
|
int32x4_t __s0_844 = __p0_844; \
|
|
uint8x16_t __s1_844 = __p1_844; \
|
|
int8x8_t __s2_844 = __p2_844; \
|
|
int32x4_t __rev0_844; __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, __lane_reverse_128_32); \
|
|
uint8x16_t __rev1_844; __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, __lane_reverse_128_8); \
|
|
int8x8_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, __lane_reverse_64_8); \
|
|
__ret_844 = __noswap_vusdotq_s32(__rev0_844, __rev1_844, __builtin_bit_cast(int8x16_t, __noswap_splatq_lane_s32(__builtin_bit_cast(int32x2_t, __rev2_844), __p3_844))); \
|
|
__ret_844 = __builtin_shufflevector(__ret_844, __ret_844, __lane_reverse_128_32); \
|
|
__ret_844; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vusdot_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
|
|
int32x2_t __ret_845; \
|
|
int32x2_t __s0_845 = __p0_845; \
|
|
uint8x8_t __s1_845 = __p1_845; \
|
|
int8x8_t __s2_845 = __p2_845; \
|
|
__ret_845 = vusdot_s32(__s0_845, __s1_845, __builtin_bit_cast(int8x8_t, splat_lane_s32(__builtin_bit_cast(int32x2_t, __s2_845), __p3_845))); \
|
|
__ret_845; \
|
|
})
|
|
#else
|
|
#define vusdot_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
|
|
int32x2_t __ret_846; \
|
|
int32x2_t __s0_846 = __p0_846; \
|
|
uint8x8_t __s1_846 = __p1_846; \
|
|
int8x8_t __s2_846 = __p2_846; \
|
|
int32x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, __lane_reverse_64_32); \
|
|
uint8x8_t __rev1_846; __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, __lane_reverse_64_8); \
|
|
int8x8_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, __lane_reverse_64_8); \
|
|
__ret_846 = __noswap_vusdot_s32(__rev0_846, __rev1_846, __builtin_bit_cast(int8x8_t, __noswap_splat_lane_s32(__builtin_bit_cast(int32x2_t, __rev2_846), __p3_846))); \
|
|
__ret_846 = __builtin_shufflevector(__ret_846, __ret_846, __lane_reverse_64_32); \
|
|
__ret_846; \
|
|
})
|
|
#endif
|
|
|
|
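/*
 * vabaq_* / vaba_*: absolute difference and accumulate,
 * r[i] = p0[i] + |p1[i] - p2[i]|, for each unsigned or signed 8/16/32-bit lane.
 */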
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint8x16_t __ret;
  __ret = __p0 + vabdq_u8(__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint8x16_t __ret;
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
  __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 + vabdq_u32(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint16x8_t __ret;
|
|
__ret = __p0 + vabdq_u16(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
|
|
uint16x8_t __ret;
|
|
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
__ret = __p0 + vabdq_s8(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
|
|
int8x16_t __ret;
|
|
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
|
|
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
|
|
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
|
|
__ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 + vabdq_s32(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
|
|
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
|
|
__ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 + vabdq_s16(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
|
|
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
|
|
__ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
__ret = __p0 + vabd_u8(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
|
|
uint8x8_t __ret;
|
|
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
|
|
uint32x2_t __ret;
|
|
__ret = __p0 + vabd_u32(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
|
|
uint32x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
|
|
uint16x4_t __ret;
|
|
__ret = __p0 + vabd_u16(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
|
|
uint16x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
__ret = __p0 + vabd_s8(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int8x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_8);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int32x2_t __ret;
|
|
__ret = __p0 + vabd_s32(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int32x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int16x4_t __ret;
|
|
__ret = __p0 + vabd_s16(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int16x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_64_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
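/*
 * vabdl_*: widening absolute difference. The 64-bit inputs are compared with
 * vabd_*, and the resulting 8/16/32-bit differences are zero-extended into a
 * 128-bit result with double-width lanes.
 */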
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  __ret = __builtin_bit_cast(uint16x8_t, vmovl_u8(__builtin_bit_cast(uint8x8_t, vabd_u8(__p0, __p1))));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __builtin_bit_cast(uint16x8_t, __noswap_vmovl_u8(__builtin_bit_cast(uint8x8_t, __noswap_vabd_u8(__rev0, __rev1))));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  __ret = __builtin_bit_cast(uint16x8_t, __noswap_vmovl_u8(__builtin_bit_cast(uint8x8_t, __noswap_vabd_u8(__p0, __p1))));
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, vmovl_u32(__builtin_bit_cast(uint32x2_t, vabd_u32(__p0, __p1))));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(uint64x2_t, __noswap_vmovl_u32(__builtin_bit_cast(uint32x2_t, __noswap_vabd_u32(__rev0, __rev1))));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __builtin_bit_cast(uint64x2_t, __noswap_vmovl_u32(__builtin_bit_cast(uint32x2_t, __noswap_vabd_u32(__p0, __p1))));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, vmovl_u16(__builtin_bit_cast(uint16x4_t, vabd_u16(__p0, __p1))));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(uint32x4_t, __noswap_vmovl_u16(__builtin_bit_cast(uint16x4_t, __noswap_vabd_u16(__rev0, __rev1))));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __builtin_bit_cast(uint32x4_t, __noswap_vmovl_u16(__builtin_bit_cast(uint16x4_t, __noswap_vabd_u16(__p0, __p1))));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, vmovl_u8(__builtin_bit_cast(uint8x8_t, vabd_s8(__p0, __p1))));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __builtin_bit_cast(int16x8_t, __noswap_vmovl_u8(__builtin_bit_cast(uint8x8_t, __noswap_vabd_s8(__rev0, __rev1))));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __builtin_bit_cast(int16x8_t, __noswap_vmovl_u8(__builtin_bit_cast(uint8x8_t, __noswap_vabd_s8(__p0, __p1))));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, vmovl_u32(__builtin_bit_cast(uint32x2_t, vabd_s32(__p0, __p1))));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __builtin_bit_cast(int64x2_t, __noswap_vmovl_u32(__builtin_bit_cast(uint32x2_t, __noswap_vabd_s32(__rev0, __rev1))));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __builtin_bit_cast(int64x2_t, __noswap_vmovl_u32(__builtin_bit_cast(uint32x2_t, __noswap_vabd_s32(__p0, __p1))));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, vmovl_u16(__builtin_bit_cast(uint16x4_t, vabd_s16(__p0, __p1))));
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __builtin_bit_cast(int32x4_t, __noswap_vmovl_u16(__builtin_bit_cast(uint16x4_t, __noswap_vabd_s16(__rev0, __rev1))));
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __builtin_bit_cast(int32x4_t, __noswap_vmovl_u16(__builtin_bit_cast(uint16x4_t, __noswap_vabd_s16(__p0, __p1))));
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
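/*
 * vaddl_*: widen both 64-bit operands to double-width lanes, then add.
 * vaddw_* (further below): add a widened 64-bit operand to an already
 * double-width 128-bit operand.
 */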
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = vmovl_u32(__p0) + vmovl_u32(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = vmovl_u16(__p0) + vmovl_u16(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = vmovl_s8(__p0) + vmovl_s8(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_8);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = vmovl_s32(__p0) + vmovl_s32(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_32);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = vmovl_s16(__p0) + vmovl_s16(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_64_16);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  __ret = __p0 + vmovl_u8(__p1);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  __ret = __rev0 + __noswap_vmovl_u8(__rev1);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 + vmovl_u32(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 + __noswap_vmovl_u32(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 + vmovl_u16(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 + __noswap_vmovl_u16(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 + vmovl_s8(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
__ret = __rev0 + __noswap_vmovl_s8(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 + vmovl_s32(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 + __noswap_vmovl_s32(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 + vmovl_s16(__p1);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 + __noswap_vmovl_s16(__rev1);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
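/*
 * vget_lane_f16 / vgetq_lane_f16: return the selected half-precision lane as
 * a float16_t, implemented by bit-casting the vector to int16 lanes, reading
 * the lane, and bit-casting the 16-bit value back.
 */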
#ifdef __LITTLE_ENDIAN__
#define vget_lane_f16(__p0_847, __p1_847) __extension__ ({ \
  float16_t __ret_847; \
  float16x4_t __s0_847 = __p0_847; \
  __ret_847 = __builtin_bit_cast(float16_t, vget_lane_s16(__builtin_bit_cast(int16x4_t, __s0_847), __p1_847)); \
  __ret_847; \
})
#else
#define vget_lane_f16(__p0_848, __p1_848) __extension__ ({ \
  float16_t __ret_848; \
  float16x4_t __s0_848 = __p0_848; \
  float16x4_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, __lane_reverse_64_16); \
  __ret_848 = __builtin_bit_cast(float16_t, __noswap_vget_lane_s16(__builtin_bit_cast(int16x4_t, __rev0_848), __p1_848)); \
  __ret_848; \
})
#define __noswap_vget_lane_f16(__p0_849, __p1_849) __extension__ ({ \
  float16_t __ret_849; \
  float16x4_t __s0_849 = __p0_849; \
  __ret_849 = __builtin_bit_cast(float16_t, __noswap_vget_lane_s16(__builtin_bit_cast(int16x4_t, __s0_849), __p1_849)); \
  __ret_849; \
})
#endif

#ifdef __LITTLE_ENDIAN__
|
|
#define vgetq_lane_f16(__p0_850, __p1_850) __extension__ ({ \
|
|
float16_t __ret_850; \
|
|
float16x8_t __s0_850 = __p0_850; \
|
|
__ret_850 = __builtin_bit_cast(float16_t, vgetq_lane_s16(__builtin_bit_cast(int16x8_t, __s0_850), __p1_850)); \
|
|
__ret_850; \
|
|
})
|
|
#else
|
|
#define vgetq_lane_f16(__p0_851, __p1_851) __extension__ ({ \
|
|
float16_t __ret_851; \
|
|
float16x8_t __s0_851 = __p0_851; \
|
|
float16x8_t __rev0_851; __rev0_851 = __builtin_shufflevector(__s0_851, __s0_851, __lane_reverse_128_16); \
|
|
__ret_851 = __builtin_bit_cast(float16_t, __noswap_vgetq_lane_s16(__builtin_bit_cast(int16x8_t, __rev0_851), __p1_851)); \
|
|
__ret_851; \
|
|
})
|
|
#define __noswap_vgetq_lane_f16(__p0_852, __p1_852) __extension__ ({ \
|
|
float16_t __ret_852; \
|
|
float16x8_t __s0_852 = __p0_852; \
|
|
__ret_852 = __builtin_bit_cast(float16_t, __noswap_vgetq_lane_s16(__builtin_bit_cast(int16x8_t, __s0_852), __p1_852)); \
|
|
__ret_852; \
|
|
})
|
|
#endif
|
|
|
|
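/*
 * vmlal_*: widening multiply-accumulate, r = p0 + vmull(p1, p2). The _lane
 * and _n variants (further below) broadcast a selected lane or a scalar as
 * the second multiplicand.
 */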
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint16x8_t __ret;
  __ret = __p0 + vmull_u8(__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
  __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint16x8_t __ret;
  __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 + vmull_u32(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 + __noswap_vmull_u32(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 + vmull_u16(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 + __noswap_vmull_u16(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 + vmull_s8(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int16x8_t __ret;
|
|
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
|
|
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
|
|
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
|
|
__ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
|
|
int16x8_t __ret;
|
|
__ret = __p0 + __noswap_vmull_s8(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 + vmull_s32(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 + __noswap_vmull_s32(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 + vmull_s16(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 + __noswap_vmull_s16(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_lane_u32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \
|
|
uint64x2_t __ret_853; \
|
|
uint64x2_t __s0_853 = __p0_853; \
|
|
uint32x2_t __s1_853 = __p1_853; \
|
|
uint32x2_t __s2_853 = __p2_853; \
|
|
__ret_853 = __s0_853 + vmull_u32(__s1_853, splat_lane_u32(__s2_853, __p3_853)); \
|
|
__ret_853; \
|
|
})
|
|
#else
|
|
#define vmlal_lane_u32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \
|
|
uint64x2_t __ret_854; \
|
|
uint64x2_t __s0_854 = __p0_854; \
|
|
uint32x2_t __s1_854 = __p1_854; \
|
|
uint32x2_t __s2_854 = __p2_854; \
|
|
uint64x2_t __rev0_854; __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, __lane_reverse_128_64); \
|
|
uint32x2_t __rev1_854; __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, __lane_reverse_64_32); \
|
|
uint32x2_t __rev2_854; __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, __lane_reverse_64_32); \
|
|
__ret_854 = __rev0_854 + __noswap_vmull_u32(__rev1_854, __noswap_splat_lane_u32(__rev2_854, __p3_854)); \
|
|
__ret_854 = __builtin_shufflevector(__ret_854, __ret_854, __lane_reverse_128_64); \
|
|
__ret_854; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_lane_u16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \
|
|
uint32x4_t __ret_855; \
|
|
uint32x4_t __s0_855 = __p0_855; \
|
|
uint16x4_t __s1_855 = __p1_855; \
|
|
uint16x4_t __s2_855 = __p2_855; \
|
|
__ret_855 = __s0_855 + vmull_u16(__s1_855, splat_lane_u16(__s2_855, __p3_855)); \
|
|
__ret_855; \
|
|
})
|
|
#else
|
|
#define vmlal_lane_u16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \
|
|
uint32x4_t __ret_856; \
|
|
uint32x4_t __s0_856 = __p0_856; \
|
|
uint16x4_t __s1_856 = __p1_856; \
|
|
uint16x4_t __s2_856 = __p2_856; \
|
|
uint32x4_t __rev0_856; __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, __lane_reverse_128_32); \
|
|
uint16x4_t __rev1_856; __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, __lane_reverse_64_16); \
|
|
uint16x4_t __rev2_856; __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, __lane_reverse_64_16); \
|
|
__ret_856 = __rev0_856 + __noswap_vmull_u16(__rev1_856, __noswap_splat_lane_u16(__rev2_856, __p3_856)); \
|
|
__ret_856 = __builtin_shufflevector(__ret_856, __ret_856, __lane_reverse_128_32); \
|
|
__ret_856; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_lane_s32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \
|
|
int64x2_t __ret_857; \
|
|
int64x2_t __s0_857 = __p0_857; \
|
|
int32x2_t __s1_857 = __p1_857; \
|
|
int32x2_t __s2_857 = __p2_857; \
|
|
__ret_857 = __s0_857 + vmull_s32(__s1_857, splat_lane_s32(__s2_857, __p3_857)); \
|
|
__ret_857; \
|
|
})
|
|
#else
|
|
#define vmlal_lane_s32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \
|
|
int64x2_t __ret_858; \
|
|
int64x2_t __s0_858 = __p0_858; \
|
|
int32x2_t __s1_858 = __p1_858; \
|
|
int32x2_t __s2_858 = __p2_858; \
|
|
int64x2_t __rev0_858; __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, __lane_reverse_128_64); \
|
|
int32x2_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, __lane_reverse_64_32); \
|
|
int32x2_t __rev2_858; __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, __lane_reverse_64_32); \
|
|
__ret_858 = __rev0_858 + __noswap_vmull_s32(__rev1_858, __noswap_splat_lane_s32(__rev2_858, __p3_858)); \
|
|
__ret_858 = __builtin_shufflevector(__ret_858, __ret_858, __lane_reverse_128_64); \
|
|
__ret_858; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vmlal_lane_s16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \
|
|
int32x4_t __ret_859; \
|
|
int32x4_t __s0_859 = __p0_859; \
|
|
int16x4_t __s1_859 = __p1_859; \
|
|
int16x4_t __s2_859 = __p2_859; \
|
|
__ret_859 = __s0_859 + vmull_s16(__s1_859, splat_lane_s16(__s2_859, __p3_859)); \
|
|
__ret_859; \
|
|
})
|
|
#else
|
|
#define vmlal_lane_s16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \
|
|
int32x4_t __ret_860; \
|
|
int32x4_t __s0_860 = __p0_860; \
|
|
int16x4_t __s1_860 = __p1_860; \
|
|
int16x4_t __s2_860 = __p2_860; \
|
|
int32x4_t __rev0_860; __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, __lane_reverse_128_32); \
|
|
int16x4_t __rev1_860; __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, __lane_reverse_64_16); \
|
|
int16x4_t __rev2_860; __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, __lane_reverse_64_16); \
|
|
__ret_860 = __rev0_860 + __noswap_vmull_s16(__rev1_860, __noswap_splat_lane_s16(__rev2_860, __p3_860)); \
|
|
__ret_860 = __builtin_shufflevector(__ret_860, __ret_860, __lane_reverse_128_32); \
|
|
__ret_860; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
__ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
|
|
int64x2_t __ret;
|
|
__ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
__ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
|
|
int32x4_t __ret;
|
|
__ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
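/*
 * vmlsl_*: widening multiply-subtract, r = p0 - vmull(p1, p2), the
 * subtracting counterpart of vmlal_* above.
 */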
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint16x8_t __ret;
  __ret = __p0 - vmull_u8(__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
  __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint16x8_t __ret;
  __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 - vmull_u32(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
|
|
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
|
|
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
|
|
__ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
|
|
uint64x2_t __ret;
|
|
__ret = __p0 - __noswap_vmull_u32(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 - vmull_u16(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#else
|
|
__ai __attribute__((target("neon"))) uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
|
|
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
|
|
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
|
|
__ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
|
|
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
|
|
return __ret;
|
|
}
|
|
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
|
|
uint32x4_t __ret;
|
|
__ret = __p0 - __noswap_vmull_u16(__p1, __p2);
|
|
return __ret;
|
|
}
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
int16x8_t __ret;
__ret = __p0 - vmull_s8(__p1, __p2);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
int16x8_t __ret;
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
__ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
__ai __attribute__((target("neon"))) int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
int16x8_t __ret;
__ret = __p0 - __noswap_vmull_s8(__p1, __p2);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
int64x2_t __ret;
__ret = __p0 - vmull_s32(__p1, __p2);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
int64x2_t __ret;
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
__ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
int64x2_t __ret;
__ret = __p0 - __noswap_vmull_s32(__p1, __p2);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
int32x4_t __ret;
__ret = __p0 - vmull_s16(__p1, __p2);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
int32x4_t __ret;
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
__ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
int32x4_t __ret;
__ret = __p0 - __noswap_vmull_s16(__p1, __p2);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vmlsl_lane_u32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \
uint64x2_t __ret_861; \
uint64x2_t __s0_861 = __p0_861; \
uint32x2_t __s1_861 = __p1_861; \
uint32x2_t __s2_861 = __p2_861; \
__ret_861 = __s0_861 - vmull_u32(__s1_861, splat_lane_u32(__s2_861, __p3_861)); \
__ret_861; \
})
#else
#define vmlsl_lane_u32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \
uint64x2_t __ret_862; \
uint64x2_t __s0_862 = __p0_862; \
uint32x2_t __s1_862 = __p1_862; \
uint32x2_t __s2_862 = __p2_862; \
uint64x2_t __rev0_862; __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, __lane_reverse_128_64); \
uint32x2_t __rev1_862; __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, __lane_reverse_64_32); \
uint32x2_t __rev2_862; __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, __lane_reverse_64_32); \
__ret_862 = __rev0_862 - __noswap_vmull_u32(__rev1_862, __noswap_splat_lane_u32(__rev2_862, __p3_862)); \
__ret_862 = __builtin_shufflevector(__ret_862, __ret_862, __lane_reverse_128_64); \
__ret_862; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmlsl_lane_u16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \
uint32x4_t __ret_863; \
uint32x4_t __s0_863 = __p0_863; \
uint16x4_t __s1_863 = __p1_863; \
uint16x4_t __s2_863 = __p2_863; \
__ret_863 = __s0_863 - vmull_u16(__s1_863, splat_lane_u16(__s2_863, __p3_863)); \
__ret_863; \
})
#else
#define vmlsl_lane_u16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \
uint32x4_t __ret_864; \
uint32x4_t __s0_864 = __p0_864; \
uint16x4_t __s1_864 = __p1_864; \
uint16x4_t __s2_864 = __p2_864; \
uint32x4_t __rev0_864; __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, __lane_reverse_128_32); \
uint16x4_t __rev1_864; __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, __lane_reverse_64_16); \
uint16x4_t __rev2_864; __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, __lane_reverse_64_16); \
__ret_864 = __rev0_864 - __noswap_vmull_u16(__rev1_864, __noswap_splat_lane_u16(__rev2_864, __p3_864)); \
__ret_864 = __builtin_shufflevector(__ret_864, __ret_864, __lane_reverse_128_32); \
__ret_864; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmlsl_lane_s32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \
int64x2_t __ret_865; \
int64x2_t __s0_865 = __p0_865; \
int32x2_t __s1_865 = __p1_865; \
int32x2_t __s2_865 = __p2_865; \
__ret_865 = __s0_865 - vmull_s32(__s1_865, splat_lane_s32(__s2_865, __p3_865)); \
__ret_865; \
})
#else
#define vmlsl_lane_s32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \
int64x2_t __ret_866; \
int64x2_t __s0_866 = __p0_866; \
int32x2_t __s1_866 = __p1_866; \
int32x2_t __s2_866 = __p2_866; \
int64x2_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, __lane_reverse_128_64); \
int32x2_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, __lane_reverse_64_32); \
int32x2_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, __lane_reverse_64_32); \
__ret_866 = __rev0_866 - __noswap_vmull_s32(__rev1_866, __noswap_splat_lane_s32(__rev2_866, __p3_866)); \
__ret_866 = __builtin_shufflevector(__ret_866, __ret_866, __lane_reverse_128_64); \
__ret_866; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmlsl_lane_s16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \
int32x4_t __ret_867; \
int32x4_t __s0_867 = __p0_867; \
int16x4_t __s1_867 = __p1_867; \
int16x4_t __s2_867 = __p2_867; \
__ret_867 = __s0_867 - vmull_s16(__s1_867, splat_lane_s16(__s2_867, __p3_867)); \
__ret_867; \
})
#else
#define vmlsl_lane_s16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \
int32x4_t __ret_868; \
int32x4_t __s0_868 = __p0_868; \
int16x4_t __s1_868 = __p1_868; \
int16x4_t __s2_868 = __p2_868; \
int32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, __lane_reverse_128_32); \
int16x4_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, __lane_reverse_64_16); \
int16x4_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, __lane_reverse_64_16); \
__ret_868 = __rev0_868 - __noswap_vmull_s16(__rev1_868, __noswap_splat_lane_s16(__rev2_868, __p3_868)); \
__ret_868 = __builtin_shufflevector(__ret_868, __ret_868, __lane_reverse_128_32); \
__ret_868; \
})
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
uint64x2_t __ret;
__ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
uint64x2_t __ret;
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
__ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
uint64x2_t __ret;
__ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
uint32x4_t __ret;
__ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
uint32x4_t __ret;
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
__ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
uint32x4_t __ret;
__ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
int64x2_t __ret;
__ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
int64x2_t __ret;
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
__ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
int64x2_t __ret;
__ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
int32x4_t __ret;
__ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
int32x4_t __ret;
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
__ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
int32x4_t __ret;
__ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vset_lane_f16(__p0_869, __p1_869, __p2_869) __extension__ ({ \
float16x4_t __ret_869; \
float16_t __s0_869 = __p0_869; \
float16x4_t __s1_869 = __p1_869; \
__ret_869 = __builtin_bit_cast(float16x4_t, vset_lane_s16(__builtin_bit_cast(int16_t, __s0_869), __builtin_bit_cast(int16x4_t, __s1_869), __p2_869)); \
__ret_869; \
})
#else
#define vset_lane_f16(__p0_870, __p1_870, __p2_870) __extension__ ({ \
float16x4_t __ret_870; \
float16_t __s0_870 = __p0_870; \
float16x4_t __s1_870 = __p1_870; \
float16x4_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, __lane_reverse_64_16); \
__ret_870 = __builtin_bit_cast(float16x4_t, __noswap_vset_lane_s16(__builtin_bit_cast(int16_t, __s0_870), __builtin_bit_cast(int16x4_t, __rev1_870), __p2_870)); \
__ret_870 = __builtin_shufflevector(__ret_870, __ret_870, __lane_reverse_64_16); \
__ret_870; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vsetq_lane_f16(__p0_871, __p1_871, __p2_871) __extension__ ({ \
float16x8_t __ret_871; \
float16_t __s0_871 = __p0_871; \
float16x8_t __s1_871 = __p1_871; \
__ret_871 = __builtin_bit_cast(float16x8_t, vsetq_lane_s16(__builtin_bit_cast(int16_t, __s0_871), __builtin_bit_cast(int16x8_t, __s1_871), __p2_871)); \
__ret_871; \
})
#else
#define vsetq_lane_f16(__p0_872, __p1_872, __p2_872) __extension__ ({ \
float16x8_t __ret_872; \
float16_t __s0_872 = __p0_872; \
float16x8_t __s1_872 = __p1_872; \
float16x8_t __rev1_872; __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, __lane_reverse_128_16); \
__ret_872 = __builtin_bit_cast(float16x8_t, __noswap_vsetq_lane_s16(__builtin_bit_cast(int16_t, __s0_872), __builtin_bit_cast(int16x8_t, __rev1_872), __p2_872)); \
__ret_872 = __builtin_shufflevector(__ret_872, __ret_872, __lane_reverse_128_16); \
__ret_872; \
})
#endif

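/* The intrinsics below are only provided for AArch64 and Arm64EC targets. */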
#if defined(__aarch64__) || defined(__arm64ec__)
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("aes,neon"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
poly128_t __ret;
__ret = vmull_p64(__builtin_bit_cast(poly64_t, vget_high_p64(__p0)), __builtin_bit_cast(poly64_t, vget_high_p64(__p1)));
return __ret;
}
#else
__ai __attribute__((target("aes,neon"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
poly128_t __ret;
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_64);
__ret = vmull_p64(__builtin_bit_cast(poly64_t, __noswap_vget_high_p64(__rev0)), __builtin_bit_cast(poly64_t, __noswap_vget_high_p64(__rev1)));
return __ret;
}
#endif

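/* The vfmlal/vfmlsl lane variants splat the selected half-precision lane across
   a full vector and then call the corresponding non-lane intrinsic. */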
#ifdef __LITTLE_ENDIAN__
#define vfmlalq_lane_high_f16(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \
float32x4_t __ret_873; \
float32x4_t __s0_873 = __p0_873; \
float16x8_t __s1_873 = __p1_873; \
float16x4_t __s2_873 = __p2_873; \
__ret_873 = vfmlalq_high_f16(__s0_873, __s1_873, (float16x8_t) {vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873), vget_lane_f16(__s2_873, __p3_873)}); \
__ret_873; \
})
#else
#define vfmlalq_lane_high_f16(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \
float32x4_t __ret_874; \
float32x4_t __s0_874 = __p0_874; \
float16x8_t __s1_874 = __p1_874; \
float16x4_t __s2_874 = __p2_874; \
float32x4_t __rev0_874; __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, __lane_reverse_128_32); \
float16x8_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, __lane_reverse_128_16); \
float16x4_t __rev2_874; __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, __lane_reverse_64_16); \
__ret_874 = __noswap_vfmlalq_high_f16(__rev0_874, __rev1_874, (float16x8_t) {__noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874), __noswap_vget_lane_f16(__rev2_874, __p3_874)}); \
__ret_874 = __builtin_shufflevector(__ret_874, __ret_874, __lane_reverse_128_32); \
__ret_874; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlal_lane_high_f16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \
float32x2_t __ret_875; \
float32x2_t __s0_875 = __p0_875; \
float16x4_t __s1_875 = __p1_875; \
float16x4_t __s2_875 = __p2_875; \
__ret_875 = vfmlal_high_f16(__s0_875, __s1_875, (float16x4_t) {vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875), vget_lane_f16(__s2_875, __p3_875)}); \
__ret_875; \
})
#else
#define vfmlal_lane_high_f16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \
float32x2_t __ret_876; \
float32x2_t __s0_876 = __p0_876; \
float16x4_t __s1_876 = __p1_876; \
float16x4_t __s2_876 = __p2_876; \
float32x2_t __rev0_876; __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, __lane_reverse_64_32); \
float16x4_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, __lane_reverse_64_16); \
float16x4_t __rev2_876; __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, __lane_reverse_64_16); \
__ret_876 = __noswap_vfmlal_high_f16(__rev0_876, __rev1_876, (float16x4_t) {__noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876), __noswap_vget_lane_f16(__rev2_876, __p3_876)}); \
__ret_876 = __builtin_shufflevector(__ret_876, __ret_876, __lane_reverse_64_32); \
__ret_876; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlalq_lane_low_f16(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \
float32x4_t __ret_877; \
float32x4_t __s0_877 = __p0_877; \
float16x8_t __s1_877 = __p1_877; \
float16x4_t __s2_877 = __p2_877; \
__ret_877 = vfmlalq_low_f16(__s0_877, __s1_877, (float16x8_t) {vget_lane_f16(__s2_877, __p3_877), vget_lane_f16(__s2_877, __p3_877), vget_lane_f16(__s2_877, __p3_877), vget_lane_f16(__s2_877, __p3_877), vget_lane_f16(__s2_877, __p3_877), vget_lane_f16(__s2_877, __p3_877), vget_lane_f16(__s2_877, __p3_877), vget_lane_f16(__s2_877, __p3_877)}); \
__ret_877; \
})
#else
#define vfmlalq_lane_low_f16(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \
float32x4_t __ret_878; \
float32x4_t __s0_878 = __p0_878; \
float16x8_t __s1_878 = __p1_878; \
float16x4_t __s2_878 = __p2_878; \
float32x4_t __rev0_878; __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, __lane_reverse_128_32); \
float16x8_t __rev1_878; __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, __lane_reverse_128_16); \
float16x4_t __rev2_878; __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, __lane_reverse_64_16); \
__ret_878 = __noswap_vfmlalq_low_f16(__rev0_878, __rev1_878, (float16x8_t) {__noswap_vget_lane_f16(__rev2_878, __p3_878), __noswap_vget_lane_f16(__rev2_878, __p3_878), __noswap_vget_lane_f16(__rev2_878, __p3_878), __noswap_vget_lane_f16(__rev2_878, __p3_878), __noswap_vget_lane_f16(__rev2_878, __p3_878), __noswap_vget_lane_f16(__rev2_878, __p3_878), __noswap_vget_lane_f16(__rev2_878, __p3_878), __noswap_vget_lane_f16(__rev2_878, __p3_878)}); \
__ret_878 = __builtin_shufflevector(__ret_878, __ret_878, __lane_reverse_128_32); \
__ret_878; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlal_lane_low_f16(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \
float32x2_t __ret_879; \
float32x2_t __s0_879 = __p0_879; \
float16x4_t __s1_879 = __p1_879; \
float16x4_t __s2_879 = __p2_879; \
__ret_879 = vfmlal_low_f16(__s0_879, __s1_879, (float16x4_t) {vget_lane_f16(__s2_879, __p3_879), vget_lane_f16(__s2_879, __p3_879), vget_lane_f16(__s2_879, __p3_879), vget_lane_f16(__s2_879, __p3_879)}); \
__ret_879; \
})
#else
#define vfmlal_lane_low_f16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \
float32x2_t __ret_880; \
float32x2_t __s0_880 = __p0_880; \
float16x4_t __s1_880 = __p1_880; \
float16x4_t __s2_880 = __p2_880; \
float32x2_t __rev0_880; __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, __lane_reverse_64_32); \
float16x4_t __rev1_880; __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, __lane_reverse_64_16); \
float16x4_t __rev2_880; __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, __lane_reverse_64_16); \
__ret_880 = __noswap_vfmlal_low_f16(__rev0_880, __rev1_880, (float16x4_t) {__noswap_vget_lane_f16(__rev2_880, __p3_880), __noswap_vget_lane_f16(__rev2_880, __p3_880), __noswap_vget_lane_f16(__rev2_880, __p3_880), __noswap_vget_lane_f16(__rev2_880, __p3_880)}); \
__ret_880 = __builtin_shufflevector(__ret_880, __ret_880, __lane_reverse_64_32); \
__ret_880; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlalq_laneq_high_f16(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \
float32x4_t __ret_881; \
float32x4_t __s0_881 = __p0_881; \
float16x8_t __s1_881 = __p1_881; \
float16x8_t __s2_881 = __p2_881; \
__ret_881 = vfmlalq_high_f16(__s0_881, __s1_881, (float16x8_t) {vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881), vgetq_lane_f16(__s2_881, __p3_881)}); \
__ret_881; \
})
#else
#define vfmlalq_laneq_high_f16(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \
float32x4_t __ret_882; \
float32x4_t __s0_882 = __p0_882; \
float16x8_t __s1_882 = __p1_882; \
float16x8_t __s2_882 = __p2_882; \
float32x4_t __rev0_882; __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, __lane_reverse_128_32); \
float16x8_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, __lane_reverse_128_16); \
float16x8_t __rev2_882; __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, __lane_reverse_128_16); \
__ret_882 = __noswap_vfmlalq_high_f16(__rev0_882, __rev1_882, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882), __noswap_vgetq_lane_f16(__rev2_882, __p3_882)}); \
__ret_882 = __builtin_shufflevector(__ret_882, __ret_882, __lane_reverse_128_32); \
__ret_882; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlal_laneq_high_f16(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \
float32x2_t __ret_883; \
float32x2_t __s0_883 = __p0_883; \
float16x4_t __s1_883 = __p1_883; \
float16x8_t __s2_883 = __p2_883; \
__ret_883 = vfmlal_high_f16(__s0_883, __s1_883, (float16x4_t) {vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883), vgetq_lane_f16(__s2_883, __p3_883)}); \
__ret_883; \
})
#else
#define vfmlal_laneq_high_f16(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \
float32x2_t __ret_884; \
float32x2_t __s0_884 = __p0_884; \
float16x4_t __s1_884 = __p1_884; \
float16x8_t __s2_884 = __p2_884; \
float32x2_t __rev0_884; __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, __lane_reverse_64_32); \
float16x4_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, __lane_reverse_64_16); \
float16x8_t __rev2_884; __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, __lane_reverse_128_16); \
__ret_884 = __noswap_vfmlal_high_f16(__rev0_884, __rev1_884, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884), __noswap_vgetq_lane_f16(__rev2_884, __p3_884)}); \
__ret_884 = __builtin_shufflevector(__ret_884, __ret_884, __lane_reverse_64_32); \
__ret_884; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlalq_laneq_low_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \
float32x4_t __ret_885; \
float32x4_t __s0_885 = __p0_885; \
float16x8_t __s1_885 = __p1_885; \
float16x8_t __s2_885 = __p2_885; \
__ret_885 = vfmlalq_low_f16(__s0_885, __s1_885, (float16x8_t) {vgetq_lane_f16(__s2_885, __p3_885), vgetq_lane_f16(__s2_885, __p3_885), vgetq_lane_f16(__s2_885, __p3_885), vgetq_lane_f16(__s2_885, __p3_885), vgetq_lane_f16(__s2_885, __p3_885), vgetq_lane_f16(__s2_885, __p3_885), vgetq_lane_f16(__s2_885, __p3_885), vgetq_lane_f16(__s2_885, __p3_885)}); \
__ret_885; \
})
#else
#define vfmlalq_laneq_low_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \
float32x4_t __ret_886; \
float32x4_t __s0_886 = __p0_886; \
float16x8_t __s1_886 = __p1_886; \
float16x8_t __s2_886 = __p2_886; \
float32x4_t __rev0_886; __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, __lane_reverse_128_32); \
float16x8_t __rev1_886; __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, __lane_reverse_128_16); \
float16x8_t __rev2_886; __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, __lane_reverse_128_16); \
__ret_886 = __noswap_vfmlalq_low_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_886, __p3_886), __noswap_vgetq_lane_f16(__rev2_886, __p3_886), __noswap_vgetq_lane_f16(__rev2_886, __p3_886), __noswap_vgetq_lane_f16(__rev2_886, __p3_886), __noswap_vgetq_lane_f16(__rev2_886, __p3_886), __noswap_vgetq_lane_f16(__rev2_886, __p3_886), __noswap_vgetq_lane_f16(__rev2_886, __p3_886), __noswap_vgetq_lane_f16(__rev2_886, __p3_886)}); \
__ret_886 = __builtin_shufflevector(__ret_886, __ret_886, __lane_reverse_128_32); \
__ret_886; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlal_laneq_low_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \
float32x2_t __ret_887; \
float32x2_t __s0_887 = __p0_887; \
float16x4_t __s1_887 = __p1_887; \
float16x8_t __s2_887 = __p2_887; \
__ret_887 = vfmlal_low_f16(__s0_887, __s1_887, (float16x4_t) {vgetq_lane_f16(__s2_887, __p3_887), vgetq_lane_f16(__s2_887, __p3_887), vgetq_lane_f16(__s2_887, __p3_887), vgetq_lane_f16(__s2_887, __p3_887)}); \
__ret_887; \
})
#else
#define vfmlal_laneq_low_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \
float32x2_t __ret_888; \
float32x2_t __s0_888 = __p0_888; \
float16x4_t __s1_888 = __p1_888; \
float16x8_t __s2_888 = __p2_888; \
float32x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, __lane_reverse_64_32); \
float16x4_t __rev1_888; __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, __lane_reverse_64_16); \
float16x8_t __rev2_888; __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, __lane_reverse_128_16); \
__ret_888 = __noswap_vfmlal_low_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_888, __p3_888), __noswap_vgetq_lane_f16(__rev2_888, __p3_888), __noswap_vgetq_lane_f16(__rev2_888, __p3_888), __noswap_vgetq_lane_f16(__rev2_888, __p3_888)}); \
__ret_888 = __builtin_shufflevector(__ret_888, __ret_888, __lane_reverse_64_32); \
__ret_888; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlslq_lane_high_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \
float32x4_t __ret_889; \
float32x4_t __s0_889 = __p0_889; \
float16x8_t __s1_889 = __p1_889; \
float16x4_t __s2_889 = __p2_889; \
__ret_889 = vfmlslq_high_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \
__ret_889; \
})
#else
#define vfmlslq_lane_high_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \
float32x4_t __ret_890; \
float32x4_t __s0_890 = __p0_890; \
float16x8_t __s1_890 = __p1_890; \
float16x4_t __s2_890 = __p2_890; \
float32x4_t __rev0_890; __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, __lane_reverse_128_32); \
float16x8_t __rev1_890; __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, __lane_reverse_128_16); \
float16x4_t __rev2_890; __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, __lane_reverse_64_16); \
__ret_890 = __noswap_vfmlslq_high_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \
__ret_890 = __builtin_shufflevector(__ret_890, __ret_890, __lane_reverse_128_32); \
__ret_890; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlsl_lane_high_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \
float32x2_t __ret_891; \
float32x2_t __s0_891 = __p0_891; \
float16x4_t __s1_891 = __p1_891; \
float16x4_t __s2_891 = __p2_891; \
__ret_891 = vfmlsl_high_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \
__ret_891; \
})
#else
#define vfmlsl_lane_high_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \
float32x2_t __ret_892; \
float32x2_t __s0_892 = __p0_892; \
float16x4_t __s1_892 = __p1_892; \
float16x4_t __s2_892 = __p2_892; \
float32x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, __lane_reverse_64_32); \
float16x4_t __rev1_892; __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, __lane_reverse_64_16); \
float16x4_t __rev2_892; __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, __lane_reverse_64_16); \
__ret_892 = __noswap_vfmlsl_high_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \
__ret_892 = __builtin_shufflevector(__ret_892, __ret_892, __lane_reverse_64_32); \
__ret_892; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlslq_lane_low_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \
float32x4_t __ret_893; \
float32x4_t __s0_893 = __p0_893; \
float16x8_t __s1_893 = __p1_893; \
float16x4_t __s2_893 = __p2_893; \
__ret_893 = vfmlslq_low_f16(__s0_893, __s1_893, (float16x8_t) {vget_lane_f16(__s2_893, __p3_893), vget_lane_f16(__s2_893, __p3_893), vget_lane_f16(__s2_893, __p3_893), vget_lane_f16(__s2_893, __p3_893), vget_lane_f16(__s2_893, __p3_893), vget_lane_f16(__s2_893, __p3_893), vget_lane_f16(__s2_893, __p3_893), vget_lane_f16(__s2_893, __p3_893)}); \
__ret_893; \
})
#else
#define vfmlslq_lane_low_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \
float32x4_t __ret_894; \
float32x4_t __s0_894 = __p0_894; \
float16x8_t __s1_894 = __p1_894; \
float16x4_t __s2_894 = __p2_894; \
float32x4_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, __lane_reverse_128_32); \
float16x8_t __rev1_894; __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, __lane_reverse_128_16); \
float16x4_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, __lane_reverse_64_16); \
__ret_894 = __noswap_vfmlslq_low_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vget_lane_f16(__rev2_894, __p3_894), __noswap_vget_lane_f16(__rev2_894, __p3_894), __noswap_vget_lane_f16(__rev2_894, __p3_894), __noswap_vget_lane_f16(__rev2_894, __p3_894), __noswap_vget_lane_f16(__rev2_894, __p3_894), __noswap_vget_lane_f16(__rev2_894, __p3_894), __noswap_vget_lane_f16(__rev2_894, __p3_894), __noswap_vget_lane_f16(__rev2_894, __p3_894)}); \
__ret_894 = __builtin_shufflevector(__ret_894, __ret_894, __lane_reverse_128_32); \
__ret_894; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlsl_lane_low_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \
float32x2_t __ret_895; \
float32x2_t __s0_895 = __p0_895; \
float16x4_t __s1_895 = __p1_895; \
float16x4_t __s2_895 = __p2_895; \
__ret_895 = vfmlsl_low_f16(__s0_895, __s1_895, (float16x4_t) {vget_lane_f16(__s2_895, __p3_895), vget_lane_f16(__s2_895, __p3_895), vget_lane_f16(__s2_895, __p3_895), vget_lane_f16(__s2_895, __p3_895)}); \
__ret_895; \
})
#else
#define vfmlsl_lane_low_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \
float32x2_t __ret_896; \
float32x2_t __s0_896 = __p0_896; \
float16x4_t __s1_896 = __p1_896; \
float16x4_t __s2_896 = __p2_896; \
float32x2_t __rev0_896; __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, __lane_reverse_64_32); \
float16x4_t __rev1_896; __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, __lane_reverse_64_16); \
float16x4_t __rev2_896; __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, __lane_reverse_64_16); \
__ret_896 = __noswap_vfmlsl_low_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vget_lane_f16(__rev2_896, __p3_896), __noswap_vget_lane_f16(__rev2_896, __p3_896), __noswap_vget_lane_f16(__rev2_896, __p3_896), __noswap_vget_lane_f16(__rev2_896, __p3_896)}); \
__ret_896 = __builtin_shufflevector(__ret_896, __ret_896, __lane_reverse_64_32); \
__ret_896; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlslq_laneq_high_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \
float32x4_t __ret_897; \
float32x4_t __s0_897 = __p0_897; \
float16x8_t __s1_897 = __p1_897; \
float16x8_t __s2_897 = __p2_897; \
__ret_897 = vfmlslq_high_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \
__ret_897; \
})
#else
#define vfmlslq_laneq_high_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \
float32x4_t __ret_898; \
float32x4_t __s0_898 = __p0_898; \
float16x8_t __s1_898 = __p1_898; \
float16x8_t __s2_898 = __p2_898; \
float32x4_t __rev0_898; __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, __lane_reverse_128_32); \
float16x8_t __rev1_898; __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, __lane_reverse_128_16); \
float16x8_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, __lane_reverse_128_16); \
__ret_898 = __noswap_vfmlslq_high_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \
__ret_898 = __builtin_shufflevector(__ret_898, __ret_898, __lane_reverse_128_32); \
__ret_898; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlsl_laneq_high_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \
float32x2_t __ret_899; \
float32x2_t __s0_899 = __p0_899; \
float16x4_t __s1_899 = __p1_899; \
float16x8_t __s2_899 = __p2_899; \
__ret_899 = vfmlsl_high_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \
__ret_899; \
})
#else
#define vfmlsl_laneq_high_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \
float32x2_t __ret_900; \
float32x2_t __s0_900 = __p0_900; \
float16x4_t __s1_900 = __p1_900; \
float16x8_t __s2_900 = __p2_900; \
float32x2_t __rev0_900; __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, __lane_reverse_64_32); \
float16x4_t __rev1_900; __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, __lane_reverse_64_16); \
float16x8_t __rev2_900; __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, __lane_reverse_128_16); \
__ret_900 = __noswap_vfmlsl_high_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \
__ret_900 = __builtin_shufflevector(__ret_900, __ret_900, __lane_reverse_64_32); \
__ret_900; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlslq_laneq_low_f16(__p0_901, __p1_901, __p2_901, __p3_901) __extension__ ({ \
float32x4_t __ret_901; \
float32x4_t __s0_901 = __p0_901; \
float16x8_t __s1_901 = __p1_901; \
float16x8_t __s2_901 = __p2_901; \
__ret_901 = vfmlslq_low_f16(__s0_901, __s1_901, (float16x8_t) {vgetq_lane_f16(__s2_901, __p3_901), vgetq_lane_f16(__s2_901, __p3_901), vgetq_lane_f16(__s2_901, __p3_901), vgetq_lane_f16(__s2_901, __p3_901), vgetq_lane_f16(__s2_901, __p3_901), vgetq_lane_f16(__s2_901, __p3_901), vgetq_lane_f16(__s2_901, __p3_901), vgetq_lane_f16(__s2_901, __p3_901)}); \
__ret_901; \
})
#else
#define vfmlslq_laneq_low_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \
float32x4_t __ret_902; \
float32x4_t __s0_902 = __p0_902; \
float16x8_t __s1_902 = __p1_902; \
float16x8_t __s2_902 = __p2_902; \
float32x4_t __rev0_902; __rev0_902 = __builtin_shufflevector(__s0_902, __s0_902, __lane_reverse_128_32); \
float16x8_t __rev1_902; __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, __lane_reverse_128_16); \
float16x8_t __rev2_902; __rev2_902 = __builtin_shufflevector(__s2_902, __s2_902, __lane_reverse_128_16); \
__ret_902 = __noswap_vfmlslq_low_f16(__rev0_902, __rev1_902, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_902, __p3_902), __noswap_vgetq_lane_f16(__rev2_902, __p3_902), __noswap_vgetq_lane_f16(__rev2_902, __p3_902), __noswap_vgetq_lane_f16(__rev2_902, __p3_902), __noswap_vgetq_lane_f16(__rev2_902, __p3_902), __noswap_vgetq_lane_f16(__rev2_902, __p3_902), __noswap_vgetq_lane_f16(__rev2_902, __p3_902), __noswap_vgetq_lane_f16(__rev2_902, __p3_902)}); \
__ret_902 = __builtin_shufflevector(__ret_902, __ret_902, __lane_reverse_128_32); \
__ret_902; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vfmlsl_laneq_low_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \
float32x2_t __ret_903; \
float32x2_t __s0_903 = __p0_903; \
float16x4_t __s1_903 = __p1_903; \
float16x8_t __s2_903 = __p2_903; \
__ret_903 = vfmlsl_low_f16(__s0_903, __s1_903, (float16x4_t) {vgetq_lane_f16(__s2_903, __p3_903), vgetq_lane_f16(__s2_903, __p3_903), vgetq_lane_f16(__s2_903, __p3_903), vgetq_lane_f16(__s2_903, __p3_903)}); \
__ret_903; \
})
#else
#define vfmlsl_laneq_low_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \
float32x2_t __ret_904; \
float32x2_t __s0_904 = __p0_904; \
float16x4_t __s1_904 = __p1_904; \
float16x8_t __s2_904 = __p2_904; \
float32x2_t __rev0_904; __rev0_904 = __builtin_shufflevector(__s0_904, __s0_904, __lane_reverse_64_32); \
float16x4_t __rev1_904; __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, __lane_reverse_64_16); \
float16x8_t __rev2_904; __rev2_904 = __builtin_shufflevector(__s2_904, __s2_904, __lane_reverse_128_16); \
__ret_904 = __noswap_vfmlsl_low_f16(__rev0_904, __rev1_904, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_904, __p3_904), __noswap_vgetq_lane_f16(__rev2_904, __p3_904), __noswap_vgetq_lane_f16(__rev2_904, __p3_904), __noswap_vgetq_lane_f16(__rev2_904, __p3_904)}); \
__ret_904 = __builtin_shufflevector(__ret_904, __ret_904, __lane_reverse_64_32); \
__ret_904; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmulh_lane_f16(__p0_905, __p1_905, __p2_905) __extension__ ({ \
float16_t __ret_905; \
float16_t __s0_905 = __p0_905; \
float16x4_t __s1_905 = __p1_905; \
__ret_905 = __s0_905 * vget_lane_f16(__s1_905, __p2_905); \
__ret_905; \
})
#else
#define vmulh_lane_f16(__p0_906, __p1_906, __p2_906) __extension__ ({ \
float16_t __ret_906; \
float16_t __s0_906 = __p0_906; \
float16x4_t __s1_906 = __p1_906; \
float16x4_t __rev1_906; __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, __lane_reverse_64_16); \
__ret_906 = __s0_906 * __noswap_vget_lane_f16(__rev1_906, __p2_906); \
__ret_906; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vmulh_laneq_f16(__p0_907, __p1_907, __p2_907) __extension__ ({ \
float16_t __ret_907; \
float16_t __s0_907 = __p0_907; \
float16x8_t __s1_907 = __p1_907; \
__ret_907 = __s0_907 * vgetq_lane_f16(__s1_907, __p2_907); \
__ret_907; \
})
#else
#define vmulh_laneq_f16(__p0_908, __p1_908, __p2_908) __extension__ ({ \
float16_t __ret_908; \
float16_t __s0_908 = __p0_908; \
float16x8_t __s1_908 = __p1_908; \
float16x8_t __rev1_908; __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, __lane_reverse_128_16); \
__ret_908 = __s0_908 * __noswap_vgetq_lane_f16(__rev1_908, __p2_908); \
__ret_908; \
})
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint16x8_t __ret;
__ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint16x8_t __ret;
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
__ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint64x2_t __ret;
__ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint64x2_t __ret;
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
__ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint32x4_t __ret;
__ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint32x4_t __ret;
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
__ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
int16x8_t __ret;
__ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
int16x8_t __ret;
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
__ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
int64x2_t __ret;
__ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
int64x2_t __ret;
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
__ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
int32x4_t __ret;
__ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
int32x4_t __ret;
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
__ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint16x8_t __ret;
__ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
uint16x8_t __ret;
uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
__ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint64x2_t __ret;
__ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
uint64x2_t __ret;
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
__ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint32x4_t __ret;
__ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
uint32x4_t __ret;
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
__ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
int16x8_t __ret;
__ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
int16x8_t __ret;
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_8);
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
__ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
int64x2_t __ret;
__ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
int64x2_t __ret;
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
__ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
int32x4_t __ret;
__ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
int32x4_t __ret;
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
__ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
uint16x8_t __ret;
__ret = __p0 + vmovl_high_u8(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
uint16x8_t __ret;
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
__ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
uint64x2_t __ret;
__ret = __p0 + vmovl_high_u32(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
uint64x2_t __ret;
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
__ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
uint32x4_t __ret;
__ret = __p0 + vmovl_high_u16(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
uint32x4_t __ret;
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
__ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
int16x8_t __ret;
__ret = __p0 + vmovl_high_s8(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
int16x8_t __ret;
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
__ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
int64x2_t __ret;
__ret = __p0 + vmovl_high_s32(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
int64x2_t __ret;
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
__ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
int32x4_t __ret;
__ret = __p0 + vmovl_high_s16(__p1);
return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
int32x4_t __ret;
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
__ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
__ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_p64(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \
poly64x2_t __ret_909; \
poly64x2_t __s0_909 = __p0_909; \
poly64x1_t __s2_909 = __p2_909; \
__ret_909 = vsetq_lane_p64(vget_lane_p64(__s2_909, __p3_909), __s0_909, __p1_909); \
__ret_909; \
})
#else
#define vcopyq_lane_p64(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \
poly64x2_t __ret_910; \
poly64x2_t __s0_910 = __p0_910; \
poly64x1_t __s2_910 = __p2_910; \
poly64x2_t __rev0_910; __rev0_910 = __builtin_shufflevector(__s0_910, __s0_910, __lane_reverse_128_64); \
__ret_910 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_910, __p3_910), __rev0_910, __p1_910); \
__ret_910 = __builtin_shufflevector(__ret_910, __ret_910, __lane_reverse_128_64); \
__ret_910; \
})
#endif

#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_f64(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \
float64x2_t __ret_911; \
float64x2_t __s0_911 = __p0_911; \
float64x1_t __s2_911 = __p2_911; \
__ret_911 = vsetq_lane_f64(vget_lane_f64(__s2_911, __p3_911), __s0_911, __p1_911); \
__ret_911; \
})
#else
#define vcopyq_lane_f64(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \
float64x2_t __ret_912; \
float64x2_t __s0_912 = __p0_912; \
float64x1_t __s2_912 = __p2_912; \
float64x2_t __rev0_912; __rev0_912 = __builtin_shufflevector(__s0_912, __s0_912, __lane_reverse_128_64); \
__ret_912 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_912, __p3_912), __rev0_912, __p1_912); \
__ret_912 = __builtin_shufflevector(__ret_912, __ret_912, __lane_reverse_128_64); \
__ret_912; \
})
#endif

#define vcopy_lane_p64(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \
poly64x1_t __ret_913; \
poly64x1_t __s0_913 = __p0_913; \
poly64x1_t __s2_913 = __p2_913; \
__ret_913 = vset_lane_p64(vget_lane_p64(__s2_913, __p3_913), __s0_913, __p1_913); \
__ret_913; \
})
#define vcopy_lane_f64(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \
float64x1_t __ret_914; \
float64x1_t __s0_914 = __p0_914; \
float64x1_t __s2_914 = __p2_914; \
__ret_914 = vset_lane_f64(vget_lane_f64(__s2_914, __p3_914), __s0_914, __p1_914); \
__ret_914; \
})

#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_p64(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \
|
|
poly64x2_t __ret_915; \
|
|
poly64x2_t __s0_915 = __p0_915; \
|
|
poly64x2_t __s2_915 = __p2_915; \
|
|
__ret_915 = vsetq_lane_p64(vgetq_lane_p64(__s2_915, __p3_915), __s0_915, __p1_915); \
|
|
__ret_915; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_p64(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \
|
|
poly64x2_t __ret_916; \
|
|
poly64x2_t __s0_916 = __p0_916; \
|
|
poly64x2_t __s2_916 = __p2_916; \
|
|
poly64x2_t __rev0_916; __rev0_916 = __builtin_shufflevector(__s0_916, __s0_916, __lane_reverse_128_64); \
|
|
poly64x2_t __rev2_916; __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, __lane_reverse_128_64); \
|
|
__ret_916 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_916, __p3_916), __rev0_916, __p1_916); \
|
|
__ret_916 = __builtin_shufflevector(__ret_916, __ret_916, __lane_reverse_128_64); \
|
|
__ret_916; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopyq_laneq_f64(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \
|
|
float64x2_t __ret_917; \
|
|
float64x2_t __s0_917 = __p0_917; \
|
|
float64x2_t __s2_917 = __p2_917; \
|
|
__ret_917 = vsetq_lane_f64(vgetq_lane_f64(__s2_917, __p3_917), __s0_917, __p1_917); \
|
|
__ret_917; \
|
|
})
|
|
#else
|
|
#define vcopyq_laneq_f64(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \
|
|
float64x2_t __ret_918; \
|
|
float64x2_t __s0_918 = __p0_918; \
|
|
float64x2_t __s2_918 = __p2_918; \
|
|
float64x2_t __rev0_918; __rev0_918 = __builtin_shufflevector(__s0_918, __s0_918, __lane_reverse_128_64); \
|
|
float64x2_t __rev2_918; __rev2_918 = __builtin_shufflevector(__s2_918, __s2_918, __lane_reverse_128_64); \
|
|
__ret_918 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_918, __p3_918), __rev0_918, __p1_918); \
|
|
__ret_918 = __builtin_shufflevector(__ret_918, __ret_918, __lane_reverse_128_64); \
|
|
__ret_918; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_p64(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \
|
|
poly64x1_t __ret_919; \
|
|
poly64x1_t __s0_919 = __p0_919; \
|
|
poly64x2_t __s2_919 = __p2_919; \
|
|
__ret_919 = vset_lane_p64(vgetq_lane_p64(__s2_919, __p3_919), __s0_919, __p1_919); \
|
|
__ret_919; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_p64(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \
|
|
poly64x1_t __ret_920; \
|
|
poly64x1_t __s0_920 = __p0_920; \
|
|
poly64x2_t __s2_920 = __p2_920; \
|
|
poly64x2_t __rev2_920; __rev2_920 = __builtin_shufflevector(__s2_920, __s2_920, __lane_reverse_128_64); \
|
|
__ret_920 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_920, __p3_920), __s0_920, __p1_920); \
|
|
__ret_920; \
|
|
})
|
|
#endif
|
|
|
|
#ifdef __LITTLE_ENDIAN__
|
|
#define vcopy_laneq_f64(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \
|
|
float64x1_t __ret_921; \
|
|
float64x1_t __s0_921 = __p0_921; \
|
|
float64x2_t __s2_921 = __p2_921; \
|
|
__ret_921 = vset_lane_f64(vgetq_lane_f64(__s2_921, __p3_921), __s0_921, __p1_921); \
|
|
__ret_921; \
|
|
})
|
|
#else
|
|
#define vcopy_laneq_f64(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \
|
|
float64x1_t __ret_922; \
|
|
float64x1_t __s0_922 = __p0_922; \
|
|
float64x2_t __s2_922 = __p2_922; \
|
|
float64x2_t __rev2_922; __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, __lane_reverse_128_64); \
|
|
__ret_922 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_922, __p3_922), __s0_922, __p1_922); \
|
|
__ret_922; \
|
|
})
|
|
#endif
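/*
 * Illustrative note (not generated code): the vcopy*_lane*_p64/f64 macros
 * above copy one lane of the source vector into one lane of the destination,
 * leaving the destination's other lanes untouched.  Sketch with assumed
 * values:
 *
 *   float64x2_t dst = vcombine_f64(vdup_n_f64(1.0), vdup_n_f64(2.0)); // {1.0, 2.0}
 *   float64x2_t src = vcombine_f64(vdup_n_f64(3.0), vdup_n_f64(4.0)); // {3.0, 4.0}
 *   float64x2_t r   = vcopyq_laneq_f64(dst, 0, src, 1);               // {4.0, 2.0}
 */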
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint16x8_t __ret;
  __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
  __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint64x2_t __ret;
  __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint64x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  uint32x4_t __ret;
  __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
  int16x8_t __ret;
  __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
  __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int64x2_t __ret;
  __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int32x4_t __ret;
  __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif
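/*
 * Illustrative note (not generated code): vmlal_high_<t> is a widening
 * multiply-accumulate over the upper halves of its 128-bit sources:
 * ret[i] = p0[i] + (wide)p1[i + N/2] * (wide)p2[i + N/2].  Sketch:
 *
 *   int32x4_t acc = vdupq_n_s32(0);
 *   int16x8_t a = vdupq_n_s16(3), b = vdupq_n_s16(4);
 *   acc = vmlal_high_s16(acc, a, b);   // every lane of acc is now 12
 */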
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
  uint64x2_t __ret;
  __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
  uint64x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
  uint32x4_t __ret;
  __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
  int64x2_t __ret;
  __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
  int32x4_t __ret;
  __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif
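/*
 * Illustrative note (not generated code): the _n variants above multiply the
 * upper half of the vector operand by a scalar before accumulating.  Sketch:
 *
 *   uint64x2_t acc = vdupq_n_u64(0);
 *   uint32x4_t v   = vdupq_n_u32(5);
 *   acc = vmlal_high_n_u32(acc, v, 2u);   // every lane of acc is now 10
 */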
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint16x8_t __ret;
  __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
  __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint64x2_t __ret;
  __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint64x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  uint32x4_t __ret;
  __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
  int16x8_t __ret;
  __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
  __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int64x2_t __ret;
  __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int32x4_t __ret;
  __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
  uint64x2_t __ret;
  __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
  uint64x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
  uint32x4_t __ret;
  __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
  int64x2_t __ret;
  __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
  int32x4_t __ret;
  __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif
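/*
 * Illustrative note (not generated code): vmlsl_high_<t> and vmlsl_high_n_<t>
 * mirror the vmlal_high_* definitions above but subtract the widened product
 * from the accumulator.  Sketch:
 *
 *   int64x2_t acc = vdupq_n_s64(100);
 *   int32x4_t a = vdupq_n_s32(6), b = vdupq_n_s32(7);
 *   acc = vmlsl_high_s32(acc, a, b);   // every lane of acc is now 100 - 42 = 58
 */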
#define vmulx_lane_f64(__p0_923, __p1_923, __p2_923) __extension__ ({ \
  float64x1_t __ret_923; \
  float64x1_t __s0_923 = __p0_923; \
  float64x1_t __s1_923 = __p1_923; \
  float64_t __x_923 = vget_lane_f64(__s0_923, 0); \
  float64_t __y_923 = vget_lane_f64(__s1_923, __p2_923); \
  float64_t __z_923 = vmulxd_f64(__x_923, __y_923); \
  __ret_923 = vset_lane_f64(__z_923, __s0_923, __p2_923); \
  __ret_923; \
})
#ifdef __LITTLE_ENDIAN__
#define vmulx_laneq_f64(__p0_924, __p1_924, __p2_924) __extension__ ({ \
  float64x1_t __ret_924; \
  float64x1_t __s0_924 = __p0_924; \
  float64x2_t __s1_924 = __p1_924; \
  float64_t __x_924 = vget_lane_f64(__s0_924, 0); \
  float64_t __y_924 = vgetq_lane_f64(__s1_924, __p2_924); \
  float64_t __z_924 = vmulxd_f64(__x_924, __y_924); \
  __ret_924 = vset_lane_f64(__z_924, __s0_924, 0); \
  __ret_924; \
})
#else
#define vmulx_laneq_f64(__p0_925, __p1_925, __p2_925) __extension__ ({ \
  float64x1_t __ret_925; \
  float64x1_t __s0_925 = __p0_925; \
  float64x2_t __s1_925 = __p1_925; \
  float64x2_t __rev1_925; __rev1_925 = __builtin_shufflevector(__s1_925, __s1_925, __lane_reverse_128_64); \
  float64_t __x_925 = vget_lane_f64(__s0_925, 0); \
  float64_t __y_925 = __noswap_vgetq_lane_f64(__rev1_925, __p2_925); \
  float64_t __z_925 = vmulxd_f64(__x_925, __y_925); \
  __ret_925 = vset_lane_f64(__z_925, __s0_925, 0); \
  __ret_925; \
})
#endif
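/*
 * Illustrative note (not generated code): vmulx_lane_f64/vmulx_laneq_f64
 * multiply the single lane of the first operand by the selected lane of the
 * second via the scalar vmulxd_f64 primitive.  Sketch:
 *
 *   float64x1_t a = vdup_n_f64(2.0);
 *   float64x2_t b = vcombine_f64(vdup_n_f64(3.0), vdup_n_f64(5.0));
 *   float64x1_t r = vmulx_laneq_f64(a, b, 1);   // vget_lane_f64(r, 0) == 10.0
 */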
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint16x8_t __ret;
  __ret = __p0 + vabdl_u8(__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
  __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  uint16x8_t __ret;
  __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  uint64x2_t __ret;
  __ret = __p0 + vabdl_u32(__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  uint64x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
__ai __attribute__((target("neon"))) uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  uint64x2_t __ret;
  __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  uint32x4_t __ret;
  __ret = __p0 + vabdl_u16(__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  uint32x4_t __ret;
  __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  int16x8_t __ret;
  __ret = __p0 + vabdl_s8(__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_8);
  int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_8);
  __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
__ai __attribute__((target("neon"))) int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  int16x8_t __ret;
  __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int64x2_t __ret;
  __ret = __p0 + vabdl_s32(__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_32);
  int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_32);
  __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
__ai __attribute__((target("neon"))) int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int64x2_t __ret;
  __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int32x4_t __ret;
  __ret = __p0 + vabdl_s16(__p1, __p2);
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_64_16);
  int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_64_16);
  __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
__ai __attribute__((target("neon"))) int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int32x4_t __ret;
  __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
  return __ret;
}
#endif
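/*
 * Illustrative note (not generated code): vabal_<t> accumulates the widened
 * absolute difference of two narrow vectors, ret[i] = p0[i] + |p1[i] - p2[i]|;
 * the __noswap_vabal_* helpers are reused by the big-endian vabal_high_*
 * wrappers below.  Sketch:
 *
 *   uint16x8_t acc = vdupq_n_u16(0);
 *   uint8x8_t a = vdup_n_u8(10), b = vdup_n_u8(13);
 *   acc = vabal_u8(acc, a, b);   // every lane of acc is now 3
 */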
#if defined(__aarch64__) || defined(__arm64ec__)
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint16x8_t __ret;
  __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  uint16x8_t __ret;
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
  __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint64x2_t __ret;
  __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  uint64x2_t __ret;
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  uint32x4_t __ret;
  __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  uint32x4_t __ret;
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
  int16x8_t __ret;
  __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
  int16x8_t __ret;
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_16);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_8);
  int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_8);
  __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_16);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int64x2_t __ret;
  __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  int64x2_t __ret;
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_64);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_32);
  int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_32);
  __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_64);
  return __ret;
}
#endif

#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("neon"))) int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int32x4_t __ret;
  __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
  return __ret;
}
#else
__ai __attribute__((target("neon"))) int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  int32x4_t __ret;
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, __lane_reverse_128_32);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, __lane_reverse_128_16);
  int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, __lane_reverse_128_16);
  __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
  __ret = __builtin_shufflevector(__ret, __ret, __lane_reverse_128_32);
  return __ret;
}
#endif
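/*
 * Illustrative note (not generated code): the AArch64-only vabal_high_<t>
 * variants perform the same accumulation on the upper halves of 128-bit
 * inputs by composing vget_high_<t> with vabal_<t>.  Sketch:
 *
 *   int32x4_t acc = vdupq_n_s32(0);
 *   int16x8_t a = vdupq_n_s16(-2), b = vdupq_n_s16(5);
 *   acc = vabal_high_s16(acc, a, b);   // every lane of acc is now 7
 */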
#endif

#undef __ai
#endif /* if !defined(__arm__) && !defined(__aarch64__) && !defined(__arm64ec__) */
#endif /* ifndef __ARM_NEON_H */