#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_ABIS_H_
#define _GLIBCXX_EXPERIMENTAL_SIMD_ABIS_H_

#if __cplusplus >= 201703L

_GLIBCXX_SIMD_BEGIN_NAMESPACE
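// _S_allbits: a vector constant with every bit set, obtained by
// complementing a zero-initialized char vector of the same byte size.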
template <typename _V>
  static inline _GLIBCXX_SIMD_USE_CONSTEXPR _V _S_allbits
    = reinterpret_cast<_V>(~__vector_type_t<char, sizeof(_V) / sizeof(char)>());
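// _S_signmask / _S_absmask: for floating-point element types, +1 and -1
// differ exactly in the sign bit, so their XOR selects the sign bit;
// _S_absmask is its complement.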
template <typename _V, typename = _VectorTraits<_V>>
  static inline _GLIBCXX_SIMD_USE_CONSTEXPR _V _S_signmask
    = __xor(_V() + 1, _V() - 1);
template <typename _V, typename = _VectorTraits<_V>>
  static inline _GLIBCXX_SIMD_USE_CONSTEXPR _V _S_absmask
    = __andnot(_S_signmask<_V>, _S_allbits<_V>);
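// __vector_permute<_Indices...>(__x): build a vector whose element i is
// __x[_Indices[i]]; an index of -1 yields a zero element. For example (a
// hypothetical use), __vector_permute<3, 2, 1, 0>(__x) reverses a
// four-element vector.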
template <int... _Indices, typename _Tp, typename _TVT = _VectorTraits<_Tp>,
          typename = __detail::__odr_helper>
  constexpr _Tp
  __vector_permute(_Tp __x)
  {
    static_assert(sizeof...(_Indices) == _TVT::_S_full_size);
    return __make_vector<typename _TVT::value_type>(
             (_Indices == -1 ? 0 : __x[_Indices == -1 ? 0 : _Indices])...);
  }
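// __vector_shuffle<_Indices...>(__x, __y): like __vector_permute, but
// indices in [_S_full_size, 2 * _S_full_size) select from the second
// operand; -1 again yields a zero element.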
template <int... _Indices, typename _Tp, typename _TVT = _VectorTraits<_Tp>,
          typename = __detail::__odr_helper>
  constexpr _Tp
  __vector_shuffle(_Tp __x, _Tp __y)
  {
    return _Tp{(_Indices == -1 ? 0
                : _Indices < _TVT::_S_full_size
                    ? __x[_Indices]
                    : __y[_Indices - _TVT::_S_full_size])...};
  }
template <typename _Tp, typename... _Args>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<_Tp, sizeof...(_Args)>
  __make_wrapper(const _Args&... __args)
  { return __make_vector<_Tp>(__args...); }
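// __wrapper_bitcast: reinterpret a _SimdWrapper<_Up, _M> as
// _SimdWrapper<_Tp, _Np>, with _Np deduced from the byte sizes unless _ToN
// is given explicitly.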
template <typename _Tp, size_t _ToN = 0, typename _Up, size_t _M,
          size_t _Np = _ToN != 0 ? _ToN : sizeof(_Up) * _M / sizeof(_Tp)>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<_Tp, _Np>
  __wrapper_bitcast(_SimdWrapper<_Up, _M> __x)
  {
    static_assert(_Np > 1);
    return __intrin_bitcast<__vector_type_t<_Tp, _Np>>(__x._M_data);
  }
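// __shift_elements_right<__shift>(__v): shift the vector register contents
// right by __shift *bytes*, filling with zeros (this is a whole-register
// byte shift, not a per-element arithmetic shift). The generic fallback at
// the end works in the largest integer chunks that divide __shift.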
template <unsigned __shift, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _GLIBCXX_SIMD_INTRINSIC _Tp
  __shift_elements_right(_Tp __v)
  {
    [[maybe_unused]] const auto __iv = __to_intrin(__v);
    static_assert(__shift <= sizeof(_Tp));
    if constexpr (__shift == 0)
      return __v;
    else if constexpr (__shift == sizeof(_Tp))
      return _Tp();
#if _GLIBCXX_SIMD_X86INTRIN
    else if constexpr (__have_sse && __shift == 8
                       && _TVT::template _S_is<float, 4>)
      return _mm_movehl_ps(__iv, __iv);
    else if constexpr (__have_sse2 && __shift == 8
                       && _TVT::template _S_is<double, 2>)
      return _mm_unpackhi_pd(__iv, __iv);
    else if constexpr (__have_sse2 && sizeof(_Tp) == 16)
      return reinterpret_cast<typename _TVT::type>(
               _mm_srli_si128(reinterpret_cast<__m128i>(__iv), __shift));
    else if constexpr (__shift == 16 && sizeof(_Tp) == 32)
      return __zero_extend(__hi128(__v));
    else if constexpr (__have_avx2 && sizeof(_Tp) == 32 && __shift < 16)
      {
        const auto __vll = __vector_bitcast<_LLong>(__v);
        return reinterpret_cast<typename _TVT::type>(
                 _mm256_alignr_epi8(_mm256_permute2x128_si256(__vll, __vll,
                                                              0x81),
                                    __vll, __shift));
      }
    else if constexpr (__have_avx && sizeof(_Tp) == 32 && __shift < 16)
      {
        const auto __vll = __vector_bitcast<_LLong>(__v);
        return reinterpret_cast<typename _TVT::type>(
                 __concat(_mm_alignr_epi8(__hi128(__vll), __lo128(__vll),
                                          __shift),
                          _mm_srli_si128(__hi128(__vll), __shift)));
      }
    else if constexpr (sizeof(_Tp) == 32 && __shift > 16)
      return __zero_extend(__shift_elements_right<__shift - 16>(__hi128(__v)));
    else if constexpr (sizeof(_Tp) == 64 && __shift == 32)
      return __zero_extend(__hi256(__v));
    else if constexpr (__have_avx512f && sizeof(_Tp) == 64)
      {
        if constexpr (__shift >= 48)
          return __zero_extend(
                   __shift_elements_right<__shift - 48>(__extract<3, 4>(__v)));
        else if constexpr (__shift >= 32)
          return __zero_extend(
                   __shift_elements_right<__shift - 32>(__hi256(__v)));
        else if constexpr (__shift % 8 == 0)
          return reinterpret_cast<typename _TVT::type>(
                   _mm512_alignr_epi64(__m512i(), __intrin_bitcast<__m512i>(__v),
                                       __shift / 8));
        else if constexpr (__shift % 4 == 0)
          return reinterpret_cast<typename _TVT::type>(
                   _mm512_alignr_epi32(__m512i(), __intrin_bitcast<__m512i>(__v),
                                       __shift / 4));
        else if constexpr (__have_avx512bw && __shift < 16)
          {
            const auto __vll = __vector_bitcast<_LLong>(__v);
            return reinterpret_cast<typename _TVT::type>(
                     _mm512_alignr_epi8(_mm512_shuffle_i32x4(__vll, __vll, 0xf9),
                                        __vll, __shift));
          }
        else if constexpr (__have_avx512bw && __shift < 32)
          {
            const auto __vll = __vector_bitcast<_LLong>(__v);
            return reinterpret_cast<typename _TVT::type>(
                     _mm512_alignr_epi8(_mm512_shuffle_i32x4(__vll, __m512i(), 0xee),
                                        _mm512_shuffle_i32x4(__vll, __vll, 0xf9),
                                        __shift - 16));
          }
        else
          __assert_unreachable<_Tp>();
      }
#endif // _GLIBCXX_SIMD_X86INTRIN
    else
      {
        constexpr int __chunksize = __shift % 8 == 0   ? 8
                                    : __shift % 4 == 0 ? 4
                                    : __shift % 2 == 0 ? 2
                                                       : 1;
        auto __w = __vector_bitcast<__int_with_sizeof_t<__chunksize>>(__v);
        using _Up = decltype(__w);
        return __intrin_bitcast<_Tp>(
                 __call_with_n_evaluations<(sizeof(_Tp) - __shift) / __chunksize>(
                   [](auto... __chunks) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                     return _Up{__chunks...};
                   }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                     return __w[__shift / __chunksize + __i];
                   }));
      }
  }
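// __extract_part<_Index, _Total, _Combine>(__x): extract _Combine
// consecutive parts, starting at part _Index, out of _Total equally sized
// parts of __x.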
template <int _Index, int _Total, int _Combine, typename _Tp, size_t _Np>
  _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST constexpr
  _SimdWrapper<_Tp, _Np / _Total * _Combine>
  __extract_part(const _SimdWrapper<_Tp, _Np> __x)
  {
    if constexpr (_Index % 2 == 0 && _Total % 2 == 0 && _Combine % 2 == 0)
      return __extract_part<_Index / 2, _Total / 2, _Combine / 2>(__x);
    else
      {
        constexpr size_t __values_per_part = _Np / _Total;
        constexpr size_t __values_to_skip = _Index * __values_per_part;
        constexpr size_t __return_size = __values_per_part * _Combine;
        using _R = __vector_type_t<_Tp, __return_size>;
        static_assert((_Index + _Combine) * __values_per_part * sizeof(_Tp)
                        <= sizeof(__x),
                      "out of bounds __extract_part");

        if (__x._M_is_constprop())
          return __generate_from_n_evaluations<__return_size, _R>(
                   [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                     return __x[__values_to_skip + __i];
                   });
        if constexpr (_Index == 0 && _Total == 1)
          return __x;
        else if constexpr (_Index == 0)
          return __intrin_bitcast<_R>(__as_vector(__x));
#if _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (sizeof(__x) == 32
                           && __return_size * sizeof(_Tp) <= 16)
          {
            constexpr size_t __bytes_to_skip = __values_to_skip * sizeof(_Tp);
            if constexpr (__bytes_to_skip == 16)
              return __vector_bitcast<_Tp, __return_size>(
                       __hi128(__as_vector(__x)));
            else
              return __vector_bitcast<_Tp, __return_size>(
                       _mm_alignr_epi8(__hi128(__vector_bitcast<_LLong>(__x)),
                                       __lo128(__vector_bitcast<_LLong>(__x)),
                                       __bytes_to_skip));
          }
#endif // _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (_Index > 0
                           && (__values_to_skip % __return_size != 0
                               || sizeof(_R) >= 8)
                           && (__values_to_skip + __return_size) * sizeof(_Tp)
                                <= 64
                           && sizeof(__x) >= 16)
          return __intrin_bitcast<_R>(
                   __shift_elements_right<__values_to_skip * sizeof(_Tp)>(
                     __as_vector(__x)));
        else
          {
            _R __r = {};
            __builtin_memcpy(&__r,
                             reinterpret_cast<const char*>(&__x)
                               + sizeof(_Tp) * __values_to_skip,
                             __return_size * sizeof(_Tp));
            return __r;
          }
      }
  }
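// Overload of __extract_part for the bitmask (bool) representation: the
// extracted part is simply a shifted bit range.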
template <int _Index, int _Total, int _Combine = 1, size_t _Np>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<bool, _Np / _Total * _Combine>
  __extract_part(const _SimdWrapper<bool, _Np> __x)
  {
    static_assert(_Combine == 1, "_Combine != 1 not implemented");
    static_assert(__have_avx512f && _Np == _Np);
    static_assert(_Total >= 2 && _Index + _Combine <= _Total && _Index >= 0);
    return __x._M_data >> (_Index * _Np / _Total);
  }
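// __vector_convert: convert one or more input vectors to a vector of _To,
// element by element, under control of an index_sequence. One overload per
// argument count, up to 16 inputs.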
template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, _From __n,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])..., static_cast<_Tp>(__n[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, _From __n, _From __o,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])..., static_cast<_Tp>(__n[_I])...,
               static_cast<_Tp>(__o[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, _From __n, _From __o,
                   _From __p, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])..., static_cast<_Tp>(__n[_I])...,
               static_cast<_Tp>(__o[_I])..., static_cast<_Tp>(__p[_I])...};
  }
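// Entry point taking _SimdWrapper arguments. With the PR85048 workaround
// enabled, full (non-partial) inputs are dispatched to __convert_x86; the
// argument list is padded with zero vectors until its length is a power of
// two, as that dispatch expects.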
template <typename _To, typename... _From, size_t _FromSize>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_SimdWrapper<_From, _FromSize>... __xs)
  {
#ifdef _GLIBCXX_SIMD_WORKAROUND_PR85048
    using _From0 = __first_of_pack_t<_From...>;
    using _FW = _SimdWrapper<_From0, _FromSize>;
    if (!_FW::_S_is_partial && !(... && __xs._M_is_constprop()))
      {
        if constexpr ((sizeof...(_From) & (sizeof...(_From) - 1))
                      == 0) // power-of-two number of arguments
          return __convert_x86<_To>(__as_vector(__xs)...);
        else // append zeros and recurse until the above branch is taken
          return __vector_convert<_To>(__xs..., _FW{});
      }
    else
#endif
      return __vector_convert<_To>(
               __as_vector(__xs)...,
               make_index_sequence<(sizeof...(__xs) == 1
                                      ? std::min(_VectorTraits<_To>::_S_full_size,
                                                 int(_FromSize))
                                      : _FromSize)>());
  }
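// __convert: conversion helper that dispatches on whether the arguments are
// scalars, builtin vectors, or _SimdWrapper objects, and requires all inputs
// to fit into the output vector.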
template <typename _To, typename _From, typename... _More>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __convert(_From __v0, _More... __vs)
  {
    static_assert((true && ... && is_same_v<_From, _More>));
    if constexpr (__is_vectorizable_v<_From>)
      {
        using _V = typename _VectorTraits<_To>::type;
        using _Tp = typename _VectorTraits<_To>::value_type;
        return _V{static_cast<_Tp>(__v0), static_cast<_Tp>(__vs)...};
      }
    else if constexpr (__is_vector_type_v<_From>)
      return __convert<_To>(__as_wrapper(__v0), __as_wrapper(__vs)...);
    else // _SimdWrapper arguments
      {
        constexpr size_t __input_size = _From::_S_size * (1 + sizeof...(_More));
        if constexpr (__is_vectorizable_v<_To>)
          return __convert<__vector_type_t<_To, __input_size>>(__v0, __vs...);
        else if constexpr (!__is_vector_type_v<_To>)
          return _To(__convert<typename _To::_BuiltinType>(__v0, __vs...));
        else
          {
            static_assert(
              sizeof...(_More) == 0
                || _VectorTraits<_To>::_S_full_size >= __input_size,
              "__convert(...) requires the input to fit into the output");
            return __vector_convert<_To>(__v0, __vs...);
          }
      }
  }
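// __convert_all: convert __v into array<_To, _Np>, where _Np is _NParts if
// non-zero, or otherwise deduced from _To such that _Np * #elements(_To)
// <= #elements(__v). _Offset is the number of input elements to skip. Note
// that this function may return fewer than all converted elements. For
// instance, converting 16 int8 elements to a 16-byte float vector type
// would yield an array of four 4-float vectors.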
template <typename _To,
          size_t _NParts = 0, // allows converting fewer or more (only the
                              // last _To partially filled) than all
          size_t _Offset = 0, // where to start, in # of elements
          typename _From, typename _FromVT = _VectorTraits<_From>>
  _GLIBCXX_SIMD_INTRINSIC auto
  __convert_all(_From __v)
  {
    if constexpr (is_arithmetic_v<_To> && _NParts != 1)
      {
        static_assert(_Offset < _FromVT::_S_full_size);
        constexpr auto _Np
          = _NParts == 0 ? _FromVT::_S_partial_width - _Offset : _NParts;
        return __generate_from_n_evaluations<_Np, array<_To, _Np>>(
                 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                   return static_cast<_To>(__v[__i + _Offset]);
                 });
      }
    else
      {
        static_assert(__is_vector_type_v<_To>);
        using _ToVT = _VectorTraits<_To>;
        if constexpr (__is_vector_type_v<_From>)
          return __convert_all<_To, _NParts>(__as_wrapper(__v));
        else if constexpr (_NParts == 1)
          {
            static_assert(_Offset % _ToVT::_S_full_size == 0);
            return array<_To, 1>{__vector_convert<_To>(
                     __extract_part<_Offset / _ToVT::_S_full_size,
                                    __div_roundup(_FromVT::_S_partial_width,
                                                  _ToVT::_S_full_size)>(__v))};
          }
#if _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (!__have_sse4_1 && _Offset == 0
                           && is_integral_v<typename _FromVT::value_type>
                           && sizeof(typename _FromVT::value_type)
                                < sizeof(typename _ToVT::value_type)
                           && !(sizeof(typename _FromVT::value_type) == 4
                                && is_same_v<typename _ToVT::value_type,
                                             double>))
          {
            using _ToT = typename _ToVT::value_type;
            using _FromT = typename _FromVT::value_type;
            constexpr size_t _Np
              = _NParts != 0
                  ? _NParts
                  : (_FromVT::_S_partial_width / _ToVT::_S_full_size);
            using _R = array<_To, _Np>;
            // __adjust resizes its input (to the _SizeConstant __n) so that
            // no intermediate conversions are missing or superfluous
            [[maybe_unused]] auto __adjust
              = [](auto __n,
                   auto __vv) -> _SimdWrapper<_FromT, decltype(__n)::value> {
                return __vector_bitcast<_FromT, decltype(__n)::value>(__vv);
              };
            [[maybe_unused]] const auto __vi = __to_intrin(__v);
            auto&& __make_array
              = [](auto __x0, [[maybe_unused]] auto __x1)
                  _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                if constexpr (_Np == 1)
                  return _R{__intrin_bitcast<_To>(__x0)};
                else
                  return _R{__intrin_bitcast<_To>(__x0),
                            __intrin_bitcast<_To>(__x1)};
              };

            if constexpr (_Np == 0)
              return _R{};
            else if constexpr (sizeof(_FromT) == 1 && sizeof(_ToT) == 2)
              {
                static_assert(is_integral_v<_FromT>);
                static_assert(is_integral_v<_ToT>);
                if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi8(__vi, __m128i()),
                                      _mm_unpackhi_epi8(__vi, __m128i()));
                else
                  return __make_array(
                           _mm_srai_epi16(_mm_unpacklo_epi8(__vi, __vi), 8),
                           _mm_srai_epi16(_mm_unpackhi_epi8(__vi, __vi), 8));
              }
            else if constexpr (sizeof(_FromT) == 2 && sizeof(_ToT) == 4)
              {
                static_assert(is_integral_v<_FromT>);
                if constexpr (is_floating_point_v<_ToT>)
                  {
                    const auto __ints
                      = __convert_all<__vector_type16_t<int>, _Np>(
                          __adjust(_SizeConstant<_Np * 4>(), __v));
                    return __generate_from_n_evaluations<_Np, _R>(
                             [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                               return __vector_convert<_To>(__as_wrapper(__ints[__i]));
                             });
                  }
                else if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi16(__vi, __m128i()),
                                      _mm_unpackhi_epi16(__vi, __m128i()));
                else
                  return __make_array(
                           _mm_srai_epi32(_mm_unpacklo_epi16(__vi, __vi), 16),
                           _mm_srai_epi32(_mm_unpackhi_epi16(__vi, __vi), 16));
              }
            else if constexpr (sizeof(_FromT) == 4 && sizeof(_ToT) == 8
                               && is_integral_v<_FromT> && is_integral_v<_ToT>)
              {
                if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi32(__vi, __m128i()),
                                      _mm_unpackhi_epi32(__vi, __m128i()));
                else
                  return __make_array(
                           _mm_unpacklo_epi32(__vi, _mm_srai_epi32(__vi, 31)),
                           _mm_unpackhi_epi32(__vi, _mm_srai_epi32(__vi, 31)));
              }
            else if constexpr (sizeof(_FromT) == 1 && sizeof(_ToT) >= 4
                               && is_signed_v<_FromT>)
              {
                const __m128i __vv[2] = {_mm_unpacklo_epi8(__vi, __vi),
                                         _mm_unpackhi_epi8(__vi, __vi)};
                const __vector_type_t<int, 4> __vvvv[4] = {
                  __vector_bitcast<int>(_mm_unpacklo_epi16(__vv[0], __vv[0])),
                  __vector_bitcast<int>(_mm_unpackhi_epi16(__vv[0], __vv[0])),
                  __vector_bitcast<int>(_mm_unpacklo_epi16(__vv[1], __vv[1])),
                  __vector_bitcast<int>(_mm_unpackhi_epi16(__vv[1], __vv[1]))};
                if constexpr (sizeof(_ToT) == 4)
                  return __generate_from_n_evaluations<_Np, _R>(
                           [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                             return __vector_convert<_To>(
                                      _SimdWrapper<int, 4>(__vvvv[__i] >> 24));
                           });
                else if constexpr (is_integral_v<_ToT>)
                  return __generate_from_n_evaluations<_Np, _R>(
                           [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                             const auto __signbits = __to_intrin(__vvvv[__i / 2] >> 31);
                             const auto __sx32 = __to_intrin(__vvvv[__i / 2] >> 24);
                             return __vector_bitcast<_ToT>(
                                      __i % 2 == 0
                                        ? _mm_unpacklo_epi32(__sx32, __signbits)
                                        : _mm_unpackhi_epi32(__sx32, __signbits));
                           });
                else
                  return __generate_from_n_evaluations<_Np, _R>(
                           [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                             const _SimdWrapper<int, 4> __int4 = __vvvv[__i / 2] >> 24;
                             return __vector_convert<_To>(
                                      __i % 2 == 0 ? __int4
                                                   : _SimdWrapper<int, 4>(
                                                       _mm_unpackhi_epi64(__to_intrin(__int4),
                                                                          __to_intrin(__int4))));
                           });
              }
            else if constexpr (sizeof(_FromT) == 1 && sizeof(_ToT) == 4)
              {
                const auto __shorts = __convert_all<__vector_type16_t<
                  conditional_t<is_signed_v<_FromT>, short, unsigned short>>>(
                    __adjust(_SizeConstant<(_Np + 1) / 2 * 8>(), __v));
                return __generate_from_n_evaluations<_Np, _R>(
                         [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                           return __convert_all<_To>(__shorts[__i / 2])[__i % 2];
                         });
              }
            else if constexpr (sizeof(_FromT) == 2 && sizeof(_ToT) == 8
                               && is_signed_v<_FromT> && is_integral_v<_ToT>)
              {
                const __m128i __vv[2] = {_mm_unpacklo_epi16(__vi, __vi),
                                         _mm_unpackhi_epi16(__vi, __vi)};
                const __vector_type16_t<int> __vvvv[4]
                  = {__vector_bitcast<int>(
                       _mm_unpacklo_epi32(_mm_srai_epi32(__vv[0], 16),
                                          _mm_srai_epi32(__vv[0], 31))),
                     __vector_bitcast<int>(
                       _mm_unpackhi_epi32(_mm_srai_epi32(__vv[0], 16),
                                          _mm_srai_epi32(__vv[0], 31))),
                     __vector_bitcast<int>(
                       _mm_unpacklo_epi32(_mm_srai_epi32(__vv[1], 16),
                                          _mm_srai_epi32(__vv[1], 31))),
                     __vector_bitcast<int>(
                       _mm_unpackhi_epi32(_mm_srai_epi32(__vv[1], 16),
                                          _mm_srai_epi32(__vv[1], 31)))};
                return __generate_from_n_evaluations<_Np, _R>(
                         [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                           return __vector_bitcast<_ToT>(__vvvv[__i]);
                         });
              }
            else if constexpr (sizeof(_FromT) <= 2 && sizeof(_ToT) == 8)
              {
                const auto __ints
                  = __convert_all<__vector_type16_t<conditional_t<
                      is_signed_v<_FromT> || is_floating_point_v<_ToT>, int,
                      unsigned int>>>(
                      __adjust(_SizeConstant<(_Np + 1) / 2 * 4>(), __v));
                return __generate_from_n_evaluations<_Np, _R>(
                         [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                           return __convert_all<_To>(__ints[__i / 2])[__i % 2];
                         });
              }
            else
              __assert_unreachable<_To>();
          }
#endif // _GLIBCXX_SIMD_X86INTRIN
        else if constexpr ((_FromVT::_S_partial_width - _Offset)
                             > _ToVT::_S_full_size)
          {
            constexpr size_t _NTotal
              = (_FromVT::_S_partial_width - _Offset) / _ToVT::_S_full_size;
            constexpr size_t _Np = _NParts == 0 ? _NTotal : _NParts;
            static_assert(_Np <= _NTotal
                            || (_Np == _NTotal + 1
                                && (_FromVT::_S_partial_width - _Offset)
                                     % _ToVT::_S_full_size > 0));
            using _R = array<_To, _Np>;
            if constexpr (_Np == 1)
              return _R{__vector_convert<_To>(
                        __extract_part<_Offset, _FromVT::_S_partial_width,
                                       _ToVT::_S_full_size>(__v))};
            else
              return __generate_from_n_evaluations<_Np, _R>(
                       [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                         auto __part
                           = __extract_part<__i * _ToVT::_S_full_size + _Offset,
                                            _FromVT::_S_partial_width,
                                            _ToVT::_S_full_size>(__v);
                         return __vector_convert<_To>(__part);
                       });
          }
        else if constexpr (_Offset == 0)
          return array<_To, 1>{__vector_convert<_To>(__v)};
        else
          return array<_To, 1>{__vector_convert<_To>(
                   __extract_part<_Offset, _FromVT::_S_partial_width,
                                  _FromVT::_S_partial_width - _Offset>(__v))};
      }
  }
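// _GnuTraits: the ABI-specific traits class used by simd and simd_mask. It
// provides the member/storage types, alignment constants, and the base and
// cast types that enable conversions to the underlying intrinsic and
// builtin vector types.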
template <typename _Tp, typename _Mp, typename _Abi, size_t _Np>
  struct _GnuTraits
  {
    using _IsValid = true_type;
    using _SimdImpl = typename _Abi::_SimdImpl;
    using _MaskImpl = typename _Abi::_MaskImpl;

    // simd and simd_mask member types
    using _SimdMember = _SimdWrapper<_Tp, _Np>;
    using _MaskMember = _SimdWrapper<_Mp, _Np>;
    static constexpr size_t _S_simd_align = alignof(_SimdMember);
    static constexpr size_t _S_mask_align = alignof(_MaskMember);

    // size metadata
    static constexpr size_t _S_full_size = _SimdMember::_S_full_size;
    static constexpr bool _S_is_partial = _SimdMember::_S_is_partial;

    // base class for simd, providing extra conversions
    struct _SimdBase2
    {
      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return __to_intrin(static_cast<const simd<_Tp, _Abi>*>(this)->_M_data); }

      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __vector_type_t<_Tp, _Np>() const
      { return static_cast<const simd<_Tp, _Abi>*>(this)->_M_data.__builtin(); }
    };

    struct _SimdBase1
    {
      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return __data(*static_cast<const simd<_Tp, _Abi>*>(this)); }
    };

    using _SimdBase = conditional_t<
      is_same<__intrinsic_type_t<_Tp, _Np>, __vector_type_t<_Tp, _Np>>::value,
      _SimdBase1, _SimdBase2>;

    // base class for simd_mask, providing extra conversions
    struct _MaskBase2
    {
      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return static_cast<const simd_mask<_Tp, _Abi>*>(this)->_M_data.__intrin(); }

      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __vector_type_t<_Tp, _Np>() const
      { return static_cast<const simd_mask<_Tp, _Abi>*>(this)->_M_data._M_data; }
    };

    struct _MaskBase1
    {
      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return __data(*static_cast<const simd_mask<_Tp, _Abi>*>(this)); }
    };

    using _MaskBase = conditional_t<
      is_same<__intrinsic_type_t<_Tp, _Np>, __vector_type_t<_Tp, _Np>>::value,
      _MaskBase1, _MaskBase2>;

    // parameter type of one explicit simd_mask constructor
    class _MaskCastType
    {
      using _Up = __intrinsic_type_t<_Tp, _Np>;

      _Up _M_data;

    public:
      _GLIBCXX_SIMD_ALWAYS_INLINE
      _MaskCastType(_Up __x) : _M_data(__x) {}

      _GLIBCXX_SIMD_ALWAYS_INLINE
      operator _MaskMember() const { return _M_data; }
    };

    // parameter type of one explicit simd constructor
    class _SimdCastType1
    {
      using _Ap = __intrinsic_type_t<_Tp, _Np>;

      _SimdMember _M_data;

    public:
      _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
      _SimdCastType1(_Ap __a) : _M_data(__vector_bitcast<_Tp>(__a)) {}

      _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
      operator _SimdMember() const { return _M_data; }
    };

    class _SimdCastType2
    {
      using _Ap = __intrinsic_type_t<_Tp, _Np>;
      using _Bp = __vector_type_t<_Tp, _Np>;

      _SimdMember _M_data;

    public:
      _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
      _SimdCastType2(_Ap __a) : _M_data(__vector_bitcast<_Tp>(__a)) {}

      _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
      _SimdCastType2(_Bp __b) : _M_data(__b) {}

      _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
      operator _SimdMember() const { return _M_data; }
    };

    using _SimdCastType = conditional_t<
      is_same<__intrinsic_type_t<_Tp, _Np>, __vector_type_t<_Tp, _Np>>::value,
      _SimdCastType1, _SimdCastType2>;
  };
struct _CommonImplX86;
struct _CommonImplNeon;
struct _CommonImplBuiltin;

template <typename _Abi, typename = __detail::__odr_helper> struct _SimdImplBuiltin;
template <typename _Abi, typename = __detail::__odr_helper> struct _MaskImplBuiltin;
template <typename _Abi, typename = __detail::__odr_helper> struct _SimdImplX86;
template <typename _Abi, typename = __detail::__odr_helper> struct _MaskImplX86;
template <typename _Abi, typename = __detail::__odr_helper> struct _SimdImplNeon;
template <typename _Abi, typename = __detail::__odr_helper> struct _MaskImplNeon;
template <typename _Abi, typename = __detail::__odr_helper> struct _SimdImplPpc;
template <typename _Abi, typename = __detail::__odr_helper> struct _MaskImplPpc;
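// simd_abi::_VecBuiltin: ABI tag for SIMD types stored in a GCC builtin
// vector of _UsedBytes bytes, using vector (not bitmask) masks.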
template <int _UsedBytes>
  struct simd_abi::_VecBuiltin
  {
    template <typename _Tp>
      static constexpr size_t _S_size = _UsedBytes / sizeof(_Tp);

    // validity traits
    struct _IsValidAbiTag : __bool_constant<(_UsedBytes > 1)> {};

    template <typename _Tp>
      struct _IsValidSizeFor
        : __bool_constant<(_UsedBytes / sizeof(_Tp) > 1
                           && _UsedBytes % sizeof(_Tp) == 0
                           && _UsedBytes <= __vectorized_sizeof<_Tp>()
                           && (!__have_avx512f || _UsedBytes <= 32))> {};

    template <typename _Tp>
      struct _IsValid : conjunction<_IsValidAbiTag, __is_vectorizable<_Tp>,
                                    _IsValidSizeFor<_Tp>> {};

    template <typename _Tp>
      static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;

    // implementation classes
#if _GLIBCXX_SIMD_X86INTRIN
    using _CommonImpl = _CommonImplX86;
    using _SimdImpl = _SimdImplX86<_VecBuiltin<_UsedBytes>>;
    using _MaskImpl = _MaskImplX86<_VecBuiltin<_UsedBytes>>;
#elif _GLIBCXX_SIMD_HAVE_NEON
    using _CommonImpl = _CommonImplNeon;
    using _SimdImpl = _SimdImplNeon<_VecBuiltin<_UsedBytes>>;
    using _MaskImpl = _MaskImplNeon<_VecBuiltin<_UsedBytes>>;
#else
    using _CommonImpl = _CommonImplBuiltin;
#ifdef __ALTIVEC__
    using _SimdImpl = _SimdImplPpc<_VecBuiltin<_UsedBytes>>;
    using _MaskImpl = _MaskImplPpc<_VecBuiltin<_UsedBytes>>;
#else
    using _SimdImpl = _SimdImplBuiltin<_VecBuiltin<_UsedBytes>>;
    using _MaskImpl = _MaskImplBuiltin<_VecBuiltin<_UsedBytes>>;
#endif
#endif

    // __traits
    template <typename _Tp>
      using _MaskValueType = __int_for_sizeof_t<_Tp>;

    template <typename _Tp>
      using __traits
        = conditional_t<_S_is_valid_v<_Tp>,
                        _GnuTraits<_Tp, _MaskValueType<_Tp>,
                                   _VecBuiltin<_UsedBytes>, _S_size<_Tp>>,
                        _InvalidTraits>;

    template <typename _Tp>
      static constexpr size_t _S_full_size = __traits<_Tp>::_S_full_size;

    template <typename _Tp>
      static constexpr bool _S_is_partial = __traits<_Tp>::_S_is_partial;

    // implicit mask
    template <typename _Tp>
      using _MaskMember = _SimdWrapper<_MaskValueType<_Tp>, _S_size<_Tp>>;

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_implicit_mask()
      {
        using _UV = typename _MaskMember<_Tp>::_BuiltinType;
        if constexpr (!_MaskMember<_Tp>::_S_is_partial)
          return ~_UV();
        else
          {
            constexpr auto __size = _S_size<_Tp>;
            _GLIBCXX_SIMD_USE_CONSTEXPR auto __r
              = __generate_vector<_UV>([](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
                                       { return __i < __size ? -1 : 0; });
            return __r;
          }
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr __intrinsic_type_t<_Tp, _S_size<_Tp>>
      _S_implicit_mask_intrin()
      { return __to_intrin(__vector_bitcast<_Tp>(_S_implicit_mask<_Tp>()._M_data)); }

    template <typename _TW, typename _TVT = _VectorTraits<_TW>>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _TW
      _S_masked(_TW __x)
      {
        using _Tp = typename _TVT::value_type;
        if constexpr (!_MaskMember<_Tp>::_S_is_partial)
          return __x;
        else
          return __and(__as_vector(__x),
                       __vector_bitcast<_Tp>(_S_implicit_mask<_Tp>()));
      }

    template <typename _TW, typename _TVT = _VectorTraits<_TW>>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      __make_padding_nonzero(_TW __x)
      {
        using _Tp = typename _TVT::value_type;
        if constexpr (!_S_is_partial<_Tp>)
          return __x;
        else
          {
            _GLIBCXX_SIMD_USE_CONSTEXPR auto __implicit_mask
              = __vector_bitcast<_Tp>(_S_implicit_mask<_Tp>());
            if constexpr (is_integral_v<_Tp>)
              return __or(__x, ~__implicit_mask);
            else
              {
                _GLIBCXX_SIMD_USE_CONSTEXPR auto __one
                  = __andnot(__implicit_mask,
                             __vector_broadcast<_S_full_size<_Tp>>(_Tp(1)));
                // returning `__x | __one` would not suffice: the padding in
                // __x might hold inf or nan
                return __or(__and(__x, __implicit_mask), __one);
              }
          }
      }
  };
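// simd_abi::_VecBltnBtmsk: ABI tag for SIMD types stored in a builtin
// vector but using AVX512-style bitmasks (_SimdWrapper<bool, N>) for masks.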
template <int _UsedBytes>
  struct simd_abi::_VecBltnBtmsk
  {
    template <typename _Tp>
      static constexpr size_t _S_size = _UsedBytes / sizeof(_Tp);

    // validity traits
    struct _IsValidAbiTag : __bool_constant<(_UsedBytes > 1)> {};

    template <typename _Tp>
      struct _IsValidSizeFor
        : __bool_constant<(_UsedBytes / sizeof(_Tp) > 1
                           && _UsedBytes % sizeof(_Tp) == 0 && _UsedBytes <= 64
                           && (_UsedBytes > 32 || __have_avx512vl))> {};

    // bitmasks require at least AVX512F; for sizeof(_Tp) < 4, AVX512BW is
    // also required
    template <typename _Tp>
      struct _IsValid
        : conjunction<
            _IsValidAbiTag, __bool_constant<__have_avx512f>,
            __bool_constant<__have_avx512bw || (sizeof(_Tp) >= 4)>,
            __bool_constant<(__vectorized_sizeof<_Tp>() > sizeof(_Tp))>,
            _IsValidSizeFor<_Tp>> {};

    template <typename _Tp>
      static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;

    // implementation classes
#if _GLIBCXX_SIMD_X86INTRIN
    using _CommonImpl = _CommonImplX86;
    using _SimdImpl = _SimdImplX86<_VecBltnBtmsk<_UsedBytes>>;
    using _MaskImpl = _MaskImplX86<_VecBltnBtmsk<_UsedBytes>>;
#else
    template <int>
      struct _MissingImpl;

    using _CommonImpl = _MissingImpl<_UsedBytes>;
    using _SimdImpl = _MissingImpl<_UsedBytes>;
    using _MaskImpl = _MissingImpl<_UsedBytes>;
#endif

    // __traits
    template <typename _Tp>
      using _MaskMember = _SimdWrapper<bool, _S_size<_Tp>>;

    template <typename _Tp>
      using __traits
        = conditional_t<_S_is_valid_v<_Tp>,
                        _GnuTraits<_Tp, bool, _VecBltnBtmsk<_UsedBytes>,
                                   _S_size<_Tp>>,
                        _InvalidTraits>;

    template <typename _Tp>
      static constexpr size_t _S_full_size = __traits<_Tp>::_S_full_size;

    template <typename _Tp>
      static constexpr bool _S_is_partial = __traits<_Tp>::_S_is_partial;

    // implicit mask
    template <typename _Tp>
      using _ImplicitMask = _SimdWrapper<bool, _S_size<_Tp>>;

    template <size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr __bool_storage_member_type_t<_Np>
      __implicit_mask_n()
      {
        using _Tp = __bool_storage_member_type_t<_Np>;
        return _Np < sizeof(_Tp) * __CHAR_BIT__ ? _Tp((1ULL << _Np) - 1) : ~_Tp();
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _ImplicitMask<_Tp>
      _S_implicit_mask()
      { return __implicit_mask_n<_S_size<_Tp>>(); }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr
      __bool_storage_member_type_t<_S_size<_Tp>>
      _S_implicit_mask_intrin()
      { return __implicit_mask_n<_S_size<_Tp>>(); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_masked(_SimdWrapper<_Tp, _Np> __x)
      {
        if constexpr (is_same_v<_Tp, bool>)
          if constexpr (_Np < 8 || (_Np & (_Np - 1)) != 0)
            return _MaskImpl::_S_bit_and(
                     __x, _SimdWrapper<_Tp, _Np>(
                            __bool_storage_member_type_t<_Np>((1ULL << _Np) - 1)));
          else
            return __x;
        else
          return _S_masked(__x._M_data);
      }

    template <typename _TV>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _TV
      _S_masked(_TV __x)
      {
        using _Tp = typename _VectorTraits<_TV>::value_type;
        static_assert(
          !__is_bitmask_v<_TV>,
          "_VecBltnBtmsk::_S_masked cannot work on bitmasks, since it doesn't "
          "know the number of elements. Use _SimdWrapper<bool, N> instead.");
        if constexpr (_S_is_partial<_Tp>)
          {
            constexpr size_t _Np = _S_size<_Tp>;
            return __make_dependent_t<_TV, _CommonImpl>::_S_blend(
                     _S_implicit_mask<_Tp>(), _SimdWrapper<_Tp, _Np>(),
                     _SimdWrapper<_Tp, _Np>(__x));
          }
        else
          return __x;
      }

    template <typename _TV, typename _TVT = _VectorTraits<_TV>>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      __make_padding_nonzero(_TV __x)
      {
        using _Tp = typename _TVT::value_type;
        if constexpr (!_S_is_partial<_Tp>)
          return __x;
        else
          {
            constexpr size_t _Np = _S_size<_Tp>;
            if constexpr (is_integral_v<typename _TVT::value_type>)
              return __x
                       | __generate_vector<_Tp, _S_full_size<_Tp>>(
                           [](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA -> _Tp {
                             if (__i < _Np)
                               return 0;
                             else
                               return 1;
                           });
            else
              return __make_dependent_t<_TV, _CommonImpl>::_S_blend(
                       _S_implicit_mask<_Tp>(),
                       _SimdWrapper<_Tp, _Np>(
                         __vector_broadcast<_S_full_size<_Tp>>(_Tp(1))),
                       _SimdWrapper<_Tp, _Np>(__x))
                       ._M_data;
          }
      }
  };
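// _CommonImplBuiltin: generic load/store helpers shared by all builtin
// (non-target-specific) ABI implementations.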
struct _CommonImplBuiltin
{
  // __converts_via_decomposition_v: true for the cases where
  // __vector_convert must fall back to converting individual scalars; in
  // those cases _S_masked_load/_S_masked_store prefer bit iteration
  template <typename _From, typename _To, size_t _ToSize>
    static inline constexpr bool __converts_via_decomposition_v
      = sizeof(_From) != sizeof(_To);

  // _S_load
  template <typename _Tp, size_t _Np, size_t _Bytes = _Np * sizeof(_Tp)>
    _GLIBCXX_SIMD_INTRINSIC static __vector_type_t<_Tp, _Np>
    _S_load(const void* __p)
    {
      static_assert(_Np > 1);
      static_assert(_Bytes % sizeof(_Tp) == 0);
      using _Rp = __vector_type_t<_Tp, _Np>;
      if constexpr (sizeof(_Rp) == _Bytes)
        {
          _Rp __r;
          __builtin_memcpy(&__r, __p, _Bytes);
          return __r;
        }
      else
        {
#ifdef _GLIBCXX_SIMD_WORKAROUND_PR90424
          using _Up = conditional_t<
            is_integral_v<_Tp>,
            conditional_t<_Bytes % 4 == 0,
                          conditional_t<_Bytes % 8 == 0, long long, int>,
                          conditional_t<_Bytes % 2 == 0, short, signed char>>,
            conditional_t<(_Bytes < 8 || _Np % 2 == 1 || _Np == 2), _Tp,
                          double>>;
          using _V = __vector_type_t<_Up, _Np * sizeof(_Tp) / sizeof(_Up)>;
          if constexpr (sizeof(_V) != sizeof(_Rp))
            {
              _Rp __r{};
              __builtin_memcpy(&__r, __p, _Bytes);
              return __r;
            }
          else
#else
          using _V = _Rp;
#endif
            {
              _V __r{};
              static_assert(_Bytes <= sizeof(_V));
              __builtin_memcpy(&__r, __p, _Bytes);
              return reinterpret_cast<_Rp>(__r);
            }
        }
    }

  // _S_memcpy: recursive memcpy in power-of-two chunks
  template <size_t _Bytes>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_memcpy(char* __dst, const char* __src)
    {
      if constexpr (_Bytes > 0)
        {
          constexpr size_t _Ns = std::__bit_floor(_Bytes);
          __builtin_memcpy(__dst, __src, _Ns);
          _S_memcpy<_Bytes - _Ns>(__dst + _Ns, __src + _Ns);
        }
    }

  // _S_store
  template <size_t _ReqBytes = 0, typename _TV>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_store(_TV __x, void* __addr)
    {
      constexpr size_t _Bytes = _ReqBytes == 0 ? sizeof(__x) : _ReqBytes;
      static_assert(sizeof(__x) >= _Bytes);

#if !defined __clang__ && _GLIBCXX_SIMD_WORKAROUND_PR90424
      if constexpr (__is_vector_type_v<_TV>)
        _S_memcpy<_Bytes>(reinterpret_cast<char*>(__addr),
                          reinterpret_cast<const char*>(&__x));
      else
#endif
        __builtin_memcpy(__addr, &__x, _Bytes);
    }

  template <typename _Tp, size_t _Np>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_store(_SimdWrapper<_Tp, _Np> __x, void* __addr)
    { _S_store<_Np * sizeof(_Tp)>(__x._M_data, __addr); }

  // _S_store_bool_array(_BitMask)
  template <size_t _Np, bool _Sanitized>
    _GLIBCXX_SIMD_INTRINSIC static constexpr void
    _S_store_bool_array(_BitMask<_Np, _Sanitized> __x, bool* __mem)
    {
      if constexpr (_Np == 1)
        __mem[0] = __x[0];
      else if (__builtin_is_constant_evaluated())
        {
          for (size_t __i = 0; __i < _Np; ++__i)
            __mem[__i] = __x[__i];
        }
      else if constexpr (_Np == 2)
        {
          short __bool2 = (__x._M_to_bits() * 0x81) & 0x0101;
          _S_store<_Np>(__bool2, __mem);
        }
      else if constexpr (_Np == 3)
        {
          int __bool3 = (__x._M_to_bits() * 0x4081) & 0x010101;
          _S_store<_Np>(__bool3, __mem);
        }
      else
        {
          __execute_n_times<__div_roundup(_Np, 4)>(
            [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
              constexpr int __offset = __i * 4;
              constexpr int __remaining = _Np - __offset;
              if constexpr (__remaining > 4 && __remaining <= 7)
                {
                  const _ULLong __bool7
                    = (__x.template _M_extract<__offset>()._M_to_bits()
                       * 0x40810204081ULL)
                      & 0x0101010101010101ULL;
                  _S_store<__remaining>(__bool7, __mem + __offset);
                }
              else if constexpr (__remaining >= 4)
                {
                  int __bits = __x.template _M_extract<__offset>()._M_to_bits();
                  if constexpr (__remaining > 7)
                    __bits &= 0xf;
                  const int __bool4 = (__bits * 0x204081) & 0x01010101;
                  _S_store<4>(__bool4, __mem + __offset);
                }
            });
        }
    }

  // _S_blend
  template <typename _Tp, size_t _Np>
    _GLIBCXX_SIMD_INTRINSIC static constexpr auto
    _S_blend(_SimdWrapper<__int_for_sizeof_t<_Tp>, _Np> __k,
             _SimdWrapper<_Tp, _Np> __at0, _SimdWrapper<_Tp, _Np> __at1)
    { return __k._M_data ? __at1._M_data : __at0._M_data; }
};
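// _SimdImplBuiltin: the generic simd implementation on top of builtin
// vectors. Target-specific implementations (x86, NEON, PPC) build on it.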
template <typename _Abi, typename>
  struct _SimdImplBuiltin
  {
    // member types
    template <typename _Tp>
      static constexpr size_t _S_max_store_size = 16;

    using abi_type = _Abi;

    template <typename _Tp>
      using _TypeTag = _Tp*;

    template <typename _Tp>
      using _SimdMember = typename _Abi::template __traits<_Tp>::_SimdMember;

    template <typename _Tp>
      using _MaskMember = typename _Abi::template _MaskMember<_Tp>;

    template <typename _Tp>
      static constexpr size_t _S_size = _Abi::template _S_size<_Tp>;

    template <typename _Tp>
      static constexpr size_t _S_full_size = _Abi::template _S_full_size<_Tp>;

    using _CommonImpl = typename _Abi::_CommonImpl;
    using _SuperImpl = typename _Abi::_SimdImpl;
    using _MaskImpl = typename _Abi::_MaskImpl;

    // _M_make_simd(_SimdWrapper/__intrinsic_type_t)
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr simd<_Tp, _Abi>
      _M_make_simd(_SimdWrapper<_Tp, _Np> __x)
      { return {__private_init, __x}; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr simd<_Tp, _Abi>
      _M_make_simd(__intrinsic_type_t<_Tp, _Np> __x)
      { return {__private_init, __vector_bitcast<_Tp>(__x)}; }
    // _S_broadcast
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdMember<_Tp>
      _S_broadcast(_Tp __x) noexcept
      { return __vector_broadcast<_S_full_size<_Tp>>(__x); }

    // _S_generator
    template <typename _Fp, typename _Tp>
      inline static constexpr _SimdMember<_Tp>
      _S_generator(_Fp&& __gen, _TypeTag<_Tp>)
      {
        return __generate_vector<_Tp, _S_full_size<_Tp>>(
                 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                   if constexpr (__i < _S_size<_Tp>)
                     return __gen(__i);
                   else
                     return 0;
                 });
      }
    // _S_load
    template <typename _Tp, typename _Up>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdMember<_Tp>
      _S_load(const _Up* __mem, _TypeTag<_Tp>) noexcept
      {
        constexpr size_t _Np = _S_size<_Tp>;
        constexpr size_t __max_load_size
          = (sizeof(_Up) >= 4 && __have_avx512f) || __have_avx512bw ? 64
            : (is_floating_point_v<_Up> && __have_avx) || __have_avx2 ? 32
                                                                      : 16;
        constexpr size_t __bytes_to_load = sizeof(_Up) * _Np;
        if (__builtin_is_constant_evaluated())
          return __generate_vector<_Tp, _S_full_size<_Tp>>(
                   [&](auto __i) constexpr {
                     return static_cast<_Tp>(__i < _Np ? __mem[__i] : 0);
                   });
        else if constexpr (sizeof(_Up) > 8)
          return __generate_vector<_Tp, _SimdMember<_Tp>::_S_full_size>(
                   [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                     return static_cast<_Tp>(__i < _Np ? __mem[__i] : 0);
                   });
        else if constexpr (is_same_v<_Up, _Tp>)
          return _CommonImpl::template _S_load<_Tp, _S_full_size<_Tp>,
                                               _Np * sizeof(_Tp)>(__mem);
        else if constexpr (__bytes_to_load <= __max_load_size)
          return __convert<_SimdMember<_Tp>>(
                   _CommonImpl::template _S_load<_Up, _Np>(__mem));
        else if constexpr (__bytes_to_load % __max_load_size == 0)
          {
            constexpr size_t __n_loads = __bytes_to_load / __max_load_size;
            constexpr size_t __elements_per_load = _Np / __n_loads;
            return __call_with_n_evaluations<__n_loads>(
                     [](auto... __uncvted) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                       return __convert<_SimdMember<_Tp>>(__uncvted...);
                     }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                       return _CommonImpl::template _S_load<_Up, __elements_per_load>(
                                __mem + __i * __elements_per_load);
                     });
          }
        else if constexpr (__bytes_to_load % (__max_load_size / 2) == 0
                           && __max_load_size > 16)
          {
            constexpr size_t __n_loads
              = __bytes_to_load / (__max_load_size / 2);
            constexpr size_t __elements_per_load = _Np / __n_loads;
            return __call_with_n_evaluations<__n_loads>(
                     [](auto... __uncvted) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                       return __convert<_SimdMember<_Tp>>(__uncvted...);
                     }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                       return _CommonImpl::template _S_load<_Up, __elements_per_load>(
                                __mem + __i * __elements_per_load);
                     });
          }
        else // fall back to elementwise subscripting
          return __call_with_subscripts(
                   __mem, make_index_sequence<_Np>(),
                   [](auto... __args) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                     return __vector_type_t<_Tp, _S_full_size<_Tp>>{
                              static_cast<_Tp>(__args)...};
                   });
      }
    // _S_masked_load
    template <typename _Tp, size_t _Np, typename _Up>
      static constexpr inline _SimdWrapper<_Tp, _Np>
      _S_masked_load(_SimdWrapper<_Tp, _Np> __merge, _MaskMember<_Tp> __k,
                     const _Up* __mem) noexcept
      {
        _BitOps::_S_bit_iteration(_MaskImpl::_S_to_bits(__k),
                                  [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                                    __merge._M_set(__i, static_cast<_Tp>(__mem[__i]));
                                  });
        return __merge;
      }
    // _S_store
    template <typename _Tp, typename _Up>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_store(_SimdMember<_Tp> __v, _Up* __mem, _TypeTag<_Tp>) noexcept
      {
        constexpr size_t _Np = _S_size<_Tp>;
        constexpr size_t __max_store_size
          = _SuperImpl::template _S_max_store_size<_Up>;
        if (__builtin_is_constant_evaluated())
          {
            for (size_t __i = 0; __i < _Np; ++__i)
              __mem[__i] = __v[__i];
          }
        else if constexpr (sizeof(_Up) > 8)
          __execute_n_times<_Np>([&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            __mem[__i] = __v[__i];
          });
        else if constexpr (is_same_v<_Up, _Tp>)
          _CommonImpl::_S_store(__v, __mem);
        else if constexpr (sizeof(_Up) * _Np <= __max_store_size)
          _CommonImpl::_S_store(_SimdWrapper<_Up, _Np>(__convert<_Up>(__v)),
                                __mem);
        else
          {
            constexpr size_t __vsize = __max_store_size / sizeof(_Up);
            // round up to convert the last partial vector as well:
            constexpr size_t __stores = __div_roundup(_Np, __vsize);
            constexpr size_t __full_stores = _Np / __vsize;
            using _V = __vector_type_t<_Up, __vsize>;
            const array<_V, __stores> __converted
              = __convert_all<_V, __stores>(__v);
            __execute_n_times<__full_stores>(
              [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                _CommonImpl::_S_store(__converted[__i], __mem + __i * __vsize);
              });
            if constexpr (__full_stores < __stores)
              _CommonImpl::template _S_store<(_Np - __full_stores * __vsize)
                                               * sizeof(_Up)>(
                __converted[__full_stores], __mem + __full_stores * __vsize);
          }
      }
    // _S_masked_store_nocvt
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem, _MaskMember<_Tp> __k)
      {
        _BitOps::_S_bit_iteration(
          _MaskImpl::_S_to_bits(__k),
          [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            __mem[__i] = __v[__i];
          });
      }
    // _S_masked_store
    template <typename _TW, typename _TVT = _VectorTraits<_TW>,
              typename _Tp = typename _TVT::value_type, typename _Up>
      static constexpr inline void
      _S_masked_store(const _TW __v, _Up* __mem, const _MaskMember<_Tp> __k) noexcept
      {
        constexpr size_t _TV_size = _S_size<_Tp>;
        [[maybe_unused]] const auto __vi = __to_intrin(__v);
        constexpr size_t __max_store_size
          = _SuperImpl::template _S_max_store_size<_Up>;
        if constexpr (is_same_v<_Tp,
                                _Up> || (is_integral_v<_Tp> && is_integral_v<_Up> && sizeof(_Tp) == sizeof(_Up)))
          {
            // bitwise or no conversion, reinterpret:
            const _MaskMember<_Up> __kk = [&]() _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
              if constexpr (__is_bitmask_v<decltype(__k)>)
                return _MaskMember<_Up>(__k._M_data);
              else
                return __wrapper_bitcast<__int_for_sizeof_t<_Up>>(__k);
            }();
            _SuperImpl::_S_masked_store_nocvt(__wrapper_bitcast<_Up>(__v),
                                              __mem, __kk);
          }
        else if constexpr (__vectorized_sizeof<_Up>() > sizeof(_Up)
                           && !_CommonImpl::
                                template __converts_via_decomposition_v<
                                  _Tp, _Up, __max_store_size>)
          { // conversion via decomposition is better handled via the
            // bit_iteration fallback below
            constexpr size_t _UW_size
              = std::min(_TV_size, __max_store_size / sizeof(_Up));
            static_assert(_UW_size <= _TV_size);
            using _UW = _SimdWrapper<_Up, _UW_size>;
            using _UV = __vector_type_t<_Up, _UW_size>;
            using _UAbi = simd_abi::deduce_t<_Up, _UW_size>;
            if constexpr (_UW_size == _TV_size) // one convert+store
              {
                const _UW __converted = __convert<_UW>(__v);
                _UAbi::_SimdImpl::_S_masked_store_nocvt(
                  __converted, __mem,
                  _UAbi::_MaskImpl::template _S_convert<
                    __int_for_sizeof_t<_Up>>(__k));
              }
            else
              {
                static_assert(_UW_size * sizeof(_Up) == __max_store_size);
                constexpr size_t _NFullStores = _TV_size / _UW_size;
                constexpr size_t _NAllStores
                  = __div_roundup(_TV_size, _UW_size);
                constexpr size_t _NParts = _S_full_size<_Tp> / _UW_size;
                const array<_UV, _NAllStores> __converted
                  = __convert_all<_UV, _NAllStores>(__v);
                __execute_n_times<_NFullStores>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                  _UAbi::_SimdImpl::_S_masked_store_nocvt(
                    _UW(__converted[__i]), __mem + __i * _UW_size,
                    _UAbi::_MaskImpl::template _S_convert<
                      __int_for_sizeof_t<_Up>>(
                      __extract_part<__i, _NParts>(__k.__as_full_vector())));
                });
                if constexpr (_NAllStores
                              > _NFullStores) // one partial at the end
                  _UAbi::_SimdImpl::_S_masked_store_nocvt(
                    _UW(__converted[_NFullStores]),
                    __mem + _NFullStores * _UW_size,
                    _UAbi::_MaskImpl::template _S_convert<
                      __int_for_sizeof_t<_Up>>(
                      __extract_part<_NFullStores, _NParts>(
                        __k.__as_full_vector())));
              }
          }
        else
          _BitOps::_S_bit_iteration(_MaskImpl::_S_to_bits(__k),
                                    [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                                      __mem[__i] = static_cast<_Up>(__v[__i]);
                                    });
      }
    // _S_complement
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_complement(_SimdWrapper<_Tp, _Np> __x) noexcept
      {
        if constexpr (is_floating_point_v<_Tp>)
          return __vector_bitcast<_Tp>(~__vector_bitcast<__int_for_sizeof_t<_Tp>>(__x));
        else
          return ~__x._M_data;
      }

    // _S_unary_minus
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_unary_minus(_SimdWrapper<_Tp, _Np> __x) noexcept
      { return -__x._M_data; }
    // arithmetic operators
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_plus(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data + __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_minus(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data - __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_multiplies(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data * __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_divides(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      {
        // padding elements of partial registers must be kept nonzero to
        // avoid division by zero
        if constexpr (!_Abi::template _S_is_partial<_Tp>)
          return __x._M_data / __y._M_data;
        else
          return __x._M_data / _Abi::__make_padding_nonzero(__y._M_data);
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_modulus(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      {
        if constexpr (!_Abi::template _S_is_partial<_Tp>)
          return __x._M_data % __y._M_data;
        else
          return __as_vector(__x)
                   % _Abi::__make_padding_nonzero(__as_vector(__y));
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_and(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __and(__x, __y); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_or(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __or(__x, __y); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_xor(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __xor(__x, __y); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_bit_shift_left(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data << __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_bit_shift_right(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data >> __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_shift_left(_SimdWrapper<_Tp, _Np> __x, int __y)
      { return __x._M_data << __y; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_shift_right(_SimdWrapper<_Tp, _Np> __x, int __y)
      { return __x._M_data >> __y; }

    // compares
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_equal_to(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data == __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_not_equal_to(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data != __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_less(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data < __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_less_equal(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data <= __y._M_data; }

    // negation
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_negate(_SimdWrapper<_Tp, _Np> __x) noexcept
      { return !__x._M_data; }

    // _S_min, _S_max, _S_minmax
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_NORMAL_MATH _GLIBCXX_SIMD_INTRINSIC static constexpr
      _SimdWrapper<_Tp, _Np>
      _S_min(_SimdWrapper<_Tp, _Np> __a, _SimdWrapper<_Tp, _Np> __b)
      { return __a._M_data < __b._M_data ? __a._M_data : __b._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_NORMAL_MATH _GLIBCXX_SIMD_INTRINSIC static constexpr
      _SimdWrapper<_Tp, _Np>
      _S_max(_SimdWrapper<_Tp, _Np> __a, _SimdWrapper<_Tp, _Np> __b)
      { return __a._M_data > __b._M_data ? __a._M_data : __b._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_NORMAL_MATH _GLIBCXX_SIMD_INTRINSIC static constexpr
      pair<_SimdWrapper<_Tp, _Np>, _SimdWrapper<_Tp, _Np>>
      _S_minmax(_SimdWrapper<_Tp, _Np> __a, _SimdWrapper<_Tp, _Np> __b)
      {
        return {__a._M_data < __b._M_data ? __a._M_data : __b._M_data,
                __a._M_data < __b._M_data ? __b._M_data : __a._M_data};
      }
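    // _S_reduce_partial / _S_reduce: horizontal reduction of all elements
    // with __binary_op. The partial-register cases pad with elements that
    // are neutral for the operation (or fall back to scalar code); the
    // full-register case recursively combines vector halves.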
1809 template <
size_t _Np,
size_t... _Is,
size_t... _Zeros,
typename _Tp,
1810 typename _BinaryOperation>
1811 _GLIBCXX_SIMD_INTRINSIC
static constexpr _Tp
1812 _S_reduce_partial(index_sequence<_Is...>, index_sequence<_Zeros...>,
1813 simd<_Tp, _Abi> __x, _BinaryOperation&& __binary_op)
1815 using _V = __vector_type_t<_Tp, _Np / 2>;
1816 static_assert(
sizeof(_V) <=
sizeof(__x));
1819 using _FullSimd = __deduced_simd<_Tp, _VectorTraits<_V>::_S_full_size>;
1820 using _HalfSimd = __deduced_simd<_Tp, _Np / 2>;
1821 const auto __xx = __as_vector(__x);
1822 return _HalfSimd::abi_type::_SimdImpl::_S_reduce(
1823 static_cast<_HalfSimd
>(__as_vector(__binary_op(
1824 static_cast<_FullSimd
>(__intrin_bitcast<_V>(__xx)),
1825 static_cast<_FullSimd
>(__intrin_bitcast<_V>(
1826 __vector_permute<(_Np / 2 + _Is)..., (
int(_Zeros * 0) - 1)...>(
    template <typename _Tp, typename _BinaryOperation>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
      _S_reduce(simd<_Tp, _Abi> __x, _BinaryOperation&& __binary_op)
      {
        constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
        if constexpr (_Np == 1)
          return __x[0];
        else if constexpr (_Np == 2)
          return __binary_op(simd<_Tp, simd_abi::scalar>(__x[0]),
                             simd<_Tp, simd_abi::scalar>(__x[1]))[0];
        else if (__builtin_is_constant_evaluated())
          {
            simd<_Tp, simd_abi::scalar> __acc = __x[0];
            for (size_t __i = 1; __i < _Np; ++__i)
              __acc = __binary_op(__acc, simd<_Tp, simd_abi::scalar>(__x[__i]));
            return __acc[0];
          }
        else if constexpr (_Abi::template _S_is_partial<_Tp>)
          {
            [[maybe_unused]] constexpr auto __full_size
              = _Abi::template _S_full_size<_Tp>;
            if constexpr (_Np == 3)
              return __binary_op(
                __binary_op(simd<_Tp, simd_abi::scalar>(__x[0]),
                            simd<_Tp, simd_abi::scalar>(__x[1])),
                simd<_Tp, simd_abi::scalar>(__x[2]))[0];
            else if constexpr (is_same_v<__remove_cvref_t<_BinaryOperation>,
                                         plus<>>)
              {
                using _Ap = simd_abi::deduce_t<_Tp, __full_size>;
                return _Ap::_SimdImpl::_S_reduce(
                  simd<_Tp, _Ap>(__private_init,
                                 _Abi::_S_masked(__as_vector(__x))),
                  __binary_op);
              }
            else if constexpr (is_same_v<__remove_cvref_t<_BinaryOperation>,
                                         multiplies<>>)
              {
                using _Ap = simd_abi::deduce_t<_Tp, __full_size>;
                using _TW = _SimdWrapper<_Tp, __full_size>;
                _GLIBCXX_SIMD_USE_CONSTEXPR auto __implicit_mask_full
                  = _Abi::template _S_implicit_mask<_Tp>().__as_full_vector();
                _GLIBCXX_SIMD_USE_CONSTEXPR _TW __one
                  = __vector_broadcast<__full_size>(_Tp(1));
                const _TW __x_full = __data(__x).__as_full_vector();
                const _TW __x_padded_with_ones
                  = _Ap::_CommonImpl::_S_blend(__implicit_mask_full, __one,
                                               __x_full);
                return _Ap::_SimdImpl::_S_reduce(
                  simd<_Tp, _Ap>(__private_init, __x_padded_with_ones),
                  __binary_op);
              }
            else if constexpr (_Np & 1)
              {
                using _Ap = simd_abi::deduce_t<_Tp, _Np - 1>;
                return __binary_op(
                  simd<_Tp, simd_abi::scalar>(_Ap::_SimdImpl::_S_reduce(
                    simd<_Tp, _Ap>(
                      __intrin_bitcast<__vector_type_t<_Tp, _Np - 1>>(
                        __as_vector(__x))),
                    __binary_op)),
                  simd<_Tp, simd_abi::scalar>(__x[_Np - 1]))[0];
              }
            else
              return _S_reduce_partial<_Np>(
                make_index_sequence<_Np / 2>(),
                make_index_sequence<__full_size - _Np / 2>(), __x,
                __binary_op);
          }
        else if constexpr (sizeof(__x) == 16)
          {
            if constexpr (_Np == 16)
              {
                const auto __y = __data(__x);
                __x = __binary_op(
                  _M_make_simd<_Tp, _Np>(
                    __vector_permute<0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
                                     7, 7>(__y)),
                  _M_make_simd<_Tp, _Np>(
                    __vector_permute<8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13,
                                     13, 14, 14, 15, 15>(__y)));
              }
            if constexpr (_Np >= 8)
              {
                const auto __y = __vector_bitcast<short>(__data(__x));
                __x = __binary_op(
                  _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
                    __vector_permute<0, 0, 1, 1, 2, 2, 3, 3>(__y))),
                  _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
                    __vector_permute<4, 4, 5, 5, 6, 6, 7, 7>(__y))));
              }
            if constexpr (_Np >= 4)
              {
                using _Up = conditional_t<is_floating_point_v<_Tp>, float, int>;
                const auto __y = __vector_bitcast<_Up>(__data(__x));
                __x = __binary_op(__x,
                                  _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
                                    __vector_permute<3, 2, 1, 0>(__y))));
              }
            using _Up = conditional_t<is_floating_point_v<_Tp>, double, _LLong>;
            const auto __y = __vector_bitcast<_Up>(__data(__x));
            __x = __binary_op(__x, _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
                                __vector_permute<1, 1>(__y))));
            return __x[0];
          }
        else
          {
            static_assert(sizeof(__x) > __min_vector_size<_Tp>);
            static_assert((_Np & (_Np - 1)) == 0);
            using _Ap = simd_abi::deduce_t<_Tp, _Np / 2>;
            using _V = simd<_Tp, _Ap>;
            return _Ap::_SimdImpl::_S_reduce(
              __binary_op(_V(__private_init, __extract<0, 2>(__as_vector(__x))),
                          _V(__private_init,
                             __extract<1, 2>(__as_vector(__x)))),
              static_cast<_BinaryOperation&&>(__binary_op));
          }
      }
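    // Worked example for the 16-byte case (assuming _Np == 4 and plus<>):
    // the <3, 2, 1, 0> permute adds element pairs {0,3} and {1,2}, the
    // subsequent <1, 1> permute (on 64-bit halves) adds the two partial
    // sums, and __x[0] then holds the total. Each step halves the number of
    // distinct values, so the reduction needs log2(_Np) combining steps.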
#define _GLIBCXX_SIMD_MATH_FALLBACK(__name)                                    \
    template <typename _Tp, typename... _More>                                 \
      static _Tp                                                               \
      _S_##__name(const _Tp& __x, const _More&... __more)                      \
      {                                                                        \
        return __generate_vector<_Tp>(                                         \
                 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {            \
                   return __name(__x[__i], __more[__i]...);                    \
                 });                                                           \
      }

#define _GLIBCXX_SIMD_MATH_FALLBACK_MASKRET(__name)                            \
    template <typename _Tp, typename... _More>                                 \
      static typename _Tp::mask_type                                           \
      _S_##__name(const _Tp& __x, const _More&... __more)                      \
      {                                                                        \
        return __generate_vector<_Tp>(                                         \
                 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {            \
                   return __name(__x[__i], __more[__i]...);                    \
                 });                                                           \
      }

#define _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(_RetTp, __name)                   \
    template <typename _Tp, typename... _More>                                 \
      static auto                                                              \
      _S_##__name(const _Tp& __x, const _More&... __more)                      \
      {                                                                        \
        return __fixed_size_storage_t<_RetTp,                                  \
                                      _VectorTraits<_Tp>::_S_partial_width>::  \
          _S_generate([&](auto __meta) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { \
            return __meta._S_generator(                                        \
                     [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {        \
                       return __name(__x[__meta._S_offset + __i],              \
                                     __more[__meta._S_offset + __i]...);       \
                     },                                                        \
                     static_cast<_RetTp*>(nullptr));                           \
          });                                                                  \
      }
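    // These fallbacks implement the math functions element-wise: each lane
    // is passed to the corresponding scalar function (e.g. acos) and the
    // results are gathered back into a vector via __generate_vector. The
    // _FIXEDRET variant covers functions whose return type differs from the
    // argument type (e.g. ilogb returning int) and therefore needs a
    // fixed-size storage of possibly different width.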
    _GLIBCXX_SIMD_MATH_FALLBACK(acos)
    _GLIBCXX_SIMD_MATH_FALLBACK(asin)
    _GLIBCXX_SIMD_MATH_FALLBACK(atan)
    _GLIBCXX_SIMD_MATH_FALLBACK(atan2)
    _GLIBCXX_SIMD_MATH_FALLBACK(cos)
    _GLIBCXX_SIMD_MATH_FALLBACK(sin)
    _GLIBCXX_SIMD_MATH_FALLBACK(tan)
    _GLIBCXX_SIMD_MATH_FALLBACK(acosh)
    _GLIBCXX_SIMD_MATH_FALLBACK(asinh)
    _GLIBCXX_SIMD_MATH_FALLBACK(atanh)
    _GLIBCXX_SIMD_MATH_FALLBACK(cosh)
    _GLIBCXX_SIMD_MATH_FALLBACK(sinh)
    _GLIBCXX_SIMD_MATH_FALLBACK(tanh)
    _GLIBCXX_SIMD_MATH_FALLBACK(exp)
    _GLIBCXX_SIMD_MATH_FALLBACK(exp2)
    _GLIBCXX_SIMD_MATH_FALLBACK(expm1)
    _GLIBCXX_SIMD_MATH_FALLBACK(ldexp)
    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(int, ilogb)
    _GLIBCXX_SIMD_MATH_FALLBACK(log)
    _GLIBCXX_SIMD_MATH_FALLBACK(log10)
    _GLIBCXX_SIMD_MATH_FALLBACK(log1p)
    _GLIBCXX_SIMD_MATH_FALLBACK(log2)
    _GLIBCXX_SIMD_MATH_FALLBACK(logb)
    _GLIBCXX_SIMD_MATH_FALLBACK(scalbn)
    _GLIBCXX_SIMD_MATH_FALLBACK(scalbln)
    _GLIBCXX_SIMD_MATH_FALLBACK(cbrt)
    _GLIBCXX_SIMD_MATH_FALLBACK(fabs)
    _GLIBCXX_SIMD_MATH_FALLBACK(pow)
    _GLIBCXX_SIMD_MATH_FALLBACK(sqrt)
    _GLIBCXX_SIMD_MATH_FALLBACK(erf)
    _GLIBCXX_SIMD_MATH_FALLBACK(erfc)
    _GLIBCXX_SIMD_MATH_FALLBACK(lgamma)
    _GLIBCXX_SIMD_MATH_FALLBACK(tgamma)

    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long, lrint)
    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long long, llrint)
    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long, lround)
    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long long, llround)

    _GLIBCXX_SIMD_MATH_FALLBACK(fmod)
    _GLIBCXX_SIMD_MATH_FALLBACK(remainder)
    template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
      static _Tp
      _S_remquo(const _Tp __x, const _Tp __y,
                __fixed_size_storage_t<int, _TVT::_S_partial_width>* __z)
      {
        return __generate_vector<_Tp>(
                 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                   int __tmp;
                   auto __r = remquo(__x[__i], __y[__i], &__tmp);
                   __z->_M_set(__i, __tmp);
                   return __r;
                 });
      }
    _GLIBCXX_SIMD_MATH_FALLBACK(nextafter)
    _GLIBCXX_SIMD_MATH_FALLBACK(fdim)
    _GLIBCXX_SIMD_MATH_FALLBACK(fmax)
    _GLIBCXX_SIMD_MATH_FALLBACK(fmin)
    _GLIBCXX_SIMD_MATH_FALLBACK(fma)
    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_isgreater(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
        noexcept
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __xn = __vector_bitcast<_Ip>(__x);
        const auto __yn = __vector_bitcast<_Ip>(__y);
        const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
        const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
        return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
                        __xp > __yp);
      }
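    // The trick above compares bit patterns as signed integers: ignoring
    // NaN, IEEE-754 values order like sign-magnitude integers, so negative
    // inputs are remapped to their two's-complement counterpart
    // (-(__xn & __finite_max_v<_Ip>)) before a plain integer comparison
    // reproduces the floating-point ordering. Unordered (NaN) lanes are
    // masked off via _S_isunordered. The following functions apply the same
    // idea to >=, <, and <=.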
    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_isgreaterequal(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
        noexcept
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __xn = __vector_bitcast<_Ip>(__x);
        const auto __yn = __vector_bitcast<_Ip>(__y);
        const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
        const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
        return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
                        __xp >= __yp);
      }

    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_isless(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
        noexcept
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __xn = __vector_bitcast<_Ip>(__x);
        const auto __yn = __vector_bitcast<_Ip>(__y);
        const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
        const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
        return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
                        __xp < __yp);
      }

    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_islessequal(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
        noexcept
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __xn = __vector_bitcast<_Ip>(__x);
        const auto __yn = __vector_bitcast<_Ip>(__y);
        const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
        const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
        return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
                        __xp <= __yp);
      }

    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_islessgreater(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
        noexcept
      {
        return __andnot(_SuperImpl::_S_isunordered(__x, __y),
                        _SuperImpl::_S_not_equal_to(__x, __y));
      }
#undef _GLIBCXX_SIMD_MATH_FALLBACK
#undef _GLIBCXX_SIMD_MATH_FALLBACK_MASKRET
#undef _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_abs(_SimdWrapper<_Tp, _Np> __x) noexcept
      {
        // for floating-point, masking off the sign bit is required because
        // __x < 0 ? -__x : __x would not handle -0 correctly
        if constexpr (is_floating_point_v<_Tp>)
          return __and(_S_absmask<__vector_type_t<_Tp, _Np>>, __x._M_data);
        else
          return __x._M_data < 0 ? -__x._M_data : __x._M_data;
      }
    template <typename _TV, typename _UV>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _TV
      _S_plus_minus(_TV __x, _UV __y) noexcept
      {
#if defined __i386__ && !defined __SSE_MATH__
        if constexpr (sizeof(__x) == 8)
          {
            static_assert(is_same_v<_TV, __vector_type_t<float, 2>>);
            const auto __x4 = __vector_bitcast<float, 4>(__x);
            if constexpr (is_same_v<_TV, _UV>)
              return __vector_bitcast<float, 2>(
                       _S_plus_minus(__x4, __vector_bitcast<float, 4>(__y)));
            else
              return __vector_bitcast<float, 2>(_S_plus_minus(__x4, __y));
          }
#endif
#if !defined __clang__ && __GCC_IEC_559 == 0
        if (__builtin_is_constant_evaluated()
              || (__builtin_constant_p(__x) && __builtin_constant_p(__y)))
          return (__x + __y) - __y;
        else
          return [&] {
            __x += __y;
            if constexpr (__have_sse)
              {
                if constexpr (sizeof(__x) >= 16)
                  asm("" : "+x"(__x));
                else if constexpr (is_same_v<__vector_type_t<float, 2>, _TV>)
                  asm("" : "+x"(__x[0]), "+x"(__x[1]));
                else
                  __assert_unreachable<_TV>();
              }
            else if constexpr (__have_neon)
              asm("" : "+w"(__x));
            else if constexpr (__have_power_vmx)
              {
                if constexpr (is_same_v<__vector_type_t<float, 2>, _TV>)
                  asm("" : "+fgr"(__x[0]), "+fgr"(__x[1]));
                else
                  asm("" : "+v"(__x));
              }
            else
              asm("" : "+g"(__x));
            return __x - __y;
          }();
#else
        return (__x + __y) - __y;
#endif
      }
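    // The empty asm statements above act as optimization barriers: when the
    // compiler is allowed to reassociate floating-point math (__GCC_IEC_559
    // == 0), they force __x + __y to be materialized in a register so the
    // subsequent subtraction of __y cannot be folded away. The rounding
    // functions below depend on (__x + __shifter) - __shifter actually
    // performing both operations in floating-point.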
    template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
      _GLIBCXX_SIMD_INTRINSIC static _Tp
      _S_nearbyint(_Tp __x_) noexcept
      {
        using value_type = typename _TVT::value_type;
        using _V = typename _TVT::type;
        const _V __x = __x_;
        const _V __absx = __and(__x, _S_absmask<_V>);
        static_assert(__CHAR_BIT__ * sizeof(1ull) >= __digits_v<value_type>);
        _GLIBCXX_SIMD_USE_CONSTEXPR _V __shifter_abs
          = _V() + (1ull << (__digits_v<value_type> - 1));
        const _V __shifter = __or(__and(_S_signmask<_V>, __x), __shifter_abs);
        const _V __shifted = _S_plus_minus(__x, __shifter);
        return __absx < __shifter_abs ? __shifted : __x;
      }
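    // How this works: for |x| < 2^(p-1), with p the mantissa digit count
    // (e.g. 2^23 for float), adding and then subtracting 2^(p-1) with the
    // same sign as x leaves no fraction bits in the intermediate sum, so
    // the plus/minus rounds x to an integer in the current rounding mode.
    // Inputs with |x| >= 2^(p-1) are already integral and pass through.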
    template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
      _GLIBCXX_SIMD_INTRINSIC static _Tp
      _S_rint(_Tp __x) noexcept
      { return _SuperImpl::_S_nearbyint(__x); }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_trunc(_SimdWrapper<_Tp, _Np> __x)
      {
        using _V = __vector_type_t<_Tp, _Np>;
        const _V __absx = __and(__x._M_data, _S_absmask<_V>);
        static_assert(__CHAR_BIT__ * sizeof(1ull) >= __digits_v<_Tp>);
        constexpr _Tp __shifter = 1ull << (__digits_v<_Tp> - 1);
        _V __truncated = _S_plus_minus(__absx, __shifter);
        __truncated -= __truncated > __absx ? _V() + 1 : _V();
        return __absx < __shifter
                 ? __or(__xor(__absx, __x._M_data), __truncated)
                 : __x._M_data;
      }
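    // _S_plus_minus rounds in the current (typically to-nearest) mode, so
    // the intermediate result can be one too large; the comparison with
    // __absx subtracts 1 in that case. The final
    // __or(__xor(__absx, __x._M_data), __truncated) restores the original
    // sign bit onto the truncated magnitude.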
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_round(_SimdWrapper<_Tp, _Np> __x)
      {
        const auto __abs_x = _SuperImpl::_S_abs(__x);
        const auto __t_abs = _SuperImpl::_S_trunc(__abs_x)._M_data;
        const auto __r_abs
          = __t_abs + (__abs_x._M_data - __t_abs >= _Tp(.5) ? _Tp(1) : 0);
        return __or(__xor(__abs_x._M_data, __x._M_data), __r_abs);
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_floor(_SimdWrapper<_Tp, _Np> __x)
      {
        const auto __y = _SuperImpl::_S_trunc(__x)._M_data;
        const auto __negative_input
          = __vector_bitcast<_Tp>(__x._M_data < __vector_broadcast<_Np, _Tp>(0));
        const auto __mask
          = __andnot(__vector_bitcast<_Tp>(__y == __x._M_data),
                     __negative_input);
        return __or(__andnot(__mask, __y),
                    __and(__mask, __y - __vector_broadcast<_Np, _Tp>(1)));
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_ceil(_SimdWrapper<_Tp, _Np> __x)
      {
        const auto __y = _SuperImpl::_S_trunc(__x)._M_data;
        const auto __negative_input
          = __vector_bitcast<_Tp>(__x._M_data < __vector_broadcast<_Np, _Tp>(0));
        const auto __inv_mask
          = __or(__vector_bitcast<_Tp>(__y == __x._M_data), __negative_input);
        return __or(__and(__inv_mask, __y),
                    __andnot(__inv_mask, __y + __vector_broadcast<_Np, _Tp>(1)));
      }
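    // _S_floor and _S_ceil both start from trunc and adjust by one where
    // trunc moved in the wrong direction: floor subtracts 1 on negative,
    // non-integral lanes; ceil adds 1 on positive, non-integral lanes
    // (__inv_mask selects the integral-or-negative lanes, i.e. those that
    // need no adjustment).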
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isnan([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
      {
#if __FINITE_MATH_ONLY__
        return {}; // false
#elif !defined __SUPPORT_SNAN__
        return ~(__x._M_data == __x._M_data);
#elif defined __STDC_IEC_559__
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
        const auto __infn
          = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__infinity_v<_Tp>));
        return __infn < __absn;
#else
#error "Not implemented: how to support SNaN but non-IEC559 floating-point?"
#endif
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isfinite([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
      {
#if __FINITE_MATH_ONLY__
        using _UV = typename _MaskMember<_Tp>::_BuiltinType;
        _GLIBCXX_SIMD_USE_CONSTEXPR _UV __alltrue = ~_UV();
        return __alltrue;
#else
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
        const auto __maxn
          = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__finite_max_v<_Tp>));
        return __absn <= __maxn;
#endif
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isunordered(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __or(_S_isnan(__x), _S_isnan(__y)); }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_signbit(_SimdWrapper<_Tp, _Np> __x)
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        return __vector_bitcast<_Ip>(__x) < 0;
      }
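    // Reinterpreting the lanes as signed integers of equal size makes the
    // IEEE sign bit the integer sign bit, so < 0 reports signbit() for all
    // inputs, including -0.0 and negative NaNs.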
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isinf([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
      {
#if __FINITE_MATH_ONLY__
        return {}; // false
#else
        return _SuperImpl::template _S_equal_to<_Tp, _Np>(
                 _SuperImpl::_S_abs(__x),
                 __vector_broadcast<_Np>(__infinity_v<_Tp>));
#endif
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isnormal(_SimdWrapper<_Tp, _Np> __x)
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
        const auto __minn
          = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__norm_min_v<_Tp>));
#if __FINITE_MATH_ONLY__
        return __absn >= __minn;
#else
        const auto __maxn
          = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__finite_max_v<_Tp>));
        return __minn <= __absn && __absn <= __maxn;
#endif
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static __fixed_size_storage_t<int, _Np>
      _S_fpclassify(_SimdWrapper<_Tp, _Np> __x)
      {
        using _I = __int_for_sizeof_t<_Tp>;
        const auto __xn
          = __vector_bitcast<_I>(__to_intrin(_SuperImpl::_S_abs(__x)));
        constexpr size_t _NI = sizeof(__xn) / sizeof(_I);
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __minn
          = __vector_bitcast<_I>(__vector_broadcast<_NI>(__norm_min_v<_Tp>));

        _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_normal
          = __vector_broadcast<_NI, _I>(FP_NORMAL);
#if !__FINITE_MATH_ONLY__
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __infn
          = __vector_bitcast<_I>(__vector_broadcast<_NI>(__infinity_v<_Tp>));
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_nan
          = __vector_broadcast<_NI, _I>(FP_NAN);
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_infinite
          = __vector_broadcast<_NI, _I>(FP_INFINITE);
#endif
#ifndef __FAST_MATH__
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_subnormal
          = __vector_broadcast<_NI, _I>(FP_SUBNORMAL);
#endif
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_zero
          = __vector_broadcast<_NI, _I>(FP_ZERO);

        __vector_type_t<_I, _NI>
          __tmp = __xn < __minn
#ifdef __FAST_MATH__
                    ? __fp_zero
#else
                    ? (__xn == 0 ? __fp_zero : __fp_subnormal)
#endif
#if __FINITE_MATH_ONLY__
                    : __fp_normal;
#else
                    : (__xn < __infn ? __fp_normal
                                     : (__xn == __infn ? __fp_infinite
                                                       : __fp_nan));
#endif

        if constexpr (sizeof(_I) == sizeof(int))
          {
            using _FixedInt = __fixed_size_storage_t<int, _Np>;
            const auto __as_int = __vector_bitcast<int, _Np>(__tmp);
            if constexpr (_FixedInt::_S_tuple_size == 1)
              return {__as_int};
            else if constexpr (_FixedInt::_S_tuple_size == 2
                                 && is_same_v<
                                      typename _FixedInt::_SecondType::_FirstAbi,
                                      simd_abi::scalar>)
              return {__extract<0, 2>(__as_int), __as_int[_Np - 1]};
            else if constexpr (_FixedInt::_S_tuple_size == 2)
              return {__extract<0, 2>(__as_int),
                      __auto_bitcast(__extract<1, 2>(__as_int))};
            else
              __assert_unreachable<_Tp>();
          }
        else if constexpr (_Np == 2 && sizeof(_I) == 8
                             && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 2)
          {
            const auto __aslong = __vector_bitcast<_LLong>(__tmp);
            return {int(__aslong[0]), {int(__aslong[1])}};
          }
#if _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (sizeof(_Tp) == 8 && sizeof(__tmp) == 32
                             && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
          return {_mm_packs_epi32(__to_intrin(__lo128(__tmp)),
                                  __to_intrin(__hi128(__tmp)))};
        else if constexpr (sizeof(_Tp) == 8 && sizeof(__tmp) == 64
                             && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
          return {_mm512_cvtepi64_epi32(__to_intrin(__tmp))};
#endif
        else if constexpr (__fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
          return {__call_with_subscripts<_Np>(
                    __vector_bitcast<_LLong>(__tmp),
                    [](auto... __l) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                      return __make_wrapper<int>(__l...);
                    })};
        else
          __assert_unreachable<_Tp>();
      }
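    // The classification builds a vector of FP_* constants with a nested
    // select ladder (|x| < norm_min selects zero/subnormal, < inf selects
    // normal, == inf selects infinite, else NaN) and then narrows it to the
    // __fixed_size_storage_t<int, _Np> return type, using pack/convert
    // instructions where available.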
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_increment(_SimdWrapper<_Tp, _Np>& __x)
      { __x = __x._M_data + 1; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_decrement(_SimdWrapper<_Tp, _Np>& __x)
      { __x = __x._M_data - 1; }
    template <typename _Tp, size_t _Np, typename _Up>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_set(_SimdWrapper<_Tp, _Np>& __v, int __i, _Up&& __x) noexcept
      { __v._M_set(__i, static_cast<_Up&&>(__x)); }
    template <typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_masked_assign(_SimdWrapper<_K, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
                       __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs)
      {
        if (__k._M_is_constprop_none_of())
          return;
        else if (__k._M_is_constprop_all_of())
          __lhs = __rhs;
        else
          __lhs = _CommonImpl::_S_blend(__k, __lhs, __rhs);
      }

    template <typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_masked_assign(_SimdWrapper<_K, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
                       __type_identity_t<_Tp> __rhs)
      {
        if (__k._M_is_constprop_none_of())
          return;
        else if (__k._M_is_constprop_all_of())
          __lhs = __vector_broadcast<_Np>(__rhs);
        else if (__builtin_constant_p(__rhs) && __rhs == 0)
          {
            if constexpr (!is_same_v<bool, _K>)
              __lhs._M_data
                = __andnot(__vector_bitcast<_Tp>(__k), __lhs._M_data);
            else
              __lhs
                = _CommonImpl::_S_blend(__k, __lhs, _SimdWrapper<_Tp, _Np>());
          }
        else
          __lhs = _CommonImpl::_S_blend(__k, __lhs,
                                        _SimdWrapper<_Tp, _Np>(
                                          __vector_broadcast<_Np>(__rhs)));
      }
    template <typename _Op, typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_masked_cassign(const _SimdWrapper<_K, _Np> __k,
                        _SimdWrapper<_Tp, _Np>& __lhs,
                        const __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs,
                        _Op __op)
      {
        if (__k._M_is_constprop_none_of())
          return;
        else if (__k._M_is_constprop_all_of())
          __lhs = __op(_SuperImpl{}, __lhs, __rhs);
        else
          __lhs = _CommonImpl::_S_blend(__k, __lhs,
                                        __op(_SuperImpl{}, __lhs, __rhs));
      }

    template <typename _Op, typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_masked_cassign(const _SimdWrapper<_K, _Np> __k,
                        _SimdWrapper<_Tp, _Np>& __lhs,
                        const __type_identity_t<_Tp> __rhs, _Op __op)
      { _S_masked_cassign(__k, __lhs, __vector_broadcast<_Np>(__rhs), __op); }
    template <template <typename> class _Op, typename _Tp, typename _K,
              size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_masked_unary(const _SimdWrapper<_K, _Np> __k,
                      const _SimdWrapper<_Tp, _Np> __v)
      {
        if (__k._M_is_constprop_none_of())
          return __v;
        auto __vv = _M_make_simd(__v);
        _Op<decltype(__vv)> __op;
        if (__k._M_is_constprop_all_of())
          return __data(__op(__vv));
        else if constexpr (is_same_v<_Op<void>, __increment<void>>)
          {
            static_assert(not std::is_same_v<_K, bool>);
            if constexpr (is_integral_v<_Tp>)
              return __v._M_data - __vector_bitcast<_Tp>(__k._M_data);
            else if constexpr (not __have_avx2)
              return __v._M_data
                       + __vector_bitcast<_Tp>(
                           __k._M_data & __builtin_bit_cast(_K, _Tp(1)));
          }
        else if constexpr (is_same_v<_Op<void>, __decrement<void>>)
          {
            static_assert(not std::is_same_v<_K, bool>);
            if constexpr (is_integral_v<_Tp>)
              return __v._M_data + __vector_bitcast<_Tp>(__k._M_data);
            else if constexpr (not __have_avx2)
              return __v._M_data
                       - __vector_bitcast<_Tp>(
                           __k._M_data & __builtin_bit_cast(_K, _Tp(1)));
          }
        return _CommonImpl::_S_blend(__k, __v, __data(__op(__vv)));
      }
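    // Shortcut rationale: a true lane of an integer mask reads as -1, so
    // __v - __k increments exactly the selected lanes and __v + __k
    // decrements them, with no blend. For floating-point (pre-AVX2),
    // masking the bit pattern of _Tp(1) with __k yields 1.0 in selected
    // lanes and +0.0 elsewhere, which is likewise added or subtracted
    // directly; with AVX2 a plain blend is the cheaper choice.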
  };

struct _MaskImplBuiltinMixin
{
  template <typename _Tp>
    using _TypeTag = _Tp*;

  template <typename _Up, size_t _ToN = 1>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
    _S_to_maskvector(bool __x)
    {
      static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
      return __x ? __vector_type_t<_Up, _ToN>{~_Up()}
                 : __vector_type_t<_Up, _ToN>{};
    }
  template <typename _Up, size_t _UpN = 0, size_t _Np, bool _Sanitized,
            size_t _ToN = _UpN == 0 ? _Np : _UpN>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
    _S_to_maskvector(_BitMask<_Np, _Sanitized> __x)
    {
      static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
      return __generate_vector<__vector_type_t<_Up, _ToN>>(
               [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                 if constexpr (__i < _Np)
                   return __x[__i] ? ~_Up() : _Up();
                 else
                   return _Up();
               });
    }
  template <typename _Up, size_t _UpN = 0, typename _Tp, size_t _Np,
            size_t _ToN = _UpN == 0 ? _Np : _UpN>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
    _S_to_maskvector(_SimdWrapper<_Tp, _Np> __x)
    {
      static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
      using _TW = _SimdWrapper<_Tp, _Np>;
      using _UW = _SimdWrapper<_Up, _ToN>;
      if constexpr (sizeof(_Up) == sizeof(_Tp) && sizeof(_TW) == sizeof(_UW))
        return __wrapper_bitcast<_Up, _ToN>(__x);
      else if constexpr (is_same_v<_Tp, bool>) // bits -> vector
        return _S_to_maskvector<_Up, _ToN>(_BitMask<_Np>(__x._M_data));
      else // vector -> vector
        return __generate_vector<__vector_type_t<_Up, _ToN>>(
                 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                   if constexpr (__i < _Np)
                     return _Up(__x[__i.value]);
                   else
                     return _Up();
                 });
    }
  template <typename _Tp, size_t _Np>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
    _S_to_bits(_SimdWrapper<_Tp, _Np> __x)
    {
      static_assert(!is_same_v<_Tp, bool>);
      static_assert(_Np <= __CHAR_BIT__ * sizeof(_ULLong));
      using _Up = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
      const auto __bools
        = __vector_bitcast<_Up>(__x) >> (sizeof(_Up) * __CHAR_BIT__ - 1);
      _ULLong __r = 0;
      __execute_n_times<_Np>(
        [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
          __r |= _ULLong(__bools[__i.value]) << __i;
        });
      return __r;
    }
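  // The unsigned shift by (lane bits - 1) moves each lane's sign bit down
  // to bit 0, so __bools holds 1 in true lanes and 0 in false lanes; the
  // loop then packs those into the _ULLong bit representation, one bit per
  // lane.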
};

template <typename _Abi, typename>
  struct _MaskImplBuiltin : _MaskImplBuiltinMixin
  {
    using _MaskImplBuiltinMixin::_S_to_bits;
    using _MaskImplBuiltinMixin::_S_to_maskvector;

    template <typename _Tp>
      using _SimdMember = typename _Abi::template __traits<_Tp>::_SimdMember;

    template <typename _Tp>
      using _MaskMember = typename _Abi::template _MaskMember<_Tp>;

    using _SuperImpl = typename _Abi::_MaskImpl;
    using _CommonImpl = typename _Abi::_CommonImpl;

    template <typename _Tp>
      static constexpr size_t _S_size = simd_size_v<_Tp, _Abi>;
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_broadcast(bool __x)
      {
        return __x ? _Abi::template _S_implicit_mask<_Tp>()
                   : _MaskMember<_Tp>();
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_load(const bool* __mem)
      {
        using _I = __int_for_sizeof_t<_Tp>;
        if (not __builtin_is_constant_evaluated())
          if constexpr (sizeof(_Tp) == sizeof(bool))
            {
              const auto __bools
                = _CommonImpl::template _S_load<_I, _S_size<_Tp>>(__mem);
              // bool is {0, 1}, everything else is UB
              return __bools > 0;
            }
        return __generate_vector<_I, _S_size<_Tp>>(
                 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                   return __mem[__i] ? ~_I() : _I();
                 });
      }
    template <typename _Tp, size_t _Np, bool _Sanitized>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(_BitMask<_Np, _Sanitized> __x)
      {
        if constexpr (__is_builtin_bitmask_abi<_Abi>())
          return _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>(__x._M_to_bits());
        else
          return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
                                                       _S_size<_Tp>>(
                   __x._M_sanitized());
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(_SimdWrapper<bool, _Np> __x)
      {
        if constexpr (__is_builtin_bitmask_abi<_Abi>())
          return _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>(__x._M_data);
        else
          return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
                                                       _S_size<_Tp>>(
                   _BitMask<_Np>(__x._M_data)._M_sanitized());
      }
    template <typename _Tp, typename _Up, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(_SimdWrapper<_Up, _Np> __x)
      {
        if constexpr (__is_builtin_bitmask_abi<_Abi>())
          return _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>(
                   _SuperImpl::_S_to_bits(__x));
        else
          return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
                                                       _S_size<_Tp>>(__x);
      }

    template <typename _Tp, typename _Up, typename _UAbi>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(simd_mask<_Up, _UAbi> __x)
      {
        if constexpr (__is_builtin_bitmask_abi<_Abi>())
          {
            using _R = _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>;
            if constexpr (__is_builtin_bitmask_abi<_UAbi>())
              return _R(__data(__x));
            else if constexpr (__is_scalar_abi<_UAbi>())
              return _R(__data(__x));
            else if constexpr (__is_fixed_size_abi_v<_UAbi>)
              return _R(__data(__x)._M_to_bits());
            else
              return _R(_UAbi::_MaskImpl::_S_to_bits(__data(__x))._M_to_bits());
          }
        else
          return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
                                                       _S_size<_Tp>>(
                   __data(__x));
      }
    template <typename _Tp, size_t _Np>
      static inline _SimdWrapper<_Tp, _Np>
      _S_masked_load(_SimdWrapper<_Tp, _Np> __merge,
                     _SimdWrapper<_Tp, _Np> __mask, const bool* __mem) noexcept
      {
        auto __tmp = __wrapper_bitcast<__int_for_sizeof_t<_Tp>>(__merge);
        _BitOps::_S_bit_iteration(
          _SuperImpl::_S_to_bits(__mask),
          [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            __tmp._M_set(__i, -__mem[__i]);
          });
        __merge = __wrapper_bitcast<_Tp>(__tmp);
        return __merge;
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_store(_SimdWrapper<_Tp, _Np> __v, bool* __mem) noexcept
      {
        __execute_n_times<_Np>(
          [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            __mem[__i] = __v[__i];
          });
      }

    template <typename _Tp, size_t _Np>
      static inline void
      _S_masked_store(const _SimdWrapper<_Tp, _Np> __v, bool* __mem,
                      const _SimdWrapper<_Tp, _Np> __k) noexcept
      {
        _BitOps::_S_bit_iteration(
          _SuperImpl::_S_to_bits(__k),
          [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            __mem[__i] = __v[__i];
          });
      }
    template <size_t _Np, typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_from_bitmask(_SanitizedBitMask<_Np> __bits, _TypeTag<_Tp>)
      { return _SuperImpl::template _S_to_maskvector<_Tp, _S_size<_Tp>>(__bits); }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_logical_and(const _SimdWrapper<_Tp, _Np>& __x,
                     const _SimdWrapper<_Tp, _Np>& __y)
      { return __and(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_logical_or(const _SimdWrapper<_Tp, _Np>& __x,
                    const _SimdWrapper<_Tp, _Np>& __y)
      { return __or(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_not(const _SimdWrapper<_Tp, _Np>& __x)
      {
        if constexpr (_Abi::template _S_is_partial<_Tp>)
          return __andnot(__x, __wrapper_bitcast<_Tp>(
                                 _Abi::template _S_implicit_mask<_Tp>()));
        else
          return __not(__x._M_data);
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_and(const _SimdWrapper<_Tp, _Np>& __x,
                 const _SimdWrapper<_Tp, _Np>& __y)
      { return __and(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_or(const _SimdWrapper<_Tp, _Np>& __x,
                const _SimdWrapper<_Tp, _Np>& __y)
      { return __or(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_xor(const _SimdWrapper<_Tp, _Np>& __x,
                 const _SimdWrapper<_Tp, _Np>& __y)
      { return __xor(__x._M_data, __y._M_data); }
    template <typename _Tp, size_t _Np>
      static constexpr void
      _S_set(_SimdWrapper<_Tp, _Np>& __k, int __i, bool __x) noexcept
      {
        if constexpr (is_same_v<_Tp, bool>)
          __k._M_set(__i, __x);
        else
          {
            static_assert(is_same_v<_Tp, __int_for_sizeof_t<_Tp>>);
            if (__builtin_is_constant_evaluated())
              {
                __k = __generate_from_n_evaluations<_Np,
                                                    __vector_type_t<_Tp, _Np>>(
                        [&](auto __j) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                          if (__i == static_cast<int>(__j))
                            return _Tp(-__x);
                          else
                            return __k[+__j];
                        });
              }
            else
              __k._M_data[__i] = -__x;
          }
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_assign(_SimdWrapper<_Tp, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
                       __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs)
      { __lhs = _CommonImpl::_S_blend(__k, __lhs, __rhs); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_assign(_SimdWrapper<_Tp, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
                       bool __rhs)
      {
        if (__builtin_constant_p(__rhs))
          {
            if (__rhs == false)
              __lhs = __andnot(__k, __lhs);
            else
              __lhs = __or(__k, __lhs);
            return;
          }
        __lhs = _CommonImpl::_S_blend(__k, __lhs,
                                      __data(simd_mask<_Tp, _Abi>(__rhs)));
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_all_of(simd_mask<_Tp, _Abi> __k)
      {
        return __call_with_subscripts(
                 __data(__k), make_index_sequence<_S_size<_Tp>>(),
                 [](const auto... __ent) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
                 { return (... && !(__ent == 0)); });
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_any_of(simd_mask<_Tp, _Abi> __k)
      {
        return __call_with_subscripts(
                 __data(__k), make_index_sequence<_S_size<_Tp>>(),
                 [](const auto... __ent) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
                 { return (... || !(__ent == 0)); });
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_none_of(simd_mask<_Tp, _Abi> __k)
      {
        return __call_with_subscripts(
                 __data(__k), make_index_sequence<_S_size<_Tp>>(),
                 [](const auto... __ent) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
                 { return (... && (__ent == 0)); });
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_some_of(simd_mask<_Tp, _Abi> __k)
      {
        const int __n_true = _SuperImpl::_S_popcount(__k);
        return __n_true > 0 && __n_true < int(_S_size<_Tp>);
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_popcount(simd_mask<_Tp, _Abi> __k)
      {
        using _I = __int_for_sizeof_t<_Tp>;
        if constexpr (is_default_constructible_v<simd<_I, _Abi>>)
          return -reduce(simd<_I, _Abi>(__private_init,
                                        __wrapper_bitcast<_I>(__data(__k))));
        else
          return -reduce(__bit_cast<rebind_simd_t<_I, simd<_Tp, _Abi>>>(
                   simd<_Tp, _Abi>(__private_init, __data(__k))));
      }
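    // Each true lane of the mask reads as -1 when reinterpreted as a signed
    // integer, so summing all lanes yields -popcount; hence the negated
    // reduce.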
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_find_first_set(simd_mask<_Tp, _Abi> __k)
      { return std::__countr_zero(_SuperImpl::_S_to_bits(__data(__k))._M_to_bits()); }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_find_last_set(simd_mask<_Tp, _Abi> __k)
      { return std::__bit_width(_SuperImpl::_S_to_bits(__data(__k))._M_to_bits()) - 1; }
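    // std::__bit_width(__bits) - 1 is the index of the most significant set
    // bit, matching __countr_zero for the least significant one in
    // _S_find_first_set.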
  };

_GLIBCXX_SIMD_END_NAMESPACE

#endif // __cplusplus >= 201703L
#endif // _GLIBCXX_EXPERIMENTAL_SIMD_ABIS_H_