// Definition of the public simd interfaces -*- C++ -*-

// Copyright (C) 2020-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_H
#define _GLIBCXX_EXPERIMENTAL_SIMD_H

#if __cplusplus >= 201703L

#include "simd_detail.h"
#include "numeric_traits.h"
#include <bit>
#include <bitset>
#ifdef _GLIBCXX_DEBUG_UB
#include <cstdio> // for stderr
#endif
#include <cstring>
#include <cmath>
#include <functional>
#include <iosfwd>
#include <utility>

#if _GLIBCXX_SIMD_X86INTRIN
#include <x86intrin.h>
#elif _GLIBCXX_SIMD_HAVE_NEON
#include <arm_neon.h>
#endif

/** @ingroup ts_simd
 * @{
 */
/* There are several closely related types, with the following naming
 * convention:
 * _Tp: vectorizable (arithmetic) type (or any type)
 * _TV: __vector_type_t<_Tp, _Np>
 * _TW: _SimdWrapper<_Tp, _Np>
 * _TI: __intrinsic_type_t<_Tp, _Np>
 * _TVT: _VectorTraits<_TV> or _VectorTraits<_TW>
 * If one additional type is needed, use _U instead of _T.
 * Otherwise use _T\d, _TV\d, _TW\d, _TI\d, _TVT\d.
 *
 * More naming conventions:
 * _Ap or _Abi: An ABI tag from the simd_abi namespace
 * _Ip: often used for integer types with sizeof(_Ip) == sizeof(_Tp),
 *      _IV, _IW as for _TV, _TW
 * _Np: number of elements (not bytes)
 * _Bytes: number of bytes
 *
 * Variable names:
 * __k: mask object (vector- or bitmask)
 */
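/* For concreteness, an illustrative (not normative) instantiation of these
 * conventions with _Tp = float and _Np = 4 on an SSE-class target:
 *   _TV  = __vector_type_t<float, 4>    // [[gnu::vector_size(16)]] float
 *   _TW  = _SimdWrapper<float, 4>       // wraps a _TV plus its element count
 *   _TI  = __intrinsic_type_t<float, 4> // __m128 when x86 intrinsics are used
 *   _TVT = _VectorTraits<_TV>           // value_type = float, _S_full_size = 4
 */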
_GLIBCXX_SIMD_BEGIN_NAMESPACE

#if !_GLIBCXX_SIMD_X86INTRIN
using __m128 [[__gnu__::__vector_size__(16)]] = float;
using __m128d [[__gnu__::__vector_size__(16)]] = double;
using __m128i [[__gnu__::__vector_size__(16)]] = long long;
using __m256 [[__gnu__::__vector_size__(32)]] = float;
using __m256d [[__gnu__::__vector_size__(32)]] = double;
using __m256i [[__gnu__::__vector_size__(32)]] = long long;
using __m512 [[__gnu__::__vector_size__(64)]] = float;
using __m512d [[__gnu__::__vector_size__(64)]] = double;
using __m512i [[__gnu__::__vector_size__(64)]] = long long;
#endif

namespace simd_abi {
// simd_abi forward declarations {{{
// implementation details:
struct _Scalar;

template <int _Np>
  struct _Fixed;

// There are two major ABIs that appear on different architectures.
// Both pack non-boolean values into an N-byte register
// -> #elements = N / sizeof(T)
// Masks differ:
// 1. Use value vector registers for masks (each element all-zero or all-one bits)
// 2. Use bitmasks (mask registers) with one bit per value in the corresponding
//    value vector
//
// Both can be partially used, masking off the rest when doing horizontal
// operations or operations that can trap (e.g. FP_INVALID or integer division
// by 0). This is encoded as the number of used bytes.
template <int _UsedBytes>
  struct _VecBuiltin;

template <int _UsedBytes>
  struct _VecBltnBtmsk;

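// An illustrative sketch of the difference (assuming x86 for concreteness):
// with _VecBuiltin<16>, a simd_mask<float> is itself a 16-byte vector whose
// elements are all-ones or all-zeros, whereas with _VecBltnBtmsk<64> the same
// mask is a bitmask (an __mmask16-style integer) holding one bit per element.
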
template <typename _Tp, int _Np>
  using _VecN = _VecBuiltin<sizeof(_Tp) * _Np>;

template <int _UsedBytes = 16>
  using _Sse = _VecBuiltin<_UsedBytes>;

template <int _UsedBytes = 32>
  using _Avx = _VecBuiltin<_UsedBytes>;

template <int _UsedBytes = 64>
  using _Avx512 = _VecBltnBtmsk<_UsedBytes>;

template <int _UsedBytes = 16>
  using _Neon = _VecBuiltin<_UsedBytes>;

// implementation-defined:
using __sse = _Sse<>;
using __avx = _Avx<>;
using __avx512 = _Avx512<>;
using __neon = _Neon<>;
using __neon128 = _Neon<16>;
using __neon64 = _Neon<8>;

// standard:
template <typename _Tp, size_t _Np, typename...>
  struct deduce;

template <int _Np>
  using fixed_size = _Fixed<_Np>;

using scalar = _Scalar;

// }}}
} // namespace simd_abi
// forward declarations is_simd(_mask), simd(_mask), simd_size {{{
template <typename _Tp>
  struct is_simd;

template <typename _Tp>
  struct is_simd_mask;

template <typename _Tp, typename _Abi>
  class simd;

template <typename _Tp, typename _Abi>
  class simd_mask;

template <typename _Tp, typename _Abi>
  struct simd_size;

// }}}
// load/store flags {{{
struct element_aligned_tag
{
  template <typename _Tp, typename _Up = typename _Tp::value_type>
    static constexpr size_t _S_alignment = alignof(_Up);

  template <typename _Tp, typename _Up>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _Up*
    _S_apply(_Up* __ptr)
    { return __ptr; }
};

struct vector_aligned_tag
{
  template <typename _Tp, typename _Up = typename _Tp::value_type>
    static constexpr size_t _S_alignment
      = std::__bit_ceil(sizeof(_Up) * _Tp::size());

  template <typename _Tp, typename _Up>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _Up*
    _S_apply(_Up* __ptr)
    {
      return static_cast<_Up*>(
	__builtin_assume_aligned(__ptr, _S_alignment<_Tp, _Up>));
    }
};

template <size_t _Np> struct overaligned_tag
{
  template <typename _Tp, typename _Up = typename _Tp::value_type>
    static constexpr size_t _S_alignment = _Np;

  template <typename _Tp, typename _Up>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _Up*
    _S_apply(_Up* __ptr)
    { return static_cast<_Up*>(__builtin_assume_aligned(__ptr, _Np)); }
};

inline constexpr element_aligned_tag element_aligned = {};

inline constexpr vector_aligned_tag vector_aligned = {};

template <size_t _Np>
  inline constexpr overaligned_tag<_Np> overaligned = {};

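// An illustrative sketch of how these tags are used (assuming the Parallelism
// TS 2 simd API; __buf and __v are hypothetical): the tag states the alignment
// the pointer passed to copy_from/copy_to is allowed to assume.
//
//   alignas(memory_alignment_v<simd<float>>) float __buf[64] = {};
//   simd<float> __v;
//   __v.copy_from(__buf, vector_aligned);      // full vector alignment assumed
//   __v.copy_from(__buf + 1, element_aligned); // only alignof(float) assumed
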
// }}}
template <size_t _Xp>
  using _SizeConstant = integral_constant<size_t, _Xp>;
// constexpr feature detection{{{
constexpr inline bool __have_mmx = _GLIBCXX_SIMD_HAVE_MMX;
constexpr inline bool __have_sse = _GLIBCXX_SIMD_HAVE_SSE;
constexpr inline bool __have_sse2 = _GLIBCXX_SIMD_HAVE_SSE2;
constexpr inline bool __have_sse3 = _GLIBCXX_SIMD_HAVE_SSE3;
constexpr inline bool __have_ssse3 = _GLIBCXX_SIMD_HAVE_SSSE3;
constexpr inline bool __have_sse4_1 = _GLIBCXX_SIMD_HAVE_SSE4_1;
constexpr inline bool __have_sse4_2 = _GLIBCXX_SIMD_HAVE_SSE4_2;
constexpr inline bool __have_xop = _GLIBCXX_SIMD_HAVE_XOP;
constexpr inline bool __have_avx = _GLIBCXX_SIMD_HAVE_AVX;
constexpr inline bool __have_avx2 = _GLIBCXX_SIMD_HAVE_AVX2;
constexpr inline bool __have_bmi = _GLIBCXX_SIMD_HAVE_BMI1;
constexpr inline bool __have_bmi2 = _GLIBCXX_SIMD_HAVE_BMI2;
constexpr inline bool __have_lzcnt = _GLIBCXX_SIMD_HAVE_LZCNT;
constexpr inline bool __have_sse4a = _GLIBCXX_SIMD_HAVE_SSE4A;
constexpr inline bool __have_fma = _GLIBCXX_SIMD_HAVE_FMA;
constexpr inline bool __have_fma4 = _GLIBCXX_SIMD_HAVE_FMA4;
constexpr inline bool __have_f16c = _GLIBCXX_SIMD_HAVE_F16C;
constexpr inline bool __have_popcnt = _GLIBCXX_SIMD_HAVE_POPCNT;
constexpr inline bool __have_avx512f = _GLIBCXX_SIMD_HAVE_AVX512F;
constexpr inline bool __have_avx512dq = _GLIBCXX_SIMD_HAVE_AVX512DQ;
constexpr inline bool __have_avx512vl = _GLIBCXX_SIMD_HAVE_AVX512VL;
constexpr inline bool __have_avx512bw = _GLIBCXX_SIMD_HAVE_AVX512BW;
constexpr inline bool __have_avx512dq_vl = __have_avx512dq && __have_avx512vl;
constexpr inline bool __have_avx512bw_vl = __have_avx512bw && __have_avx512vl;
constexpr inline bool __have_avx512bitalg = _GLIBCXX_SIMD_HAVE_AVX512BITALG;
constexpr inline bool __have_avx512vbmi2 = _GLIBCXX_SIMD_HAVE_AVX512VBMI2;
constexpr inline bool __have_avx512vbmi = _GLIBCXX_SIMD_HAVE_AVX512VBMI;
constexpr inline bool __have_avx512ifma = _GLIBCXX_SIMD_HAVE_AVX512IFMA;
constexpr inline bool __have_avx512cd = _GLIBCXX_SIMD_HAVE_AVX512CD;
constexpr inline bool __have_avx512vnni = _GLIBCXX_SIMD_HAVE_AVX512VNNI;
constexpr inline bool __have_avx512vpopcntdq = _GLIBCXX_SIMD_HAVE_AVX512VPOPCNTDQ;
constexpr inline bool __have_avx512vp2intersect = _GLIBCXX_SIMD_HAVE_AVX512VP2INTERSECT;

constexpr inline bool __have_neon = _GLIBCXX_SIMD_HAVE_NEON;
constexpr inline bool __have_neon_a32 = _GLIBCXX_SIMD_HAVE_NEON_A32;
constexpr inline bool __have_neon_a64 = _GLIBCXX_SIMD_HAVE_NEON_A64;
constexpr inline bool __support_neon_float =
#if defined __GCC_IEC_559
  __GCC_IEC_559 == 0;
#elif defined __FAST_MATH__
  true;
#else
  false;
#endif

#ifdef _ARCH_PWR10
constexpr inline bool __have_power10vec = true;
#else
constexpr inline bool __have_power10vec = false;
#endif
#ifdef __POWER9_VECTOR__
constexpr inline bool __have_power9vec = true;
#else
constexpr inline bool __have_power9vec = false;
#endif
#if defined __POWER8_VECTOR__
constexpr inline bool __have_power8vec = true;
#else
constexpr inline bool __have_power8vec = __have_power9vec;
#endif
#if defined __VSX__
constexpr inline bool __have_power_vsx = true;
#else
constexpr inline bool __have_power_vsx = __have_power8vec;
#endif
#if defined __ALTIVEC__
constexpr inline bool __have_power_vmx = true;
#else
constexpr inline bool __have_power_vmx = __have_power_vsx;
#endif

// }}}

namespace __detail
{
#ifdef math_errhandling
  // Determines _S_handle_fpexcept from math_errhandling if it is defined and expands to a constant
  // expression. math_errhandling may expand to an extern symbol, in which case a constexpr value
  // must be guessed.
  template <int = math_errhandling>
    constexpr bool __handle_fpexcept_impl(int)
    { return math_errhandling & MATH_ERREXCEPT; }
#endif

  // Fallback if math_errhandling doesn't work: with fast-math assume floating-point exceptions are
  // ignored, otherwise implement correct exception behavior.
  constexpr bool __handle_fpexcept_impl(float)
  {
#if defined __FAST_MATH__
    return false;
#else
    return true;
#endif
  }

  /// True if math functions must raise floating-point exceptions as specified by C17.
  static constexpr bool _S_handle_fpexcept = __handle_fpexcept_impl(0);

  constexpr std::uint_least64_t
  __floating_point_flags()
  {
    std::uint_least64_t __flags = 0;
    if constexpr (_S_handle_fpexcept)
      __flags |= 1;
#ifdef __FAST_MATH__
    __flags |= 1 << 1;
#elif __FINITE_MATH_ONLY__
    __flags |= 2 << 1;
#elif __GCC_IEC_559 < 2
    __flags |= 3 << 1;
#endif
    __flags |= (__FLT_EVAL_METHOD__ + 1) << 3;
    return __flags;
  }

  constexpr std::uint_least64_t
  __machine_flags()
  {
    if constexpr (__have_mmx || __have_sse)
      return __have_mmx
	       | (__have_sse << 1)
	       | (__have_sse2 << 2)
	       | (__have_sse3 << 3)
	       | (__have_ssse3 << 4)
	       | (__have_sse4_1 << 5)
	       | (__have_sse4_2 << 6)
	       | (__have_xop << 7)
	       | (__have_avx << 8)
	       | (__have_avx2 << 9)
	       | (__have_bmi << 10)
	       | (__have_bmi2 << 11)
	       | (__have_lzcnt << 12)
	       | (__have_sse4a << 13)
	       | (__have_fma << 14)
	       | (__have_fma4 << 15)
	       | (__have_f16c << 16)
	       | (__have_popcnt << 17)
	       | (__have_avx512f << 18)
	       | (__have_avx512dq << 19)
	       | (__have_avx512vl << 20)
	       | (__have_avx512bw << 21)
	       | (__have_avx512bitalg << 22)
	       | (__have_avx512vbmi2 << 23)
	       | (__have_avx512vbmi << 24)
	       | (__have_avx512ifma << 25)
	       | (__have_avx512cd << 26)
	       | (__have_avx512vnni << 27)
	       | (__have_avx512vpopcntdq << 28)
	       | (__have_avx512vp2intersect << 29);
    else if constexpr (__have_neon)
      return __have_neon
	       | (__have_neon_a32 << 1)
	       | (__have_neon_a64 << 2)
	       | (__support_neon_float << 3);
    else if constexpr (__have_power_vmx)
      return __have_power_vmx
	       | (__have_power_vsx << 1)
	       | (__have_power8vec << 2)
	       | (__have_power9vec << 3)
	       | (__have_power10vec << 4);
    else
      return 0;
  }

  namespace
  {
    struct _OdrEnforcer {};
  }

  template <std::uint_least64_t...>
    struct _MachineFlagsTemplate {};

  /**@internal
   * Use this type as the default template argument to all function templates
   * that are not declared always_inline. It ensures that a function
   * specialization that the compiler decides not to inline has a unique symbol
   * (_OdrEnforcer) or a symbol matching the machine/architecture flags
   * (_MachineFlagsTemplate). This helps to avoid ODR violations in cases where
   * users link TUs compiled with different flags. This is especially important
   * for using simd in libraries.
   */
  using __odr_helper
    = conditional_t<__machine_flags() == 0, _OdrEnforcer,
		    _MachineFlagsTemplate<__machine_flags(), __floating_point_flags()>>;
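
  // A minimal usage sketch (hypothetical function, not part of this header):
  // adding __odr_helper as a defaulted template parameter makes the emitted
  // symbol depend on the flags the TU was compiled with, so two TUs compiled
  // with different -m options cannot silently share one out-of-line instance.
  //
  //   template <typename _Tp, typename = __detail::__odr_helper>
  //     _Tp
  //     __not_always_inlined_helper(_Tp __x)
  //     { return __x; }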

  struct _Minimum
  {
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
      operator()(_Tp __a, _Tp __b) const
      {
	using std::min;
	return min(__a, __b);
      }
  };

  struct _Maximum
  {
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
      operator()(_Tp __a, _Tp __b) const
      {
	using std::max;
	return max(__a, __b);
      }
  };
} // namespace __detail

// unrolled/pack execution helpers
// __execute_n_times{{{
template <typename _Fp, size_t... _I>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr void
  __execute_on_index_sequence(_Fp&& __f, index_sequence<_I...>)
  { ((void)__f(_SizeConstant<_I>()), ...); }

template <typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __execute_on_index_sequence(_Fp&&, index_sequence<>)
  { }

template <size_t _Np, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __execute_n_times(_Fp&& __f)
  {
    __execute_on_index_sequence(static_cast<_Fp&&>(__f),
				make_index_sequence<_Np>{});
  }

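// Illustrative use (hypothetical values): the callback is invoked with
// _SizeConstant<0> ... _SizeConstant<3>, i.e. a compile-time-unrolled loop.
//
//   int __arr[4] = {1, 2, 3, 4}, __sum = 0;
//   __execute_n_times<4>([&](auto __i) { __sum += __arr[__i]; }); // __sum == 10
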
// }}}
// __generate_from_n_evaluations{{{
template <typename _R, typename _Fp, size_t... _I>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr _R
  __execute_on_index_sequence_with_return(_Fp&& __f, index_sequence<_I...>)
  { return _R{__f(_SizeConstant<_I>())...}; }

template <size_t _Np, typename _R, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr _R
  __generate_from_n_evaluations(_Fp&& __f)
  {
    return __execute_on_index_sequence_with_return<_R>(
      static_cast<_Fp&&>(__f), make_index_sequence<_Np>{});
  }

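// Illustrative use (hypothetical): braced-initializes an _R from _Np calls,
// here an array of the first four squares.
//
//   auto __sq = __generate_from_n_evaluations<4, array<int, 4>>(
//     [](auto __i) { return int(__i) * int(__i); }); // {0, 1, 4, 9}
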
// }}}
// __call_with_n_evaluations{{{
template <size_t... _I, typename _F0, typename _FArgs>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __call_with_n_evaluations(index_sequence<_I...>, _F0&& __f0, _FArgs&& __fargs)
  { return __f0(__fargs(_SizeConstant<_I>())...); }

template <size_t _Np, typename _F0, typename _FArgs>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __call_with_n_evaluations(_F0&& __f0, _FArgs&& __fargs)
  {
    return __call_with_n_evaluations(make_index_sequence<_Np>{},
				     static_cast<_F0&&>(__f0),
				     static_cast<_FArgs&&>(__fargs));
  }

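// Illustrative use (hypothetical): __fargs produces one argument per index and
// __f0 consumes the whole pack, here computing __x[0] + ... + __x[3].
//
//   int __x[4] = {1, 2, 3, 4};
//   int __sum = __call_with_n_evaluations<4>(
//     [](auto... __vv) { return (__vv + ...); },
//     [&](auto __i) { return __x[__i]; }); // __sum == 10
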
// }}}
// __call_with_subscripts{{{
template <size_t _First = 0, size_t... _It, typename _Tp, typename _Fp>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __call_with_subscripts(_Tp&& __x, index_sequence<_It...>, _Fp&& __fun)
  { return __fun(__x[_First + _It]...); }

template <size_t _Np, size_t _First = 0, typename _Tp, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __call_with_subscripts(_Tp&& __x, _Fp&& __fun)
  {
    return __call_with_subscripts<_First>(static_cast<_Tp&&>(__x),
					  make_index_sequence<_Np>(),
					  static_cast<_Fp&&>(__fun));
  }

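// Illustrative use (hypothetical __v): passes _Np subscripts starting at
// _First to the callback, here __v[1] and __v[2].
//
//   auto __middle_sum = __call_with_subscripts<2, 1>(
//     __v, [](auto... __elems) { return (__elems + ...); });
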
// }}}

// vvv ---- type traits ---- vvv
// integer type aliases{{{
using _UChar = unsigned char;
using _SChar = signed char;
using _UShort = unsigned short;
using _UInt = unsigned int;
using _ULong = unsigned long;
using _ULLong = unsigned long long;
using _LLong = long long;

//}}}
// __first_of_pack{{{
template <typename _T0, typename...>
  struct __first_of_pack
  { using type = _T0; };

template <typename... _Ts>
  using __first_of_pack_t = typename __first_of_pack<_Ts...>::type;

//}}}
// __value_type_or_identity_t {{{
template <typename _Tp>
  typename _Tp::value_type
  __value_type_or_identity_impl(int);

template <typename _Tp>
  _Tp
  __value_type_or_identity_impl(float);

template <typename _Tp>
  using __value_type_or_identity_t
    = decltype(__value_type_or_identity_impl<_Tp>(int()));

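// Illustrative behavior: for class types with a value_type member the alias
// resolves to that member type (the int overload wins), otherwise overload
// resolution falls back to the float overload and yields _Tp itself.
//
//   __value_type_or_identity_t<simd<float>> -> float
//   __value_type_or_identity_t<int>         -> int
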
// }}}
// __is_vectorizable {{{
template <typename _Tp>
  struct __is_vectorizable : public is_arithmetic<_Tp> {};

template <>
  struct __is_vectorizable<bool> : public false_type {};

template <typename _Tp>
  inline constexpr bool __is_vectorizable_v = __is_vectorizable<_Tp>::value;

// Deduces to a vectorizable type
template <typename _Tp, typename = enable_if_t<__is_vectorizable_v<_Tp>>>
  using _Vectorizable = _Tp;

// }}}
// _LoadStorePtr / __is_possible_loadstore_conversion {{{
template <typename _Ptr, typename _ValueType>
  struct __is_possible_loadstore_conversion
  : conjunction<__is_vectorizable<_Ptr>, __is_vectorizable<_ValueType>> {};

template <>
  struct __is_possible_loadstore_conversion<bool, bool> : true_type {};

// Deduces to a type allowed for load/store with the given value type.
template <typename _Ptr, typename _ValueType,
	  typename = enable_if_t<
	    __is_possible_loadstore_conversion<_Ptr, _ValueType>::value>>
  using _LoadStorePtr = _Ptr;

// }}}
// __is_bitmask{{{
template <typename _Tp, typename = void_t<>>
  struct __is_bitmask : false_type {};

template <typename _Tp>
  inline constexpr bool __is_bitmask_v = __is_bitmask<_Tp>::value;

// the __mmaskXX case:
template <typename _Tp>
  struct __is_bitmask<_Tp,
    void_t<decltype(declval<unsigned&>() = declval<_Tp>() & 1u)>>
  : true_type {};

// }}}
// __int_for_sizeof{{{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
template <size_t _Bytes>
  constexpr auto
  __int_for_sizeof()
  {
    if constexpr (_Bytes == sizeof(int))
      return int();
  #ifdef __clang__
    else if constexpr (_Bytes == sizeof(char))
      return char();
  #else
    else if constexpr (_Bytes == sizeof(_SChar))
      return _SChar();
  #endif
    else if constexpr (_Bytes == sizeof(short))
      return short();
  #ifndef __clang__
    else if constexpr (_Bytes == sizeof(long))
      return long();
  #endif
    else if constexpr (_Bytes == sizeof(_LLong))
      return _LLong();
  #ifdef __SIZEOF_INT128__
    else if constexpr (_Bytes == sizeof(__int128))
      return __int128();
  #endif // __SIZEOF_INT128__
    else if constexpr (_Bytes % sizeof(int) == 0)
      {
	constexpr size_t _Np = _Bytes / sizeof(int);
	struct _Ip
	{
	  int _M_data[_Np];

	  _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
	  operator&(_Ip __rhs) const
	  {
	    return __generate_from_n_evaluations<_Np, _Ip>(
	      [&](auto __i) { return __rhs._M_data[__i] & _M_data[__i]; });
	  }

	  _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
	  operator|(_Ip __rhs) const
	  {
	    return __generate_from_n_evaluations<_Np, _Ip>(
	      [&](auto __i) { return __rhs._M_data[__i] | _M_data[__i]; });
	  }

	  _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
	  operator^(_Ip __rhs) const
	  {
	    return __generate_from_n_evaluations<_Np, _Ip>(
	      [&](auto __i) { return __rhs._M_data[__i] ^ _M_data[__i]; });
	  }

	  _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
	  operator~() const
	  {
	    return __generate_from_n_evaluations<_Np, _Ip>(
	      [&](auto __i) { return ~_M_data[__i]; });
	  }
	};
	return _Ip{};
      }
    else
      static_assert(_Bytes != _Bytes, "this should be unreachable");
  }
#pragma GCC diagnostic pop

template <typename _Tp>
  using __int_for_sizeof_t = decltype(__int_for_sizeof<sizeof(_Tp)>());

template <size_t _Np>
  using __int_with_sizeof_t = decltype(__int_for_sizeof<_Np>());

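// Illustrative behavior on a typical LP64 target (integer sizes are
// implementation-defined, so this is not normative):
//
//   __int_for_sizeof_t<float>  -> int                         (4 bytes)
//   __int_for_sizeof_t<double> -> long (long long with Clang) (8 bytes)
//   __int_with_sizeof_t<64>    -> _Ip with int _M_data[16] and ~, &, |, ^
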
// }}}
// __is_fixed_size_abi{{{
template <typename _Tp>
  struct __is_fixed_size_abi : false_type {};

template <int _Np>
  struct __is_fixed_size_abi<simd_abi::fixed_size<_Np>> : true_type {};

template <typename _Tp>
  inline constexpr bool __is_fixed_size_abi_v = __is_fixed_size_abi<_Tp>::value;

// }}}
// __is_scalar_abi {{{
template <typename _Abi>
  constexpr bool
  __is_scalar_abi()
  { return is_same_v<simd_abi::scalar, _Abi>; }

// }}}
// __abi_bytes_v {{{
template <template <int> class _Abi, int _Bytes>
  constexpr int
  __abi_bytes_impl(_Abi<_Bytes>*)
  { return _Bytes; }

template <typename _Tp>
  constexpr int
  __abi_bytes_impl(_Tp*)
  { return -1; }

template <typename _Abi>
  inline constexpr int __abi_bytes_v
    = __abi_bytes_impl(static_cast<_Abi*>(nullptr));

// }}}
// __is_builtin_bitmask_abi {{{
template <typename _Abi>
  constexpr bool
  __is_builtin_bitmask_abi()
  { return is_same_v<simd_abi::_VecBltnBtmsk<__abi_bytes_v<_Abi>>, _Abi>; }

// }}}
// __is_sse_abi {{{
template <typename _Abi>
  constexpr bool
  __is_sse_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes <= 16 && is_same_v<simd_abi::_VecBuiltin<_Bytes>, _Abi>;
  }

// }}}
// __is_avx_abi {{{
template <typename _Abi>
  constexpr bool
  __is_avx_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes > 16 && _Bytes <= 32
	     && is_same_v<simd_abi::_VecBuiltin<_Bytes>, _Abi>;
  }

// }}}
// __is_avx512_abi {{{
template <typename _Abi>
  constexpr bool
  __is_avx512_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes <= 64 && is_same_v<simd_abi::_Avx512<_Bytes>, _Abi>;
  }

// }}}
// __is_neon_abi {{{
template <typename _Abi>
  constexpr bool
  __is_neon_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes <= 16 && is_same_v<simd_abi::_VecBuiltin<_Bytes>, _Abi>;
  }

// }}}
// __make_dependent_t {{{
template <typename, typename _Up>
  struct __make_dependent
  { using type = _Up; };

template <typename _Tp, typename _Up>
  using __make_dependent_t = typename __make_dependent<_Tp, _Up>::type;

// }}}
// ^^^ ---- type traits ---- ^^^

// __invoke_ub{{{
template <typename... _Args>
  [[noreturn]] _GLIBCXX_SIMD_ALWAYS_INLINE void
  __invoke_ub([[maybe_unused]] const char* __msg,
	      [[maybe_unused]] const _Args&... __args)
  {
#ifdef _GLIBCXX_DEBUG_UB
    __builtin_fprintf(stderr, __msg, __args...);
    __builtin_trap();
#else
    __builtin_unreachable();
#endif
  }

// }}}
// __assert_unreachable{{{
template <typename _Tp>
  struct __assert_unreachable
  { static_assert(!is_same_v<_Tp, _Tp>, "this should be unreachable"); };

// }}}
// __size_or_zero_v {{{
template <typename _Tp, typename _Ap, size_t _Np = simd_size<_Tp, _Ap>::value>
  constexpr size_t
  __size_or_zero_dispatch(int)
  { return _Np; }

template <typename _Tp, typename _Ap>
  constexpr size_t
  __size_or_zero_dispatch(float)
  { return 0; }

template <typename _Tp, typename _Ap>
  inline constexpr size_t __size_or_zero_v
    = __size_or_zero_dispatch<_Tp, _Ap>(0);

// }}}
// __div_roundup {{{
inline constexpr size_t
__div_roundup(size_t __a, size_t __b)
{ return (__a + __b - 1) / __b; }

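// Worked values: __div_roundup(7, 4) == 2 and __div_roundup(8, 4) == 2,
// i.e. ceil(__a / __b) for positive arguments.
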
// }}}
// _ExactBool{{{
class _ExactBool
{
  const bool _M_data;

public:
  _GLIBCXX_SIMD_INTRINSIC constexpr _ExactBool(bool __b) : _M_data(__b) {}

  _ExactBool(int) = delete;

  _GLIBCXX_SIMD_INTRINSIC constexpr operator bool() const { return _M_data; }
};

// }}}
// __may_alias{{{
/**@internal
 * Helper __may_alias<_Tp> that turns _Tp into the type to be used for an
 * aliasing pointer. This adds the __may_alias attribute to _Tp (with compilers
 * that support it).
 */
template <typename _Tp>
  using __may_alias [[__gnu__::__may_alias__]] = _Tp;

// }}}
// _UnsupportedBase {{{
// simd and simd_mask base for unsupported <_Tp, _Abi>
struct _UnsupportedBase
{
  _UnsupportedBase() = delete;
  _UnsupportedBase(const _UnsupportedBase&) = delete;
  _UnsupportedBase& operator=(const _UnsupportedBase&) = delete;
  ~_UnsupportedBase() = delete;
};

// }}}
// _InvalidTraits {{{
/**
 * @internal
 * Defines the implementation of a given <_Tp, _Abi>.
 *
 * Implementations must ensure that only valid <_Tp, _Abi> instantiations are
 * possible. Static assertions in the type definition do not suffice. It is
 * important that SFINAE works.
 */
struct _InvalidTraits
{
  using _IsValid = false_type;
  using _SimdBase = _UnsupportedBase;
  using _MaskBase = _UnsupportedBase;

  static constexpr size_t _S_full_size = 0;
  static constexpr bool _S_is_partial = false;

  static constexpr size_t _S_simd_align = 1;
  struct _SimdImpl;
  struct _SimdMember {};
  struct _SimdCastType;

  static constexpr size_t _S_mask_align = 1;
  struct _MaskImpl;
  struct _MaskMember {};
  struct _MaskCastType;
};

// }}}
// _SimdTraits {{{
template <typename _Tp, typename _Abi, typename = void_t<>>
  struct _SimdTraits : _InvalidTraits {};

// }}}
// __private_init, __bitset_init{{{
/**
 * @internal
 * Tag used for private init constructor of simd and simd_mask
 */
inline constexpr struct _PrivateInit {} __private_init = {};

inline constexpr struct _BitsetInit {} __bitset_init = {};

// }}}
// __is_narrowing_conversion<_From, _To>{{{
template <typename _From, typename _To, bool = is_arithmetic_v<_From>,
	  bool = is_arithmetic_v<_To>>
  struct __is_narrowing_conversion;

// ignore "signed/unsigned mismatch" in the following trait.
// The implicit conversions will do the right thing here.
template <typename _From, typename _To>
  struct __is_narrowing_conversion<_From, _To, true, true>
  : public __bool_constant<(
      __digits_v<_From> > __digits_v<_To>
	|| __finite_max_v<_From> > __finite_max_v<_To>
	|| __finite_min_v<_From> < __finite_min_v<_To>
	|| (is_signed_v<_From> && is_unsigned_v<_To>))> {};

template <typename _Tp>
  struct __is_narrowing_conversion<_Tp, bool, true, true>
  : public true_type {};

template <>
  struct __is_narrowing_conversion<bool, bool, true, true>
  : public false_type {};

template <typename _Tp>
  struct __is_narrowing_conversion<_Tp, _Tp, true, true>
  : public false_type {};

template <typename _From, typename _To>
  struct __is_narrowing_conversion<_From, _To, false, true>
  : public negation<is_convertible<_From, _To>> {};

// }}}
// __converts_to_higher_integer_rank{{{
template <typename _From, typename _To, bool = (sizeof(_From) < sizeof(_To))>
  struct __converts_to_higher_integer_rank : public true_type {};

// this may fail for char -> short if sizeof(char) == sizeof(short)
template <typename _From, typename _To>
  struct __converts_to_higher_integer_rank<_From, _To, false>
  : public is_same<decltype(declval<_From>() + declval<_To>()), _To> {};

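// Illustrative behavior (not normative; results depend on the target's
// integer sizes):
//
//   __is_narrowing_conversion<int, short>::value         -> true
//   __is_narrowing_conversion<short, int>::value         -> false
//   __is_narrowing_conversion<int, unsigned>::value      -> true (signed -> unsigned)
//   __converts_to_higher_integer_rank<short, int>::value -> true
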
// }}}
// __data(simd/simd_mask) {{{
template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
  __data(const simd<_Tp, _Ap>& __x);

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
  __data(simd<_Tp, _Ap>& __x);

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
  __data(const simd_mask<_Tp, _Ap>& __x);

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
  __data(simd_mask<_Tp, _Ap>& __x);

// }}}
// _SimdConverter {{{
template <typename _FromT, typename _FromA, typename _ToT, typename _ToA,
	  typename = void>
  struct _SimdConverter;

template <typename _Tp, typename _Ap>
  struct _SimdConverter<_Tp, _Ap, _Tp, _Ap, void>
  {
    template <typename _Up>
      _GLIBCXX_SIMD_INTRINSIC const _Up&
      operator()(const _Up& __x)
      { return __x; }
  };

// }}}
// __to_value_type_or_member_type {{{
template <typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __to_value_type_or_member_type(const _V& __x) -> decltype(__data(__x))
  { return __data(__x); }

template <typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr const typename _V::value_type&
  __to_value_type_or_member_type(const typename _V::value_type& __x)
  { return __x; }

// }}}
// __bool_storage_member_type{{{
template <size_t _Size>
  struct __bool_storage_member_type;

template <size_t _Size>
  using __bool_storage_member_type_t =
    typename __bool_storage_member_type<_Size>::type;

// }}}
// _SimdTuple {{{
// why not tuple?
// 1. tuple gives no guarantee about the storage order, but I require storage
//    equivalent to array<_Tp, _Np>
// 2. direct access to the element type (first template argument)
// 3. enforces equal element type, only different _Abi types are allowed
template <typename _Tp, typename... _Abis>
  struct _SimdTuple;

//}}}
// __fixed_size_storage_t {{{
template <typename _Tp, int _Np>
  struct __fixed_size_storage;

template <typename _Tp, int _Np>
  using __fixed_size_storage_t = typename __fixed_size_storage<_Tp, _Np>::type;

// }}}
// _SimdWrapper fwd decl{{{
template <typename _Tp, size_t _Size, typename = void_t<>>
  struct _SimdWrapper;

template <typename _Tp>
  using _SimdWrapper8 = _SimdWrapper<_Tp, 8 / sizeof(_Tp)>;
template <typename _Tp>
  using _SimdWrapper16 = _SimdWrapper<_Tp, 16 / sizeof(_Tp)>;
template <typename _Tp>
  using _SimdWrapper32 = _SimdWrapper<_Tp, 32 / sizeof(_Tp)>;
template <typename _Tp>
  using _SimdWrapper64 = _SimdWrapper<_Tp, 64 / sizeof(_Tp)>;

// }}}
// __is_simd_wrapper {{{
template <typename _Tp>
  struct __is_simd_wrapper : false_type {};

template <typename _Tp, size_t _Np>
  struct __is_simd_wrapper<_SimdWrapper<_Tp, _Np>> : true_type {};

template <typename _Tp>
  inline constexpr bool __is_simd_wrapper_v = __is_simd_wrapper<_Tp>::value;

// }}}
// _BitOps {{{
struct _BitOps
{
  // _S_bit_iteration {{{
  template <typename _Tp, typename _Fp>
    static void
    _S_bit_iteration(_Tp __mask, _Fp&& __f)
    {
      static_assert(sizeof(_ULLong) >= sizeof(_Tp));
      conditional_t<sizeof(_Tp) <= sizeof(_UInt), _UInt, _ULLong> __k;
      if constexpr (is_convertible_v<_Tp, decltype(__k)>)
	__k = __mask;
      else
	__k = __mask.to_ullong();
      while (__k)
	{
	  __f(std::__countr_zero(__k));
	  __k &= (__k - 1);
	}
    }

  //}}}
};

//}}}
// __increment, __decrement {{{
template <typename _Tp = void>
  struct __increment
  { constexpr _Tp operator()(_Tp __a) const { return ++__a; } };

template <>
  struct __increment<void>
  {
    template <typename _Tp>
      constexpr _Tp
      operator()(_Tp __a) const
      { return ++__a; }
  };

template <typename _Tp = void>
  struct __decrement
  { constexpr _Tp operator()(_Tp __a) const { return --__a; } };

template <>
  struct __decrement<void>
  {
    template <typename _Tp>
      constexpr _Tp
      operator()(_Tp __a) const
      { return --__a; }
  };

// }}}
// _ValuePreserving(OrInt) {{{
template <typename _From, typename _To,
	  typename = enable_if_t<negation<
	    __is_narrowing_conversion<__remove_cvref_t<_From>, _To>>::value>>
  using _ValuePreserving = _From;

template <typename _From, typename _To,
	  typename _DecayedFrom = __remove_cvref_t<_From>,
	  typename = enable_if_t<conjunction<
	    is_convertible<_From, _To>,
	    disjunction<
	      is_same<_DecayedFrom, _To>, is_same<_DecayedFrom, int>,
	      conjunction<is_same<_DecayedFrom, _UInt>, is_unsigned<_To>>,
	      negation<__is_narrowing_conversion<_DecayedFrom, _To>>>>::value>>
  using _ValuePreservingOrInt = _From;

// }}}
// __intrinsic_type {{{
template <typename _Tp, size_t _Bytes, typename = void_t<>>
  struct __intrinsic_type;

template <typename _Tp, size_t _Size>
  using __intrinsic_type_t =
    typename __intrinsic_type<_Tp, _Size * sizeof(_Tp)>::type;

template <typename _Tp>
  using __intrinsic_type2_t = typename __intrinsic_type<_Tp, 2>::type;
template <typename _Tp>
  using __intrinsic_type4_t = typename __intrinsic_type<_Tp, 4>::type;
template <typename _Tp>
  using __intrinsic_type8_t = typename __intrinsic_type<_Tp, 8>::type;
template <typename _Tp>
  using __intrinsic_type16_t = typename __intrinsic_type<_Tp, 16>::type;
template <typename _Tp>
  using __intrinsic_type32_t = typename __intrinsic_type<_Tp, 32>::type;
template <typename _Tp>
  using __intrinsic_type64_t = typename __intrinsic_type<_Tp, 64>::type;

// }}}
// _BitMask {{{
template <size_t _Np, bool _Sanitized = false>
  struct _BitMask;

template <size_t _Np, bool _Sanitized>
  struct __is_bitmask<_BitMask<_Np, _Sanitized>, void> : true_type {};

template <size_t _Np>
  using _SanitizedBitMask = _BitMask<_Np, true>;

template <size_t _Np, bool _Sanitized>
  struct _BitMask
  {
    static_assert(_Np > 0);

    static constexpr size_t _NBytes = __div_roundup(_Np, __CHAR_BIT__);

    using _Tp = conditional_t<_Np == 1, bool,
			      make_unsigned_t<__int_with_sizeof_t<std::min(
				sizeof(_ULLong), std::__bit_ceil(_NBytes))>>>;

    static constexpr int _S_array_size = __div_roundup(_NBytes, sizeof(_Tp));

    _Tp _M_bits[_S_array_size];

    static constexpr int _S_unused_bits
      = _Np == 1 ? 0 : _S_array_size * sizeof(_Tp) * __CHAR_BIT__ - _Np;

    static constexpr _Tp _S_bitmask = +_Tp(~_Tp()) >> _S_unused_bits;

    constexpr _BitMask() noexcept = default;

    constexpr _BitMask(unsigned long long __x) noexcept
    : _M_bits{static_cast<_Tp>(__x)} {}

    _BitMask(bitset<_Np> __x) noexcept : _BitMask(__x.to_ullong()) {}

    constexpr _BitMask(const _BitMask&) noexcept = default;

    template <bool _RhsSanitized, typename = enable_if_t<_RhsSanitized == false
							    && _Sanitized == true>>
      constexpr _BitMask(const _BitMask<_Np, _RhsSanitized>& __rhs) noexcept
      : _BitMask(__rhs._M_sanitized()) {}

    constexpr operator _SimdWrapper<bool, _Np>() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    // precondition: is sanitized
    constexpr _Tp
    _M_to_bits() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    // precondition: is sanitized
    constexpr unsigned long long
    to_ullong() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    // precondition: is sanitized
    constexpr unsigned long
    to_ulong() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    constexpr bitset<_Np>
    _M_to_bitset() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    constexpr decltype(auto)
    _M_sanitized() const noexcept
    {
      if constexpr (_Sanitized)
	return *this;
      else if constexpr (_Np == 1)
	return _SanitizedBitMask<_Np>(_M_bits[0]);
      else
	{
	  _SanitizedBitMask<_Np> __r = {};
	  for (int __i = 0; __i < _S_array_size; ++__i)
	    __r._M_bits[__i] = _M_bits[__i];
	  if constexpr (_S_unused_bits > 0)
	    __r._M_bits[_S_array_size - 1] &= _S_bitmask;
	  return __r;
	}
    }

    template <size_t _Mp, bool _LSanitized>
      constexpr _BitMask<_Np + _Mp, _Sanitized>
      _M_prepend(_BitMask<_Mp, _LSanitized> __lsb) const noexcept
      {
	constexpr size_t _RN = _Np + _Mp;
	using _Rp = _BitMask<_RN, _Sanitized>;
	if constexpr (_Rp::_S_array_size == 1)
	  {
	    _Rp __r{{_M_bits[0]}};
	    __r._M_bits[0] <<= _Mp;
	    __r._M_bits[0] |= __lsb._M_sanitized()._M_bits[0];
	    return __r;
	  }
	else
	  __assert_unreachable<_Rp>();
      }

    // Return a new _BitMask with size _NewSize while dropping _DropLsb least
    // significant bits. If the operation implicitly produces a sanitized
    // bitmask, the result type will have _Sanitized set.
    template <size_t _DropLsb, size_t _NewSize = _Np - _DropLsb>
      constexpr auto
      _M_extract() const noexcept
      {
	static_assert(_Np > _DropLsb);
	static_assert(_DropLsb + _NewSize <= sizeof(_ULLong) * __CHAR_BIT__,
		      "not implemented for bitmasks larger than one ullong");
	if constexpr (_NewSize == 1)
	  // must sanitize because the return _Tp is bool
	  return _SanitizedBitMask<1>(_M_bits[0] & (_Tp(1) << _DropLsb));
	else
	  return _BitMask<_NewSize,
			  ((_NewSize + _DropLsb == sizeof(_Tp) * __CHAR_BIT__
			      && _NewSize + _DropLsb <= _Np)
			     || ((_Sanitized || _Np == sizeof(_Tp) * __CHAR_BIT__)
				   && _NewSize + _DropLsb >= _Np))>(
		   _M_bits[0] >> _DropLsb);
      }

    // True if all bits are set. Implicitly sanitizes if _Sanitized == false.
    constexpr bool
    all() const noexcept
    {
      if constexpr (_Np == 1)
	return _M_bits[0];
      else if constexpr (!_Sanitized)
	return _M_sanitized().all();
      else
	{
	  constexpr _Tp __allbits = ~_Tp();
	  for (int __i = 0; __i < _S_array_size - 1; ++__i)
	    if (_M_bits[__i] != __allbits)
	      return false;
	  return _M_bits[_S_array_size - 1] == _S_bitmask;
	}
    }

    // True if at least one bit is set. Implicitly sanitizes if
    // _Sanitized == false.
    constexpr bool
    any() const noexcept
    {
      if constexpr (_Np == 1)
	return _M_bits[0];
      else if constexpr (!_Sanitized)
	return _M_sanitized().any();
      else
	{
	  for (int __i = 0; __i < _S_array_size - 1; ++__i)
	    if (_M_bits[__i] != 0)
	      return true;
	  return _M_bits[_S_array_size - 1] != 0;
	}
    }

    // True if no bit is set. Implicitly sanitizes if _Sanitized == false.
    constexpr bool
    none() const noexcept
    {
      if constexpr (_Np == 1)
	return !_M_bits[0];
      else if constexpr (!_Sanitized)
	return _M_sanitized().none();
      else
	{
	  for (int __i = 0; __i < _S_array_size - 1; ++__i)
	    if (_M_bits[__i] != 0)
	      return false;
	  return _M_bits[_S_array_size - 1] == 0;
	}
    }

    // Returns the number of set bits. Implicitly sanitizes if
    // _Sanitized == false.
    constexpr int
    count() const noexcept
    {
      if constexpr (_Np == 1)
	return _M_bits[0];
      else if constexpr (!_Sanitized)
	return _M_sanitized().count();
      else
	{
	  int __result = __builtin_popcountll(_M_bits[0]);
	  for (int __i = 1; __i < _S_array_size; ++__i)
	    __result += __builtin_popcountll(_M_bits[__i]);
	  return __result;
	}
    }

    // Returns the bit at offset __i as bool.
    constexpr bool
    operator[](size_t __i) const noexcept
    {
      if constexpr (_Np == 1)
	return _M_bits[0];
      else if constexpr (_S_array_size == 1)
	return (_M_bits[0] >> __i) & 1;
      else
	{
	  const size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
	  const size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
	  return (_M_bits[__j] >> __shift) & 1;
	}
    }

    template <size_t __i>
      constexpr bool
      operator[](_SizeConstant<__i>) const noexcept
      {
	static_assert(__i < _Np);
	constexpr size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
	constexpr size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
	return static_cast<bool>(_M_bits[__j] & (_Tp(1) << __shift));
      }

    // Set the bit at offset __i to __x.
    constexpr void
    set(size_t __i, bool __x) noexcept
    {
      if constexpr (_Np == 1)
	_M_bits[0] = __x;
      else if constexpr (_S_array_size == 1)
	{
	  _M_bits[0] &= ~_Tp(_Tp(1) << __i);
	  _M_bits[0] |= _Tp(_Tp(__x) << __i);
	}
      else
	{
	  const size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
	  const size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
	  _M_bits[__j] &= ~_Tp(_Tp(1) << __shift);
	  _M_bits[__j] |= _Tp(_Tp(__x) << __shift);
	}
    }

    template <size_t __i>
      constexpr void
      set(_SizeConstant<__i>, bool __x) noexcept
      {
	static_assert(__i < _Np);
	if constexpr (_Np == 1)
	  _M_bits[0] = __x;
	else
	  {
	    constexpr size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
	    constexpr size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
	    constexpr _Tp __mask = ~_Tp(_Tp(1) << __shift);
	    _M_bits[__j] &= __mask;
	    _M_bits[__j] |= _Tp(_Tp(__x) << __shift);
	  }
      }

    // Inverts all bits. Sanitized input leads to sanitized output.
    constexpr _BitMask
    operator~() const noexcept
    {
      if constexpr (_Np == 1)
	return !_M_bits[0];
      else
	{
	  _BitMask __result{};
	  for (int __i = 0; __i < _S_array_size - 1; ++__i)
	    __result._M_bits[__i] = ~_M_bits[__i];
	  if constexpr (_Sanitized)
	    __result._M_bits[_S_array_size - 1]
	      = _M_bits[_S_array_size - 1] ^ _S_bitmask;
	  else
	    __result._M_bits[_S_array_size - 1] = ~_M_bits[_S_array_size - 1];
	  return __result;
	}
    }

    constexpr _BitMask&
    operator^=(const _BitMask& __b) & noexcept
    {
      __execute_n_times<_S_array_size>(
	[&](auto __i) { _M_bits[__i] ^= __b._M_bits[__i]; });
      return *this;
    }

    constexpr _BitMask&
    operator|=(const _BitMask& __b) & noexcept
    {
      __execute_n_times<_S_array_size>(
	[&](auto __i) { _M_bits[__i] |= __b._M_bits[__i]; });
      return *this;
    }

    constexpr _BitMask&
    operator&=(const _BitMask& __b) & noexcept
    {
      __execute_n_times<_S_array_size>(
	[&](auto __i) { _M_bits[__i] &= __b._M_bits[__i]; });
      return *this;
    }

    friend constexpr _BitMask
    operator^(const _BitMask& __a, const _BitMask& __b) noexcept
    {
      _BitMask __r = __a;
      __r ^= __b;
      return __r;
    }

    friend constexpr _BitMask
    operator|(const _BitMask& __a, const _BitMask& __b) noexcept
    {
      _BitMask __r = __a;
      __r |= __b;
      return __r;
    }

    friend constexpr _BitMask
    operator&(const _BitMask& __a, const _BitMask& __b) noexcept
    {
      _BitMask __r = __a;
      __r &= __b;
      return __r;
    }

    _GLIBCXX_SIMD_INTRINSIC
    constexpr bool
    _M_is_constprop() const
    {
      if constexpr (_S_array_size == 1)
	return __builtin_constant_p(_M_bits[0]);
      else
	{
	  for (int __i = 0; __i < _S_array_size; ++__i)
	    if (!__builtin_constant_p(_M_bits[__i]))
	      return false;
	  return true;
	}
    }
  };

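// An illustrative sketch of _BitMask usage (hypothetical values):
//
//   _BitMask<8> __k(0b1010'0001);
//   __k[0];                      // true: bit 0 is set
//   __k.count();                 // 3 set bits
//   __k._M_extract<4>();         // _BitMask<4> holding 0b1010
//   (~__k)._M_sanitized().all(); // false
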
// }}}

// vvv ---- builtin vector types [[gnu::vector_size(N)]] and operations ---- vvv
// __min_vector_size {{{
template <typename _Tp = void>
  static inline constexpr int __min_vector_size = 2 * sizeof(_Tp);

#if _GLIBCXX_SIMD_HAVE_NEON
template <>
  inline constexpr int __min_vector_size<void> = 8;
#else
template <>
  inline constexpr int __min_vector_size<void> = 16;
#endif

// }}}
// __vector_type {{{
template <typename _Tp, size_t _Np, typename = void>
  struct __vector_type_n {};

// substitution failure for the 0-element case
template <typename _Tp>
  struct __vector_type_n<_Tp, 0, void> {};

// special case 1-element to be _Tp itself
template <typename _Tp>
  struct __vector_type_n<_Tp, 1, enable_if_t<__is_vectorizable_v<_Tp>>>
  { using type = _Tp; };

// else, use GNU-style builtin vector types
template <typename _Tp, size_t _Np>
  struct __vector_type_n<_Tp, _Np,
			 enable_if_t<__is_vectorizable_v<_Tp> && _Np >= 2>>
  {
    static constexpr size_t _S_Np2 = std::__bit_ceil(_Np * sizeof(_Tp));

    static constexpr size_t _S_Bytes =
#ifdef __i386__
      // Using [[gnu::vector_size(8)]] would wreak havoc on the FPU because
      // those objects are passed via MMX registers and nothing ever calls EMMS.
      _S_Np2 == 8 ? 16 :
#endif
      _S_Np2 < __min_vector_size<_Tp> ? __min_vector_size<_Tp>
				      : _S_Np2;

    using type [[__gnu__::__vector_size__(_S_Bytes)]] = _Tp;
  };

template <typename _Tp, size_t _Bytes, size_t = _Bytes % sizeof(_Tp)>
  struct __vector_type;

template <typename _Tp, size_t _Bytes>
  struct __vector_type<_Tp, _Bytes, 0>
  : __vector_type_n<_Tp, _Bytes / sizeof(_Tp)> {};

template <typename _Tp, size_t _Size>
  using __vector_type_t = typename __vector_type_n<_Tp, _Size>::type;

template <typename _Tp>
  using __vector_type2_t = typename __vector_type<_Tp, 2>::type;
template <typename _Tp>
  using __vector_type4_t = typename __vector_type<_Tp, 4>::type;
template <typename _Tp>
  using __vector_type8_t = typename __vector_type<_Tp, 8>::type;
template <typename _Tp>
  using __vector_type16_t = typename __vector_type<_Tp, 16>::type;
template <typename _Tp>
  using __vector_type32_t = typename __vector_type<_Tp, 32>::type;
template <typename _Tp>
  using __vector_type64_t = typename __vector_type<_Tp, 64>::type;

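// Illustrative mappings (assuming a 16-byte minimum vector size, as on x86):
//
//   __vector_type_t<float, 1> -> float (scalar special case)
//   __vector_type_t<float, 4> -> float [[__gnu__::__vector_size__(16)]]
//   __vector_type_t<float, 3> -> float [[__gnu__::__vector_size__(16)]] (padded)
//   __vector_type16_t<int>    -> int   [[__gnu__::__vector_size__(16)]]
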
// }}}
// __is_vector_type {{{
template <typename _Tp, typename = void_t<>>
  struct __is_vector_type : false_type {};

template <typename _Tp>
  struct __is_vector_type<
    _Tp, void_t<typename __vector_type<
	   remove_reference_t<decltype(declval<_Tp>()[0])>, sizeof(_Tp)>::type>>
  : is_same<_Tp, typename __vector_type<
		   remove_reference_t<decltype(declval<_Tp>()[0])>,
		   sizeof(_Tp)>::type> {};

template <typename _Tp>
  inline constexpr bool __is_vector_type_v = __is_vector_type<_Tp>::value;

// }}}
// __is_intrinsic_type {{{
#if _GLIBCXX_SIMD_HAVE_SSE_ABI
template <typename _Tp>
  using __is_intrinsic_type = __is_vector_type<_Tp>;
#else // not SSE (x86)
template <typename _Tp, typename = void_t<>>
  struct __is_intrinsic_type : false_type {};

template <typename _Tp>
  struct __is_intrinsic_type<
    _Tp, void_t<typename __intrinsic_type<
	   remove_reference_t<decltype(declval<_Tp>()[0])>, sizeof(_Tp)>::type>>
  : is_same<_Tp, typename __intrinsic_type<
		   remove_reference_t<decltype(declval<_Tp>()[0])>,
		   sizeof(_Tp)>::type> {};
#endif

template <typename _Tp>
  inline constexpr bool __is_intrinsic_type_v = __is_intrinsic_type<_Tp>::value;

// }}}
// _VectorTraits{{{
template <typename _Tp, typename = void_t<>>
  struct _VectorTraitsImpl;

template <typename _Tp>
  struct _VectorTraitsImpl<_Tp, enable_if_t<__is_vector_type_v<_Tp>
					      || __is_intrinsic_type_v<_Tp>>>
  {
    using type = _Tp;
    using value_type = remove_reference_t<decltype(declval<_Tp>()[0])>;
    static constexpr int _S_full_size = sizeof(_Tp) / sizeof(value_type);
    using _Wrapper = _SimdWrapper<value_type, _S_full_size>;
    template <typename _Up, int _W = _S_full_size>
      static constexpr bool _S_is
	= is_same_v<value_type, _Up> && _W == _S_full_size;
  };

template <typename _Tp, size_t _Np>
  struct _VectorTraitsImpl<_SimdWrapper<_Tp, _Np>,
			   void_t<__vector_type_t<_Tp, _Np>>>
  {
    using type = __vector_type_t<_Tp, _Np>;
    using value_type = _Tp;
    static constexpr int _S_full_size = sizeof(type) / sizeof(value_type);
    using _Wrapper = _SimdWrapper<_Tp, _Np>;
    static constexpr bool _S_is_partial = (_Np < _S_full_size);
    static constexpr int _S_partial_width = _Np;
    template <typename _Up, int _W = _S_full_size>
      static constexpr bool _S_is
	= is_same_v<value_type, _Up> && _W == _S_full_size;
  };

template <typename _Tp, typename = typename _VectorTraitsImpl<_Tp>::type>
  using _VectorTraits = _VectorTraitsImpl<_Tp>;

// }}}
// __as_vector{{{
template <typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __as_vector(_V __x)
  {
    if constexpr (__is_vector_type_v<_V>)
      return __x;
    else if constexpr (is_simd<_V>::value || is_simd_mask<_V>::value)
      return __data(__x)._M_data;
    else if constexpr (__is_vectorizable_v<_V>)
      return __vector_type_t<_V, 2>{__x};
    else
      return __x._M_data;
  }

// }}}
// __as_wrapper{{{
template <size_t _Np = 0, typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __as_wrapper(_V __x)
  {
    if constexpr (__is_vector_type_v<_V>)
      return _SimdWrapper<typename _VectorTraits<_V>::value_type,
			  (_Np > 0 ? _Np : _VectorTraits<_V>::_S_full_size)>(__x);
    else if constexpr (is_simd<_V>::value || is_simd_mask<_V>::value)
      {
	static_assert(_V::size() == _Np);
	return __data(__x);
      }
    else
      {
	static_assert(_V::_S_size == _Np);
	return __x;
      }
  }

// }}}
// __intrin_bitcast{{{
template <typename _To, typename _From>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __intrin_bitcast(_From __v)
  {
    static_assert((__is_vector_type_v<_From> || __is_intrinsic_type_v<_From>)
		    && (__is_vector_type_v<_To> || __is_intrinsic_type_v<_To>));
    if constexpr (sizeof(_To) == sizeof(_From))
      return reinterpret_cast<_To>(__v);
    else if constexpr (sizeof(_From) > sizeof(_To))
      if constexpr (sizeof(_To) >= 16)
	return reinterpret_cast<const __may_alias<_To>&>(__v);
      else
	{
	  _To __r;
	  __builtin_memcpy(&__r, &__v, sizeof(_To));
	  return __r;
	}
#if _GLIBCXX_SIMD_X86INTRIN && !defined __clang__
    else if constexpr (__have_avx && sizeof(_From) == 16 && sizeof(_To) == 32)
      return reinterpret_cast<_To>(__builtin_ia32_ps256_ps(
	reinterpret_cast<__vector_type_t<float, 4>>(__v)));
    else if constexpr (__have_avx512f && sizeof(_From) == 16
			 && sizeof(_To) == 64)
      return reinterpret_cast<_To>(__builtin_ia32_ps512_ps(
	reinterpret_cast<__vector_type_t<float, 4>>(__v)));
    else if constexpr (__have_avx512f && sizeof(_From) == 32
			 && sizeof(_To) == 64)
      return reinterpret_cast<_To>(__builtin_ia32_ps512_256ps(
	reinterpret_cast<__vector_type_t<float, 8>>(__v)));
#endif // _GLIBCXX_SIMD_X86INTRIN
    else if constexpr (sizeof(__v) <= 8)
      return reinterpret_cast<_To>(
	__vector_type_t<__int_for_sizeof_t<_From>, sizeof(_To) / sizeof(_From)>{
	  reinterpret_cast<__int_for_sizeof_t<_From>>(__v)});
    else
      {
	static_assert(sizeof(_To) > sizeof(_From));
	_To __r = {};
	__builtin_memcpy(&__r, &__v, sizeof(_From));
	return __r;
      }
  }

// }}}
// __vector_bitcast{{{
template <typename _To, size_t _NN = 0, typename _From,
	  typename _FromVT = _VectorTraits<_From>,
	  size_t _Np = _NN == 0 ? sizeof(_From) / sizeof(_To) : _NN>
  _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_To, _Np>
  __vector_bitcast(_From __x)
  {
    using _R = __vector_type_t<_To, _Np>;
    return __intrin_bitcast<_R>(__x);
  }

template <typename _To, size_t _NN = 0, typename _Tp, size_t _Nx,
	  size_t _Np
	    = _NN == 0 ? sizeof(_SimdWrapper<_Tp, _Nx>) / sizeof(_To) : _NN>
  _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_To, _Np>
  __vector_bitcast(const _SimdWrapper<_Tp, _Nx>& __x)
  {
    static_assert(_Np > 1);
    return __intrin_bitcast<__vector_type_t<_To, _Np>>(__x._M_data);
  }

// }}}
// __convert_x86 declarations {{{
#ifdef _GLIBCXX_SIMD_WORKAROUND_PR85048
template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp, _Tp, _Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp,
		    _Tp, _Tp, _Tp, _Tp);
#endif // _GLIBCXX_SIMD_WORKAROUND_PR85048

//}}}
// __bit_cast {{{
template <typename _To, typename _From>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __bit_cast(const _From __x)
  {
#if __has_builtin(__builtin_bit_cast)
    return __builtin_bit_cast(_To, __x);
#else
    static_assert(sizeof(_To) == sizeof(_From));
    constexpr bool __to_is_vectorizable
      = is_arithmetic_v<_To> || is_enum_v<_To>;
    constexpr bool __from_is_vectorizable
      = is_arithmetic_v<_From> || is_enum_v<_From>;
    if constexpr (__is_vector_type_v<_To> && __is_vector_type_v<_From>)
      return reinterpret_cast<_To>(__x);
    else if constexpr (__is_vector_type_v<_To> && __from_is_vectorizable)
      {
	using _FV [[gnu::vector_size(sizeof(_From))]] = _From;
	return reinterpret_cast<_To>(_FV{__x});
      }
    else if constexpr (__to_is_vectorizable && __from_is_vectorizable)
      {
	using _TV [[gnu::vector_size(sizeof(_To))]] = _To;
	using _FV [[gnu::vector_size(sizeof(_From))]] = _From;
	return reinterpret_cast<_TV>(_FV{__x})[0];
      }
    else if constexpr (__to_is_vectorizable && __is_vector_type_v<_From>)
      {
	using _TV [[gnu::vector_size(sizeof(_To))]] = _To;
	return reinterpret_cast<_TV>(__x)[0];
      }
    else
      {
	_To __r;
	__builtin_memcpy(reinterpret_cast<char*>(&__r),
			 reinterpret_cast<const char*>(&__x), sizeof(_To));
	return __r;
      }
#endif
  }

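// Illustrative use (hypothetical values): reinterprets the object
// representation without undefined behavior, like C++20 std::bit_cast.
//
//   float __f = 1.f;
//   unsigned __bits = __bit_cast<unsigned>(__f); // 0x3f800000 on IEC 559 targets
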
// }}}
// __to_intrin {{{
template <typename _Tp, typename _TVT = _VectorTraits<_Tp>,
	  typename _R
	    = __intrinsic_type_t<typename _TVT::value_type, _TVT::_S_full_size>>
  _GLIBCXX_SIMD_INTRINSIC constexpr _R
  __to_intrin(_Tp __x)
  {
    static_assert(sizeof(__x) <= sizeof(_R),
		  "__to_intrin may never drop values off the end");
    if constexpr (sizeof(__x) == sizeof(_R))
      return reinterpret_cast<_R>(__as_vector(__x));
    else
      {
	using _Up = __int_for_sizeof_t<_Tp>;
	return reinterpret_cast<_R>(
	  __vector_type_t<_Up, sizeof(_R) / sizeof(_Up)>{__bit_cast<_Up>(__x)});
      }
  }

// }}}
// __make_vector{{{
template <typename _Tp, typename... _Args>
  _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, sizeof...(_Args)>
  __make_vector(const _Args&... __args)
  {
    return __vector_type_t<_Tp, sizeof...(_Args)>{static_cast<_Tp>(__args)...};
  }

// }}}
// __vector_broadcast{{{
template <size_t _Np, typename _Tp>
  _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
  __vector_broadcast(_Tp __x)
  {
    return __call_with_n_evaluations<_Np>(
      [](auto... __xx) { return __vector_type_t<_Tp, _Np>{__xx...}; },
      [&__x](int) { return __x; });
  }

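// Illustrative values (hypothetical):
//
//   __make_vector<int>(1, 2, 3, 4); // __vector_type_t<int, 4>{1, 2, 3, 4}
//   __vector_broadcast<4>(1.f);     // __vector_type_t<float, 4>{1, 1, 1, 1}
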
1804// }}}
1805// __generate_vector{{{
1806 template <typename _Tp, size_t _Np, typename _Gp, size_t... _I>
1807 _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
1808 __generate_vector_impl(_Gp&& __gen, index_sequence<_I...>)
1809 {
1810 return __vector_type_t<_Tp, _Np>{
1811 static_cast<_Tp>(__gen(_SizeConstant<_I>()))...};
1812 }
1813
1814template <typename _V, typename _VVT = _VectorTraits<_V>, typename _Gp>
1815 _GLIBCXX_SIMD_INTRINSIC constexpr _V
1816 __generate_vector(_Gp&& __gen)
1817 {
1818 if constexpr (__is_vector_type_v<_V>)
1819 return __generate_vector_impl<typename _VVT::value_type,
1820 _VVT::_S_full_size>(
1821 static_cast<_Gp&&>(__gen), make_index_sequence<_VVT::_S_full_size>());
1822 else
1823 return __generate_vector_impl<typename _VVT::value_type,
1824 _VVT::_S_partial_width>(
1825 static_cast<_Gp&&>(__gen),
1826 make_index_sequence<_VVT::_S_partial_width>());
1827 }
1828
1829template <typename _Tp, size_t _Np, typename _Gp>
1830 _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
1831 __generate_vector(_Gp&& __gen)
1832 {
1833 return __generate_vector_impl<_Tp, _Np>(static_cast<_Gp&&>(__gen),
1834 make_index_sequence<_Np>());
1835 }
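// Illustrative: the generator is invoked with _SizeConstant<_I> for each
// element index, so e.g.
//   __generate_vector<int, 4>([](auto __i) { return int(__i) * 2; })
// yields {0, 2, 4, 6}.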
1836
1837// }}}
1838// __xor{{{
1839template <typename _TW>
1840 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1841 __xor(_TW __a, _TW __b) noexcept
1842 {
1843 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1844 {
1845 using _Tp = typename conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1846 _VectorTraitsImpl<_TW>>::value_type;
1847 if constexpr (is_floating_point_v<_Tp>)
1848 {
1849 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
1850 return __vector_bitcast<_Tp>(__vector_bitcast<_Ip>(__a)
1851 ^ __vector_bitcast<_Ip>(__b));
1852 }
1853 else if constexpr (__is_vector_type_v<_TW>)
1854 return __a ^ __b;
1855 else
1856 return __a._M_data ^ __b._M_data;
1857 }
1858 else
1859 return __a ^ __b;
1860 }
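// Note: GCC's vector extensions do not define the bitwise operators for
// floating-point vectors, hence the round-trip through an integer vector of
// equal element size here and in __or/__and below.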
1861
1862// }}}
1863// __or{{{
1864template <typename _TW>
1865 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1866 __or(_TW __a, _TW __b) noexcept
1867 {
1868 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1869 {
1870 using _Tp = typename conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1871 _VectorTraitsImpl<_TW>>::value_type;
1872 if constexpr (is_floating_point_v<_Tp>)
1873 {
1874 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
1875 return __vector_bitcast<_Tp>(__vector_bitcast<_Ip>(__a)
1876 | __vector_bitcast<_Ip>(__b));
1877 }
1878 else if constexpr (__is_vector_type_v<_TW>)
1879 return __a | __b;
1880 else
1881 return __a._M_data | __b._M_data;
1882 }
1883 else
1884 return __a | __b;
1885 }
1886
1887// }}}
1888// __and{{{
1889template <typename _TW>
1890 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1891 __and(_TW __a, _TW __b) noexcept
1892 {
1893 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1894 {
1895 using _Tp = typename conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1896 _VectorTraitsImpl<_TW>>::value_type;
1897 if constexpr (is_floating_point_v<_Tp>)
1898 {
1899 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
1900 return __vector_bitcast<_Tp>(__vector_bitcast<_Ip>(__a)
1901 & __vector_bitcast<_Ip>(__b));
1902 }
1903 else if constexpr (__is_vector_type_v<_TW>)
1904 return __a & __b;
1905 else
1906 return __a._M_data & __b._M_data;
1907 }
1908 else
1909 return __a & __b;
1910 }
1911
1912// }}}
1913// __andnot{{{
1914#if _GLIBCXX_SIMD_X86INTRIN && !defined __clang__
1915static constexpr struct
1916{
1917 _GLIBCXX_SIMD_INTRINSIC __v4sf
1918 operator()(__v4sf __a, __v4sf __b) const noexcept
1919 { return __builtin_ia32_andnps(__a, __b); }
1920
1921 _GLIBCXX_SIMD_INTRINSIC __v2df
1922 operator()(__v2df __a, __v2df __b) const noexcept
1923 { return __builtin_ia32_andnpd(__a, __b); }
1924
1925 _GLIBCXX_SIMD_INTRINSIC __v2di
1926 operator()(__v2di __a, __v2di __b) const noexcept
1927 { return __builtin_ia32_pandn128(__a, __b); }
1928
1929 _GLIBCXX_SIMD_INTRINSIC __v8sf
1930 operator()(__v8sf __a, __v8sf __b) const noexcept
1931 { return __builtin_ia32_andnps256(__a, __b); }
1932
1933 _GLIBCXX_SIMD_INTRINSIC __v4df
1934 operator()(__v4df __a, __v4df __b) const noexcept
1935 { return __builtin_ia32_andnpd256(__a, __b); }
1936
1937 _GLIBCXX_SIMD_INTRINSIC __v4di
1938 operator()(__v4di __a, __v4di __b) const noexcept
1939 {
1940 if constexpr (__have_avx2)
1941 return __builtin_ia32_andnotsi256(__a, __b);
1942 else
1943 return reinterpret_cast<__v4di>(
1944 __builtin_ia32_andnpd256(reinterpret_cast<__v4df>(__a),
1945 reinterpret_cast<__v4df>(__b)));
1946 }
1947
1948 _GLIBCXX_SIMD_INTRINSIC __v16sf
1949 operator()(__v16sf __a, __v16sf __b) const noexcept
1950 {
1951 if constexpr (__have_avx512dq)
1952 return _mm512_andnot_ps(__a, __b);
1953 else
1954 return reinterpret_cast<__v16sf>(
1955 _mm512_andnot_si512(reinterpret_cast<__v8di>(__a),
1956 reinterpret_cast<__v8di>(__b)));
1957 }
1958
1959 _GLIBCXX_SIMD_INTRINSIC __v8df
1960 operator()(__v8df __a, __v8df __b) const noexcept
1961 {
1962 if constexpr (__have_avx512dq)
1963 return _mm512_andnot_pd(__a, __b);
1964 else
1965 return reinterpret_cast<__v8df>(
1966 _mm512_andnot_si512(reinterpret_cast<__v8di>(__a),
1967 reinterpret_cast<__v8di>(__b)));
1968 }
1969
1970 _GLIBCXX_SIMD_INTRINSIC __v8di
1971 operator()(__v8di __a, __v8di __b) const noexcept
1972 { return _mm512_andnot_si512(__a, __b); }
1973} _S_x86_andnot;
1974#endif // _GLIBCXX_SIMD_X86INTRIN && !__clang__
1975
1976template <typename _TW>
1977 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1978 __andnot(_TW __a, _TW __b) noexcept
1979 {
1980 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1981 {
1982 using _TVT = conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1983 _VectorTraitsImpl<_TW>>;
1984 using _Tp = typename _TVT::value_type;
1985#if _GLIBCXX_SIMD_X86INTRIN && !defined __clang__
1986 if constexpr (sizeof(_TW) >= 16)
1987 {
1988 const auto __ai = __to_intrin(__a);
1989 const auto __bi = __to_intrin(__b);
1990 if (!__builtin_is_constant_evaluated()
1991 && !(__builtin_constant_p(__ai) && __builtin_constant_p(__bi)))
1992 {
1993 const auto __r = _S_x86_andnot(__ai, __bi);
1994 if constexpr (is_convertible_v<decltype(__r), _TW>)
1995 return __r;
1996 else
1997 return reinterpret_cast<typename _TVT::type>(__r);
1998 }
1999 }
2000#endif // _GLIBCXX_SIMD_X86INTRIN && !__clang__
2001 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
2002 return __vector_bitcast<_Tp>(~__vector_bitcast<_Ip>(__a)
2003 & __vector_bitcast<_Ip>(__b));
2004 }
2005 else
2006 return ~__a & __b;
2007 }
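// Illustrative: __andnot follows the x86 ANDN operand order, i.e. the first
// operand is the one that is complemented:
//   __andnot(__k, __v) == (~__k & __v)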
2008
2009// }}}
2010// __not{{{
2011template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2012 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
2013 __not(_Tp __a) noexcept
2014 {
2015 if constexpr (is_floating_point_v<typename _TVT::value_type>)
2016 return reinterpret_cast<typename _TVT::type>(
2017 ~__vector_bitcast<unsigned>(__a));
2018 else
2019 return ~__a;
2020 }
2021
2022// }}}
2023// __concat{{{
2024template <typename _Tp, typename _TVT = _VectorTraits<_Tp>,
2025 typename _R = __vector_type_t<typename _TVT::value_type,
2026 _TVT::_S_full_size * 2>>
2027 constexpr _R
2028 __concat(_Tp a_, _Tp b_)
2029 {
2030#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_1
2031 using _W
2032 = conditional_t<is_floating_point_v<typename _TVT::value_type>, double,
2033 conditional_t<(sizeof(_Tp) >= 2 * sizeof(long long)),
2034 long long, typename _TVT::value_type>>;
2035 constexpr int input_width = sizeof(_Tp) / sizeof(_W);
2036 const auto __a = __vector_bitcast<_W>(a_);
2037 const auto __b = __vector_bitcast<_W>(b_);
2038 using _Up = __vector_type_t<_W, sizeof(_R) / sizeof(_W)>;
2039#else
2040 constexpr int input_width = _TVT::_S_full_size;
2041 const _Tp& __a = a_;
2042 const _Tp& __b = b_;
2043 using _Up = _R;
2044#endif
2045 if constexpr (input_width == 2)
2046 return reinterpret_cast<_R>(_Up{__a[0], __a[1], __b[0], __b[1]});
2047 else if constexpr (input_width == 4)
2048 return reinterpret_cast<_R>(
2049 _Up{__a[0], __a[1], __a[2], __a[3], __b[0], __b[1], __b[2], __b[3]});
2050 else if constexpr (input_width == 8)
2051 return reinterpret_cast<_R>(
2052 _Up{__a[0], __a[1], __a[2], __a[3], __a[4], __a[5], __a[6], __a[7],
2053 __b[0], __b[1], __b[2], __b[3], __b[4], __b[5], __b[6], __b[7]});
2054 else if constexpr (input_width == 16)
2055 return reinterpret_cast<_R>(
2056 _Up{__a[0], __a[1], __a[2], __a[3], __a[4], __a[5], __a[6],
2057 __a[7], __a[8], __a[9], __a[10], __a[11], __a[12], __a[13],
2058 __a[14], __a[15], __b[0], __b[1], __b[2], __b[3], __b[4],
2059 __b[5], __b[6], __b[7], __b[8], __b[9], __b[10], __b[11],
2060 __b[12], __b[13], __b[14], __b[15]});
2061 else if constexpr (input_width == 32)
2062 return reinterpret_cast<_R>(
2063 _Up{__a[0], __a[1], __a[2], __a[3], __a[4], __a[5], __a[6],
2064 __a[7], __a[8], __a[9], __a[10], __a[11], __a[12], __a[13],
2065 __a[14], __a[15], __a[16], __a[17], __a[18], __a[19], __a[20],
2066 __a[21], __a[22], __a[23], __a[24], __a[25], __a[26], __a[27],
2067 __a[28], __a[29], __a[30], __a[31], __b[0], __b[1], __b[2],
2068 __b[3], __b[4], __b[5], __b[6], __b[7], __b[8], __b[9],
2069 __b[10], __b[11], __b[12], __b[13], __b[14], __b[15], __b[16],
2070 __b[17], __b[18], __b[19], __b[20], __b[21], __b[22], __b[23],
2071 __b[24], __b[25], __b[26], __b[27], __b[28], __b[29], __b[30],
2072 __b[31]});
2073 }
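// Illustrative: __concat places the first argument in the lower half, e.g.
//   __vector_type_t<float, 4> __lo = ..., __hi = ...;
//   __vector_type_t<float, 8> __v = __concat(__lo, __hi);
//   // __v == {__lo[0..3], __hi[0..3]}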
2074
2075// }}}
2076// __zero_extend {{{
2077template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2078 struct _ZeroExtendProxy
2079 {
2080 using value_type = typename _TVT::value_type;
2081 static constexpr size_t _Np = _TVT::_S_full_size;
2082 const _Tp __x;
2083
2084 template <typename _To, typename _ToVT = _VectorTraits<_To>,
2085 typename
2086 = enable_if_t<is_same_v<typename _ToVT::value_type, value_type>>>
2087 _GLIBCXX_SIMD_INTRINSIC operator _To() const
2088 {
2089 constexpr size_t _ToN = _ToVT::_S_full_size;
2090 if constexpr (_ToN == _Np)
2091 return __x;
2092 else if constexpr (_ToN == 2 * _Np)
2093 {
2094#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_3
2095 if constexpr (__have_avx && _TVT::template _S_is<float, 4>)
2096 return __vector_bitcast<value_type>(
2097 _mm256_insertf128_ps(__m256(), __x, 0));
2098 else if constexpr (__have_avx && _TVT::template _S_is<double, 2>)
2099 return __vector_bitcast<value_type>(
2100 _mm256_insertf128_pd(__m256d(), __x, 0));
2101 else if constexpr (__have_avx2 && _Np * sizeof(value_type) == 16)
2102 return __vector_bitcast<value_type>(
2103 _mm256_insertf128_si256(__m256i(), __to_intrin(__x), 0));
2104 else if constexpr (__have_avx512f && _TVT::template _S_is<float, 8>)
2105 {
2106 if constexpr (__have_avx512dq)
2107 return __vector_bitcast<value_type>(
2108 _mm512_insertf32x8(__m512(), __x, 0));
2109 else
2110 return reinterpret_cast<__m512>(
2111 _mm512_insertf64x4(__m512d(),
2112 reinterpret_cast<__m256d>(__x), 0));
2113 }
2114 else if constexpr (__have_avx512f
2115 && _TVT::template _S_is<double, 4>)
2116 return __vector_bitcast<value_type>(
2117 _mm512_insertf64x4(__m512d(), __x, 0));
2118 else if constexpr (__have_avx512f && _Np * sizeof(value_type) == 32)
2119 return __vector_bitcast<value_type>(
2120 _mm512_inserti64x4(__m512i(), __to_intrin(__x), 0));
2121#endif
2122 return __concat(__x, _Tp());
2123 }
2124 else if constexpr (_ToN == 4 * _Np)
2125 {
2126#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_3
2127 if constexpr (__have_avx512dq && _TVT::template _S_is<double, 2>)
2128 {
2129 return __vector_bitcast<value_type>(
2130 _mm512_insertf64x2(__m512d(), __x, 0));
2131 }
2132 else if constexpr (__have_avx512f
2133 && is_floating_point_v<value_type>)
2134 {
2135 return __vector_bitcast<value_type>(
2136 _mm512_insertf32x4(__m512(), reinterpret_cast<__m128>(__x),
2137 0));
2138 }
2139 else if constexpr (__have_avx512f && _Np * sizeof(value_type) == 16)
2140 {
2141 return __vector_bitcast<value_type>(
2142 _mm512_inserti32x4(__m512i(), __to_intrin(__x), 0));
2143 }
2144#endif
2145 return __concat(__concat(__x, _Tp()),
2146 __vector_type_t<value_type, _Np * 2>());
2147 }
2148 else if constexpr (_ToN == 8 * _Np)
2149 return __concat(operator __vector_type_t<value_type, _Np * 4>(),
2150 __vector_type_t<value_type, _Np * 4>());
2151 else if constexpr (_ToN == 16 * _Np)
2152 return __concat(operator __vector_type_t<value_type, _Np * 8>(),
2153 __vector_type_t<value_type, _Np * 8>());
2154 else
2155 __assert_unreachable<_Tp>();
2156 }
2157 };
2158
2159template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2160 _GLIBCXX_SIMD_INTRINSIC _ZeroExtendProxy<_Tp, _TVT>
2161 __zero_extend(_Tp __x)
2162 { return {__x}; }
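// Illustrative: the returned proxy converts on demand to any wider vector of
// the same value_type, zero-filling the new upper elements, e.g.
//   __vector_type_t<float, 8> __w = __zero_extend(__x); // __x holds 4 floats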
2163
2164// }}}
2165// __extract<_Np, By>{{{
2166template <int _Offset,
2167 int _SplitBy,
2168 typename _Tp,
2169 typename _TVT = _VectorTraits<_Tp>,
2170 typename _R = __vector_type_t<typename _TVT::value_type,
2171 _TVT::_S_full_size / _SplitBy>>
2172 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2173 __extract(_Tp __in)
2174 {
2175 using value_type = typename _TVT::value_type;
2176#if _GLIBCXX_SIMD_X86INTRIN // {{{
2177 if constexpr (sizeof(_Tp) == 64 && _SplitBy == 4 && _Offset > 0)
2178 {
2179 if constexpr (__have_avx512dq && is_same_v<double, value_type>)
2180 return _mm512_extractf64x2_pd(__to_intrin(__in), _Offset);
2181 else if constexpr (is_floating_point_v<value_type>)
2182 return __vector_bitcast<value_type>(
2183 _mm512_extractf32x4_ps(__intrin_bitcast<__m512>(__in), _Offset));
2184 else
2185 return reinterpret_cast<_R>(
2186 _mm512_extracti32x4_epi32(__intrin_bitcast<__m512i>(__in),
2187 _Offset));
2188 }
2189 else
2190#endif // _GLIBCXX_SIMD_X86INTRIN }}}
2191 {
2192#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_1
2193 using _W = conditional_t<
2194 is_floating_point_v<value_type>, double,
2195 conditional_t<(sizeof(_R) >= 16), long long, value_type>>;
2196 static_assert(sizeof(_R) % sizeof(_W) == 0);
2197 constexpr int __return_width = sizeof(_R) / sizeof(_W);
2198 using _Up = __vector_type_t<_W, __return_width>;
2199 const auto __x = __vector_bitcast<_W>(__in);
2200#else
2201 constexpr int __return_width = _TVT::_S_full_size / _SplitBy;
2202 using _Up = _R;
2203 const __vector_type_t<value_type, _TVT::_S_full_size>& __x
2204 = __in; // only needed for _Tp = _SimdWrapper<value_type, _Np>
2205#endif
2206 constexpr int _O = _Offset * __return_width;
2207 return __call_with_subscripts<__return_width, _O>(
2208 __x, [](auto... __entries) {
2209 return reinterpret_cast<_R>(_Up{__entries...});
2210 });
2211 }
2212 }
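// Illustrative: _SplitBy divides the input into equal chunks and _Offset
// selects one of them, e.g. for __v of type __vector_type_t<int, 8>:
//   __extract<1, 2>(__v) // yields {__v[4], __v[5], __v[6], __v[7]}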
2213
2214// }}}
2215// __lo/__hi64[z]{{{
2216template <typename _Tp,
2217 typename _R
2218 = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
2219 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2220 __lo64(_Tp __x)
2221 {
2222 _R __r{};
2223 __builtin_memcpy(&__r, &__x, 8);
2224 return __r;
2225 }
2226
2227template <typename _Tp,
2228 typename _R
2229 = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
2230 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2231 __hi64(_Tp __x)
2232 {
2233 static_assert(sizeof(_Tp) == 16, "use __hi64z if you meant it");
2234 _R __r{};
2235 __builtin_memcpy(&__r, reinterpret_cast<const char*>(&__x) + 8, 8);
2236 return __r;
2237 }
2238
2239template <typename _Tp,
2240 typename _R
2241 = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
2242 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2243 __hi64z([[maybe_unused]] _Tp __x)
2244 {
2245 _R __r{};
2246 if constexpr (sizeof(_Tp) == 16)
2247 __builtin_memcpy(&__r, reinterpret_cast<const char*>(&__x) + 8, 8);
2248 return __r;
2249 }
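// Illustrative: __lo64/__hi64 return the low/high 8 bytes of a 16-byte vector
// as an 8-byte vector; __hi64z additionally yields all-zeros for 8-byte input.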
2250
2251// }}}
2252// __lo/__hi128{{{
2253template <typename _Tp>
2254 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2255 __lo128(_Tp __x)
2256 { return __extract<0, sizeof(_Tp) / 16>(__x); }
2257
2258template <typename _Tp>
2259 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2260 __hi128(_Tp __x)
2261 {
2262 static_assert(sizeof(__x) == 32);
2263 return __extract<1, 2>(__x);
2264 }
2265
2266// }}}
2267// __lo/__hi256{{{
2268template <typename _Tp>
2269 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2270 __lo256(_Tp __x)
2271 {
2272 static_assert(sizeof(__x) == 64);
2273 return __extract<0, 2>(__x);
2274 }
2275
2276template <typename _Tp>
2277 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2278 __hi256(_Tp __x)
2279 {
2280 static_assert(sizeof(__x) == 64);
2281 return __extract<1, 2>(__x);
2282 }
2283
2284// }}}
2285// __auto_bitcast{{{
2286template <typename _Tp>
2287 struct _AutoCast
2288 {
2289 static_assert(__is_vector_type_v<_Tp>);
2290
2291 const _Tp __x;
2292
2293 template <typename _Up, typename _UVT = _VectorTraits<_Up>>
2294 _GLIBCXX_SIMD_INTRINSIC constexpr operator _Up() const
2295 { return __intrin_bitcast<typename _UVT::type>(__x); }
2296 };
2297
2298template <typename _Tp>
2299 _GLIBCXX_SIMD_INTRINSIC constexpr _AutoCast<_Tp>
2300 __auto_bitcast(const _Tp& __x)
2301 { return {__x}; }
2302
2303template <typename _Tp, size_t _Np>
2304 _GLIBCXX_SIMD_INTRINSIC constexpr
2305 _AutoCast<typename _SimdWrapper<_Tp, _Np>::_BuiltinType>
2306 __auto_bitcast(const _SimdWrapper<_Tp, _Np>& __x)
2307 { return {__x._M_data}; }
2308
2309// }}}
2310// ^^^ ---- builtin vector types [[gnu::vector_size(N)]] and operations ---- ^^^
2311
2312#if _GLIBCXX_SIMD_HAVE_SSE_ABI
2313// __bool_storage_member_type{{{
2314#if _GLIBCXX_SIMD_HAVE_AVX512F && _GLIBCXX_SIMD_X86INTRIN
2315template <size_t _Size>
2316 struct __bool_storage_member_type
2317 {
2318 static_assert((_Size & (_Size - 1)) != 0,
2319 "This trait may only be used for non-power-of-2 sizes. "
2320 "Power-of-2 sizes must be specialized.");
2321 using type =
2322 typename __bool_storage_member_type<std::__bit_ceil(_Size)>::type;
2323 };
2324
2325template <>
2326 struct __bool_storage_member_type<1> { using type = bool; };
2327
2328template <>
2329 struct __bool_storage_member_type<2> { using type = __mmask8; };
2330
2331template <>
2332 struct __bool_storage_member_type<4> { using type = __mmask8; };
2333
2334template <>
2335 struct __bool_storage_member_type<8> { using type = __mmask8; };
2336
2337template <>
2338 struct __bool_storage_member_type<16> { using type = __mmask16; };
2339
2340template <>
2341 struct __bool_storage_member_type<32> { using type = __mmask32; };
2342
2343template <>
2344 struct __bool_storage_member_type<64> { using type = __mmask64; };
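// Illustrative: non-power-of-2 sizes recurse via __bit_ceil, e.g.
// __bool_storage_member_type<12>::type is __mmask16 (12 rounds up to 16).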
2345#endif // _GLIBCXX_SIMD_HAVE_AVX512F && _GLIBCXX_SIMD_X86INTRIN
2346
2347// }}}
2348// __intrinsic_type (x86){{{
2349// the following excludes bool via __is_vectorizable
2350#if _GLIBCXX_SIMD_HAVE_SSE
2351template <typename _Tp, size_t _Bytes>
2352 struct __intrinsic_type<_Tp, _Bytes,
2353 enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 64>>
2354 {
2355 static_assert(!is_same_v<_Tp, long double>,
2356 "no __intrinsic_type support for long double on x86");
2357
2358 static constexpr size_t _S_VBytes = _Bytes <= 16 ? 16
2359 : _Bytes <= 32 ? 32
2360 : 64;
2361
2362 using type [[__gnu__::__vector_size__(_S_VBytes)]]
2363 = conditional_t<is_integral_v<_Tp>, long long int, _Tp>;
2364 };
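// Illustrative: __intrinsic_type<float, 16>::type is a 16-byte float vector
// (equivalent to __m128), while every integral element type maps to a
// long long based vector (__m128i/__m256i/__m512i), with _Bytes rounded up
// to the next register size.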
2365#endif // _GLIBCXX_SIMD_HAVE_SSE
2366
2367// }}}
2368#endif // _GLIBCXX_SIMD_HAVE_SSE_ABI
2369// __intrinsic_type (ARM){{{
2370#if _GLIBCXX_SIMD_HAVE_NEON
2371template <>
2372 struct __intrinsic_type<float, 8, void>
2373 { using type = float32x2_t; };
2374
2375template <>
2376 struct __intrinsic_type<float, 16, void>
2377 { using type = float32x4_t; };
2378
2379#if _GLIBCXX_SIMD_HAVE_NEON_A64
2380template <>
2381 struct __intrinsic_type<double, 8, void>
2382 { using type = float64x1_t; };
2383
2384template <>
2385 struct __intrinsic_type<double, 16, void>
2386 { using type = float64x2_t; };
2387#endif
2388
2389#define _GLIBCXX_SIMD_ARM_INTRIN(_Bits, _Np) \
2390template <> \
2391 struct __intrinsic_type<__int_with_sizeof_t<_Bits / 8>, \
2392 _Np * _Bits / 8, void> \
2393 { using type = int##_Bits##x##_Np##_t; }; \
2394template <> \
2395 struct __intrinsic_type<make_unsigned_t<__int_with_sizeof_t<_Bits / 8>>, \
2396 _Np * _Bits / 8, void> \
2397 { using type = uint##_Bits##x##_Np##_t; }
2398_GLIBCXX_SIMD_ARM_INTRIN(8, 8);
2399_GLIBCXX_SIMD_ARM_INTRIN(8, 16);
2400_GLIBCXX_SIMD_ARM_INTRIN(16, 4);
2401_GLIBCXX_SIMD_ARM_INTRIN(16, 8);
2402_GLIBCXX_SIMD_ARM_INTRIN(32, 2);
2403_GLIBCXX_SIMD_ARM_INTRIN(32, 4);
2404_GLIBCXX_SIMD_ARM_INTRIN(64, 1);
2405_GLIBCXX_SIMD_ARM_INTRIN(64, 2);
2406#undef _GLIBCXX_SIMD_ARM_INTRIN
2407
2408template <typename _Tp, size_t _Bytes>
2409 struct __intrinsic_type<_Tp, _Bytes,
2410 enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 16>>
2411 {
2412 static constexpr int _SVecBytes = _Bytes <= 8 ? 8 : 16;
2413 using _Ip = __int_for_sizeof_t<_Tp>;
2414 using _Up = conditional_t<
2415 is_floating_point_v<_Tp>, _Tp,
2416 conditional_t<is_unsigned_v<_Tp>, make_unsigned_t<_Ip>, _Ip>>;
2417 static_assert(!is_same_v<_Tp, _Up> || _SVecBytes != _Bytes,
2418 "should use explicit specialization above");
2419 using type = typename __intrinsic_type<_Up, _SVecBytes>::type;
2420 };
2421#endif // _GLIBCXX_SIMD_HAVE_NEON
2422
2423// }}}
2424// __intrinsic_type (PPC){{{
2425#ifdef __ALTIVEC__
2426template <typename _Tp>
2427 struct __intrinsic_type_impl;
2428
2429#define _GLIBCXX_SIMD_PPC_INTRIN(_Tp) \
2430 template <> \
2431 struct __intrinsic_type_impl<_Tp> { using type = __vector _Tp; }
2432_GLIBCXX_SIMD_PPC_INTRIN(float);
2433_GLIBCXX_SIMD_PPC_INTRIN(double);
2434_GLIBCXX_SIMD_PPC_INTRIN(signed char);
2435_GLIBCXX_SIMD_PPC_INTRIN(unsigned char);
2436_GLIBCXX_SIMD_PPC_INTRIN(signed short);
2437_GLIBCXX_SIMD_PPC_INTRIN(unsigned short);
2438_GLIBCXX_SIMD_PPC_INTRIN(signed int);
2439_GLIBCXX_SIMD_PPC_INTRIN(unsigned int);
2440_GLIBCXX_SIMD_PPC_INTRIN(signed long);
2441_GLIBCXX_SIMD_PPC_INTRIN(unsigned long);
2442_GLIBCXX_SIMD_PPC_INTRIN(signed long long);
2443_GLIBCXX_SIMD_PPC_INTRIN(unsigned long long);
2444#undef _GLIBCXX_SIMD_PPC_INTRIN
2445
2446template <typename _Tp, size_t _Bytes>
2447 struct __intrinsic_type<_Tp, _Bytes,
2448 enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 16>>
2449 {
2450 static constexpr bool _S_is_ldouble = is_same_v<_Tp, long double>;
2451 // allow _Tp == long double with -mlong-double-64
2452 static_assert(!(_S_is_ldouble && sizeof(long double) > sizeof(double)),
2453 "no __intrinsic_type support for long double on PPC");
2454#ifndef __VSX__
2455 static_assert(!is_same_v<_Tp, double>,
2456 "no __intrinsic_type support for double on PPC w/o VSX");
2457#endif
2458 using type =
2459 typename __intrinsic_type_impl<
2460 conditional_t<is_floating_point_v<_Tp>,
2461 conditional_t<_S_is_ldouble, double, _Tp>,
2462 __int_for_sizeof_t<_Tp>>>::type;
2463 };
2464#endif // __ALTIVEC__
2465
2466// }}}
2467// _SimdWrapper<bool>{{{1
2468template <size_t _Width>
2469 struct _SimdWrapper<bool, _Width,
2470 void_t<typename __bool_storage_member_type<_Width>::type>>
2471 {
2472 using _BuiltinType = typename __bool_storage_member_type<_Width>::type;
2473 using value_type = bool;
2474
2475 static constexpr size_t _S_full_size = sizeof(_BuiltinType) * __CHAR_BIT__;
2476
2477 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<bool, _S_full_size>
2478 __as_full_vector() const { return _M_data; }
2479
2480 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper() = default;
2481 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(_BuiltinType __k)
2482 : _M_data(__k) {}
2483
2484 _GLIBCXX_SIMD_INTRINSIC operator const _BuiltinType&() const
2485 { return _M_data; }
2486
2487 _GLIBCXX_SIMD_INTRINSIC operator _BuiltinType&()
2488 { return _M_data; }
2489
2490 _GLIBCXX_SIMD_INTRINSIC _BuiltinType __intrin() const
2491 { return _M_data; }
2492
2493 _GLIBCXX_SIMD_INTRINSIC constexpr value_type operator[](size_t __i) const
2494 { return _M_data & (_BuiltinType(1) << __i); }
2495
2496 template <size_t __i>
2497 _GLIBCXX_SIMD_INTRINSIC constexpr value_type
2498 operator[](_SizeConstant<__i>) const
2499 { return _M_data & (_BuiltinType(1) << __i); }
2500
2501 _GLIBCXX_SIMD_INTRINSIC constexpr void _M_set(size_t __i, value_type __x)
2502 {
2503 if (__x)
2504 _M_data |= (_BuiltinType(1) << __i);
2505 else
2506 _M_data &= ~(_BuiltinType(1) << __i);
2507 }
2508
2509 _GLIBCXX_SIMD_INTRINSIC
2510 constexpr bool _M_is_constprop() const
2511 { return __builtin_constant_p(_M_data); }
2512
2513 _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_none_of() const
2514 {
2515 if (__builtin_constant_p(_M_data))
2516 {
2517 constexpr int __nbits = sizeof(_BuiltinType) * __CHAR_BIT__;
2518 constexpr _BuiltinType __active_mask
2519 = ~_BuiltinType() >> (__nbits - _Width);
2520 return (_M_data & __active_mask) == 0;
2521 }
2522 return false;
2523 }
2524
2525 _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_all_of() const
2526 {
2527 if (__builtin_constant_p(_M_data))
2528 {
2529 constexpr int __nbits = sizeof(_BuiltinType) * __CHAR_BIT__;
2530 constexpr _BuiltinType __active_mask
2531 = ~_BuiltinType() >> (__nbits - _Width);
2532 return (_M_data & __active_mask) == __active_mask;
2533 }
2534 return false;
2535 }
2536
2537 _BuiltinType _M_data;
2538 };
2539
2540// _SimdWrapperBase{{{1
2541template <bool _MustZeroInitPadding, typename _BuiltinType>
2542 struct _SimdWrapperBase;
2543
2544template <typename _BuiltinType>
2545 struct _SimdWrapperBase<false, _BuiltinType> // no padding or no SNaNs
2546 {
2547 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase() = default;
2548 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase(_BuiltinType __init)
2549 : _M_data(__init)
2550 {}
2551
2552 _BuiltinType _M_data;
2553 };
2554
2555template <typename _BuiltinType>
2556 struct _SimdWrapperBase<true, _BuiltinType> // with padding that needs to
2557 // never become SNaN
2558 {
2559 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase() : _M_data() {}
2560 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase(_BuiltinType __init)
2561 : _M_data(__init)
2562 {}
2563
2564 _BuiltinType _M_data;
2565 };
2566
2567// }}}
2568// _SimdWrapper{{{
2569template <typename _Tp, size_t _Width>
2570 struct _SimdWrapper<
2571 _Tp, _Width,
2572 void_t<__vector_type_t<_Tp, _Width>, __intrinsic_type_t<_Tp, _Width>>>
2573 : _SimdWrapperBase<__has_iec559_behavior<__signaling_NaN, _Tp>::value
2574 && sizeof(_Tp) * _Width
2575 == sizeof(__vector_type_t<_Tp, _Width>),
2576 __vector_type_t<_Tp, _Width>>
2577 {
2578 using _Base
2579 = _SimdWrapperBase<__has_iec559_behavior<__signaling_NaN, _Tp>::value
2580 && sizeof(_Tp) * _Width
2581 == sizeof(__vector_type_t<_Tp, _Width>),
2582 __vector_type_t<_Tp, _Width>>;
2583
2584 static_assert(__is_vectorizable_v<_Tp>);
2585 static_assert(_Width >= 2); // a width of 1 makes no sense; use _Tp directly
2586
2587 using _BuiltinType = __vector_type_t<_Tp, _Width>;
2588 using value_type = _Tp;
2589
2590 static inline constexpr size_t _S_full_size
2591 = sizeof(_BuiltinType) / sizeof(value_type);
2592 static inline constexpr int _S_size = _Width;
2593 static inline constexpr bool _S_is_partial = _S_full_size != _S_size;
2594
2595 using _Base::_M_data;
2596
2597 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<_Tp, _S_full_size>
2598 __as_full_vector() const
2599 { return _M_data; }
2600
2601 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(initializer_list<_Tp> __init)
2602 : _Base(__generate_from_n_evaluations<_Width, _BuiltinType>(
2603 [&](auto __i) { return __init.begin()[__i.value]; })) {}
2604
2605 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper() = default;
2606 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(const _SimdWrapper&)
2607 = default;
2608 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(_SimdWrapper&&) = default;
2609
2610 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper&
2611 operator=(const _SimdWrapper&) = default;
2612 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper&
2613 operator=(_SimdWrapper&&) = default;
2614
2615 template <typename _V, typename = enable_if_t<disjunction_v<
2616 is_same<_V, __vector_type_t<_Tp, _Width>>,
2617 is_same<_V, __intrinsic_type_t<_Tp, _Width>>>>>
2618 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(_V __x)
2619 // __vector_bitcast can convert e.g. __m128 to __vector(2) float
2620 : _Base(__vector_bitcast<_Tp, _Width>(__x)) {}
2621
2622 template <typename... _As,
2623 typename = enable_if_t<((is_same_v<simd_abi::scalar, _As> && ...)
2624 && sizeof...(_As) <= _Width)>>
2625 _GLIBCXX_SIMD_INTRINSIC constexpr
2626 operator _SimdTuple<_Tp, _As...>() const
2627 {
2628 const auto& dd = _M_data; // workaround for GCC7 ICE
2629 return __generate_from_n_evaluations<sizeof...(_As),
2630 _SimdTuple<_Tp, _As...>>([&](
2631 auto __i) constexpr { return dd[int(__i)]; });
2632 }
2633
2634 _GLIBCXX_SIMD_INTRINSIC constexpr operator const _BuiltinType&() const
2635 { return _M_data; }
2636
2637 _GLIBCXX_SIMD_INTRINSIC constexpr operator _BuiltinType&()
2638 { return _M_data; }
2639
2640 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp operator[](size_t __i) const
2641 { return _M_data[__i]; }
2642
2643 template <size_t __i>
2644 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp operator[](_SizeConstant<__i>) const
2645 { return _M_data[__i]; }
2646
2647 _GLIBCXX_SIMD_INTRINSIC constexpr void _M_set(size_t __i, _Tp __x)
2648 { _M_data[__i] = __x; }
2649
2650 _GLIBCXX_SIMD_INTRINSIC
2651 constexpr bool _M_is_constprop() const
2652 { return __builtin_constant_p(_M_data); }
2653
2654 _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_none_of() const
2655 {
2656 if (__builtin_constant_p(_M_data))
2657 {
2658 bool __r = true;
2659 if constexpr (is_floating_point_v<_Tp>)
2660 {
2661 using _Ip = __int_for_sizeof_t<_Tp>;
2662 const auto __intdata = __vector_bitcast<_Ip>(_M_data);
2663 __execute_n_times<_Width>(
2664 [&](auto __i) { __r &= __intdata[__i.value] == _Ip(); });
2665 }
2666 else
2667 __execute_n_times<_Width>(
2668 [&](auto __i) { __r &= _M_data[__i.value] == _Tp(); });
2669 return __r;
2670 }
2671 return false;
2672 }
2673
2674 _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_all_of() const
2675 {
2676 if (__builtin_constant_p(_M_data))
2677 {
2678 bool __r = true;
2679 if constexpr (is_floating_point_v<_Tp>)
2680 {
2681 using _Ip = __int_for_sizeof_t<_Tp>;
2682 const auto __intdata = __vector_bitcast<_Ip>(_M_data);
2683 __execute_n_times<_Width>(
2684 [&](auto __i) { __r &= __intdata[__i.value] == ~_Ip(); });
2685 }
2686 else
2687 __execute_n_times<_Width>(
2688 [&](auto __i) { __r &= _M_data[__i.value] == ~_Tp(); });
2689 return __r;
2690 }
2691 return false;
2692 }
2693 };
2694
2695// }}}
2696
2697// __vectorized_sizeof {{{
2698template <typename _Tp>
2699 constexpr size_t
2700 __vectorized_sizeof()
2701 {
2702 if constexpr (!__is_vectorizable_v<_Tp>)
2703 return 0;
2704
2705 if constexpr (sizeof(_Tp) <= 8)
2706 {
2707 // X86:
2708 if constexpr (__have_avx512bw)
2709 return 64;
2710 if constexpr (__have_avx512f && sizeof(_Tp) >= 4)
2711 return 64;
2712 if constexpr (__have_avx2)
2713 return 32;
2714 if constexpr (__have_avx && is_floating_point_v<_Tp>)
2715 return 32;
2716 if constexpr (__have_sse2)
2717 return 16;
2718 if constexpr (__have_sse && is_same_v<_Tp, float>)
2719 return 16;
2720 /* The following is too much trouble because of mixed MMX and x87 code.
2721 * While nothing here explicitly calls MMX instructions or registers,
2722 * they are still emitted but no EMMS cleanup is done.
2723 if constexpr (__have_mmx && sizeof(_Tp) <= 4 && is_integral_v<_Tp>)
2724 return 8;
2725 */
2726
2727 // PowerPC:
2728 if constexpr (__have_power8vec
2729 || (__have_power_vmx && (sizeof(_Tp) < 8))
2730 || (__have_power_vsx && is_floating_point_v<_Tp>) )
2731 return 16;
2732
2733 // ARM:
2734 if constexpr (__have_neon_a64
2735 || (__have_neon_a32 && !is_same_v<_Tp, double>) )
2736 return 16;
2737 if constexpr (__have_neon
2738 && sizeof(_Tp) < 8
2739 // Only allow fp if the user allows non-IEC559 fp (e.g.
2740 // via -ffast-math). ARMv7 NEON fp is not conforming to
2741 // IEC559.
2742 && (__support_neon_float || !is_floating_point_v<_Tp>))
2743 return 16;
2744 }
2745
2746 return sizeof(_Tp);
2747 }
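// Illustrative: on an AVX2 (but not AVX-512) target, __vectorized_sizeof<_Tp>()
// is 32 for every vectorizable _Tp with sizeof(_Tp) <= 8, so e.g.
// simd_abi::native<float> covers 32 / sizeof(float) == 8 elements.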
2748
2749// }}}
2750namespace simd_abi {
2751// most of simd_abi is defined in simd_detail.h
2752template <typename _Tp>
2753 inline constexpr int max_fixed_size
2754 = (__have_avx512bw && sizeof(_Tp) == 1) ? 64 : 32;
2755
2756// compatible {{{
2757#if defined __x86_64__ || defined __aarch64__
2758template <typename _Tp>
2759 using compatible = conditional_t<(sizeof(_Tp) <= 8), _VecBuiltin<16>, scalar>;
2760#elif defined __ARM_NEON
2761// FIXME: not sure, probably needs to be scalar (or dependent on the hard-float
2762// ABI?)
2763template <typename _Tp>
2764 using compatible
2765 = conditional_t<(sizeof(_Tp) < 8
2766 && (__support_neon_float || !is_floating_point_v<_Tp>)),
2767 _VecBuiltin<16>, scalar>;
2768#else
2769template <typename>
2770 using compatible = scalar;
2771#endif
2772
2773// }}}
2774// native {{{
2775template <typename _Tp>
2776 constexpr auto
2777 __determine_native_abi()
2778 {
2779 constexpr size_t __bytes = __vectorized_sizeof<_Tp>();
2780 if constexpr (__bytes == sizeof(_Tp))
2781 return static_cast<scalar*>(nullptr);
2782 else if constexpr (__have_avx512vl || (__have_avx512f && __bytes == 64))
2783 return static_cast<_VecBltnBtmsk<__bytes>*>(nullptr);
2784 else
2785 return static_cast<_VecBuiltin<__bytes>*>(nullptr);
2786 }
2787
2788template <typename _Tp, typename = enable_if_t<__is_vectorizable_v<_Tp>>>
2789 using native = remove_pointer_t<decltype(__determine_native_abi<_Tp>())>;
2790
2791// }}}
2792// __default_abi {{{
2793#if defined _GLIBCXX_SIMD_DEFAULT_ABI
2794template <typename _Tp>
2795 using __default_abi = _GLIBCXX_SIMD_DEFAULT_ABI<_Tp>;
2796#else
2797template <typename _Tp>
2798 using __default_abi = compatible<_Tp>;
2799#endif
2800
2801// }}}
2802} // namespace simd_abi
2803
2804// traits {{{1
2805// is_abi_tag {{{2
2806template <typename _Tp, typename = void_t<>>
2807 struct is_abi_tag : false_type {};
2808
2809template <typename _Tp>
2810 struct is_abi_tag<_Tp, void_t<typename _Tp::_IsValidAbiTag>>
2811 : public _Tp::_IsValidAbiTag {};
2812
2813template <typename _Tp>
2814 inline constexpr bool is_abi_tag_v = is_abi_tag<_Tp>::value;
2815
2816// is_simd(_mask) {{{2
2817template <typename _Tp>
2818 struct is_simd : public false_type {};
2819
2820template <typename _Tp>
2821 inline constexpr bool is_simd_v = is_simd<_Tp>::value;
2822
2823template <typename _Tp>
2824 struct is_simd_mask : public false_type {};
2825
2826template <typename _Tp>
2827 inline constexpr bool is_simd_mask_v = is_simd_mask<_Tp>::value;
2828
2829// simd_size {{{2
2830template <typename _Tp, typename _Abi, typename = void>
2831 struct __simd_size_impl {};
2832
2833template <typename _Tp, typename _Abi>
2834 struct __simd_size_impl<
2835 _Tp, _Abi,
2836 enable_if_t<conjunction_v<__is_vectorizable<_Tp>, is_abi_tag<_Abi>>>>
2837 : _SizeConstant<_Abi::template _S_size<_Tp>> {};
2838
2839template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
2840 struct simd_size : __simd_size_impl<_Tp, _Abi> {};
2841
2842template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
2843 inline constexpr size_t simd_size_v = simd_size<_Tp, _Abi>::value;
2844
2845// simd_abi::deduce {{{2
2846template <typename _Tp, size_t _Np, typename = void>
2847 struct __deduce_impl;
2848
2849namespace simd_abi {
2850/**
2851 * @tparam _Tp The requested `value_type` for the elements.
2852 * @tparam _Np The requested number of elements.
2853 * @tparam _Abis This parameter is ignored, since this implementation cannot
2854 * make any use of it. Either a good native ABI is matched and used as `type`
2855 * alias, or the `fixed_size<_Np>` ABI is used, which internally is built from
2856 * the best matching native ABIs.
2857 */
2858template <typename _Tp, size_t _Np, typename...>
2859 struct deduce : __deduce_impl<_Tp, _Np> {};
2860
2861template <typename _Tp, size_t _Np, typename... _Abis>
2862 using deduce_t = typename deduce<_Tp, _Np, _Abis...>::type;
2863} // namespace simd_abi
2864
2865// }}}2
2866// rebind_simd {{{2
2867template <typename _Tp, typename _V, typename = void>
2868 struct rebind_simd;
2869
2870template <typename _Tp, typename _Up, typename _Abi>
2871 struct rebind_simd<
2872 _Tp, simd<_Up, _Abi>,
2873 void_t<simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>
2874 {
2875 using type
2876 = simd<_Tp, simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>;
2877 };
2878
2879template <typename _Tp, typename _Up, typename _Abi>
2880 struct rebind_simd<
2881 _Tp, simd_mask<_Up, _Abi>,
2882 void_t<simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>
2883 {
2884 using type
2885 = simd_mask<_Tp, simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>;
2886 };
2887
2888template <typename _Tp, typename _V>
2889 using rebind_simd_t = typename rebind_simd<_Tp, _V>::type;
2890
2891// resize_simd {{{2
2892template <int _Np, typename _V, typename = void>
2893 struct resize_simd;
2894
2895template <int _Np, typename _Tp, typename _Abi>
2896 struct resize_simd<_Np, simd<_Tp, _Abi>,
2897 void_t<simd_abi::deduce_t<_Tp, _Np, _Abi>>>
2898 { using type = simd<_Tp, simd_abi::deduce_t<_Tp, _Np, _Abi>>; };
2899
2900template <int _Np, typename _Tp, typename _Abi>
2901 struct resize_simd<_Np, simd_mask<_Tp, _Abi>,
2902 void_t<simd_abi::deduce_t<_Tp, _Np, _Abi>>>
2903 { using type = simd_mask<_Tp, simd_abi::deduce_t<_Tp, _Np, _Abi>>; };
2904
2905template <int _Np, typename _V>
2906 using resize_simd_t = typename resize_simd<_Np, _V>::type;
2907
2908// }}}2
2909// memory_alignment {{{2
2910template <typename _Tp, typename _Up = typename _Tp::value_type>
2911 struct memory_alignment
2912 : public _SizeConstant<vector_aligned_tag::_S_alignment<_Tp, _Up>> {};
2913
2914template <typename _Tp, typename _Up = typename _Tp::value_type>
2915 inline constexpr size_t memory_alignment_v = memory_alignment<_Tp, _Up>::value;
2916
2917// class template simd [simd] {{{1
2918template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
2919 class simd;
2920
2921template <typename _Tp, typename _Abi>
2922 struct is_simd<simd<_Tp, _Abi>> : public true_type {};
2923
2924template <typename _Tp>
2925 using native_simd = simd<_Tp, simd_abi::native<_Tp>>;
2926
2927template <typename _Tp, int _Np>
2928 using fixed_size_simd = simd<_Tp, simd_abi::fixed_size<_Np>>;
2929
2930template <typename _Tp, size_t _Np>
2931 using __deduced_simd = simd<_Tp, simd_abi::deduce_t<_Tp, _Np>>;
2932
2933// class template simd_mask [simd_mask] {{{1
2934template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
2935 class simd_mask;
2936
2937template <typename _Tp, typename _Abi>
2938 struct is_simd_mask<simd_mask<_Tp, _Abi>> : public true_type {};
2939
2940template <typename _Tp>
2941 using native_simd_mask = simd_mask<_Tp, simd_abi::native<_Tp>>;
2942
2943template <typename _Tp, int _Np>
2944 using fixed_size_simd_mask = simd_mask<_Tp, simd_abi::fixed_size<_Np>>;
2945
2946template <typename _Tp, size_t _Np>
2947 using __deduced_simd_mask = simd_mask<_Tp, simd_abi::deduce_t<_Tp, _Np>>;
2948
2949// casts [simd.casts] {{{1
2950// static_simd_cast {{{2
2951template <typename _Tp, typename _Up, typename _Ap, bool = is_simd_v<_Tp>,
2952 typename = void>
2953 struct __static_simd_cast_return_type;
2954
2955template <typename _Tp, typename _A0, typename _Up, typename _Ap>
2956 struct __static_simd_cast_return_type<simd_mask<_Tp, _A0>, _Up, _Ap, false,
2957 void>
2958 : __static_simd_cast_return_type<simd<_Tp, _A0>, _Up, _Ap> {};
2959
2960template <typename _Tp, typename _Up, typename _Ap>
2961 struct __static_simd_cast_return_type<
2962 _Tp, _Up, _Ap, true, enable_if_t<_Tp::size() == simd_size_v<_Up, _Ap>>>
2963 { using type = _Tp; };
2964
2965template <typename _Tp, typename _Ap>
2966 struct __static_simd_cast_return_type<_Tp, _Tp, _Ap, false,
2967#ifdef _GLIBCXX_SIMD_FIX_P2TS_ISSUE66
2968 enable_if_t<__is_vectorizable_v<_Tp>>
2969#else
2970 void
2971#endif
2972 >
2973 { using type = simd<_Tp, _Ap>; };
2974
2975template <typename _Tp, typename = void>
2976 struct __safe_make_signed { using type = _Tp; };
2977
2978template <typename _Tp>
2979 struct __safe_make_signed<_Tp, enable_if_t<is_integral_v<_Tp>>>
2980 {
2981 // the extra make_unsigned_t is because of PR85951
2982 using type = make_signed_t<make_unsigned_t<_Tp>>;
2983 };
2984
2985template <typename _Tp>
2986 using safe_make_signed_t = typename __safe_make_signed<_Tp>::type;
2987
2988template <typename _Tp, typename _Up, typename _Ap>
2989 struct __static_simd_cast_return_type<_Tp, _Up, _Ap, false,
2990#ifdef _GLIBCXX_SIMD_FIX_P2TS_ISSUE66
2991 enable_if_t<__is_vectorizable_v<_Tp>>
2992#else
2993 void
2994#endif
2995 >
2996 {
2997 using type = conditional_t<
2998 (is_integral_v<_Up> && is_integral_v<_Tp> &&
2999#ifndef _GLIBCXX_SIMD_FIX_P2TS_ISSUE65
3000 is_signed_v<_Up> != is_signed_v<_Tp> &&
3001#endif
3002 is_same_v<safe_make_signed_t<_Up>, safe_make_signed_t<_Tp>>),
3003 simd<_Tp, _Ap>, fixed_size_simd<_Tp, simd_size_v<_Up, _Ap>>>;
3004 };
3005
3006template <typename _Tp, typename _Up, typename _Ap,
3007 typename _R
3008 = typename __static_simd_cast_return_type<_Tp, _Up, _Ap>::type>
3009 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _R
3010 static_simd_cast(const simd<_Up, _Ap>& __x)
3011 {
3012 if constexpr (is_same<_R, simd<_Up, _Ap>>::value)
3013 return __x;
3014 else
3015 {
3016 _SimdConverter<_Up, _Ap, typename _R::value_type, typename _R::abi_type>
3017 __c;
3018 return _R(__private_init, __c(__data(__x)));
3019 }
3020 }
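// Illustrative: casts that only change signedness (or keep the value_type)
// preserve the ABI; anything else falls back to fixed_size of equal element
// count, e.g.
//   static_simd_cast<unsigned>(native_simd<int>()); // same (native) ABI
//   static_simd_cast<float>(native_simd<int>()); // fixed_size_simd<float, N>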
3021
3022namespace __proposed {
3023template <typename _Tp, typename _Up, typename _Ap,
3024 typename _R
3025 = typename __static_simd_cast_return_type<_Tp, _Up, _Ap>::type>
3026 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR typename _R::mask_type
3027 static_simd_cast(const simd_mask<_Up, _Ap>& __x)
3028 {
3029 using _RM = typename _R::mask_type;
3030 return {__private_init, _RM::abi_type::_MaskImpl::template _S_convert<
3031 typename _RM::simd_type::value_type>(__x)};
3032 }
3033
3034template <typename _To, typename _Up, typename _Abi>
3035 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3036 _To
3037 simd_bit_cast(const simd<_Up, _Abi>& __x)
3038 {
3039 using _Tp = typename _To::value_type;
3040 using _ToMember = typename _SimdTraits<_Tp, typename _To::abi_type>::_SimdMember;
3041 using _From = simd<_Up, _Abi>;
3042 using _FromMember = typename _SimdTraits<_Up, _Abi>::_SimdMember;
3043 // with concepts, the following should be constraints
3044 static_assert(sizeof(_To) == sizeof(_From));
3045 static_assert(is_trivially_copyable_v<_Tp> && is_trivially_copyable_v<_Up>);
3046 static_assert(is_trivially_copyable_v<_ToMember> && is_trivially_copyable_v<_FromMember>);
3047#if __has_builtin(__builtin_bit_cast)
3048 return {__private_init, __builtin_bit_cast(_ToMember, __data(__x))};
3049#else
3050 return {__private_init, __bit_cast<_ToMember>(__data(__x))};
3051#endif
3052 }
3053
3054template <typename _To, typename _Up, typename _Abi>
3055 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3056 _To
3057 simd_bit_cast(const simd_mask<_Up, _Abi>& __x)
3058 {
3059 using _From = simd_mask<_Up, _Abi>;
3060 static_assert(sizeof(_To) == sizeof(_From));
3061 static_assert(is_trivially_copyable_v<_From>);
3062 // _To can be simd<T, A>, specifically simd<T, fixed_size<N>> in which case _To is not trivially
3063 // copyable.
3064 if constexpr (is_simd_v<_To>)
3065 {
3066 using _Tp = typename _To::value_type;
3067 using _ToMember = typename _SimdTraits<_Tp, typename _To::abi_type>::_SimdMember;
3068 static_assert(is_trivially_copyable_v<_ToMember>);
3069#if __has_builtin(__builtin_bit_cast)
3070 return {__private_init, __builtin_bit_cast(_ToMember, __x)};
3071#else
3072 return {__private_init, __bit_cast<_ToMember>(__x)};
3073#endif
3074 }
3075 else
3076 {
3077 static_assert(is_trivially_copyable_v<_To>);
3078#if __has_builtin(__builtin_bit_cast)
3079 return __builtin_bit_cast(_To, __x);
3080#else
3081 return __bit_cast<_To>(__x);
3082#endif
3083 }
3084 }
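// Illustrative sketch (assuming equal sizeof of source and destination on the
// target): simd_bit_cast reinterprets the complete value representation, e.g.
//   auto __f = simd_bit_cast<native_simd<float>>(native_simd<int>());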
3085} // namespace __proposed
3086
3087// simd_cast {{{2
3088template <typename _Tp, typename _Up, typename _Ap,
3089 typename _To = __value_type_or_identity_t<_Tp>>
3090 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR auto
3091 simd_cast(const simd<_ValuePreserving<_Up, _To>, _Ap>& __x)
3092 -> decltype(static_simd_cast<_Tp>(__x))
3093 { return static_simd_cast<_Tp>(__x); }
3094
3095namespace __proposed {
3096template <typename _Tp, typename _Up, typename _Ap,
3097 typename _To = __value_type_or_identity_t<_Tp>>
3098 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR auto
3099 simd_cast(const simd_mask<_ValuePreserving<_Up, _To>, _Ap>& __x)
3100 -> decltype(static_simd_cast<_Tp>(__x))
3101 { return static_simd_cast<_Tp>(__x); }
3102} // namespace __proposed
3103
3104// }}}2
3105// resizing_simd_cast {{{
3106namespace __proposed {
3107/* Proposed spec:
3108
3109template <class T, class U, class Abi>
3110T resizing_simd_cast(const simd<U, Abi>& x)
3111
3112p1 Constraints:
3113 - is_simd_v<T> is true and
3114 - T::value_type is the same type as U
3115
3116p2 Returns:
3117 A simd object with the i^th element initialized to x[i] for all i in the
3118 range [0, min(T::size(), simd_size_v<U, Abi>)). If T::size() is larger
3119 than simd_size_v<U, Abi>, the remaining elements are value-initialized.
3120
3121template <class T, class U, class Abi>
3122T resizing_simd_cast(const simd_mask<U, Abi>& x)
3123
3124p1 Constraints: is_simd_mask_v<T> is true
3125
3126p2 Returns:
3127 A simd_mask object with the i^th element initialized to x[i] for all i in
3128 the range [0, min(T::size(), simd_size_v<U, Abi>)). If T::size() is larger
3129 than simd_size_v<U, Abi>, the remaining elements are initialized to false.
3130
3131 */
3132
3133template <typename _Tp, typename _Up, typename _Ap>
3134 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR enable_if_t<
3135 conjunction_v<is_simd<_Tp>, is_same<typename _Tp::value_type, _Up>>, _Tp>
3136 resizing_simd_cast(const simd<_Up, _Ap>& __x)
3137 {
3138 if constexpr (is_same_v<typename _Tp::abi_type, _Ap>)
3139 return __x;
3140 else if constexpr (simd_size_v<_Up, _Ap> == 1)
3141 {
3142 _Tp __r{};
3143 __r[0] = __x[0];
3144 return __r;
3145 }
3146 else if constexpr (_Tp::size() == 1)
3147 return __x[0];
3148 else if constexpr (sizeof(_Tp) == sizeof(__x)
3149 && !__is_fixed_size_abi_v<_Ap>)
3150 return {__private_init,
3151 __vector_bitcast<typename _Tp::value_type, _Tp::size()>(
3152 _Ap::_S_masked(__data(__x))._M_data)};
3153 else
3154 {
3155 _Tp __r{};
3156 __builtin_memcpy(&__data(__r), &__data(__x),
3157 sizeof(_Up)
3158 * std::min(_Tp::size(), simd_size_v<_Up, _Ap>));
3159 return __r;
3160 }
3161 }
3162
3163template <typename _Tp, typename _Up, typename _Ap>
3164 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3165 enable_if_t<is_simd_mask_v<_Tp>, _Tp>
3166 resizing_simd_cast(const simd_mask<_Up, _Ap>& __x)
3167 {
3168 return {__private_init, _Tp::abi_type::_MaskImpl::template _S_convert<
3169 typename _Tp::simd_type::value_type>(__x)};
3170 }
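// Illustrative: growing casts value-initialize the new elements, e.g.
//   fixed_size_simd<int, 4> __a = ...;
//   auto __b = resizing_simd_cast<fixed_size_simd<int, 8>>(__a);
//   // __b[i] == __a[i] for i < 4, __b[i] == 0 otherwise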
3171} // namespace __proposed
3172
3173// }}}
3174// to_fixed_size {{{2
3175template <typename _Tp, int _Np>
3176 _GLIBCXX_SIMD_INTRINSIC fixed_size_simd<_Tp, _Np>
3177 to_fixed_size(const fixed_size_simd<_Tp, _Np>& __x)
3178 { return __x; }
3179
3180template <typename _Tp, int _Np>
3181 _GLIBCXX_SIMD_INTRINSIC fixed_size_simd_mask<_Tp, _Np>
3182 to_fixed_size(const fixed_size_simd_mask<_Tp, _Np>& __x)
3183 { return __x; }
3184
3185template <typename _Tp, typename _Ap>
3186 _GLIBCXX_SIMD_INTRINSIC auto
3187 to_fixed_size(const simd<_Tp, _Ap>& __x)
3188 {
3189 return simd<_Tp, simd_abi::fixed_size<simd_size_v<_Tp, _Ap>>>([&__x](
3190 auto __i) constexpr { return __x[__i]; });
3191 }
3192
3193template <typename _Tp, typename _Ap>
3194 _GLIBCXX_SIMD_INTRINSIC auto
3195 to_fixed_size(const simd_mask<_Tp, _Ap>& __x)
3196 {
3197 constexpr int _Np = simd_mask<_Tp, _Ap>::size();
3198 fixed_size_simd_mask<_Tp, _Np> __r;
3199 __execute_n_times<_Np>([&](auto __i) constexpr { __r[__i] = __x[__i]; });
3200 return __r;
3201 }
3202
3203// to_native {{{2
3204template <typename _Tp, int _Np>
3205 _GLIBCXX_SIMD_INTRINSIC
3206 enable_if_t<(_Np == native_simd<_Tp>::size()), native_simd<_Tp>>
3207 to_native(const fixed_size_simd<_Tp, _Np>& __x)
3208 {
3209 alignas(memory_alignment_v<native_simd<_Tp>>) _Tp __mem[_Np];
3210 __x.copy_to(__mem, vector_aligned);
3211 return {__mem, vector_aligned};
3212 }
3213
3214template <typename _Tp, size_t _Np>
3215 _GLIBCXX_SIMD_INTRINSIC
3216 enable_if_t<(_Np == native_simd_mask<_Tp>::size()), native_simd_mask<_Tp>>
3217 to_native(const fixed_size_simd_mask<_Tp, _Np>& __x)
3218 {
3219 return native_simd_mask<_Tp>([&](auto __i) constexpr { return __x[__i]; });
3220 }
3221
3222// to_compatible {{{2
3223template <typename _Tp, size_t _Np>
3224 _GLIBCXX_SIMD_INTRINSIC enable_if_t<(_Np == simd<_Tp>::size()), simd<_Tp>>
3225 to_compatible(const simd<_Tp, simd_abi::fixed_size<_Np>>& __x)
3226 {
3227 alignas(memory_alignment_v<simd<_Tp>>) _Tp __mem[_Np];
3228 __x.copy_to(__mem, vector_aligned);
3229 return {__mem, vector_aligned};
3230 }
3231
3232template <typename _Tp, size_t _Np>
3233 _GLIBCXX_SIMD_INTRINSIC
3234 enable_if_t<(_Np == simd_mask<_Tp>::size()), simd_mask<_Tp>>
3235 to_compatible(const simd_mask<_Tp, simd_abi::fixed_size<_Np>>& __x)
3236 { return simd_mask<_Tp>([&](auto __i) constexpr { return __x[__i]; }); }
3237
3238// masked assignment [simd_mask.where] {{{1
3239
3240// where_expression {{{1
3241// const_where_expression<M, T> {{{2
3242template <typename _M, typename _Tp>
3243 class const_where_expression
3244 {
3245 using _V = _Tp;
3246 static_assert(is_same_v<_V, __remove_cvref_t<_Tp>>);
3247
3248 struct _Wrapper { using value_type = _V; };
3249
3250 protected:
3251 using _Impl = typename _V::_Impl;
3252
3253 using value_type =
3254 typename conditional_t<is_arithmetic_v<_V>, _Wrapper, _V>::value_type;
3255
3256 _GLIBCXX_SIMD_INTRINSIC friend const _M&
3257 __get_mask(const const_where_expression& __x)
3258 { return __x._M_k; }
3259
3260 _GLIBCXX_SIMD_INTRINSIC friend const _Tp&
3261 __get_lvalue(const const_where_expression& __x)
3262 { return __x._M_value; }
3263
3264 const _M& _M_k;
3265 _Tp& _M_value;
3266
3267 public:
3268 const_where_expression(const const_where_expression&) = delete;
3269 const_where_expression& operator=(const const_where_expression&) = delete;
3270
3271 _GLIBCXX_SIMD_INTRINSIC const_where_expression(const _M& __kk, const _Tp& dd)
3272 : _M_k(__kk), _M_value(const_cast<_Tp&>(dd)) {}
3273
3274 _GLIBCXX_SIMD_INTRINSIC _V
3275 operator-() const&&
3276 {
3277 return {__private_init,
3278 _Impl::template _S_masked_unary<negate>(__data(_M_k),
3279 __data(_M_value))};
3280 }
3281
3282 template <typename _Up, typename _Flags>
3283 [[nodiscard]] _GLIBCXX_SIMD_INTRINSIC _V
3284 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
3285 {
3286 return {__private_init,
3287 _Impl::_S_masked_load(__data(_M_value), __data(_M_k),
3288 _Flags::template _S_apply<_V>(__mem))};
3289 }
3290
3291 template <typename _Up, typename _Flags>
3292 _GLIBCXX_SIMD_INTRINSIC void
3293 copy_to(_LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
3294 {
3295 _Impl::_S_masked_store(__data(_M_value),
3296 _Flags::template _S_apply<_V>(__mem),
3297 __data(_M_k));
3298 }
3299 };
3300
3301// const_where_expression<bool, T> {{{2
3302template <typename _Tp>
3303 class const_where_expression<bool, _Tp>
3304 {
3305 using _M = bool;
3306 using _V = _Tp;
3307
3308 static_assert(is_same_v<_V, __remove_cvref_t<_Tp>>);
3309
3310 struct _Wrapper { using value_type = _V; };
3311
3312 protected:
3313 using value_type =
3314 typename conditional_t<is_arithmetic_v<_V>, _Wrapper, _V>::value_type;
3315
3316 _GLIBCXX_SIMD_INTRINSIC friend const _M&
3317 __get_mask(const const_where_expression& __x)
3318 { return __x._M_k; }
3319
3320 _GLIBCXX_SIMD_INTRINSIC friend const _Tp&
3321 __get_lvalue(const const_where_expression& __x)
3322 { return __x._M_value; }
3323
3324 const bool _M_k;
3325 _Tp& _M_value;
3326
3327 public:
3328 const_where_expression(const const_where_expression&) = delete;
3329 const_where_expression& operator=(const const_where_expression&) = delete;
3330
3331 _GLIBCXX_SIMD_INTRINSIC const_where_expression(const bool __kk, const _Tp& dd)
3332 : _M_k(__kk), _M_value(const_cast<_Tp&>(dd)) {}
3333
3334 _GLIBCXX_SIMD_INTRINSIC _V operator-() const&&
3335 { return _M_k ? -_M_value : _M_value; }
3336
3337 template <typename _Up, typename _Flags>
3338 [[nodiscard]] _GLIBCXX_SIMD_INTRINSIC _V
3339 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
3340 { return _M_k ? static_cast<_V>(__mem[0]) : _M_value; }
3341
3342 template <typename _Up, typename _Flags>
3343 _GLIBCXX_SIMD_INTRINSIC void
3344 copy_to(_LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
3345 {
3346 if (_M_k)
3347 __mem[0] = _M_value;
3348 }
3349 };
3350
3351// where_expression<M, T> {{{2
3352template <typename _M, typename _Tp>
3353 class where_expression : public const_where_expression<_M, _Tp>
3354 {
3355 using _Impl = typename const_where_expression<_M, _Tp>::_Impl;
3356
3357 static_assert(!is_const<_Tp>::value,
3358 "where_expression may only be instantiated with __a non-const "
3359 "_Tp parameter");
3360
3361 using typename const_where_expression<_M, _Tp>::value_type;
3362 using const_where_expression<_M, _Tp>::_M_k;
3363 using const_where_expression<_M, _Tp>::_M_value;
3364
3365 static_assert(
3366 is_same<typename _M::abi_type, typename _Tp::abi_type>::value, "");
3367 static_assert(_M::size() == _Tp::size(), "");
3368
3369 _GLIBCXX_SIMD_INTRINSIC friend _Tp& __get_lvalue(where_expression& __x)
3370 { return __x._M_value; }
3371
3372 public:
3373 where_expression(const where_expression&) = delete;
3374 where_expression& operator=(const where_expression&) = delete;
3375
3376 _GLIBCXX_SIMD_INTRINSIC where_expression(const _M& __kk, _Tp& dd)
3377 : const_where_expression<_M, _Tp>(__kk, dd) {}
3378
3379 template <typename _Up>
3380 _GLIBCXX_SIMD_INTRINSIC void operator=(_Up&& __x) &&
3381 {
3382 _Impl::_S_masked_assign(__data(_M_k), __data(_M_value),
3383 __to_value_type_or_member_type<_Tp>(
3384 static_cast<_Up&&>(__x)));
3385 }
3386
3387#define _GLIBCXX_SIMD_OP_(__op, __name) \
3388 template <typename _Up> \
3389 _GLIBCXX_SIMD_INTRINSIC void operator __op##=(_Up&& __x)&& \
3390 { \
3391 _Impl::template _S_masked_cassign( \
3392 __data(_M_k), __data(_M_value), \
3393 __to_value_type_or_member_type<_Tp>(static_cast<_Up&&>(__x)), \
3394 [](auto __impl, auto __lhs, auto __rhs) constexpr { \
3395 return __impl.__name(__lhs, __rhs); \
3396 }); \
3397 } \
3398 static_assert(true)
3399 _GLIBCXX_SIMD_OP_(+, _S_plus);
3400 _GLIBCXX_SIMD_OP_(-, _S_minus);
3401 _GLIBCXX_SIMD_OP_(*, _S_multiplies);
3402 _GLIBCXX_SIMD_OP_(/, _S_divides);
3403 _GLIBCXX_SIMD_OP_(%, _S_modulus);
3404 _GLIBCXX_SIMD_OP_(&, _S_bit_and);
3405 _GLIBCXX_SIMD_OP_(|, _S_bit_or);
3406 _GLIBCXX_SIMD_OP_(^, _S_bit_xor);
3407 _GLIBCXX_SIMD_OP_(<<, _S_shift_left);
3408 _GLIBCXX_SIMD_OP_(>>, _S_shift_right);
3409#undef _GLIBCXX_SIMD_OP_
3410
3411 _GLIBCXX_SIMD_INTRINSIC void operator++() &&
3412 {
3413 __data(_M_value)
3414 = _Impl::template _S_masked_unary<__increment>(__data(_M_k),
3415 __data(_M_value));
3416 }
3417
3418 _GLIBCXX_SIMD_INTRINSIC void operator++(int) &&
3419 {
3420 __data(_M_value)
3421 = _Impl::template _S_masked_unary<__increment>(__data(_M_k),
3422 __data(_M_value));
3423 }
3424
3425 _GLIBCXX_SIMD_INTRINSIC void operator--() &&
3426 {
3427 __data(_M_value)
3428 = _Impl::template _S_masked_unary<__decrement>(__data(_M_k),
3429 __data(_M_value));
3430 }
3431
3432 _GLIBCXX_SIMD_INTRINSIC void operator--(int) &&
3433 {
3434 __data(_M_value)
3435 = _Impl::template _S_masked_unary<__decrement>(__data(_M_k),
3436 __data(_M_value));
3437 }
3438
3439 // intentionally hides const_where_expression::copy_from
3440 template <typename _Up, typename _Flags>
3441 _GLIBCXX_SIMD_INTRINSIC void
3442 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) &&
3443 {
3444 __data(_M_value)
3445 = _Impl::_S_masked_load(__data(_M_value), __data(_M_k),
3446 _Flags::template _S_apply<_Tp>(__mem));
3447 }
3448 };
3449
3450// where_expression<bool, T> {{{2
3451template <typename _Tp>
3452 class where_expression<bool, _Tp> : public const_where_expression<bool, _Tp>
3453 {
3454 using _M = bool;
3455 using typename const_where_expression<_M, _Tp>::value_type;
3456 using const_where_expression<_M, _Tp>::_M_k;
3457 using const_where_expression<_M, _Tp>::_M_value;
3458
3459 public:
3460 where_expression(const where_expression&) = delete;
3461 where_expression& operator=(const where_expression&) = delete;
3462
3463 _GLIBCXX_SIMD_INTRINSIC where_expression(const _M& __kk, _Tp& dd)
3464 : const_where_expression<_M, _Tp>(__kk, dd) {}
3465
3466#define _GLIBCXX_SIMD_OP_(__op) \
3467 template <typename _Up> \
3468 _GLIBCXX_SIMD_INTRINSIC void operator __op(_Up&& __x)&& \
3469 { if (_M_k) _M_value __op static_cast<_Up&&>(__x); }
3470
3471 _GLIBCXX_SIMD_OP_(=)
3472 _GLIBCXX_SIMD_OP_(+=)
3473 _GLIBCXX_SIMD_OP_(-=)
3474 _GLIBCXX_SIMD_OP_(*=)
3475 _GLIBCXX_SIMD_OP_(/=)
3476 _GLIBCXX_SIMD_OP_(%=)
3477 _GLIBCXX_SIMD_OP_(&=)
3478 _GLIBCXX_SIMD_OP_(|=)
3479 _GLIBCXX_SIMD_OP_(^=)
3480 _GLIBCXX_SIMD_OP_(<<=)
3481 _GLIBCXX_SIMD_OP_(>>=)
3482#undef _GLIBCXX_SIMD_OP_
3483
3484 _GLIBCXX_SIMD_INTRINSIC void operator++() &&
3485 { if (_M_k) ++_M_value; }
3486
3487 _GLIBCXX_SIMD_INTRINSIC void operator++(int) &&
3488 { if (_M_k) ++_M_value; }
3489
3490 _GLIBCXX_SIMD_INTRINSIC void operator--() &&
3491 { if (_M_k) --_M_value; }
3492
3493 _GLIBCXX_SIMD_INTRINSIC void operator--(int) &&
3494 { if (_M_k) --_M_value; }
3495
3496 // intentionally hides const_where_expression::copy_from
3497 template <typename _Up, typename _Flags>
3498 _GLIBCXX_SIMD_INTRINSIC void
3499 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) &&
3500 { if (_M_k) _M_value = __mem[0]; }
3501 };
3502
3503// where {{{1
3504template <typename _Tp, typename _Ap>
3505 _GLIBCXX_SIMD_INTRINSIC where_expression<simd_mask<_Tp, _Ap>, simd<_Tp, _Ap>>
3506 where(const typename simd<_Tp, _Ap>::mask_type& __k, simd<_Tp, _Ap>& __value)
3507 { return {__k, __value}; }
3508
3509template <typename _Tp, typename _Ap>
3510 _GLIBCXX_SIMD_INTRINSIC
3511 const_where_expression<simd_mask<_Tp, _Ap>, simd<_Tp, _Ap>>
3512 where(const typename simd<_Tp, _Ap>::mask_type& __k,
3513 const simd<_Tp, _Ap>& __value)
3514 { return {__k, __value}; }
3515
3516template <typename _Tp, typename _Ap>
3517 _GLIBCXX_SIMD_INTRINSIC
3518 where_expression<simd_mask<_Tp, _Ap>, simd_mask<_Tp, _Ap>>
3519 where(const remove_const_t<simd_mask<_Tp, _Ap>>& __k,
3520 simd_mask<_Tp, _Ap>& __value)
3521 { return {__k, __value}; }
3522
3523template <typename _Tp, typename _Ap>
3524 _GLIBCXX_SIMD_INTRINSIC
3525 const_where_expression<simd_mask<_Tp, _Ap>, simd_mask<_Tp, _Ap>>
3526 where(const remove_const_t<simd_mask<_Tp, _Ap>>& __k,
3527 const simd_mask<_Tp, _Ap>& __value)
3528 { return {__k, __value}; }
3529
3530template <typename _Tp>
3531 _GLIBCXX_SIMD_INTRINSIC where_expression<bool, _Tp>
3532 where(_ExactBool __k, _Tp& __value)
3533 { return {__k, __value}; }
3534
3535template <typename _Tp>
3536 _GLIBCXX_SIMD_INTRINSIC const_where_expression<bool, _Tp>
3537 where(_ExactBool __k, const _Tp& __value)
3538 { return {__k, __value}; }
3539
3540 template <typename _Tp, typename _Ap>
3541 void where(bool __k, simd<_Tp, _Ap>& __value) = delete;
3542
3543 template <typename _Tp, typename _Ap>
3544 void where(bool __k, const simd<_Tp, _Ap>& __value) = delete;
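// Editor's sketch (not part of the library): where() returns a proxy through
// which assignments and compound assignments modify only the lanes selected
// by the mask. Assuming <experimental/simd> is included and
// `namespace stdx = std::experimental`:
//
//   stdx::native_simd<float> v([](int i) { return i - 2.f; });
//   where(v < 0.f, v) = 0.f;  // negative lanes become 0.f, others unchanged
//   where(v > 1.f, v) *= 2.f; // masked compound assignment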
3545
3546// proposed mask iterations {{{1
3547namespace __proposed {
3548template <size_t _Np>
3549 class where_range
3550 {
3551 const bitset<_Np> __bits;
3552
3553 public:
3554 where_range(bitset<_Np> __b) : __bits(__b) {}
3555
3556 class iterator
3557 {
3558 size_t __mask;
3559 size_t __bit;
3560
3561 _GLIBCXX_SIMD_INTRINSIC void __next_bit()
3562 { __bit = __builtin_ctzl(__mask); }
3563
3564 _GLIBCXX_SIMD_INTRINSIC void __reset_lsb()
3565 {
3566 // 01100100 - 1 = 01100011
3567 __mask &= (__mask - 1);
3568 // __asm__("btr %1,%0" : "+r"(__mask) : "r"(__bit));
3569 }
3570
3571 public:
3572 iterator(decltype(__mask) __m) : __mask(__m) { __next_bit(); }
3573 iterator(const iterator&) = default;
3574 iterator(iterator&&) = default;
3575
3576 _GLIBCXX_SIMD_ALWAYS_INLINE size_t operator->() const
3577 { return __bit; }
3578
3579 _GLIBCXX_SIMD_ALWAYS_INLINE size_t operator*() const
3580 { return __bit; }
3581
3582 _GLIBCXX_SIMD_ALWAYS_INLINE iterator& operator++()
3583 {
3584 __reset_lsb();
3585 __next_bit();
3586 return *this;
3587 }
3588
3589 _GLIBCXX_SIMD_ALWAYS_INLINE iterator operator++(int)
3590 {
3591 iterator __tmp = *this;
3592 __reset_lsb();
3593 __next_bit();
3594 return __tmp;
3595 }
3596
3597 _GLIBCXX_SIMD_ALWAYS_INLINE bool operator==(const iterator& __rhs) const
3598 { return __mask == __rhs.__mask; }
3599
3600 _GLIBCXX_SIMD_ALWAYS_INLINE bool operator!=(const iterator& __rhs) const
3601 { return __mask != __rhs.__mask; }
3602 };
3603
3604 iterator begin() const
3605 { return __bits.to_ullong(); }
3606
3607 iterator end() const
3608 { return 0; }
3609 };
3610
3611template <typename _Tp, typename _Ap>
3612 where_range<simd_size_v<_Tp, _Ap>>
3613 where(const simd_mask<_Tp, _Ap>& __k)
3614 { return __k.__to_bitset(); }
3615
3616} // namespace __proposed
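// Editor's sketch of the proposed iteration (hedged: __proposed interfaces
// may change): where_range visits the indices of the set mask lanes, least
// significant bit first.
//
//   stdx::native_simd<int> v([](int i) { return i; });
//   for (size_t lane : stdx::__proposed::where(v % 2 == 0))
//     std::printf("lane %zu holds an even value\n", lane); // needs <cstdio>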
3617
3618// }}}1
3619// reductions [simd.reductions] {{{1
3620template <typename _Tp, typename _Abi, typename _BinaryOperation = plus<>>
3621 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
3622 reduce(const simd<_Tp, _Abi>& __v,
3623 _BinaryOperation __binary_op = _BinaryOperation())
3624 { return _Abi::_SimdImpl::_S_reduce(__v, __binary_op); }
3625
3626template <typename _M, typename _V, typename _BinaryOperation = plus<>>
3627 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3628 reduce(const const_where_expression<_M, _V>& __x,
3629 typename _V::value_type __identity_element,
3630 _BinaryOperation __binary_op)
3631 {
3632 if (__builtin_expect(none_of(__get_mask(__x)), false))
3633 return __identity_element;
3634
3635 _V __tmp = __identity_element;
3636 _V::_Impl::_S_masked_assign(__data(__get_mask(__x)), __data(__tmp),
3637 __data(__get_lvalue(__x)));
3638 return reduce(__tmp, __binary_op);
3639 }
3640
3641template <typename _M, typename _V>
3642 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3643 reduce(const const_where_expression<_M, _V>& __x, plus<> __binary_op = {})
3644 { return reduce(__x, 0, __binary_op); }
3645
3646template <typename _M, typename _V>
3647 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3648 reduce(const const_where_expression<_M, _V>& __x, multiplies<> __binary_op)
3649 { return reduce(__x, 1, __binary_op); }
3650
3651template <typename _M, typename _V>
3652 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3653 reduce(const const_where_expression<_M, _V>& __x, bit_and<> __binary_op)
3654 { return reduce(__x, ~typename _V::value_type(), __binary_op); }
3655
3656template <typename _M, typename _V>
3657 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3658 reduce(const const_where_expression<_M, _V>& __x, bit_or<> __binary_op)
3659 { return reduce(__x, 0, __binary_op); }
3660
3661template <typename _M, typename _V>
3662 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3663 reduce(const const_where_expression<_M, _V>& __x, bit_xor<> __binary_op)
3664 { return reduce(__x, 0, __binary_op); }
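// Editor's sketch: a masked reduction first fills the unselected lanes with
// the identity element and then reduces over all lanes, so the result depends
// only on the selected lanes.
//
//   stdx::native_simd<int> v([](int i) { return i + 1; });
//   int even_sum = reduce(where(v % 2 == 0, v), 0, std::plus<>{});
//   int pos_sum  = reduce(where(v > 0, v)); // plus<> and identity 0 default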
3665
3666template <typename _Tp, typename _Abi>
3667 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
3668 hmin(const simd<_Tp, _Abi>& __v) noexcept
3669 {
3670 return _Abi::_SimdImpl::_S_reduce(__v, __detail::_Minimum());
3671 }
3672
3673template <typename _Tp, typename _Abi>
3674 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
3675 hmax(const simd<_Tp, _Abi>& __v) noexcept
3676 {
3677 return _Abi::_SimdImpl::_S_reduce(__v, __detail::_Maximum());
3678 }
3679
3680template <typename _M, typename _V>
3681 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3682 typename _V::value_type
3683 hmin(const const_where_expression<_M, _V>& __x) noexcept
3684 {
3685 using _Tp = typename _V::value_type;
3686 constexpr _Tp __id_elem =
3687#ifdef __FINITE_MATH_ONLY__
3688 __finite_max_v<_Tp>;
3689#else
3690 __value_or<__infinity, _Tp>(__finite_max_v<_Tp>);
3691#endif
3692 _V __tmp = __id_elem;
3693 _V::_Impl::_S_masked_assign(__data(__get_mask(__x)), __data(__tmp),
3694 __data(__get_lvalue(__x)));
3695 return _V::abi_type::_SimdImpl::_S_reduce(__tmp, __detail::_Minimum());
3696 }
3697
3698template <typename _M, typename _V>
3699 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3700 typename _V::value_type
3701 hmax(const const_where_expression<_M, _V>& __x) noexcept
3702 {
3703 using _Tp = typename _V::value_type;
3704 constexpr _Tp __id_elem =
3705#ifdef __FINITE_MATH_ONLY__
3706 __finite_min_v<_Tp>;
3707#else
3708 [] {
3709 if constexpr (__value_exists_v<__infinity, _Tp>)
3710 return -__infinity_v<_Tp>;
3711 else
3712 return __finite_min_v<_Tp>;
3713 }();
3714#endif
3715 _V __tmp = __id_elem;
3716 _V::_Impl::_S_masked_assign(__data(__get_mask(__x)), __data(__tmp),
3717 __data(__get_lvalue(__x)));
3718 return _V::abi_type::_SimdImpl::_S_reduce(__tmp, __detail::_Maximum());
3719 }
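// Editor's sketch: with a mask, hmin/hmax fill the unselected lanes with an
// identity (+inf resp. -inf where available, otherwise the finite extremes)
// so those lanes cannot win the reduction.
//
//   stdx::native_simd<float> v([](int i) { return 1.f - i; });
//   float smallest_positive = hmin(where(v > 0.f, v));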
3720
3721// }}}1
3722// algorithms [simd.alg] {{{
3723template <typename _Tp, typename _Ap>
3724 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
3725 min(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
3726 { return {__private_init, _Ap::_SimdImpl::_S_min(__data(__a), __data(__b))}; }
3727
3728template <typename _Tp, typename _Ap>
3729 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
3730 max(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
3731 { return {__private_init, _Ap::_SimdImpl::_S_max(__data(__a), __data(__b))}; }
3732
3733template <typename _Tp, typename _Ap>
3734 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3735 pair<simd<_Tp, _Ap>, simd<_Tp, _Ap>>
3736 minmax(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
3737 {
3738 const auto pair_of_members
3739 = _Ap::_SimdImpl::_S_minmax(__data(__a), __data(__b));
3740 return {simd<_Tp, _Ap>(__private_init, pair_of_members.first),
3741 simd<_Tp, _Ap>(__private_init, pair_of_members.second)};
3742 }
3743
3744template <typename _Tp, typename _Ap>
3745 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
3746 clamp(const simd<_Tp, _Ap>& __v, const simd<_Tp, _Ap>& __lo,
3747 const simd<_Tp, _Ap>& __hi)
3748 {
3749 using _Impl = typename _Ap::_SimdImpl;
3750 return {__private_init,
3751 _Impl::_S_min(__data(__hi),
3752 _Impl::_S_max(__data(__lo), __data(__v)))};
3753 }
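// Usage sketch (editor's addition): min, max, minmax, and clamp all operate
// element-wise.
//
//   stdx::native_simd<int> a = 3, b([](int i) { return i; });
//   auto lo = min(a, b);          // per-lane minimum
//   auto c  = clamp(b, a, a + 4); // clamps each lane of b into [3, 7]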
3754
3755// }}}
3756
3757template <size_t... _Sizes, typename _Tp, typename _Ap,
3758 typename = enable_if_t<((_Sizes + ...) == simd<_Tp, _Ap>::size())>>
3759 inline tuple<simd<_Tp, simd_abi::deduce_t<_Tp, _Sizes>>...>
3760 split(const simd<_Tp, _Ap>&);
3761
3762// __extract_part {{{
3763template <int _Index, int _Total, int _Combine = 1, typename _Tp, size_t _Np>
3764 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST
3765 _SimdWrapper<_Tp, _Np / _Total * _Combine>
3766 __extract_part(const _SimdWrapper<_Tp, _Np> __x);
3767
3768template <int _Index, int _Parts, int _Combine = 1, typename _Tp, typename _A0,
3769	  typename... _As>
3770 _GLIBCXX_SIMD_INTRINSIC auto
3771 __extract_part(const _SimdTuple<_Tp, _A0, _As...>& __x);
3772
3773// }}}
3774// _SizeList {{{
3775template <size_t _V0, size_t... _Values>
3776 struct _SizeList
3777 {
3778 template <size_t _I>
3779 static constexpr size_t _S_at(_SizeConstant<_I> = {})
3780 {
3781 if constexpr (_I == 0)
3782 return _V0;
3783 else
3784 return _SizeList<_Values...>::template _S_at<_I - 1>();
3785 }
3786
3787 template <size_t _I>
3788 static constexpr auto _S_before(_SizeConstant<_I> = {})
3789 {
3790 if constexpr (_I == 0)
3791 return _SizeConstant<0>();
3792 else
3793 return _SizeConstant<
3794 _V0 + _SizeList<_Values...>::template _S_before<_I - 1>()>();
3795 }
3796
3797 template <size_t _Np>
3798 static constexpr auto _S_pop_front(_SizeConstant<_Np> = {})
3799 {
3800 if constexpr (_Np == 0)
3801 return _SizeList();
3802 else
3803 return _SizeList<_Values...>::template _S_pop_front<_Np - 1>();
3804 }
3805 };
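// Editor's sketch of the _SizeList arithmetic: _S_at yields the _I-th size
// and _S_before the sum of the sizes preceding it (i.e. an element offset).
// For example:
//
//   static_assert(_SizeList<4, 2, 2>::_S_at<1>() == 2);
//   static_assert(_SizeList<4, 2, 2>::_S_before<2>() == 6);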
3806
3807// }}}
3808// __extract_center {{{
3809template <typename _Tp, size_t _Np>
3810 _GLIBCXX_SIMD_INTRINSIC _SimdWrapper<_Tp, _Np / 2>
3811 __extract_center(_SimdWrapper<_Tp, _Np> __x)
3812 {
3813 static_assert(_Np >= 4);
3814 static_assert(_Np % 4 == 0); // x0 - x1 - x2 - x3 -> return {x1, x2}
3815#if _GLIBCXX_SIMD_X86INTRIN // {{{
3816 if constexpr (__have_avx512f && sizeof(_Tp) * _Np == 64)
3817 {
3818 const auto __intrin = __to_intrin(__x);
3819 if constexpr (is_integral_v<_Tp>)
3820 return __vector_bitcast<_Tp>(_mm512_castsi512_si256(
3821 _mm512_shuffle_i32x4(__intrin, __intrin,
3822 1 + 2 * 0x4 + 2 * 0x10 + 3 * 0x40)));
3823 else if constexpr (sizeof(_Tp) == 4)
3824 return __vector_bitcast<_Tp>(_mm512_castps512_ps256(
3825 _mm512_shuffle_f32x4(__intrin, __intrin,
3826 1 + 2 * 0x4 + 2 * 0x10 + 3 * 0x40)));
3827 else if constexpr (sizeof(_Tp) == 8)
3828 return __vector_bitcast<_Tp>(_mm512_castpd512_pd256(
3829 _mm512_shuffle_f64x2(__intrin, __intrin,
3830 1 + 2 * 0x4 + 2 * 0x10 + 3 * 0x40)));
3831 else
3832 __assert_unreachable<_Tp>();
3833 }
3834 else if constexpr (sizeof(_Tp) * _Np == 32 && is_floating_point_v<_Tp>)
3835 return __vector_bitcast<_Tp>(
3836 _mm_shuffle_pd(__lo128(__vector_bitcast<double>(__x)),
3837 __hi128(__vector_bitcast<double>(__x)), 1));
3838 else if constexpr (sizeof(__x) == 32 && sizeof(_Tp) * _Np <= 32)
3839 return __vector_bitcast<_Tp>(
3840 _mm_alignr_epi8(__hi128(__vector_bitcast<_LLong>(__x)),
3841 __lo128(__vector_bitcast<_LLong>(__x)),
3842 sizeof(_Tp) * _Np / 4));
3843 else
3844#endif // _GLIBCXX_SIMD_X86INTRIN }}}
3845 {
3846 __vector_type_t<_Tp, _Np / 2> __r;
3847 __builtin_memcpy(&__r,
3848 reinterpret_cast<const char*>(&__x)
3849 + sizeof(_Tp) * _Np / 4,
3850 sizeof(_Tp) * _Np / 2);
3851 return __r;
3852 }
3853 }
3854
3855template <typename _Tp, typename _A0, typename... _As>
3856 _GLIBCXX_SIMD_INTRINSIC
3857 _SimdWrapper<_Tp, _SimdTuple<_Tp, _A0, _As...>::_S_size() / 2>
3858 __extract_center(const _SimdTuple<_Tp, _A0, _As...>& __x)
3859 {
3860 if constexpr (sizeof...(_As) == 0)
3861 return __extract_center(__x.first);
3862 else
3863 return __extract_part<1, 4, 2>(__x);
3864 }
3865
3866// }}}
3867// __split_wrapper {{{
3868template <size_t... _Sizes, typename _Tp, typename... _As>
3869 auto
3870 __split_wrapper(_SizeList<_Sizes...>, const _SimdTuple<_Tp, _As...>& __x)
3871 {
3872 return split<_Sizes...>(
3873 fixed_size_simd<_Tp, _SimdTuple<_Tp, _As...>::_S_size()>(__private_init,
3874 __x));
3875 }
3876
3877// }}}
3878
3879// split<simd>(simd) {{{
3880template <typename _V, typename _Ap,
3881	  size_t _Parts = simd_size_v<typename _V::value_type, _Ap> / _V::size()>
3882  enable_if_t<simd_size_v<typename _V::value_type, _Ap> == _Parts * _V::size()
3883	      && is_simd_v<_V>, array<_V, _Parts>>
3884 split(const simd<typename _V::value_type, _Ap>& __x)
3885 {
3886 using _Tp = typename _V::value_type;
3887    if constexpr (_Parts == 1)
3888 {
3889 return {simd_cast<_V>(__x)};
3890 }
3891 else if (__x._M_is_constprop())
3892 {
3893	return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3894 auto __i) constexpr {
3895 return _V([&](auto __j) constexpr {
3896 return __x[__i * _V::size() + __j];
3897 });
3898 });
3899 }
3900 else if constexpr (
3901 __is_fixed_size_abi_v<_Ap>
3902 && (is_same_v<typename _V::abi_type, simd_abi::scalar>
3903 || (__is_fixed_size_abi_v<typename _V::abi_type>
3904 && sizeof(_V) == sizeof(_Tp) * _V::size() // _V doesn't have padding
3905 )))
3906 {
3907 // fixed_size -> fixed_size (w/o padding) or scalar
3908#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
3909 const __may_alias<_Tp>* const __element_ptr
3910 = reinterpret_cast<const __may_alias<_Tp>*>(&__data(__x));
3911	return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3912 auto __i) constexpr {
3913 return _V(__element_ptr + __i * _V::size(), vector_aligned);
3914 });
3915#else
3916 const auto& __xx = __data(__x);
3917	return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3918 auto __i) constexpr {
3919 [[maybe_unused]] constexpr size_t __offset
3920 = decltype(__i)::value * _V::size();
3921 return _V([&](auto __j) constexpr {
3922 constexpr _SizeConstant<__j + __offset> __k;
3923 return __xx[__k];
3924 });
3925 });
3926#endif
3927 }
3928 else if constexpr (is_same_v<typename _V::abi_type, simd_abi::scalar>)
3929 {
3930 // normally memcpy should work here as well
3931	return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3932 auto __i) constexpr { return __x[__i]; });
3933 }
3934 else
3935 {
3936	return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3937 auto __i) constexpr {
3938 if constexpr (__is_fixed_size_abi_v<typename _V::abi_type>)
3939 return _V([&](auto __j) constexpr {
3940 return __x[__i * _V::size() + __j];
3941 });
3942 else
3943 return _V(__private_init,
3944		     __extract_part<decltype(__i)::value, _Parts>(__data(__x)));
3945 });
3946 }
3947 }
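// Usage sketch (editor's addition): splitting into equal chunks by naming the
// destination simd type.
//
//   stdx::fixed_size_simd<int, 8> x([](int i) { return i; });
//   auto parts = stdx::split<stdx::fixed_size_simd<int, 4>>(x);
//   // parts is array<fixed_size_simd<int, 4>, 2>; parts[1][0] == 4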
3948
3949// }}}
3950// split<simd_mask>(simd_mask) {{{
3951template <typename _V, typename _Ap,
3952 size_t _Parts
3953 = simd_size_v<typename _V::simd_type::value_type, _Ap> / _V::size()>
3954 enable_if_t<is_simd_mask_v<_V> && simd_size_v<typename
3955 _V::simd_type::value_type, _Ap> == _Parts * _V::size(), array<_V, _Parts>>
3956 split(const simd_mask<typename _V::simd_type::value_type, _Ap>& __x)
3957 {
3958 if constexpr (is_same_v<_Ap, typename _V::abi_type>)
3959 return {__x};
3960 else if constexpr (_Parts == 1)
3961 return {__proposed::static_simd_cast<_V>(__x)};
3962 else if constexpr (_Parts == 2 && __is_sse_abi<typename _V::abi_type>()
3963 && __is_avx_abi<_Ap>())
3964 return {_V(__private_init, __lo128(__data(__x))),
3965 _V(__private_init, __hi128(__data(__x)))};
3966 else if constexpr (_V::size() <= __CHAR_BIT__ * sizeof(_ULLong))
3967 {
3968 const bitset __bits = __x.__to_bitset();
3969 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3970 auto __i) constexpr {
3971 constexpr size_t __offset = __i * _V::size();
3972 return _V(__bitset_init, (__bits >> __offset).to_ullong());
3973 });
3974 }
3975 else
3976 {
3977 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3978 auto __i) constexpr {
3979 constexpr size_t __offset = __i * _V::size();
3980 return _V(
3981 __private_init, [&](auto __j) constexpr {
3982 return __x[__j + __offset];
3983 });
3984 });
3985 }
3986 }
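// Editor's sketch: the mask overload splits analogously; continuing the
// example above:
//
//   auto mask_parts = stdx::split<stdx::fixed_size_simd_mask<int, 4>>(x > 3);
//   // mask_parts[0] is all false, mask_parts[1] all true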
3987
3988// }}}
3989// split<_Sizes...>(simd) {{{
3990template <size_t... _Sizes, typename _Tp, typename _Ap, typename>
3991 _GLIBCXX_SIMD_ALWAYS_INLINE
3992 tuple<simd<_Tp, simd_abi::deduce_t<_Tp, _Sizes>>...>
3993 split(const simd<_Tp, _Ap>& __x)
3994 {
3995 using _SL = _SizeList<_Sizes...>;
3996 using _Tuple = tuple<__deduced_simd<_Tp, _Sizes>...>;
3997 constexpr size_t _Np = simd_size_v<_Tp, _Ap>;
3998 constexpr size_t _N0 = _SL::template _S_at<0>();
3999 using _V = __deduced_simd<_Tp, _N0>;
4000
4001 if (__x._M_is_constprop())
4002 return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>([&](
4003 auto __i) constexpr {
4004 using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
4005 constexpr size_t __offset = _SL::_S_before(__i);
4006 return _Vi([&](auto __j) constexpr { return __x[__offset + __j]; });
4007 });
4008 else if constexpr (_Np == _N0)
4009 {
4010 static_assert(sizeof...(_Sizes) == 1);
4011 return {simd_cast<_V>(__x)};
4012 }
4013 else if constexpr // split from fixed_size, such that __x::first.size == _N0
4014 (__is_fixed_size_abi_v<
4015 _Ap> && __fixed_size_storage_t<_Tp, _Np>::_S_first_size == _N0)
4016 {
4017 static_assert(
4018 !__is_fixed_size_abi_v<typename _V::abi_type>,
4019	"How can <_Tp, _Np> be a single _SimdTuple entry but a "
4020 "fixed_size_simd "
4021 "when deduced?");
4022 // extract first and recurse (__split_wrapper is needed to deduce a new
4023 // _Sizes pack)
4024 return tuple_cat(make_tuple(_V(__private_init, __data(__x).first)),
4025 __split_wrapper(_SL::template _S_pop_front<1>(),
4026 __data(__x).second));
4027 }
4028 else if constexpr ((!is_same_v<simd_abi::scalar,
4029 simd_abi::deduce_t<_Tp, _Sizes>> && ...)
4030 && (!__is_fixed_size_abi_v<
4031 simd_abi::deduce_t<_Tp, _Sizes>> && ...))
4032 {
4033 if constexpr (((_Sizes * 2 == _Np) && ...))
4034 return {{__private_init, __extract_part<0, 2>(__data(__x))},
4035 {__private_init, __extract_part<1, 2>(__data(__x))}};
4036 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4037 _SizeList<_Np / 3, _Np / 3, _Np / 3>>)
4038 return {{__private_init, __extract_part<0, 3>(__data(__x))},
4039 {__private_init, __extract_part<1, 3>(__data(__x))},
4040 {__private_init, __extract_part<2, 3>(__data(__x))}};
4041 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4042 _SizeList<2 * _Np / 3, _Np / 3>>)
4043 return {{__private_init, __extract_part<0, 3, 2>(__data(__x))},
4044 {__private_init, __extract_part<2, 3>(__data(__x))}};
4045 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4046 _SizeList<_Np / 3, 2 * _Np / 3>>)
4047 return {{__private_init, __extract_part<0, 3>(__data(__x))},
4048 {__private_init, __extract_part<1, 3, 2>(__data(__x))}};
4049 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4050 _SizeList<_Np / 2, _Np / 4, _Np / 4>>)
4051 return {{__private_init, __extract_part<0, 2>(__data(__x))},
4052 {__private_init, __extract_part<2, 4>(__data(__x))},
4053 {__private_init, __extract_part<3, 4>(__data(__x))}};
4054 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4055 _SizeList<_Np / 4, _Np / 4, _Np / 2>>)
4056 return {{__private_init, __extract_part<0, 4>(__data(__x))},
4057 {__private_init, __extract_part<1, 4>(__data(__x))},
4058 {__private_init, __extract_part<1, 2>(__data(__x))}};
4059 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4060 _SizeList<_Np / 4, _Np / 2, _Np / 4>>)
4061 return {{__private_init, __extract_part<0, 4>(__data(__x))},
4062 {__private_init, __extract_center(__data(__x))},
4063 {__private_init, __extract_part<3, 4>(__data(__x))}};
4064 else if constexpr (((_Sizes * 4 == _Np) && ...))
4065 return {{__private_init, __extract_part<0, 4>(__data(__x))},
4066 {__private_init, __extract_part<1, 4>(__data(__x))},
4067 {__private_init, __extract_part<2, 4>(__data(__x))},
4068 {__private_init, __extract_part<3, 4>(__data(__x))}};
4069 // else fall through
4070 }
4071#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
4072 const __may_alias<_Tp>* const __element_ptr
4073 = reinterpret_cast<const __may_alias<_Tp>*>(&__x);
4074 return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>([&](
4075 auto __i) constexpr {
4076 using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
4077 constexpr size_t __offset = _SL::_S_before(__i);
4078 constexpr size_t __base_align = alignof(simd<_Tp, _Ap>);
4079 constexpr size_t __a
4080 = __base_align - ((__offset * sizeof(_Tp)) % __base_align);
4081 constexpr size_t __b = ((__a - 1) & __a) ^ __a;
4082 constexpr size_t __alignment = __b == 0 ? __a : __b;
4083 return _Vi(__element_ptr + __offset, overaligned<__alignment>);
4084 });
4085#else
4086 return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>([&](
4087 auto __i) constexpr {
4088 using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
4089 const auto& __xx = __data(__x);
4090 using _Offset = decltype(_SL::_S_before(__i));
4091 return _Vi([&](auto __j) constexpr {
4092 constexpr _SizeConstant<_Offset::value + __j> __k;
4093 return __xx[__k];
4094 });
4095 });
4096#endif
4097 }
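// Usage sketch (editor's addition): unequal pieces yield a tuple, which works
// nicely with structured bindings.
//
//   stdx::fixed_size_simd<int, 8> x([](int i) { return i; });
//   auto [head, tail] = stdx::split<2, 6>(x); // simds of size 2 and 6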
4098
4099// }}}
4100
4101// __subscript_in_pack {{{
4102template <size_t _I, typename _Tp, typename _Ap, typename... _As>
4103 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
4104 __subscript_in_pack(const simd<_Tp, _Ap>& __x, const simd<_Tp, _As>&... __xs)
4105 {
4106 if constexpr (_I < simd_size_v<_Tp, _Ap>)
4107 return __x[_I];
4108 else
4109 return __subscript_in_pack<_I - simd_size_v<_Tp, _Ap>>(__xs...);
4110 }
4111
4112// }}}
4113// __store_pack_of_simd {{{
4114template <typename _Tp, typename _A0, typename... _As>
4115 _GLIBCXX_SIMD_INTRINSIC void
4116 __store_pack_of_simd(char* __mem, const simd<_Tp, _A0>& __x0,
4117 const simd<_Tp, _As>&... __xs)
4118 {
4119 constexpr size_t __n_bytes = sizeof(_Tp) * simd_size_v<_Tp, _A0>;
4120 __builtin_memcpy(__mem, &__data(__x0), __n_bytes);
4121 if constexpr (sizeof...(__xs) > 0)
4122 __store_pack_of_simd(__mem + __n_bytes, __xs...);
4123 }
4124
4125// }}}
4126// concat(simd...) {{{
4127template <typename _Tp, typename... _As, typename = __detail::__odr_helper>
4128 inline _GLIBCXX_SIMD_CONSTEXPR
4129 simd<_Tp, simd_abi::deduce_t<_Tp, (simd_size_v<_Tp, _As> + ...)>>
4130 concat(const simd<_Tp, _As>&... __xs)
4131 {
4132 using _Rp = __deduced_simd<_Tp, (simd_size_v<_Tp, _As> + ...)>;
4133 if constexpr (sizeof...(__xs) == 1)
4134 return simd_cast<_Rp>(__xs...);
4135 else if ((... && __xs._M_is_constprop()))
4136 return simd<_Tp,
4137 simd_abi::deduce_t<_Tp, (simd_size_v<_Tp, _As> + ...)>>([&](
4138 auto __i) constexpr { return __subscript_in_pack<__i>(__xs...); });
4139 else
4140 {
4141 _Rp __r{};
4142 __store_pack_of_simd(reinterpret_cast<char*>(&__data(__r)), __xs...);
4143 return __r;
4144 }
4145 }
4146
4147// }}}
4148// concat(array<simd>) {{{
4149template <typename _Tp, typename _Abi, size_t _Np>
4150 _GLIBCXX_SIMD_ALWAYS_INLINE
4151 _GLIBCXX_SIMD_CONSTEXPR __deduced_simd<_Tp, simd_size_v<_Tp, _Abi> * _Np>
4152 concat(const array<simd<_Tp, _Abi>, _Np>& __x)
4153 {
4154 return __call_with_subscripts<_Np>(__x, [](const auto&... __xs) {
4155 return concat(__xs...);
4156 });
4157 }
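// Usage sketch (editor's addition): concat is the inverse of split, both for
// a pack of simds and for an array of equally sized simds. Continuing the
// sketches above:
//
//   auto y = stdx::concat(head, tail); // 2 + 6 == 8 lanes again
//   auto z = stdx::concat(parts);      // rejoins the array from split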
4158
4159// }}}
4160
4161/// @cond undocumented
4162// _SmartReference {{{
4163template <typename _Up, typename _Accessor = _Up,
4164 typename _ValueType = typename _Up::value_type>
4165 class _SmartReference
4166 {
4167 friend _Accessor;
4168 int _M_index;
4169 _Up& _M_obj;
4170
4171 _GLIBCXX_SIMD_INTRINSIC constexpr _ValueType _M_read() const noexcept
4172 {
4173 if constexpr (is_arithmetic_v<_Up>)
4174 return _M_obj;
4175 else
4176 return _M_obj[_M_index];
4177 }
4178
4179 template <typename _Tp>
4180 _GLIBCXX_SIMD_INTRINSIC constexpr void _M_write(_Tp&& __x) const
4181 { _Accessor::_S_set(_M_obj, _M_index, static_cast<_Tp&&>(__x)); }
4182
4183 public:
4184 _GLIBCXX_SIMD_INTRINSIC constexpr
4185 _SmartReference(_Up& __o, int __i) noexcept
4186 : _M_index(__i), _M_obj(__o) {}
4187
4188 using value_type = _ValueType;
4189
4190 _GLIBCXX_SIMD_INTRINSIC _SmartReference(const _SmartReference&) = delete;
4191
4192 _GLIBCXX_SIMD_INTRINSIC constexpr operator value_type() const noexcept
4193 { return _M_read(); }
4194
4195 template <typename _Tp,
4196 typename
4197 = _ValuePreservingOrInt<__remove_cvref_t<_Tp>, value_type>>
4198 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference operator=(_Tp&& __x) &&
4199 {
4200 _M_write(static_cast<_Tp&&>(__x));
4201 return {_M_obj, _M_index};
4202 }
4203
4204#define _GLIBCXX_SIMD_OP_(__op) \
4205 template <typename _Tp, \
4206 typename _TT \
4207 = decltype(declval<value_type>() __op declval<_Tp>()), \
4208 typename = _ValuePreservingOrInt<__remove_cvref_t<_Tp>, _TT>, \
4209 typename = _ValuePreservingOrInt<_TT, value_type>> \
4210 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference \
4211 operator __op##=(_Tp&& __x) && \
4212 { \
4213 const value_type& __lhs = _M_read(); \
4214 _M_write(__lhs __op __x); \
4215 return {_M_obj, _M_index}; \
4216 }
4217 _GLIBCXX_SIMD_ALL_ARITHMETICS(_GLIBCXX_SIMD_OP_);
4218 _GLIBCXX_SIMD_ALL_SHIFTS(_GLIBCXX_SIMD_OP_);
4219 _GLIBCXX_SIMD_ALL_BINARY(_GLIBCXX_SIMD_OP_);
4220#undef _GLIBCXX_SIMD_OP_
4221
4222 template <typename _Tp = void,
4223 typename
4224 = decltype(++declval<conditional_t<true, value_type, _Tp>&>())>
4225 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference operator++() &&
4226 {
4227 value_type __x = _M_read();
4228 _M_write(++__x);
4229 return {_M_obj, _M_index};
4230 }
4231
4232 template <typename _Tp = void,
4233 typename
4234 = decltype(declval<conditional_t<true, value_type, _Tp>&>()++)>
4235 _GLIBCXX_SIMD_INTRINSIC constexpr value_type operator++(int) &&
4236 {
4237 const value_type __r = _M_read();
4238 value_type __x = __r;
4239 _M_write(++__x);
4240 return __r;
4241 }
4242
4243 template <typename _Tp = void,
4244 typename
4245 = decltype(--declval<conditional_t<true, value_type, _Tp>&>())>
4246 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference operator--() &&
4247 {
4248 value_type __x = _M_read();
4249 _M_write(--__x);
4250 return {_M_obj, _M_index};
4251 }
4252
4253 template <typename _Tp = void,
4254 typename
4255 = decltype(declval<conditional_t<true, value_type, _Tp>&>()--)>
4256 _GLIBCXX_SIMD_INTRINSIC constexpr value_type operator--(int) &&
4257 {
4258 const value_type __r = _M_read();
4259 value_type __x = __r;
4260 _M_write(--__x);
4261 return __r;
4262 }
4263
4264 _GLIBCXX_SIMD_INTRINSIC friend void
4265 swap(_SmartReference&& __a, _SmartReference&& __b) noexcept(
4266 conjunction<
4267 is_nothrow_constructible<value_type, _SmartReference&&>,
4268 is_nothrow_assignable<_SmartReference&&, value_type&&>>::value)
4269 {
4270 value_type __tmp = static_cast<_SmartReference&&>(__a);
4271 static_cast<_SmartReference&&>(__a) = static_cast<value_type>(__b);
4272 static_cast<_SmartReference&&>(__b) = std::move(__tmp);
4273 }
4274
4275 _GLIBCXX_SIMD_INTRINSIC friend void
4276 swap(value_type& __a, _SmartReference&& __b) noexcept(
4277 conjunction<
4278 is_nothrow_constructible<value_type, value_type&&>,
4279 is_nothrow_assignable<value_type&, value_type&&>,
4280 is_nothrow_assignable<_SmartReference&&, value_type&&>>::value)
4281 {
4282 value_type __tmp(std::move(__a));
4283 __a = static_cast<value_type>(__b);
4284 static_cast<_SmartReference&&>(__b) = std::move(__tmp);
4285 }
4286
4287 _GLIBCXX_SIMD_INTRINSIC friend void
4288 swap(_SmartReference&& __a, value_type& __b) noexcept(
4289 conjunction<
4290 is_nothrow_constructible<value_type, _SmartReference&&>,
4291 is_nothrow_assignable<value_type&, value_type&&>,
4292 is_nothrow_assignable<_SmartReference&&, value_type&&>>::value)
4293 {
4294 value_type __tmp(__a);
4295 static_cast<_SmartReference&&>(__a) = std::move(__b);
4296 __b = std::move(__tmp);
4297 }
4298 };
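// Editor's sketch: _SmartReference is the proxy type behind simd::operator[]
// and simd_mask::operator[]; reads go through the conversion to value_type,
// writes through _Accessor::_S_set.
//
//   stdx::native_simd<int> v = 0;
//   v[0] = 42;    // assignment through the proxy
//   int x = v[0]; // read via conversion to value_type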
4299
4300// }}}
4301// __scalar_abi_wrapper {{{
4302template <int _Bytes>
4303 struct __scalar_abi_wrapper
4304 {
4305 template <typename _Tp> static constexpr size_t _S_full_size = 1;
4306 template <typename _Tp> static constexpr size_t _S_size = 1;
4307 template <typename _Tp> static constexpr size_t _S_is_partial = false;
4308
4309 template <typename _Tp, typename _Abi = simd_abi::scalar>
4310 static constexpr bool _S_is_valid_v
4311 = _Abi::template _IsValid<_Tp>::value && sizeof(_Tp) == _Bytes;
4312 };
4313
4314// }}}
4315// __decay_abi metafunction {{{
4316template <typename _Tp>
4317 struct __decay_abi { using type = _Tp; };
4318
4319template <int _Bytes>
4320 struct __decay_abi<__scalar_abi_wrapper<_Bytes>>
4321 { using type = simd_abi::scalar; };
4322
4323// }}}
4324// __find_next_valid_abi metafunction {{{1
4325// Given an ABI tag A<N>, find an N2 < N such that A<N2>::_S_is_valid_v<_Tp> ==
4326// true, N2 is a power-of-2, and A<N2>::_S_is_partial<_Tp> is false. Break
4327// recursion at 2 elements in the resulting ABI tag. In this case
4328// type::_S_is_valid_v<_Tp> may be false.
4329template <template <int> class _Abi, int _Bytes, typename _Tp>
4330 struct __find_next_valid_abi
4331 {
4332 static constexpr auto _S_choose()
4333 {
4334 constexpr int _NextBytes = std::__bit_ceil(_Bytes) / 2;
4335 using _NextAbi = _Abi<_NextBytes>;
4336 if constexpr (_NextBytes < sizeof(_Tp) * 2) // break recursion
4337 return _Abi<_Bytes>();
4338 else if constexpr (_NextAbi::template _S_is_partial<_Tp> == false
4339 && _NextAbi::template _S_is_valid_v<_Tp>)
4340 return _NextAbi();
4341 else
4342 return __find_next_valid_abi<_Abi, _NextBytes, _Tp>::_S_choose();
4343 }
4344
4345 using type = decltype(_S_choose());
4346 };
4347
4348template <int _Bytes, typename _Tp>
4349 struct __find_next_valid_abi<__scalar_abi_wrapper, _Bytes, _Tp>
4350 { using type = simd_abi::scalar; };
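// Editor's sketch of the recursion (assuming an x86 target where
// _VecBuiltin<16> is a full, valid ABI for float): starting from _Bytes == 24,
// __bit_ceil(24) / 2 == 16, which is neither partial nor invalid, so
//
//   // __find_next_valid_abi<simd_abi::_VecBuiltin, 24, float>::type
//   // is simd_abi::_VecBuiltin<16> on such a target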
4351
4352// _AbiList {{{1
4353template <template <int> class...>
4354 struct _AbiList
4355 {
4356 template <typename, int> static constexpr bool _S_has_valid_abi = false;
4357 template <typename, int> using _FirstValidAbi = void;
4358 template <typename, int> using _BestAbi = void;
4359 };
4360
4361template <template <int> class _A0, template <int> class... _Rest>
4362 struct _AbiList<_A0, _Rest...>
4363 {
4364 template <typename _Tp, int _Np>
4365 static constexpr bool _S_has_valid_abi
4366 = _A0<sizeof(_Tp) * _Np>::template _S_is_valid_v<
4367 _Tp> || _AbiList<_Rest...>::template _S_has_valid_abi<_Tp, _Np>;
4368
4369 template <typename _Tp, int _Np>
4370 using _FirstValidAbi = conditional_t<
4371 _A0<sizeof(_Tp) * _Np>::template _S_is_valid_v<_Tp>,
4372 typename __decay_abi<_A0<sizeof(_Tp) * _Np>>::type,
4373 typename _AbiList<_Rest...>::template _FirstValidAbi<_Tp, _Np>>;
4374
4375 template <typename _Tp, int _Np>
4376 static constexpr auto _S_determine_best_abi()
4377 {
4378 static_assert(_Np >= 1);
4379 constexpr int _Bytes = sizeof(_Tp) * _Np;
4380 if constexpr (_Np == 1)
4381 return __make_dependent_t<_Tp, simd_abi::scalar>{};
4382 else
4383 {
4384 constexpr int __fullsize = _A0<_Bytes>::template _S_full_size<_Tp>;
4385 // _A0<_Bytes> is good if:
4386 // 1. The ABI tag is valid for _Tp
4387 // 2. The storage overhead is no more than padding to fill the next
4388 // power-of-2 number of bytes
4389 if constexpr (_A0<_Bytes>::template _S_is_valid_v<
4390 _Tp> && __fullsize / 2 < _Np)
4391 return typename __decay_abi<_A0<_Bytes>>::type{};
4392 else
4393 {
4394 using _Bp =
4395 typename __find_next_valid_abi<_A0, _Bytes, _Tp>::type;
4396 if constexpr (_Bp::template _S_is_valid_v<
4397 _Tp> && _Bp::template _S_size<_Tp> <= _Np)
4398 return _Bp{};
4399 else
4400 return
4401 typename _AbiList<_Rest...>::template _BestAbi<_Tp, _Np>{};
4402 }
4403 }
4404 }
4405
4406 template <typename _Tp, int _Np>
4407 using _BestAbi = decltype(_S_determine_best_abi<_Tp, _Np>());
4408 };
4409
4410// }}}1
4411
4412// the following lists all native ABIs, which makes them accessible to
4413// simd_abi::deduce and select_best_vector_type_t (for fixed_size). Order
4414// matters: Whatever comes first has higher priority.
4415using _AllNativeAbis = _AbiList<simd_abi::_VecBltnBtmsk, simd_abi::_VecBuiltin,
4416 __scalar_abi_wrapper>;
4417
4418// valid _SimdTraits specialization {{{1
4419template <typename _Tp, typename _Abi>
4420 struct _SimdTraits<_Tp, _Abi, void_t<typename _Abi::template _IsValid<_Tp>>>
4421 : _Abi::template __traits<_Tp> {};
4422
4423// __deduce_impl specializations {{{1
4424// try all native ABIs (including scalar) first
4425template <typename _Tp, size_t _Np>
4426 struct __deduce_impl<
4427 _Tp, _Np, enable_if_t<_AllNativeAbis::template _S_has_valid_abi<_Tp, _Np>>>
4428 { using type = _AllNativeAbis::_FirstValidAbi<_Tp, _Np>; };
4429
4430// fall back to fixed_size only if scalar and native ABIs don't match
4431template <typename _Tp, size_t _Np, typename = void>
4432 struct __deduce_fixed_size_fallback {};
4433
4434template <typename _Tp, size_t _Np>
4435 struct __deduce_fixed_size_fallback<_Tp, _Np,
4436 enable_if_t<simd_abi::fixed_size<_Np>::template _S_is_valid_v<_Tp>>>
4437 { using type = simd_abi::fixed_size<_Np>; };
4438
4439template <typename _Tp, size_t _Np, typename>
4440 struct __deduce_impl : public __deduce_fixed_size_fallback<_Tp, _Np> {};
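// Editor's sketch (target-dependent; assumes SSE is available but AVX-512 is
// not): deduction tries the native ABIs in priority order and only then falls
// back to fixed_size. E.g. deduce_t<float, 3> has no 12-byte vector type and
// therefore becomes simd_abi::fixed_size<3>, while
//
//   // static_assert(std::is_same_v<stdx::simd_abi::deduce_t<float, 4>,
//   //                              stdx::simd_abi::_VecBuiltin<16>>);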
4441
4442//}}}1
4443/// @endcond
4444
4445// simd_mask {{{
4446template <typename _Tp, typename _Abi>
4447 class simd_mask : public _SimdTraits<_Tp, _Abi>::_MaskBase
4448 {
4449 // types, tags, and friends {{{
4450 using _Traits = _SimdTraits<_Tp, _Abi>;
4451 using _MemberType = typename _Traits::_MaskMember;
4452
4453 // We map all masks with equal element sizeof to a single integer type, the
4454 // one given by __int_for_sizeof_t<_Tp>. This is the approach
4455 // [[gnu::vector_size(N)]] types take as well and it reduces the number of
4456 // template specializations in the implementation classes.
4457 using _Ip = __int_for_sizeof_t<_Tp>;
4458 static constexpr _Ip* _S_type_tag = nullptr;
4459
4460 friend typename _Traits::_MaskBase;
4461 friend class simd<_Tp, _Abi>; // to construct masks on return
4462 friend typename _Traits::_SimdImpl; // to construct masks on return and
4463 // inspect data on masked operations
4464 public:
4465 using _Impl = typename _Traits::_MaskImpl;
4466 friend _Impl;
4467
4468 // }}}
4469 // member types {{{
4470 using value_type = bool;
4471 using reference = _SmartReference<_MemberType, _Impl, value_type>;
4472 using simd_type = simd<_Tp, _Abi>;
4473 using abi_type = _Abi;
4474
4475 // }}}
4476 static constexpr size_t size() // {{{
4477 { return __size_or_zero_v<_Tp, _Abi>; }
4478
4479 // }}}
4480 // constructors & assignment {{{
4481 simd_mask() = default;
4482 simd_mask(const simd_mask&) = default;
4483 simd_mask(simd_mask&&) = default;
4484 simd_mask& operator=(const simd_mask&) = default;
4485 simd_mask& operator=(simd_mask&&) = default;
4486
4487 // }}}
4488 // access to internal representation (optional feature) {{{
4489 _GLIBCXX_SIMD_ALWAYS_INLINE explicit
4490 simd_mask(typename _Traits::_MaskCastType __init)
4491 : _M_data{__init} {}
4492 // conversions to internal type is done in _MaskBase
4493
4494 // }}}
4495 // bitset interface (extension to be proposed) {{{
4496 // TS_FEEDBACK:
4497 // Conversion of simd_mask to and from bitset makes it much easier to
4498 // interface with other facilities. I suggest adding `static
4499 // simd_mask::from_bitset` and `simd_mask::to_bitset`.
4500 _GLIBCXX_SIMD_ALWAYS_INLINE static simd_mask
4501    __from_bitset(bitset<size()> __bs)
4502    { return {__bitset_init, __bs}; }
4503
4504 _GLIBCXX_SIMD_ALWAYS_INLINE bitset<size()>
4505 __to_bitset() const
4506 { return _Impl::_S_to_bits(_M_data)._M_to_bitset(); }
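    // Editor's sketch (extension; double-underscore names are not public API):
    //   auto bits = k.__to_bitset();                // bitset<k.size()>
    //   auto k2 = decltype(k)::__from_bitset(bits); // round-trips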
4507
4508 // }}}
4509 // explicit broadcast constructor {{{
4510 _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
4511 simd_mask(value_type __x)
4512 : _M_data(_Impl::template _S_broadcast<_Ip>(__x)) {}
4513
4514 // }}}
4515 // implicit type conversion constructor {{{
4516 #ifdef _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4517 // proposed improvement
4518 template <typename _Up, typename _A2,
4519 typename = enable_if_t<simd_size_v<_Up, _A2> == size()>>
4520 _GLIBCXX_SIMD_ALWAYS_INLINE explicit(sizeof(_MemberType)
4521 != sizeof(typename _SimdTraits<_Up, _A2>::_MaskMember))
4522 simd_mask(const simd_mask<_Up, _A2>& __x)
4523 : simd_mask(__proposed::static_simd_cast<simd_mask>(__x)) {}
4524 #else
4525 // conforming to ISO/IEC 19570:2018
4526 template <typename _Up, typename = enable_if_t<conjunction<
4527 is_same<abi_type, simd_abi::fixed_size<size()>>,
4528 is_same<_Up, _Up>>::value>>
4529 _GLIBCXX_SIMD_ALWAYS_INLINE
4530 simd_mask(const simd_mask<_Up, simd_abi::fixed_size<size()>>& __x)
4531 : _M_data(_Impl::_S_from_bitmask(__data(__x), _S_type_tag)) {}
4532 #endif
4533
4534 // }}}
4535 // load constructor {{{
4536 template <typename _Flags>
4537 _GLIBCXX_SIMD_ALWAYS_INLINE
4538 simd_mask(const value_type* __mem, _Flags)
4539 : _M_data(_Impl::template _S_load<_Ip>(
4540 _Flags::template _S_apply<simd_mask>(__mem))) {}
4541
4542 template <typename _Flags>
4543 _GLIBCXX_SIMD_ALWAYS_INLINE
4544 simd_mask(const value_type* __mem, simd_mask __k, _Flags)
4545 : _M_data{}
4546 {
4547 _M_data
4548 = _Impl::_S_masked_load(_M_data, __k._M_data,
4549 _Flags::template _S_apply<simd_mask>(__mem));
4550 }
4551
4552 // }}}
4553 // loads [simd_mask.load] {{{
4554 template <typename _Flags>
4555 _GLIBCXX_SIMD_ALWAYS_INLINE void
4556 copy_from(const value_type* __mem, _Flags)
4557 {
4558 _M_data = _Impl::template _S_load<_Ip>(
4559 _Flags::template _S_apply<simd_mask>(__mem));
4560 }
4561
4562 // }}}
4563 // stores [simd_mask.store] {{{
4564 template <typename _Flags>
4565 _GLIBCXX_SIMD_ALWAYS_INLINE void
4566 copy_to(value_type* __mem, _Flags) const
4567 { _Impl::_S_store(_M_data, _Flags::template _S_apply<simd_mask>(__mem)); }
4568
4569 // }}}
4570 // scalar access {{{
4571 _GLIBCXX_SIMD_ALWAYS_INLINE reference
4572 operator[](size_t __i)
4573 {
4574 if (__i >= size())
4575 __invoke_ub("Subscript %d is out of range [0, %d]", __i, size() - 1);
4576 return {_M_data, int(__i)};
4577 }
4578
4579 _GLIBCXX_SIMD_ALWAYS_INLINE value_type
4580 operator[](size_t __i) const
4581 {
4582 if (__i >= size())
4583 __invoke_ub("Subscript %d is out of range [0, %d]", __i, size() - 1);
4584 if constexpr (__is_scalar_abi<_Abi>())
4585 return _M_data;
4586 else
4587 return static_cast<bool>(_M_data[__i]);
4588 }
4589
4590 // }}}
4591 // negation {{{
4592 _GLIBCXX_SIMD_ALWAYS_INLINE simd_mask
4593 operator!() const
4594 { return {__private_init, _Impl::_S_bit_not(_M_data)}; }
4595
4596 // }}}
4597 // simd_mask binary operators [simd_mask.binary] {{{
4598 #ifdef _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4599 // simd_mask<int> && simd_mask<uint> needs disambiguation
4600 template <typename _Up, typename _A2,
4601 typename
4602 = enable_if_t<is_convertible_v<simd_mask<_Up, _A2>, simd_mask>>>
4603 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4604 operator&&(const simd_mask& __x, const simd_mask<_Up, _A2>& __y)
4605 {
4606 return {__private_init,
4607 _Impl::_S_logical_and(__x._M_data, simd_mask(__y)._M_data)};
4608 }
4609
4610 template <typename _Up, typename _A2,
4611 typename
4612 = enable_if_t<is_convertible_v<simd_mask<_Up, _A2>, simd_mask>>>
4613 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4614 operator||(const simd_mask& __x, const simd_mask<_Up, _A2>& __y)
4615 {
4616 return {__private_init,
4617 _Impl::_S_logical_or(__x._M_data, simd_mask(__y)._M_data)};
4618 }
4619 #endif // _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4620
4621 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4622 operator&&(const simd_mask& __x, const simd_mask& __y)
4623 {
4624 return {__private_init, _Impl::_S_logical_and(__x._M_data, __y._M_data)};
4625 }
4626
4627 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4628 operator||(const simd_mask& __x, const simd_mask& __y)
4629 {
4630 return {__private_init, _Impl::_S_logical_or(__x._M_data, __y._M_data)};
4631 }
4632
4633 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4634 operator&(const simd_mask& __x, const simd_mask& __y)
4635 { return {__private_init, _Impl::_S_bit_and(__x._M_data, __y._M_data)}; }
4636
4637 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4638 operator|(const simd_mask& __x, const simd_mask& __y)
4639 { return {__private_init, _Impl::_S_bit_or(__x._M_data, __y._M_data)}; }
4640
4641 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4642 operator^(const simd_mask& __x, const simd_mask& __y)
4643 { return {__private_init, _Impl::_S_bit_xor(__x._M_data, __y._M_data)}; }
4644
4645 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask&
4646 operator&=(simd_mask& __x, const simd_mask& __y)
4647 {
4648 __x._M_data = _Impl::_S_bit_and(__x._M_data, __y._M_data);
4649 return __x;
4650 }
4651
4652 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask&
4653 operator|=(simd_mask& __x, const simd_mask& __y)
4654 {
4655 __x._M_data = _Impl::_S_bit_or(__x._M_data, __y._M_data);
4656 return __x;
4657 }
4658
4659 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask&
4660 operator^=(simd_mask& __x, const simd_mask& __y)
4661 {
4662 __x._M_data = _Impl::_S_bit_xor(__x._M_data, __y._M_data);
4663 return __x;
4664 }
4665
4666 // }}}
4667 // simd_mask compares [simd_mask.comparison] {{{
4668 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4669 operator==(const simd_mask& __x, const simd_mask& __y)
4670 { return !operator!=(__x, __y); }
4671
4672 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4673 operator!=(const simd_mask& __x, const simd_mask& __y)
4674 { return {__private_init, _Impl::_S_bit_xor(__x._M_data, __y._M_data)}; }
4675
4676 // }}}
4677 // private_init ctor {{{
4678 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
4679 simd_mask(_PrivateInit, typename _Traits::_MaskMember __init)
4680 : _M_data(__init) {}
4681
4682 // }}}
4683 // private_init generator ctor {{{
4684 template <typename _Fp, typename = decltype(bool(declval<_Fp>()(size_t())))>
4685 _GLIBCXX_SIMD_INTRINSIC constexpr
4686 simd_mask(_PrivateInit, _Fp&& __gen)
4687 : _M_data()
4688 {
4689 __execute_n_times<size()>([&](auto __i) constexpr {
4690 _Impl::_S_set(_M_data, __i, __gen(__i));
4691 });
4692 }
4693
4694 // }}}
4695 // bitset_init ctor {{{
4696 _GLIBCXX_SIMD_INTRINSIC simd_mask(_BitsetInit, bitset<size()> __init)
4697 : _M_data(
4698 _Impl::_S_from_bitmask(_SanitizedBitMask<size()>(__init), _S_type_tag))
4699 {}
4700
4701 // }}}
4702 // __cvt {{{
4703 // TS_FEEDBACK:
4704 // The conversion operator this implements should be a ctor on simd_mask.
4705 // Once you call .__cvt() on a simd_mask it converts conveniently.
4706 // A useful variation: add `explicit(sizeof(_Tp) != sizeof(_Up))`
4707 struct _CvtProxy
4708 {
4709 template <typename _Up, typename _A2,
4710 typename
4711 = enable_if_t<simd_size_v<_Up, _A2> == simd_size_v<_Tp, _Abi>>>
4712 _GLIBCXX_SIMD_ALWAYS_INLINE
4713 operator simd_mask<_Up, _A2>() &&
4714 {
4715 using namespace std::experimental::__proposed;
4716 return static_simd_cast<simd_mask<_Up, _A2>>(_M_data);
4717 }
4718
4719 const simd_mask<_Tp, _Abi>& _M_data;
4720 };
4721
4722 _GLIBCXX_SIMD_INTRINSIC _CvtProxy
4723 __cvt() const
4724 { return {*this}; }
4725
4726 // }}}
4727 // operator?: overloads (suggested extension) {{{
4728 #ifdef __GXX_CONDITIONAL_IS_OVERLOADABLE__
4729 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4730 operator?:(const simd_mask& __k, const simd_mask& __where_true,
4731 const simd_mask& __where_false)
4732 {
4733 auto __ret = __where_false;
4734 _Impl::_S_masked_assign(__k._M_data, __ret._M_data, __where_true._M_data);
4735 return __ret;
4736 }
4737
4738 template <typename _U1, typename _U2,
4739 typename _Rp = simd<common_type_t<_U1, _U2>, _Abi>,
4740 typename = enable_if_t<conjunction_v<
4741 is_convertible<_U1, _Rp>, is_convertible<_U2, _Rp>,
4742 is_convertible<simd_mask, typename _Rp::mask_type>>>>
4743 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend _Rp
4744 operator?:(const simd_mask& __k, const _U1& __where_true,
4745 const _U2& __where_false)
4746 {
4747 _Rp __ret = __where_false;
4748 _Rp::_Impl::_S_masked_assign(
4749 __data(static_cast<typename _Rp::mask_type>(__k)), __data(__ret),
4750 __data(static_cast<_Rp>(__where_true)));
4751 return __ret;
4752 }
4753
4754 #ifdef _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4755 template <typename _Kp, typename _Ak, typename _Up, typename _Au,
4756 typename = enable_if_t<
4757 conjunction_v<is_convertible<simd_mask<_Kp, _Ak>, simd_mask>,
4758 is_convertible<simd_mask<_Up, _Au>, simd_mask>>>>
4759 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4760 operator?:(const simd_mask<_Kp, _Ak>& __k, const simd_mask& __where_true,
4761 const simd_mask<_Up, _Au>& __where_false)
4762 {
4763 simd_mask __ret = __where_false;
4764 _Impl::_S_masked_assign(simd_mask(__k)._M_data, __ret._M_data,
4765 __where_true._M_data);
4766 return __ret;
4767 }
4768 #endif // _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4769 #endif // __GXX_CONDITIONAL_IS_OVERLOADABLE__
4770
4771 // }}}
4772 // _M_is_constprop {{{
4773 _GLIBCXX_SIMD_INTRINSIC constexpr bool
4774 _M_is_constprop() const
4775 {
4776 if constexpr (__is_scalar_abi<_Abi>())
4777 return __builtin_constant_p(_M_data);
4778 else
4779 return _M_data._M_is_constprop();
4780 }
4781
4782 // }}}
4783
4784 private:
4785 friend const auto& __data<_Tp, abi_type>(const simd_mask&);
4786 friend auto& __data<_Tp, abi_type>(simd_mask&);
4787 alignas(_Traits::_S_mask_align) _MemberType _M_data;
4788 };
4789
4790// }}}
4791
4792/// @cond undocumented
4793// __data(simd_mask) {{{
4794template <typename _Tp, typename _Ap>
4795 _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
4796 __data(const simd_mask<_Tp, _Ap>& __x)
4797 { return __x._M_data; }
4798
4799template <typename _Tp, typename _Ap>
4800 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
4801 __data(simd_mask<_Tp, _Ap>& __x)
4802 { return __x._M_data; }
4803
4804// }}}
4805/// @endcond
4806
4807// simd_mask reductions [simd_mask.reductions] {{{
4808template <typename _Tp, typename _Abi>
4809 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4810 all_of(const simd_mask<_Tp, _Abi>& __k) noexcept
4811 {
4812 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4813 {
4814 for (size_t __i = 0; __i < simd_size_v<_Tp, _Abi>; ++__i)
4815 if (!__k[__i])
4816 return false;
4817 return true;
4818 }
4819 else
4820 return _Abi::_MaskImpl::_S_all_of(__k);
4821 }
4822
4823template <typename _Tp, typename _Abi>
4824 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4825 any_of(const simd_mask<_Tp, _Abi>& __k) noexcept
4826 {
4827 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4828 {
4829 for (size_t __i = 0; __i < simd_size_v<_Tp, _Abi>; ++__i)
4830 if (__k[__i])
4831 return true;
4832 return false;
4833 }
4834 else
4835 return _Abi::_MaskImpl::_S_any_of(__k);
4836 }
4837
4838template <typename _Tp, typename _Abi>
4839 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4840 none_of(const simd_mask<_Tp, _Abi>& __k) noexcept
4841 {
4842 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4843 {
4844 for (size_t __i = 0; __i < simd_size_v<_Tp, _Abi>; ++__i)
4845 if (__k[__i])
4846 return false;
4847 return true;
4848 }
4849 else
4850 return _Abi::_MaskImpl::_S_none_of(__k);
4851 }
4852
4853template <typename _Tp, typename _Abi>
4854 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4855 some_of(const simd_mask<_Tp, _Abi>& __k) noexcept
4856 {
4857 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4858 {
4859 for (size_t __i = 1; __i < simd_size_v<_Tp, _Abi>; ++__i)
4860 if (__k[__i] != __k[__i - 1])
4861 return true;
4862 return false;
4863 }
4864 else
4865 return _Abi::_MaskImpl::_S_some_of(__k);
4866 }
4867
4868template <typename _Tp, typename _Abi>
4869 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4870 popcount(const simd_mask<_Tp, _Abi>& __k) noexcept
4871 {
4872 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4873 {
4874 const int __r = __call_with_subscripts<simd_size_v<_Tp, _Abi>>(
4875 __k, [](auto... __elements) { return ((__elements != 0) + ...); });
4876 if (__builtin_is_constant_evaluated() || __builtin_constant_p(__r))
4877 return __r;
4878 }
4879 return _Abi::_MaskImpl::_S_popcount(__k);
4880 }
4881
4882template <typename _Tp, typename _Abi>
4883 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4884 find_first_set(const simd_mask<_Tp, _Abi>& __k)
4885 {
4886 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4887 {
4888 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
4889 const size_t _Idx = __call_with_n_evaluations<_Np>(
4890 [](auto... __indexes) { return std::min({__indexes...}); },
4891 [&](auto __i) { return __k[__i] ? +__i : _Np; });
4892 if (_Idx >= _Np)
4893 __invoke_ub("find_first_set(empty mask) is UB");
4894 if (__builtin_constant_p(_Idx))
4895 return _Idx;
4896 }
4897 return _Abi::_MaskImpl::_S_find_first_set(__k);
4898 }
4899
4900template <typename _Tp, typename _Abi>
4901 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4902 find_last_set(const simd_mask<_Tp, _Abi>& __k)
4903 {
4904 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4905 {
4906 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
4907 const int _Idx = __call_with_n_evaluations<_Np>(
4908 [](auto... __indexes) { return std::max({__indexes...}); },
4909 [&](auto __i) { return __k[__i] ? int(__i) : -1; });
4910 if (_Idx < 0)
4911	  __invoke_ub("find_last_set(empty mask) is UB");
4912 if (__builtin_constant_p(_Idx))
4913 return _Idx;
4914 }
4915 return _Abi::_MaskImpl::_S_find_last_set(__k);
4916 }
4917
4918_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4919all_of(_ExactBool __x) noexcept
4920{ return __x; }
4921
4922_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4923any_of(_ExactBool __x) noexcept
4924{ return __x; }
4925
4926_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4927none_of(_ExactBool __x) noexcept
4928{ return !__x; }
4929
4930_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4931some_of(_ExactBool) noexcept
4932{ return false; }
4933
4934_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4935popcount(_ExactBool __x) noexcept
4936{ return __x; }
4937
4938_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4939find_first_set(_ExactBool)
4940{ return 0; }
4941
4942_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4943find_last_set(_ExactBool)
4944{ return 0; }
4945
4946// }}}
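// Usage sketch (editor's addition, assuming <cstdio> is included):
//
//   stdx::native_simd<int> v([](int i) { return i; });
//   auto k = v > 1;
//   if (any_of(k))
//     std::printf("%d lanes are > 1, first at index %d\n",
//                 popcount(k), find_first_set(k));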
4947
4948/// @cond undocumented
4949// _SimdIntOperators{{{1
4950template <typename _V, typename _Tp, typename _Abi, bool>
4951 class _SimdIntOperators {};
4952
4953template <typename _V, typename _Tp, typename _Abi>
4954 class _SimdIntOperators<_V, _Tp, _Abi, true>
4955 {
4956 using _Impl = typename _SimdTraits<_Tp, _Abi>::_SimdImpl;
4957
4958 _GLIBCXX_SIMD_INTRINSIC const _V& __derived() const
4959 { return *static_cast<const _V*>(this); }
4960
4961 template <typename _Up>
4962 _GLIBCXX_SIMD_INTRINSIC static _GLIBCXX_SIMD_CONSTEXPR _V
4963 _S_make_derived(_Up&& __d)
4964 { return {__private_init, static_cast<_Up&&>(__d)}; }
4965
4966 public:
4967 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4968 _V&
4969 operator%=(_V& __lhs, const _V& __x)
4970 { return __lhs = __lhs % __x; }
4971
4972 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4973 _V&
4974 operator&=(_V& __lhs, const _V& __x)
4975 { return __lhs = __lhs & __x; }
4976
4977 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4978 _V&
4979 operator|=(_V& __lhs, const _V& __x)
4980 { return __lhs = __lhs | __x; }
4981
4982 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4983 _V&
4984 operator^=(_V& __lhs, const _V& __x)
4985 { return __lhs = __lhs ^ __x; }
4986
4987 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4988 _V&
4989 operator<<=(_V& __lhs, const _V& __x)
4990 { return __lhs = __lhs << __x; }
4991
4992 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4993 _V&
4994 operator>>=(_V& __lhs, const _V& __x)
4995 { return __lhs = __lhs >> __x; }
4996
4997 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4998 _V&
4999 operator<<=(_V& __lhs, int __x)
5000 { return __lhs = __lhs << __x; }
5001
5002 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5003 _V&
5004 operator>>=(_V& __lhs, int __x)
5005 { return __lhs = __lhs >> __x; }
5006
5007 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5008 _V
5009 operator%(const _V& __x, const _V& __y)
5010 {
5011 return _SimdIntOperators::_S_make_derived(
5012 _Impl::_S_modulus(__data(__x), __data(__y)));
5013 }
5014
5015 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5016 _V
5017 operator&(const _V& __x, const _V& __y)
5018 {
5019 return _SimdIntOperators::_S_make_derived(
5020 _Impl::_S_bit_and(__data(__x), __data(__y)));
5021 }
5022
5023 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5024 _V
5025 operator|(const _V& __x, const _V& __y)
5026 {
5027 return _SimdIntOperators::_S_make_derived(
5028 _Impl::_S_bit_or(__data(__x), __data(__y)));
5029 }
5030
5031 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5032 _V
5033 operator^(const _V& __x, const _V& __y)
5034 {
5035 return _SimdIntOperators::_S_make_derived(
5036 _Impl::_S_bit_xor(__data(__x), __data(__y)));
5037 }
5038
5039 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5040 _V
5041 operator<<(const _V& __x, const _V& __y)
5042 {
5043 return _SimdIntOperators::_S_make_derived(
5044 _Impl::_S_bit_shift_left(__data(__x), __data(__y)));
5045 }
5046
5047 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5048 _V
5049 operator>>(const _V& __x, const _V& __y)
5050 {
5051 return _SimdIntOperators::_S_make_derived(
5052 _Impl::_S_bit_shift_right(__data(__x), __data(__y)));
5053 }
5054
5055 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5056 _V
5057 operator<<(const _V& __x, int __y)
5058 {
5059 if (__y < 0)
5060 __invoke_ub("The behavior is undefined if the right operand of a "
5061 "shift operation is negative. [expr.shift]\nA shift by "
5062 "%d was requested",
5063 __y);
5064 if (size_t(__y) >= sizeof(declval<_Tp>() << __y) * __CHAR_BIT__)
5065 __invoke_ub(
5066 "The behavior is undefined if the right operand of a "
5067 "shift operation is greater than or equal to the width of the "
5068 "promoted left operand. [expr.shift]\nA shift by %d was requested",
5069 __y);
5070 return _SimdIntOperators::_S_make_derived(
5071 _Impl::_S_bit_shift_left(__data(__x), __y));
5072 }
5073
5074 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5075 _V
5076 operator>>(const _V& __x, int __y)
5077 {
5078 if (__y < 0)
5079 __invoke_ub(
5080 "The behavior is undefined if the right operand of a shift "
5081 "operation is negative. [expr.shift]\nA shift by %d was requested",
5082 __y);
5083 if (size_t(__y) >= sizeof(declval<_Tp>() << __y) * __CHAR_BIT__)
5084 __invoke_ub(
5085 "The behavior is undefined if the right operand of a shift "
5086 "operation is greater than or equal to the width of the promoted "
5087 "left operand. [expr.shift]\nA shift by %d was requested",
5088 __y);
5089 return _SimdIntOperators::_S_make_derived(
5090 _Impl::_S_bit_shift_right(__data(__x), __y));
5091 }
5092
5093 // unary operators (for integral _Tp)
5094 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
5095 _V
5096 operator~() const
5097 { return {__private_init, _Impl::_S_complement(__derived()._M_data)}; }
5098 };
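// Editor's sketch: integral simd types inherit %, the bitwise operators, and
// the shifts from this base; debug builds diagnose negative or too-large
// shift counts via __invoke_ub.
//
//   stdx::native_simd<unsigned> v = 1;
//   auto w = v << 3; // per-lane shift, every w[i] == 8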
5099
5100//}}}1
5101/// @endcond
5102
5103// simd {{{
5104template <typename _Tp, typename _Abi>
5105 class simd : public _SimdIntOperators<
5106 simd<_Tp, _Abi>, _Tp, _Abi,
5107 conjunction<is_integral<_Tp>,
5108 typename _SimdTraits<_Tp, _Abi>::_IsValid>::value>,
5109 public _SimdTraits<_Tp, _Abi>::_SimdBase
5110 {
5111 using _Traits = _SimdTraits<_Tp, _Abi>;
5112 using _MemberType = typename _Traits::_SimdMember;
5113 using _CastType = typename _Traits::_SimdCastType;
5114 static constexpr _Tp* _S_type_tag = nullptr;
5115 friend typename _Traits::_SimdBase;
5116
5117 public:
5118 using _Impl = typename _Traits::_SimdImpl;
5119 friend _Impl;
5120 friend _SimdIntOperators<simd, _Tp, _Abi, true>;
5121
5122 using value_type = _Tp;
5123 using reference = _SmartReference<_MemberType, _Impl, value_type>;
5124 using mask_type = simd_mask<_Tp, _Abi>;
5125 using abi_type = _Abi;
5126
5127 static constexpr size_t size()
5128 { return __size_or_zero_v<_Tp, _Abi>; }
5129
5130 _GLIBCXX_SIMD_CONSTEXPR simd() = default;
5131 _GLIBCXX_SIMD_CONSTEXPR simd(const simd&) = default;
5132 _GLIBCXX_SIMD_CONSTEXPR simd(simd&&) noexcept = default;
5133 _GLIBCXX_SIMD_CONSTEXPR simd& operator=(const simd&) = default;
5134 _GLIBCXX_SIMD_CONSTEXPR simd& operator=(simd&&) noexcept = default;
5135
5136 // implicit broadcast constructor
5137 template <typename _Up,
5138 typename = enable_if_t<!is_same_v<__remove_cvref_t<_Up>, bool>>>
5139 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
5140 simd(_ValuePreservingOrInt<_Up, value_type>&& __x)
5141 : _M_data(
5142 _Impl::_S_broadcast(static_cast<value_type>(static_cast<_Up&&>(__x))))
5143 {}
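
    // Example: broadcast conversions must be value-preserving, with the
    // TS exception that plain int always broadcasts. A sketch with
    // illustrative names, assuming namespace stdx = std::experimental:
    //   stdx::native_simd<float> __x = 1.5f; // every lane holds 1.5f
    //   stdx::native_simd<float> __y = 2;    // int is always accepted
    //   // stdx::native_simd<float> __z = 1.5; // ill-formed:
    //   //                                     // double narrows to float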
5144
5145 // implicit type conversion constructor (convert from fixed_size to
5146 // fixed_size)
5147 template <typename _Up>
5148 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
5149 simd(const simd<_Up, simd_abi::fixed_size<size()>>& __x,
5150 enable_if_t<
5151 conjunction<
5152 is_same<simd_abi::fixed_size<size()>, abi_type>,
5153 negation<__is_narrowing_conversion<_Up, value_type>>,
5154 __converts_to_higher_integer_rank<_Up, value_type>>::value,
5155 void*> = nullptr)
5156 : simd{static_cast<array<_Up, size()>>(__x).data(), vector_aligned} {}
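
    // Example: this constructor only participates for fixed_size ABIs
    // and element conversions that neither narrow nor stay at the same
    // integer rank. A sketch with illustrative names:
    //   stdx::fixed_size_simd<int, 4> __i([](auto __j) { return int(__j); });
    //   stdx::fixed_size_simd<long long, 4> __l = __i; // OK: higher rank
    //   // stdx::fixed_size_simd<short, 4> __s = __i;  // ill-formed:
    //   //                                             // narrowing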
5157
5158 // explicit type conversion constructor
5159#ifdef _GLIBCXX_SIMD_ENABLE_STATIC_CAST
5160 template <typename _Up, typename _A2,
5161 typename = decltype(static_simd_cast<simd>(
5162 declval<const simd<_Up, _A2>&>()))>
5163 _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
5164 simd(const simd<_Up, _A2>& __x)
5165 : simd(static_simd_cast<simd>(__x)) {}
5166#endif // _GLIBCXX_SIMD_ENABLE_STATIC_CAST
5167
5168 // generator constructor
5169 template <typename _Fp>
5170 _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
5171 simd(_Fp&& __gen, _ValuePreservingOrInt<decltype(declval<_Fp>()(
5172 declval<_SizeConstant<0>&>())),
5173 value_type>* = nullptr)
5174 : _M_data(_Impl::_S_generator(static_cast<_Fp&&>(__gen), _S_type_tag)) {}
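
    // Example: __gen is invoked with _SizeConstant<0> up to
    // _SizeConstant<size() - 1>, so the lane index is a constant
    // expression inside the callable. A sketch with illustrative names:
    //   stdx::native_simd<int> __iota([](auto __i) { return int(__i); });
    //   // lane __i now holds the value __i: {0, 1, 2, ...}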
5175
5176 // load constructor
5177 template <typename _Up, typename _Flags>
5178 _GLIBCXX_SIMD_ALWAYS_INLINE
5179 simd(const _Up* __mem, _Flags)
5180 : _M_data(
5181 _Impl::_S_load(_Flags::template _S_apply<simd>(__mem), _S_type_tag))
5182 {}
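
    // Example: _Flags states the alignment contract for __mem; the TS
    // provides the element_aligned, vector_aligned, and overaligned<N>
    // tags. A sketch with illustrative names:
    //   alignas(stdx::memory_alignment_v<stdx::native_simd<float>>)
    //     float __buf[stdx::native_simd<float>::size()] = {};
    //   stdx::native_simd<float> __v(__buf, stdx::vector_aligned);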
5183
5184 // loads [simd.load]
5185 template <typename _Up, typename _Flags>
5186 _GLIBCXX_SIMD_ALWAYS_INLINE void
5187 copy_from(const _Vectorizable<_Up>* __mem, _Flags)
5188 {
5189 _M_data = static_cast<decltype(_M_data)>(
5190 _Impl::_S_load(_Flags::template _S_apply<simd>(__mem), _S_type_tag));
5191 }
5192
5193 // stores [simd.store]
5194 template <typename _Up, typename _Flags>
5195 _GLIBCXX_SIMD_ALWAYS_INLINE void
5196 copy_to(_Vectorizable<_Up>* __mem, _Flags) const
5197 {
5198 _Impl::_S_store(_M_data, _Flags::template _S_apply<simd>(__mem),
5199 _S_type_tag);
5200 }
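
    // Example: loads and stores may convert element-wise, as if by
    // static_cast per lane, to and from any vectorizable _Up. A sketch
    // with illustrative names:
    //   double __out[stdx::native_simd<float>::size()];
    //   stdx::native_simd<float> __v = 1.5f;
    //   __v.copy_to(__out, stdx::element_aligned);   // widen per lane
    //   __v.copy_from(__out, stdx::element_aligned); // convert back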
5201
5202 // scalar access
5203 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR reference
5204 operator[](size_t __i)
5205 { return {_M_data, int(__i)}; }
5206
5207 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR value_type
5208 operator[]([[maybe_unused]] size_t __i) const
5209 {
5210 if constexpr (__is_scalar_abi<_Abi>())
5211 {
5212 _GLIBCXX_DEBUG_ASSERT(__i == 0);
5213 return _M_data;
5214 }
5215 else
5216 return _M_data[__i];
5217 }
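
    // Example: the mutable overload returns a _SmartReference proxy, so
    // assignment through it writes back into the simd object. A sketch:
    //   stdx::native_simd<int> __v = 0;
    //   __v[0] = 42;           // write through the reference proxy
    //   int __first = __v[0];  // const access yields a plain value_type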
5218
5219 // increment and decrement:
5220 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd&
5221 operator++()
5222 {
5223 _Impl::_S_increment(_M_data);
5224 return *this;
5225 }
5226
5227 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
5228 operator++(int)
5229 {
5230 simd __r = *this;
5231 _Impl::_S_increment(_M_data);
5232 return __r;
5233 }
5234
5235 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd&
5236 operator--()
5237 {
5238 _Impl::_S_decrement(_M_data);
5239 return *this;
5240 }
5241
5242 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
5243 operator--(int)
5244 {
5245 simd __r = *this;
5246 _Impl::_S_decrement(_M_data);
5247 return __r;
5248 }
5249
5250 // unary operators (for any _Tp)
5251 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR mask_type
5252 operator!() const
5253 { return {__private_init, _Impl::_S_negate(_M_data)}; }
5254
5255 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
5256 operator+() const
5257 { return *this; }
5258
5259 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
5260 operator-() const
5261 { return {__private_init, _Impl::_S_unary_minus(_M_data)}; }
5262
5263 // access to internal representation (suggested extension)
5264 _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
5265 simd(_CastType __init) : _M_data(__init) {}
5266
5267 // compound assignment [simd.cassign]
5268 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
5269 operator+=(simd& __lhs, const simd& __x)
5270 { return __lhs = __lhs + __x; }
5271
5272 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
5273 operator-=(simd& __lhs, const simd& __x)
5274 { return __lhs = __lhs - __x; }
5275
5276 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
5277 operator*=(simd& __lhs, const simd& __x)
5278 { return __lhs = __lhs * __x; }
5279
5280 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
5281 operator/=(simd& __lhs, const simd& __x)
5282 { return __lhs = __lhs / __x; }
5283
5284 // binary operators [simd.binary]
5285 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
5286 operator+(const simd& __x, const simd& __y)
5287 { return {__private_init, _Impl::_S_plus(__x._M_data, __y._M_data)}; }
5288
5289 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
5290 operator-(const simd& __x, const simd& __y)
5291 { return {__private_init, _Impl::_S_minus(__x._M_data, __y._M_data)}; }
5292
5293 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
5294 operator*(const simd& __x, const simd& __y)
5295 { return {__private_init, _Impl::_S_multiplies(__x._M_data, __y._M_data)}; }
5296
5297 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
5298 operator/(const simd& __x, const simd& __y)
5299 { return {__private_init, _Impl::_S_divides(__x._M_data, __y._M_data)}; }
5300
5301 // compares [simd.comparison]
5302 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5303 operator==(const simd& __x, const simd& __y)
5304 { return simd::_S_make_mask(_Impl::_S_equal_to(__x._M_data, __y._M_data)); }
5305
5306 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5307 operator!=(const simd& __x, const simd& __y)
5308 {
5309 return simd::_S_make_mask(
5310 _Impl::_S_not_equal_to(__x._M_data, __y._M_data));
5311 }
5312
5313 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5314 operator<(const simd& __x, const simd& __y)
5315 { return simd::_S_make_mask(_Impl::_S_less(__x._M_data, __y._M_data)); }
5316
5317 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5318 operator<=(const simd& __x, const simd& __y)
5319 {
5320 return simd::_S_make_mask(_Impl::_S_less_equal(__x._M_data, __y._M_data));
5321 }
5322
5323 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5324 operator>(const simd& __x, const simd& __y)
5325 { return simd::_S_make_mask(_Impl::_S_less(__y._M_data, __x._M_data)); }
5326
5327 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5328 operator>=(const simd& __x, const simd& __y)
5329 {
5330 return simd::_S_make_mask(_Impl::_S_less_equal(__y._M_data, __x._M_data));
5331 }
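
    // Example: comparisons are element-wise and yield mask_type rather
    // than bool; the TS mask reductions collapse them. A sketch:
    //   stdx::native_simd<int> __a = 1, __b = 2;
    //   auto __m = __a < __b;                  // simd_mask, all lanes true
    //   bool __all = stdx::all_of(__m);        // true
    //   bool __any = stdx::any_of(__a == __b); // false here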
5332
5333 // operator?: overloads (suggested extension) {{{
5334#ifdef __GXX_CONDITIONAL_IS_OVERLOADABLE__
5335 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
5336 operator?:(const mask_type& __k, const simd& __where_true,
5337 const simd& __where_false)
5338 {
5339 auto __ret = __where_false;
5340 _Impl::_S_masked_assign(__data(__k), __data(__ret), __data(__where_true));
5341 return __ret;
5342 }
5343
5344#endif // __GXX_CONDITIONAL_IS_OVERLOADABLE__
5345 // }}}
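
    // Example: without the suggested ?: extension, the same blend is
    // expressed portably via where(): start from the false case and
    // overwrite the lanes selected by the mask. A sketch:
    //   stdx::native_simd<int> __a = 1, __b = 2;
    //   auto __r = __b;                    // the __where_false value
    //   stdx::where(__a < __b, __r) = __a; // __where_true lanes win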
5346
5347 // "private" because of the first arguments's namespace
5348 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
5349 simd(_PrivateInit, const _MemberType& __init)
5350 : _M_data(__init) {}
5351
5352 // "private" because of the first arguments's namespace
5353 _GLIBCXX_SIMD_INTRINSIC
5354 simd(_BitsetInit, bitset<size()> __init) : _M_data()
5355 { where(mask_type(__bitset_init, __init), *this) = ~*this; }
5356
5357 _GLIBCXX_SIMD_INTRINSIC constexpr bool
5358 _M_is_constprop() const
5359 {
5360 if constexpr (__is_scalar_abi<_Abi>())
5361 return __builtin_constant_p(_M_data);
5362 else
5363 return _M_data._M_is_constprop();
5364 }
5365
5366 private:
5367 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR static mask_type
5368 _S_make_mask(typename mask_type::_MemberType __k)
5369 { return {__private_init, __k}; }
5370
5371 friend const auto& __data<value_type, abi_type>(const simd&);
5372 friend auto& __data<value_type, abi_type>(simd&);
5373 alignas(_Traits::_S_simd_align) _MemberType _M_data;
5374 };
5375
5376// }}}
5377/// @cond undocumented
5378// __data {{{
5379template <typename _Tp, typename _Ap>
5380 _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
5381 __data(const simd<_Tp, _Ap>& __x)
5382 { return __x._M_data; }
5383
5384template <typename _Tp, typename _Ap>
5385 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
5386 __data(simd<_Tp, _Ap>& __x)
5387 { return __x._M_data; }
5388
5389// }}}
5390namespace __float_bitwise_operators { //{{{
5391template <typename _Tp, typename _Ap>
5392 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
5393 operator^(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
5394 {
5395 return {__private_init,
5396 _Ap::_SimdImpl::_S_bit_xor(__data(__a), __data(__b))};
5397 }
5398
5399template <typename _Tp, typename _Ap>
5400 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
5401 operator|(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
5402 {
5403 return {__private_init,
5404 _Ap::_SimdImpl::_S_bit_or(__data(__a), __data(__b))};
5405 }
5406
5407template <typename _Tp, typename _Ap>
5408 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
5409 operator&(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
5410 {
5411 return {__private_init,
5412 _Ap::_SimdImpl::_S_bit_and(__data(__a), __data(__b))};
5413 }
5414
5415template <typename _Tp, typename _Ap>
5416 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
5417 enable_if_t<is_floating_point_v<_Tp>, simd<_Tp, _Ap>>
5418 operator~(const simd<_Tp, _Ap>& __a)
5419 { return {__private_init, _Ap::_SimdImpl::_S_complement(__data(__a))}; }
5420} // namespace __float_bitwise_operators }}}
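
// Example: inside the implementation (where this reserved namespace is
// visible) these operators express sign-bit manipulation on floating-
// point vectors directly; negation, for instance, is an XOR against a
// broadcast of -0.0f, whose bit pattern is exactly the sign bit. A
// hedged sketch with illustrative names, not actual library internals:
//   simd<float, _Ap> __signmask = -0.f;  // only the sign bit set
//   simd<float, _Ap> __negated = __x ^ __signmask;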
5421/// @endcond
5422
5423/// @}
5424_GLIBCXX_SIMD_END_NAMESPACE
5425
5426#endif // __cplusplus >= 201703L
5427#endif // _GLIBCXX_EXPERIMENTAL_SIMD_H
5428
5429// vim: foldmethod=marker foldmarker={{{,}}}