libstdc++
simd_fixed_size.h
1// Simd fixed_size ABI specific implementations -*- C++ -*-
2
3// Copyright (C) 2020-2023 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/*
26 * The fixed_size ABI gives the following guarantees:
27 * - simd objects are passed via the stack
28 * - memory layout of `simd<_Tp, _Np>` is equivalent to `array<_Tp, _Np>`
29 * - alignment of `simd<_Tp, _Np>` is `_Np * sizeof(_Tp)` if _Np is a
30 * power-of-2 value, otherwise `std::__bit_ceil(_Np * sizeof(_Tp))` (Note:
31 * if the alignment were to exceed the system/compiler maximum, it is bounded
32 * to that maximum)
33 * - simd_mask objects are passed like bitset<_Np>
34 * - memory layout of `simd_mask<_Tp, _Np>` is equivalent to `bitset<_Np>`
35 * - alignment of `simd_mask<_Tp, _Np>` is equal to the alignment of
36 * `bitset<_Np>`
37 */
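/* Illustrative sketch (an added example with assumptions, not part of the
 * original header): given the guarantees above, on a typical target where
 * sizeof(float) == 4, a fixed_size simd of 7 floats is laid out like
 * array<float, 7>, and since 7 * sizeof(float) == 28 is not a power of two,
 * its alignment is std::__bit_ceil(28) == 32 (unless that exceeds the
 * system/compiler maximum). In user code this could be checked roughly as:
 *
 *   namespace stdx = std::experimental;
 *   using V7 = stdx::simd<float, stdx::simd_abi::fixed_size<7>>;
 *   static_assert(sizeof(V7) >= sizeof(std::array<float, 7>));
 *   static_assert(alignof(V7) >= alignof(std::array<float, 7>));
 */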
38
39#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
40#define _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
41
42#if __cplusplus >= 201703L
43
44#include <array>
45
46_GLIBCXX_SIMD_BEGIN_NAMESPACE
47
48// __simd_tuple_element {{{
49template <size_t _I, typename _Tp>
50 struct __simd_tuple_element;
51
52template <typename _Tp, typename _A0, typename... _As>
53 struct __simd_tuple_element<0, _SimdTuple<_Tp, _A0, _As...>>
54 { using type = simd<_Tp, _A0>; };
55
56template <size_t _I, typename _Tp, typename _A0, typename... _As>
57 struct __simd_tuple_element<_I, _SimdTuple<_Tp, _A0, _As...>>
58 {
59 using type =
60 typename __simd_tuple_element<_I - 1, _SimdTuple<_Tp, _As...>>::type;
61 };
62
63template <size_t _I, typename _Tp>
64 using __simd_tuple_element_t = typename __simd_tuple_element<_I, _Tp>::type;
65
66// }}}
67// __simd_tuple_concat {{{
68
69template <typename _Tp, typename... _A0s, typename... _A1s>
70 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, _A0s..., _A1s...>
71 __simd_tuple_concat(const _SimdTuple<_Tp, _A0s...>& __left,
72 const _SimdTuple<_Tp, _A1s...>& __right)
73 {
74 if constexpr (sizeof...(_A0s) == 0)
75 return __right;
76 else if constexpr (sizeof...(_A1s) == 0)
77 return __left;
78 else
79 return {__left.first, __simd_tuple_concat(__left.second, __right)};
80 }
81
82template <typename _Tp, typename _A10, typename... _A1s>
83 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, simd_abi::scalar, _A10,
84 _A1s...>
85 __simd_tuple_concat(const _Tp& __left,
86 const _SimdTuple<_Tp, _A10, _A1s...>& __right)
87 { return {__left, __right}; }
88
89// }}}
90// __simd_tuple_pop_front {{{
91// Returns the tail of __x: a _SimdTuple with the first _Np elements dropped.
92// Precondition: _Np must be the combined size of whole leading members of __x.
93template <size_t _Np, typename _Tp>
94 _GLIBCXX_SIMD_INTRINSIC constexpr decltype(auto)
95 __simd_tuple_pop_front(_Tp&& __x)
96 {
97 if constexpr (_Np == 0)
98 return static_cast<_Tp&&>(__x);
99 else
100 {
101 using _Up = __remove_cvref_t<_Tp>;
102 static_assert(_Np >= _Up::_S_first_size);
103 return __simd_tuple_pop_front<_Np - _Up::_S_first_size>(__x.second);
104 }
105 }
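// For illustration (hypothetical ABI names _A8/_A4, assumed to hold 8 and 4
// elements): if __x is a _SimdTuple<float, _A8, _A4, _A4>, then
//   __simd_tuple_pop_front<8>(__x)  yields __x.second         (4 + 4 elements)
//   __simd_tuple_pop_front<12>(__x) yields __x.second.second  (4 elements)
// An _Np that does not land on a member boundary violates the precondition and
// trips the static_assert above.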
106
107// }}}
108// __get_simd_at<_Np> {{{1
109struct __as_simd {};
110
111struct __as_simd_tuple {};
112
113template <typename _Tp, typename _A0, typename... _Abis>
114 _GLIBCXX_SIMD_INTRINSIC constexpr simd<_Tp, _A0>
115 __simd_tuple_get_impl(__as_simd, const _SimdTuple<_Tp, _A0, _Abis...>& __t,
116 _SizeConstant<0>)
117 { return {__private_init, __t.first}; }
118
119template <typename _Tp, typename _A0, typename... _Abis>
120 _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
121 __simd_tuple_get_impl(__as_simd_tuple,
122 const _SimdTuple<_Tp, _A0, _Abis...>& __t,
123 _SizeConstant<0>)
124 { return __t.first; }
125
126template <typename _Tp, typename _A0, typename... _Abis>
127 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
128 __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _A0, _Abis...>& __t,
129 _SizeConstant<0>)
130 { return __t.first; }
131
132template <typename _R, size_t _Np, typename _Tp, typename... _Abis>
133 _GLIBCXX_SIMD_INTRINSIC constexpr auto
134 __simd_tuple_get_impl(_R, const _SimdTuple<_Tp, _Abis...>& __t,
135 _SizeConstant<_Np>)
136 { return __simd_tuple_get_impl(_R(), __t.second, _SizeConstant<_Np - 1>()); }
137
138template <size_t _Np, typename _Tp, typename... _Abis>
139 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
140 __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _Abis...>& __t,
141 _SizeConstant<_Np>)
142 {
143 return __simd_tuple_get_impl(__as_simd_tuple(), __t.second,
144 _SizeConstant<_Np - 1>());
145 }
146
147template <size_t _Np, typename _Tp, typename... _Abis>
148 _GLIBCXX_SIMD_INTRINSIC constexpr auto
149 __get_simd_at(const _SimdTuple<_Tp, _Abis...>& __t)
150 { return __simd_tuple_get_impl(__as_simd(), __t, _SizeConstant<_Np>()); }
151
152// }}}
153// __get_tuple_at<_Np> {{{
154template <size_t _Np, typename _Tp, typename... _Abis>
155 _GLIBCXX_SIMD_INTRINSIC constexpr auto
156 __get_tuple_at(const _SimdTuple<_Tp, _Abis...>& __t)
157 {
158 return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>());
159 }
160
161template <size_t _Np, typename _Tp, typename... _Abis>
162 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
163 __get_tuple_at(_SimdTuple<_Tp, _Abis...>& __t)
164 {
165 return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>());
166 }
167
168// __tuple_element_meta {{{1
169template <typename _Tp, typename _Abi, size_t _Offset>
170 struct __tuple_element_meta : public _Abi::_SimdImpl
171 {
172 static_assert(is_same_v<typename _Abi::_SimdImpl::abi_type,
173 _Abi>); // this fails e.g. when _SimdImpl is an
174 // alias for _SimdImplBuiltin<_DifferentAbi>
175 using value_type = _Tp;
176 using abi_type = _Abi;
177 using _Traits = _SimdTraits<_Tp, _Abi>;
178 using _MaskImpl = typename _Abi::_MaskImpl;
179 using _MaskMember = typename _Traits::_MaskMember;
180 using simd_type = simd<_Tp, _Abi>;
181 static constexpr size_t _S_offset = _Offset;
182 static constexpr size_t _S_size() { return simd_size<_Tp, _Abi>::value; }
183 static constexpr _MaskImpl _S_mask_impl = {};
184
185 template <size_t _Np, bool _Sanitized>
186 _GLIBCXX_SIMD_INTRINSIC static auto
187 _S_submask(_BitMask<_Np, _Sanitized> __bits)
188 { return __bits.template _M_extract<_Offset, _S_size()>(); }
189
190 template <size_t _Np, bool _Sanitized>
191 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
192 _S_make_mask(_BitMask<_Np, _Sanitized> __bits)
193 {
194 return _MaskImpl::template _S_convert<_Tp>(
195 __bits.template _M_extract<_Offset, _S_size()>()._M_sanitized());
196 }
197
198 _GLIBCXX_SIMD_INTRINSIC static _ULLong
199 _S_mask_to_shifted_ullong(_MaskMember __k)
200 { return _MaskImpl::_S_to_bits(__k).to_ullong() << _Offset; }
201 };
202
203template <size_t _Offset, typename _Tp, typename _Abi, typename... _As>
204 _GLIBCXX_SIMD_INTRINSIC
205 __tuple_element_meta<_Tp, _Abi, _Offset>
206 __make_meta(const _SimdTuple<_Tp, _Abi, _As...>&)
207 { return {}; }
208
209// }}}1
210// _WithOffset wrapper class {{{
211template <size_t _Offset, typename _Base>
212 struct _WithOffset : public _Base
213 {
214 static inline constexpr size_t _S_offset = _Offset;
215
216 _GLIBCXX_SIMD_INTRINSIC char* _M_as_charptr()
217 {
218 return reinterpret_cast<char*>(this)
219 + _S_offset * sizeof(typename _Base::value_type);
220 }
221
222 _GLIBCXX_SIMD_INTRINSIC const char* _M_as_charptr() const
223 {
224 return reinterpret_cast<const char*>(this)
225 + _S_offset * sizeof(typename _Base::value_type);
226 }
227 };
228
229// make nested _WithOffset<_WithOffset<...>> ill-formed to use:
230template <size_t _O0, size_t _O1, typename _Base>
231 struct _WithOffset<_O0, _WithOffset<_O1, _Base>> {};
232
233template <size_t _Offset, typename _Tp>
234 _GLIBCXX_SIMD_INTRINSIC
235 decltype(auto)
236 __add_offset(_Tp& __base)
237 { return static_cast<_WithOffset<_Offset, __remove_cvref_t<_Tp>>&>(__base); }
238
239template <size_t _Offset, typename _Tp>
240 _GLIBCXX_SIMD_INTRINSIC
241 decltype(auto)
242 __add_offset(const _Tp& __base)
243 {
244 return static_cast<const _WithOffset<_Offset, __remove_cvref_t<_Tp>>&>(
245 __base);
246 }
247
248template <size_t _Offset, size_t _ExistingOffset, typename _Tp>
249 _GLIBCXX_SIMD_INTRINSIC
250 decltype(auto)
251 __add_offset(_WithOffset<_ExistingOffset, _Tp>& __base)
252 {
253 return static_cast<_WithOffset<_Offset + _ExistingOffset, _Tp>&>(
254 static_cast<_Tp&>(__base));
255 }
256
257template <size_t _Offset, size_t _ExistingOffset, typename _Tp>
258 _GLIBCXX_SIMD_INTRINSIC
259 decltype(auto)
260 __add_offset(const _WithOffset<_ExistingOffset, _Tp>& __base)
261 {
262 return static_cast<const _WithOffset<_Offset + _ExistingOffset, _Tp>&>(
263 static_cast<const _Tp&>(__base));
264 }
265
266template <typename _Tp>
267 constexpr inline size_t __offset = 0;
268
269template <size_t _Offset, typename _Tp>
270 constexpr inline size_t __offset<_WithOffset<_Offset, _Tp>>
271 = _WithOffset<_Offset, _Tp>::_S_offset;
272
273template <typename _Tp>
274 constexpr inline size_t __offset<const _Tp> = __offset<_Tp>;
275
276template <typename _Tp>
277 constexpr inline size_t __offset<_Tp&> = __offset<_Tp>;
278
279template <typename _Tp>
280 constexpr inline size_t __offset<_Tp&&> = __offset<_Tp>;
281
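// Illustration (for some _SimdTuple type _Tup and a reference __w of type
// _WithOffset<4, _Tup>&):
//   __offset<_WithOffset<4, _Tup>&> == 4, whereas __offset<_Tup&> == 0, and
//   __add_offset<2>(__w) yields a _WithOffset<6, _Tup>&, whose _M_as_charptr()
//   points 6 * sizeof(value_type) bytes past the start of the underlying tuple.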
282// }}}
283// _SimdTuple specializations {{{1
284// empty {{{2
285template <typename _Tp>
286 struct _SimdTuple<_Tp>
287 {
288 using value_type = _Tp;
289 static constexpr size_t _S_tuple_size = 0;
290 static constexpr size_t _S_size() { return 0; }
291 };
292
293// _SimdTupleData {{{2
294template <typename _FirstType, typename _SecondType>
295 struct _SimdTupleData
296 {
297 _FirstType first;
298 _SecondType second;
299
300 _GLIBCXX_SIMD_INTRINSIC
301 constexpr bool _M_is_constprop() const
302 {
303 if constexpr (is_class_v<_FirstType>)
304 return first._M_is_constprop() && second._M_is_constprop();
305 else
306 return __builtin_constant_p(first) && second._M_is_constprop();
307 }
308 };
309
310template <typename _FirstType, typename _Tp>
311 struct _SimdTupleData<_FirstType, _SimdTuple<_Tp>>
312 {
313 _FirstType first;
314 static constexpr _SimdTuple<_Tp> second = {};
315
316 _GLIBCXX_SIMD_INTRINSIC
317 constexpr bool _M_is_constprop() const
318 {
319 if constexpr (is_class_v<_FirstType>)
320 return first._M_is_constprop();
321 else
322 return __builtin_constant_p(first);
323 }
324 };
325
326// 1 or more {{{2
327template <typename _Tp, typename _Abi0, typename... _Abis>
328 struct _SimdTuple<_Tp, _Abi0, _Abis...>
329 : _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
330 _SimdTuple<_Tp, _Abis...>>
331 {
332 static_assert(!__is_fixed_size_abi_v<_Abi0>);
333 using value_type = _Tp;
334 using _FirstType = typename _SimdTraits<_Tp, _Abi0>::_SimdMember;
335 using _FirstAbi = _Abi0;
336 using _SecondType = _SimdTuple<_Tp, _Abis...>;
337 static constexpr size_t _S_tuple_size = sizeof...(_Abis) + 1;
338
339 static constexpr size_t _S_size()
340 { return simd_size_v<_Tp, _Abi0> + _SecondType::_S_size(); }
341
342 static constexpr size_t _S_first_size = simd_size_v<_Tp, _Abi0>;
343 static constexpr bool _S_is_homogeneous = (is_same_v<_Abi0, _Abis> && ...);
344
345 using _Base = _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
346 _SimdTuple<_Tp, _Abis...>>;
347 using _Base::first;
348 using _Base::second;
349
350 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple() = default;
351 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(const _SimdTuple&) = default;
352 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple& operator=(const _SimdTuple&)
353 = default;
354
355 template <typename _Up>
356 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x)
357 : _Base{static_cast<_Up&&>(__x)} {}
358
359 template <typename _Up, typename _Up2>
360 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x, _Up2&& __y)
361 : _Base{static_cast<_Up&&>(__x), static_cast<_Up2&&>(__y)} {}
362
363 template <typename _Up>
364 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x, _SimdTuple<_Tp>)
365 : _Base{static_cast<_Up&&>(__x)} {}
366
367 _GLIBCXX_SIMD_INTRINSIC char* _M_as_charptr()
368 { return reinterpret_cast<char*>(this); }
369
370 _GLIBCXX_SIMD_INTRINSIC const char* _M_as_charptr() const
371 { return reinterpret_cast<const char*>(this); }
372
373 template <size_t _Np>
374 _GLIBCXX_SIMD_INTRINSIC constexpr auto& _M_at()
375 {
376 if constexpr (_Np == 0)
377 return first;
378 else
379 return second.template _M_at<_Np - 1>();
380 }
381
382 template <size_t _Np>
383 _GLIBCXX_SIMD_INTRINSIC constexpr const auto& _M_at() const
384 {
385 if constexpr (_Np == 0)
386 return first;
387 else
388 return second.template _M_at<_Np - 1>();
389 }
390
391 template <size_t _Np>
392 _GLIBCXX_SIMD_INTRINSIC constexpr auto _M_simd_at() const
393 {
394 if constexpr (_Np == 0)
395 return simd<_Tp, _Abi0>(__private_init, first);
396 else
397 return second.template _M_simd_at<_Np - 1>();
398 }
399
400 template <size_t _Offset = 0, typename _Fp>
401 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple
402 _S_generate(_Fp&& __gen, _SizeConstant<_Offset> = {})
403 {
404 auto&& __first = __gen(__tuple_element_meta<_Tp, _Abi0, _Offset>());
405 if constexpr (_S_tuple_size == 1)
406 return {__first};
407 else
408 return {__first,
409 _SecondType::_S_generate(
410 static_cast<_Fp&&>(__gen),
411 _SizeConstant<_Offset + simd_size_v<_Tp, _Abi0>>())};
412 }
413
414 template <size_t _Offset = 0, typename _Fp, typename... _More>
415 _GLIBCXX_SIMD_INTRINSIC _SimdTuple
416 _M_apply_wrapped(_Fp&& __fun, const _More&... __more) const
417 {
418 auto&& __first
419 = __fun(__make_meta<_Offset>(*this), first, __more.first...);
420 if constexpr (_S_tuple_size == 1)
421 return {__first};
422 else
423 return {
424 __first,
425 second.template _M_apply_wrapped<_Offset + simd_size_v<_Tp, _Abi0>>(
426 static_cast<_Fp&&>(__fun), __more.second...)};
427 }
428
429 template <typename _Tup>
430 _GLIBCXX_SIMD_INTRINSIC constexpr decltype(auto)
431 _M_extract_argument(_Tup&& __tup) const
432 {
433 using _TupT = typename __remove_cvref_t<_Tup>::value_type;
434 if constexpr (is_same_v<_SimdTuple, __remove_cvref_t<_Tup>>)
435 return __tup.first;
436 else if (__builtin_is_constant_evaluated())
437 return __fixed_size_storage_t<_TupT, _S_first_size>::_S_generate(
438 [&](auto __meta) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
439 return __meta._S_generator(
440 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
441 return __tup[__i];
442 }, static_cast<_TupT*>(nullptr));
443 });
444 else
445 return [&]() { // not always_inline; allow the compiler to decide
446 __fixed_size_storage_t<_TupT, _S_first_size> __r;
447 __builtin_memcpy(__r._M_as_charptr(), __tup._M_as_charptr(),
448 sizeof(__r));
449 return __r;
450 }();
451 }
452
453 template <typename _Tup>
454 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
455 _M_skip_argument(_Tup&& __tup) const
456 {
457 static_assert(_S_tuple_size > 1);
458 using _Up = __remove_cvref_t<_Tup>;
459 constexpr size_t __off = __offset<_Up>;
460 if constexpr (_S_first_size == _Up::_S_first_size && __off == 0)
461 return __tup.second;
462 else if constexpr (_S_first_size > _Up::_S_first_size
463 && _S_first_size % _Up::_S_first_size == 0
464 && __off == 0)
465 return __simd_tuple_pop_front<_S_first_size>(__tup);
466 else if constexpr (_S_first_size + __off < _Up::_S_first_size)
467 return __add_offset<_S_first_size>(__tup);
468 else if constexpr (_S_first_size + __off == _Up::_S_first_size)
469 return __tup.second;
470 else
471 __assert_unreachable<_Tup>();
472 }
473
474 template <size_t _Offset, typename... _More>
475 _GLIBCXX_SIMD_INTRINSIC constexpr void
476 _M_assign_front(const _SimdTuple<_Tp, _Abi0, _More...>& __x) &
477 {
478 static_assert(_Offset == 0);
479 first = __x.first;
480 if constexpr (sizeof...(_More) > 0)
481 {
482 static_assert(sizeof...(_Abis) >= sizeof...(_More));
483 second.template _M_assign_front<0>(__x.second);
484 }
485 }
486
487 template <size_t _Offset>
488 _GLIBCXX_SIMD_INTRINSIC constexpr void
489 _M_assign_front(const _FirstType& __x) &
490 {
491 static_assert(_Offset == 0);
492 first = __x;
493 }
494
495 template <size_t _Offset, typename... _As>
496 _GLIBCXX_SIMD_INTRINSIC constexpr void
497 _M_assign_front(const _SimdTuple<_Tp, _As...>& __x) &
498 {
499 __builtin_memcpy(_M_as_charptr() + _Offset * sizeof(value_type),
500 __x._M_as_charptr(),
501 sizeof(_Tp) * _SimdTuple<_Tp, _As...>::_S_size());
502 }
503
504 /*
505 * Iterate over the `first` member of each nesting level of this _SimdTuple and
506 * call __fun for each of them. If additional arguments are passed via __more,
507 * chunk them into _SimdTuple or __vector_type_t objects of matching size.
508 */
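// Sketch (hypothetical member sizes, for illustration only): if this tuple
// holds an 8-wide member followed by a 4-wide member, then
// _M_apply_per_chunk(__fun, __other) evaluates roughly
//   __fun(meta of the 8-wide ABI, this->first,        8-wide chunk of __other)
//   __fun(meta of the 4-wide ABI, this->second.first, 4-wide chunk of __other)
// and returns a _SimdTuple of the same shape built from the two results; for
// writable (non-const lvalue) arguments in __more the chunks are written back.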
509 template <typename _Fp, typename... _More>
510 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple
511 _M_apply_per_chunk(_Fp&& __fun, _More&&... __more) const
512 {
513 if constexpr ((...
514 || conjunction_v<
515 is_lvalue_reference<_More>,
516 negation<is_const<remove_reference_t<_More>>>>) )
517 {
518 // need to write back at least one of __more after calling __fun
519 auto&& __first = [&](auto... __args) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
520 auto __r = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
521 __args...);
522 [[maybe_unused]] auto&& __ignore_me = {(
523 [](auto&& __dst, const auto& __src) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
524 if constexpr (is_assignable_v<decltype(__dst),
525 decltype(__dst)>)
526 {
527 __dst.template _M_assign_front<__offset<decltype(__dst)>>(
528 __src);
529 }
530 }(static_cast<_More&&>(__more), __args),
531 0)...};
532 return __r;
533 }(_M_extract_argument(__more)...);
534 if constexpr (_S_tuple_size == 1)
535 return {__first};
536 else
537 return {__first,
538 second._M_apply_per_chunk(static_cast<_Fp&&>(__fun),
539 _M_skip_argument(__more)...)};
540 }
541 else
542 {
543 auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
544 _M_extract_argument(__more)...);
545 if constexpr (_S_tuple_size == 1)
546 return {__first};
547 else
548 return {__first,
549 second._M_apply_per_chunk(static_cast<_Fp&&>(__fun),
550 _M_skip_argument(__more)...)};
551 }
552 }
553
554 template <typename _R = _Tp, typename _Fp, typename... _More>
555 _GLIBCXX_SIMD_INTRINSIC auto _M_apply_r(_Fp&& __fun,
556 const _More&... __more) const
557 {
558 auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
559 __more.first...);
560 if constexpr (_S_tuple_size == 1)
561 return __first;
562 else
563 return __simd_tuple_concat<_R>(
564 __first, second.template _M_apply_r<_R>(static_cast<_Fp&&>(__fun),
565 __more.second...));
566 }
567
568 template <typename _Fp, typename... _More>
569 _GLIBCXX_SIMD_INTRINSIC constexpr friend _SanitizedBitMask<_S_size()>
570 _M_test(const _Fp& __fun, const _SimdTuple& __x, const _More&... __more)
571 {
572 const _SanitizedBitMask<_S_first_size> __first
573 = _Abi0::_MaskImpl::_S_to_bits(
574 __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), __x.first,
575 __more.first...));
576 if constexpr (_S_tuple_size == 1)
577 return __first;
578 else
579 return _M_test(__fun, __x.second, __more.second...)
580 ._M_prepend(__first);
581 }
582
583 template <typename _Up, _Up _I>
584 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
585 operator[](integral_constant<_Up, _I>) const noexcept
586 {
587 if constexpr (_I < simd_size_v<_Tp, _Abi0>)
588 return _M_subscript_read(_I);
589 else
590 return second[integral_constant<_Up, _I - simd_size_v<_Tp, _Abi0>>()];
591 }
592
593 _GLIBCXX_SIMD_INTRINSIC
594 _Tp operator[](size_t __i) const noexcept
595 {
596 if constexpr (_S_tuple_size == 1)
597 return _M_subscript_read(__i);
598 else
599 {
600#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
601 return reinterpret_cast<const __may_alias<_Tp>*>(this)[__i];
602#else
603 if constexpr (__is_scalar_abi<_Abi0>())
604 {
605 const _Tp* ptr = &first;
606 return ptr[__i];
607 }
608 else
609 return __i < simd_size_v<_Tp, _Abi0>
610 ? _M_subscript_read(__i)
611 : second[__i - simd_size_v<_Tp, _Abi0>];
612#endif
613 }
614 }
615
616 _GLIBCXX_SIMD_INTRINSIC
617 void _M_set(size_t __i, _Tp __val) noexcept
618 {
619 if constexpr (_S_tuple_size == 1)
620 return _M_subscript_write(__i, __val);
621 else
622 {
623#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
624 reinterpret_cast<__may_alias<_Tp>*>(this)[__i] = __val;
625#else
626 if (__i < simd_size_v<_Tp, _Abi0>)
627 _M_subscript_write(__i, __val);
628 else
629 second._M_set(__i - simd_size_v<_Tp, _Abi0>, __val);
630#endif
631 }
632 }
633
634 private:
635 // _M_subscript_read/_write {{{
636 _GLIBCXX_SIMD_INTRINSIC
637 _Tp _M_subscript_read([[maybe_unused]] size_t __i) const noexcept
638 {
639 if constexpr (__is_vectorizable_v<_FirstType>)
640 return first;
641 else
642 return first[__i];
643 }
644
645 _GLIBCXX_SIMD_INTRINSIC
646 void _M_subscript_write([[maybe_unused]] size_t __i, _Tp __y) noexcept
647 {
648 if constexpr (__is_vectorizable_v<_FirstType>)
649 first = __y;
650 else
651 first._M_set(__i, __y);
652 }
653
654 // }}}
655 };
656
657// __make_simd_tuple {{{1
658template <typename _Tp, typename _A0>
659 _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
660 __make_simd_tuple(simd<_Tp, _A0> __x0)
661 { return {__data(__x0)}; }
662
663template <typename _Tp, typename _A0, typename... _As>
664 _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _As...>
665 __make_simd_tuple(const simd<_Tp, _A0>& __x0, const simd<_Tp, _As>&... __xs)
666 { return {__data(__x0), __make_simd_tuple(__xs...)}; }
667
668template <typename _Tp, typename _A0>
669 _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
670 __make_simd_tuple(const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0)
671 { return {__arg0}; }
672
673template <typename _Tp, typename _A0, typename _A1, typename... _Abis>
674 _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _A1, _Abis...>
675 __make_simd_tuple(
676 const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0,
677 const typename _SimdTraits<_Tp, _A1>::_SimdMember& __arg1,
678 const typename _SimdTraits<_Tp, _Abis>::_SimdMember&... __args)
679 { return {__arg0, __make_simd_tuple<_Tp, _A1, _Abis...>(__arg1, __args...)}; }
680
681// __to_simd_tuple {{{1
682template <typename _Tp, size_t _Np, typename _V, size_t _NV, typename... _VX>
683 _GLIBCXX_SIMD_INTRINSIC constexpr __fixed_size_storage_t<_Tp, _Np>
684 __to_simd_tuple(const array<_V, _NV>& __from, const _VX... __fromX);
685
686template <typename _Tp, size_t _Np,
687 size_t _Offset = 0, // skip this many elements in __from0
688 typename _R = __fixed_size_storage_t<_Tp, _Np>, typename _V0,
689 typename _V0VT = _VectorTraits<_V0>, typename... _VX>
690 _GLIBCXX_SIMD_INTRINSIC _R constexpr __to_simd_tuple(const _V0 __from0,
691 const _VX... __fromX)
692 {
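    // Fill _R::first from __from0 starting at element _Offset and recurse for
    // _R::second, either continuing within __from0 at a larger _Offset or
    // moving on to the next input vector in __fromX. The if-constexpr chain
    // below distinguishes how much of __from0 the first member of _R consumes.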
693 static_assert(is_same_v<typename _V0VT::value_type, _Tp>);
694 static_assert(_Offset < _V0VT::_S_full_size);
695 using _R0 = __vector_type_t<_Tp, _R::_S_first_size>;
696 if constexpr (_R::_S_tuple_size == 1)
697 {
698 if constexpr (_Np == 1)
699 return _R{__from0[_Offset]};
700 else if constexpr (_Offset == 0 && _V0VT::_S_full_size >= _Np)
701 return _R{__intrin_bitcast<_R0>(__from0)};
702 else if constexpr (_Offset * 2 == _V0VT::_S_full_size
703 && _V0VT::_S_full_size / 2 >= _Np)
704 return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0))};
705 else if constexpr (_Offset * 4 == _V0VT::_S_full_size
706 && _V0VT::_S_full_size / 4 >= _Np)
707 return _R{__intrin_bitcast<_R0>(__extract_part<1, 4>(__from0))};
708 else
709 __assert_unreachable<_Tp>();
710 }
711 else
712 {
713 if constexpr (1 == _R::_S_first_size)
714 { // extract one scalar and recurse
715 if constexpr (_Offset + 1 < _V0VT::_S_full_size)
716 return _R{__from0[_Offset],
717 __to_simd_tuple<_Tp, _Np - 1, _Offset + 1>(__from0,
718 __fromX...)};
719 else
720 return _R{__from0[_Offset],
721 __to_simd_tuple<_Tp, _Np - 1, 0>(__fromX...)};
722 }
723
724 // place __from0 into _R::first and recurse for __fromX -> _R::second
725 else if constexpr (_V0VT::_S_full_size == _R::_S_first_size
726 && _Offset == 0)
727 return _R{__from0,
728 __to_simd_tuple<_Tp, _Np - _R::_S_first_size>(__fromX...)};
729
730 // place lower part of __from0 into _R::first and recurse with _Offset
731 else if constexpr (_V0VT::_S_full_size > _R::_S_first_size
732 && _Offset == 0)
733 return _R{__intrin_bitcast<_R0>(__from0),
734 __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
735 _R::_S_first_size>(__from0, __fromX...)};
736
737 // place lower part of second quarter of __from0 into _R::first and
738 // recurse with _Offset
739 else if constexpr (_Offset * 4 == _V0VT::_S_full_size
740 && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
741 return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
742 __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
743 _Offset + _R::_S_first_size>(__from0,
744 __fromX...)};
745
746 // place lower half of high half of __from0 into _R::first and recurse
747 // with _Offset
748 else if constexpr (_Offset * 2 == _V0VT::_S_full_size
749 && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
750 return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
751 __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
752 _Offset + _R::_S_first_size>(__from0,
753 __fromX...)};
754
755 // place high half of __from0 into _R::first and recurse with __fromX
756 else if constexpr (_Offset * 2 == _V0VT::_S_full_size
757 && _V0VT::_S_full_size / 2 >= _R::_S_first_size)
758 return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0)),
759 __to_simd_tuple<_Tp, _Np - _R::_S_first_size, 0>(
760 __fromX...)};
761
762 // ill-formed if some unforeseen pattern is needed
763 else
764 __assert_unreachable<_Tp>();
765 }
766 }
767
768template <typename _Tp, size_t _Np, typename _V, size_t _NV, typename... _VX>
769 _GLIBCXX_SIMD_INTRINSIC constexpr __fixed_size_storage_t<_Tp, _Np>
770 __to_simd_tuple(const array<_V, _NV>& __from, const _VX... __fromX)
771 {
772 if constexpr (is_same_v<_Tp, _V>)
773 {
774 static_assert(
775 sizeof...(_VX) == 0,
776 "An array of scalars must be the last argument to __to_simd_tuple");
777 return __call_with_subscripts(
778 __from, make_index_sequence<_NV>(),
779 [&](const auto... __args) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
780 return __simd_tuple_concat(
781 _SimdTuple<_Tp, simd_abi::scalar>{__args}..., _SimdTuple<_Tp>());
782 });
783 }
784 else
785 return __call_with_subscripts(
786 __from, make_index_sequence<_NV>(),
787 [&](const auto... __args) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
788 return __to_simd_tuple<_Tp, _Np>(__args..., __fromX...);
789 });
790 }
791
792template <size_t, typename _Tp>
793 using __to_tuple_helper = _Tp;
794
795template <typename _Tp, typename _A0, size_t _NOut, size_t _Np,
796 size_t... _Indexes>
797 _GLIBCXX_SIMD_INTRINSIC __fixed_size_storage_t<_Tp, _NOut>
798 __to_simd_tuple_impl(index_sequence<_Indexes...>,
799 const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
800 {
801 return __make_simd_tuple<_Tp, __to_tuple_helper<_Indexes, _A0>...>(
802 __args[_Indexes]...);
803 }
804
805template <typename _Tp, typename _A0, size_t _NOut, size_t _Np,
806 typename _R = __fixed_size_storage_t<_Tp, _NOut>>
807 _GLIBCXX_SIMD_INTRINSIC _R
808 __to_simd_tuple_sized(
809 const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
810 {
811 static_assert(_Np * simd_size_v<_Tp, _A0> >= _NOut);
812 return __to_simd_tuple_impl<_Tp, _A0, _NOut>(
813 make_index_sequence<_R::_S_tuple_size>(), __args);
814 }
815
816// __optimize_simd_tuple {{{1
817template <typename _Tp>
818 _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp>
819 __optimize_simd_tuple(const _SimdTuple<_Tp>)
820 { return {}; }
821
822template <typename _Tp, typename _Ap>
823 _GLIBCXX_SIMD_INTRINSIC const _SimdTuple<_Tp, _Ap>&
824 __optimize_simd_tuple(const _SimdTuple<_Tp, _Ap>& __x)
825 { return __x; }
826
827template <typename _Tp, typename _A0, typename _A1, typename... _Abis,
828 typename _R = __fixed_size_storage_t<
829 _Tp, _SimdTuple<_Tp, _A0, _A1, _Abis...>::_S_size()>>
830 _GLIBCXX_SIMD_INTRINSIC _R
831 __optimize_simd_tuple(const _SimdTuple<_Tp, _A0, _A1, _Abis...>& __x)
832 {
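    // Repack __x into the canonical __fixed_size_storage_t layout for its
    // element count, concatenating neighbouring members into wider native
    // chunks where possible and falling back to a memcpy of the
    // layout-compatible bytes otherwise.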
833 using _Tup = _SimdTuple<_Tp, _A0, _A1, _Abis...>;
834 if constexpr (is_same_v<_R, _Tup>)
835 return __x;
836 else if constexpr (is_same_v<typename _R::_FirstType,
837 typename _Tup::_FirstType>)
838 return {__x.first, __optimize_simd_tuple(__x.second)};
839 else if constexpr (__is_scalar_abi<_A0>()
840 || _A0::template _S_is_partial<_Tp>)
841 return {__generate_from_n_evaluations<_R::_S_first_size,
842 typename _R::_FirstType>(
843 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; }),
844 __optimize_simd_tuple(
845 __simd_tuple_pop_front<_R::_S_first_size>(__x))};
846 else if constexpr (is_same_v<_A0, _A1>
847 && _R::_S_first_size == simd_size_v<_Tp, _A0> + simd_size_v<_Tp, _A1>)
848 return {__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
849 __optimize_simd_tuple(__x.second.second)};
850 else if constexpr (sizeof...(_Abis) >= 2
851 && _R::_S_first_size == (4 * simd_size_v<_Tp, _A0>)
852 && simd_size_v<_Tp, _A0> == __simd_tuple_element_t<
853 (sizeof...(_Abis) >= 2 ? 3 : 0), _Tup>::size())
854 return {
855 __concat(__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
856 __concat(__x.template _M_at<2>(), __x.template _M_at<3>())),
857 __optimize_simd_tuple(__x.second.second.second.second)};
858 else
859 {
860 static_assert(sizeof(_R) == sizeof(__x));
861 _R __r;
862 __builtin_memcpy(__r._M_as_charptr(), __x._M_as_charptr(),
863 sizeof(_Tp) * _R::_S_size());
864 return __r;
865 }
866 }
867
868// __for_each(const _SimdTuple &, Fun) {{{1
869template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
870 _GLIBCXX_SIMD_INTRINSIC constexpr void
871 __for_each(const _SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
872 { static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__t), __t.first); }
873
874template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
875 typename... _As, typename _Fp>
876 _GLIBCXX_SIMD_INTRINSIC constexpr void
877 __for_each(const _SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
878 {
879 __fun(__make_meta<_Offset>(__t), __t.first);
880 __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
881 static_cast<_Fp&&>(__fun));
882 }
883
884// __for_each(_SimdTuple &, Fun) {{{1
885template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
886 _GLIBCXX_SIMD_INTRINSIC constexpr void
887 __for_each(_SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
888 { static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__t), __t.first); }
889
890template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
891 typename... _As, typename _Fp>
892 _GLIBCXX_SIMD_INTRINSIC constexpr void
893 __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
894 {
895 __fun(__make_meta<_Offset>(__t), __t.first);
896 __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
897 static_cast<_Fp&&>(__fun));
898 }
899
900// __for_each(_SimdTuple &, const _SimdTuple &, Fun) {{{1
901template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
902 _GLIBCXX_SIMD_INTRINSIC constexpr void
903 __for_each(_SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b,
904 _Fp&& __fun)
905 {
906 static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first);
907 }
908
909template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
910 typename... _As, typename _Fp>
911 _GLIBCXX_SIMD_INTRINSIC constexpr void
912 __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __a,
913 const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
914 {
915 __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
916 __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
917 static_cast<_Fp&&>(__fun));
918 }
919
920// __for_each(const _SimdTuple &, const _SimdTuple &, Fun) {{{1
921template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
922 _GLIBCXX_SIMD_INTRINSIC constexpr void
923 __for_each(const _SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b,
924 _Fp&& __fun)
925 {
926 static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first);
927 }
928
929template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
930 typename... _As, typename _Fp>
931 _GLIBCXX_SIMD_INTRINSIC constexpr void
932 __for_each(const _SimdTuple<_Tp, _A0, _A1, _As...>& __a,
933 const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
934 {
935 __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
936 __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
937 static_cast<_Fp&&>(__fun));
938 }
939
940// }}}1
941// __extract_part(_SimdTuple) {{{
942template <int _Index, int _Total, int _Combine, typename _Tp, typename _A0,
943 typename... _As>
944 _GLIBCXX_SIMD_INTRINSIC auto // __vector_type_t or _SimdTuple
945 __extract_part(const _SimdTuple<_Tp, _A0, _As...>& __x)
946 {
947 // worst cases (member sizes of __x => element counts of the returned parts):
948 // (a) 4, 4, 4 => 3, 3, 3, 3 (_Total = 4)
949 // (b) 2, 2, 2 => 3, 3 (_Total = 2)
950 // (c) 4, 2 => 2, 2, 2 (_Total = 3)
951 using _Tuple = _SimdTuple<_Tp, _A0, _As...>;
952 static_assert(_Index + _Combine <= _Total && _Index >= 0 && _Total >= 1);
953 constexpr size_t _Np = _Tuple::_S_size();
954 static_assert(_Np >= _Total && _Np % _Total == 0);
955 constexpr size_t __values_per_part = _Np / _Total;
956 [[maybe_unused]] constexpr size_t __values_to_skip
957 = _Index * __values_per_part;
958 constexpr size_t __return_size = __values_per_part * _Combine;
959 using _RetAbi = simd_abi::deduce_t<_Tp, __return_size>;
960
961 // handle (optimize) the simple cases
962 if constexpr (_Index == 0 && _Tuple::_S_first_size == __return_size)
963 return __x.first._M_data;
964 else if constexpr (_Index == 0 && _Total == _Combine)
965 return __x;
966 else if constexpr (_Index == 0 && _Tuple::_S_first_size >= __return_size)
967 return __intrin_bitcast<__vector_type_t<_Tp, __return_size>>(
968 __as_vector(__x.first));
969
970 // recurse to skip unused data members at the beginning of _SimdTuple
971 else if constexpr (__values_to_skip >= _Tuple::_S_first_size)
972 { // recurse
973 if constexpr (_Tuple::_S_first_size % __values_per_part == 0)
974 {
975 constexpr int __parts_in_first
976 = _Tuple::_S_first_size / __values_per_part;
977 return __extract_part<_Index - __parts_in_first,
978 _Total - __parts_in_first, _Combine>(
979 __x.second);
980 }
981 else
982 return __extract_part<__values_to_skip - _Tuple::_S_first_size,
983 _Np - _Tuple::_S_first_size, __return_size>(
984 __x.second);
985 }
986
987 // extract from multiple _SimdTuple data members
988 else if constexpr (__return_size > _Tuple::_S_first_size - __values_to_skip)
989 {
990#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
991 const __may_alias<_Tp>* const element_ptr
992 = reinterpret_cast<const __may_alias<_Tp>*>(&__x) + __values_to_skip;
993 return __as_vector(simd<_Tp, _RetAbi>(element_ptr, element_aligned));
994#else
995 [[maybe_unused]] constexpr size_t __offset = __values_to_skip;
996 return __as_vector(simd<_Tp, _RetAbi>(
997 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
998 constexpr _SizeConstant<__i + __offset> __k;
999 return __x[__k];
1000 }));
1001#endif
1002 }
1003
1004 // all of the return values are in __x.first
1005 else if constexpr (_Tuple::_S_first_size % __values_per_part == 0)
1006 return __extract_part<_Index, _Tuple::_S_first_size / __values_per_part,
1007 _Combine>(__x.first);
1008 else
1009 return __extract_part<__values_to_skip, _Tuple::_S_first_size,
1010 _Combine * __values_per_part>(__x.first);
1011 }
1012
1013// }}}
1014// __fixed_size_storage_t<_Tp, _Np>{{{
1015template <typename _Tp, int _Np, typename _Tuple,
1016 typename _Next = simd<_Tp, _AllNativeAbis::_BestAbi<_Tp, _Np>>,
1017 int _Remain = _Np - int(_Next::size())>
1018 struct __fixed_size_storage_builder;
1019
1020template <typename _Tp, int _Np>
1021 struct __fixed_size_storage
1022 : public __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp>> {};
1023
1024template <typename _Tp, int _Np, typename... _As, typename _Next>
1025 struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,
1026 0>
1027 { using type = _SimdTuple<_Tp, _As..., typename _Next::abi_type>; };
1028
1029template <typename _Tp, int _Np, typename... _As, typename _Next, int _Remain>
1030 struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,
1031 _Remain>
1032 {
1033 using type = typename __fixed_size_storage_builder<
1034 _Tp, _Remain, _SimdTuple<_Tp, _As..., typename _Next::abi_type>>::type;
1035 };
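// Sketch of the recursion (target-dependent, purely illustrative): on a target
// whose widest native float ABI holds 8 elements, __fixed_size_storage_t<float, 12>
// would be built roughly as
//   _SimdTuple<float, [8-wide ABI], [4-wide ABI]>
// i.e. each step appends _AllNativeAbis::_BestAbi for the remaining element
// count until _Remain reaches 0.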
1036
1037// }}}
1038// __autocvt_to_simd {{{
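// Wraps (a reference to) a _SimdTuple member or an arithmetic object so it can
// bind to simd, simd&, or simd* parameters, e.g. the output arguments of
// frexp/remquo below. The arithmetic specialization goes through a
// fixed_size_simd<_TT, 1> and writes the value back in its destructor.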
1039template <typename _Tp, bool = is_arithmetic_v<__remove_cvref_t<_Tp>>>
1040 struct __autocvt_to_simd
1041 {
1042 _Tp _M_data;
1043 using _TT = __remove_cvref_t<_Tp>;
1044
1045 _GLIBCXX_SIMD_INTRINSIC
1046 operator _TT()
1047 { return _M_data; }
1048
1049 _GLIBCXX_SIMD_INTRINSIC
1050 operator _TT&()
1051 {
1052 static_assert(is_lvalue_reference<_Tp>::value, "");
1053 static_assert(!is_const<_Tp>::value, "");
1054 return _M_data;
1055 }
1056
1057 _GLIBCXX_SIMD_INTRINSIC
1058 operator _TT*()
1059 {
1060 static_assert(is_lvalue_reference<_Tp>::value, "");
1061 static_assert(!is_const<_Tp>::value, "");
1062 return &_M_data;
1063 }
1064
1065 _GLIBCXX_SIMD_INTRINSIC
1066 constexpr __autocvt_to_simd(_Tp dd) : _M_data(dd) {}
1067
1068 template <typename _Abi>
1069 _GLIBCXX_SIMD_INTRINSIC
1070 operator simd<typename _TT::value_type, _Abi>()
1071 { return {__private_init, _M_data}; }
1072
1073 template <typename _Abi>
1074 _GLIBCXX_SIMD_INTRINSIC
1075 operator simd<typename _TT::value_type, _Abi>&()
1076 {
1077 return *reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(
1078 &_M_data);
1079 }
1080
1081 template <typename _Abi>
1082 _GLIBCXX_SIMD_INTRINSIC
1083 operator simd<typename _TT::value_type, _Abi>*()
1084 {
1085 return reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(
1086 &_M_data);
1087 }
1088 };
1089
1090template <typename _Tp>
1091 __autocvt_to_simd(_Tp &&) -> __autocvt_to_simd<_Tp>;
1092
1093template <typename _Tp>
1094 struct __autocvt_to_simd<_Tp, true>
1095 {
1096 using _TT = __remove_cvref_t<_Tp>;
1097 _Tp _M_data;
1098 fixed_size_simd<_TT, 1> _M_fd;
1099
1100 _GLIBCXX_SIMD_INTRINSIC
1101 constexpr __autocvt_to_simd(_Tp dd) : _M_data(dd), _M_fd(_M_data) {}
1102
1103 _GLIBCXX_SIMD_INTRINSIC
1104 ~__autocvt_to_simd()
1105 { _M_data = __data(_M_fd).first; }
1106
1107 _GLIBCXX_SIMD_INTRINSIC
1108 operator fixed_size_simd<_TT, 1>()
1109 { return _M_fd; }
1110
1111 _GLIBCXX_SIMD_INTRINSIC
1112 operator fixed_size_simd<_TT, 1> &()
1113 {
1114 static_assert(is_lvalue_reference<_Tp>::value, "");
1115 static_assert(!is_const<_Tp>::value, "");
1116 return _M_fd;
1117 }
1118
1119 _GLIBCXX_SIMD_INTRINSIC
1120 operator fixed_size_simd<_TT, 1> *()
1121 {
1122 static_assert(is_lvalue_reference<_Tp>::value, "");
1123 static_assert(!is_const<_Tp>::value, "");
1124 return &_M_fd;
1125 }
1126 };
1127
1128// }}}
1129
1130struct _CommonImplFixedSize;
1131template <int _Np, typename = __detail::__odr_helper> struct _SimdImplFixedSize;
1132template <int _Np, typename = __detail::__odr_helper> struct _MaskImplFixedSize;
1133// simd_abi::_Fixed {{{
1134template <int _Np>
1135 struct simd_abi::_Fixed
1136 {
1137 template <typename _Tp> static constexpr size_t _S_size = _Np;
1138 template <typename _Tp> static constexpr size_t _S_full_size = _Np;
1139 // validity traits {{{
1140 struct _IsValidAbiTag : public __bool_constant<(_Np > 0)> {};
1141
1142 template <typename _Tp>
1143 struct _IsValidSizeFor
1144 : __bool_constant<(_Np <= simd_abi::max_fixed_size<_Tp>)> {};
1145
1146 template <typename _Tp>
1147 struct _IsValid : conjunction<_IsValidAbiTag, __is_vectorizable<_Tp>,
1148 _IsValidSizeFor<_Tp>> {};
1149
1150 template <typename _Tp>
1151 static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;
1152
1153 // }}}
1154 // _S_masked {{{
1155 _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
1156 _S_masked(_BitMask<_Np> __x)
1157 { return __x._M_sanitized(); }
1158
1159 _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
1160 _S_masked(_SanitizedBitMask<_Np> __x)
1161 { return __x; }
1162
1163 // }}}
1164 // _*Impl {{{
1165 using _CommonImpl = _CommonImplFixedSize;
1166 using _SimdImpl = _SimdImplFixedSize<_Np>;
1167 using _MaskImpl = _MaskImplFixedSize<_Np>;
1168
1169 // }}}
1170 // __traits {{{
1171 template <typename _Tp, bool = _S_is_valid_v<_Tp>>
1172 struct __traits : _InvalidTraits {};
1173
1174 template <typename _Tp>
1175 struct __traits<_Tp, true>
1176 {
1177 using _IsValid = true_type;
1178 using _SimdImpl = _SimdImplFixedSize<_Np>;
1179 using _MaskImpl = _MaskImplFixedSize<_Np>;
1180
1181 // simd and simd_mask member types {{{
1182 using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;
1183 using _MaskMember = _SanitizedBitMask<_Np>;
1184
1185 static constexpr size_t _S_simd_align
1186 = std::__bit_ceil(_Np * sizeof(_Tp));
1187
1188 static constexpr size_t _S_mask_align = alignof(_MaskMember);
1189
1190 // }}}
1191 // _SimdBase / base class for simd, providing extra conversions {{{
1192 struct _SimdBase
1193 {
1194 // The following ensures that function arguments are passed via the stack.
1195 // This is important for ABI compatibility across TU boundaries.
1196 _GLIBCXX_SIMD_ALWAYS_INLINE
1197 _SimdBase(const _SimdBase&) {}
1198 _SimdBase() = default;
1199
1200 _GLIBCXX_SIMD_ALWAYS_INLINE
1201 explicit operator const _SimdMember &() const
1202 { return static_cast<const simd<_Tp, _Fixed>*>(this)->_M_data; }
1203
1204 _GLIBCXX_SIMD_ALWAYS_INLINE
1205 explicit operator array<_Tp, _Np>() const
1206 {
1207 array<_Tp, _Np> __r;
1208 // _SimdMember can be larger because of higher alignment
1209 static_assert(sizeof(__r) <= sizeof(_SimdMember), "");
1210 __builtin_memcpy(__r.data(), &static_cast<const _SimdMember&>(*this),
1211 sizeof(__r));
1212 return __r;
1213 }
1214 };
1215
1216 // }}}
1217 // _MaskBase {{{
1218 // empty. The bitset interface suffices
1219 struct _MaskBase {};
1220
1221 // }}}
1222 // _SimdCastType {{{
1223 struct _SimdCastType
1224 {
1225 _GLIBCXX_SIMD_ALWAYS_INLINE
1226 _SimdCastType(const array<_Tp, _Np>&);
1227 _GLIBCXX_SIMD_ALWAYS_INLINE
1228 _SimdCastType(const _SimdMember& dd) : _M_data(dd) {}
1229 _GLIBCXX_SIMD_ALWAYS_INLINE
1230 explicit operator const _SimdMember &() const { return _M_data; }
1231
1232 private:
1233 const _SimdMember& _M_data;
1234 };
1235
1236 // }}}
1237 // _MaskCastType {{{
1238 class _MaskCastType
1239 {
1240 _MaskCastType() = delete;
1241 };
1242 // }}}
1243 };
1244 // }}}
1245 };
1246
1247// }}}
1248// _CommonImplFixedSize {{{
1249struct _CommonImplFixedSize
1250{
1251 // _S_store {{{
1252 template <typename _Tp, typename... _As>
1253 _GLIBCXX_SIMD_INTRINSIC static void
1254 _S_store(const _SimdTuple<_Tp, _As...>& __x, void* __addr)
1255 {
1256 constexpr size_t _Np = _SimdTuple<_Tp, _As...>::_S_size();
1257 __builtin_memcpy(__addr, &__x, _Np * sizeof(_Tp));
1258 }
1259
1260 // }}}
1261};
1262
1263// }}}
1264// _SimdImplFixedSize {{{1
1265// fixed_size must not inherit from _SimdMathFallback, so that the math
1266// specializations of the ABIs used inside the _SimdTuple take precedence
1267template <int _Np, typename>
1268 struct _SimdImplFixedSize
1269 {
1270 // member types {{{2
1271 using _MaskMember = _SanitizedBitMask<_Np>;
1272
1273 template <typename _Tp>
1274 using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;
1275
1276 template <typename _Tp>
1277 static constexpr size_t _S_tuple_size = _SimdMember<_Tp>::_S_tuple_size;
1278
1279 template <typename _Tp>
1280 using _Simd = simd<_Tp, simd_abi::fixed_size<_Np>>;
1281
1282 template <typename _Tp>
1283 using _TypeTag = _Tp*;
1284
1285 // broadcast {{{2
1286 template <typename _Tp>
1287 static constexpr inline _SimdMember<_Tp> _S_broadcast(_Tp __x) noexcept
1288 {
1289 return _SimdMember<_Tp>::_S_generate(
1290 [&](auto __meta) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1291 return __meta._S_broadcast(__x);
1292 });
1293 }
1294
1295 // _S_generator {{{2
1296 template <typename _Fp, typename _Tp>
1297 static constexpr inline _SimdMember<_Tp> _S_generator(_Fp&& __gen,
1298 _TypeTag<_Tp>)
1299 {
1300 return _SimdMember<_Tp>::_S_generate(
1301 [&__gen](auto __meta) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1302 return __meta._S_generator(
1303 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1304 return __i < _Np ? __gen(_SizeConstant<__meta._S_offset + __i>())
1305 : 0;
1306 },
1307 _TypeTag<_Tp>());
1308 });
1309 }
1310
1311 // _S_load {{{2
1312 template <typename _Tp, typename _Up>
1313 static inline _SimdMember<_Tp> _S_load(const _Up* __mem,
1314 _TypeTag<_Tp>) noexcept
1315 {
1316 return _SimdMember<_Tp>::_S_generate(
1317 [&](auto __meta) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1318 return __meta._S_load(&__mem[__meta._S_offset], _TypeTag<_Tp>());
1319 });
1320 }
1321
1322 // _S_masked_load {{{2
1323 template <typename _Tp, typename... _As, typename _Up>
1324 static inline _SimdTuple<_Tp, _As...>
1325 _S_masked_load(const _SimdTuple<_Tp, _As...>& __old,
1326 const _MaskMember __bits, const _Up* __mem) noexcept
1327 {
1328 auto __merge = __old;
1329 __for_each(__merge, [&](auto __meta, auto& __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1330 if (__meta._S_submask(__bits).any())
1331#pragma GCC diagnostic push
1332 // __mem + __meta._S_offset could be UB ([expr.add]/4.3), but the mask
1333 // shifts the responsibility for avoiding it to the caller of the masked
1334 // load. Consequently, the compiler may assume this branch is unreachable
1335 // if the pointer arithmetic is UB.
1336#pragma GCC diagnostic ignored "-Warray-bounds"
1337 __native
1338 = __meta._S_masked_load(__native, __meta._S_make_mask(__bits),
1339 __mem + __meta._S_offset);
1340#pragma GCC diagnostic pop
1341 });
1342 return __merge;
1343 }
1344
1345 // _S_store {{{2
1346 template <typename _Tp, typename _Up>
1347 static inline void _S_store(const _SimdMember<_Tp>& __v, _Up* __mem,
1348 _TypeTag<_Tp>) noexcept
1349 {
1350 __for_each(__v, [&](auto __meta, auto __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1351 __meta._S_store(__native, &__mem[__meta._S_offset], _TypeTag<_Tp>());
1352 });
1353 }
1354
1355 // _S_masked_store {{{2
1356 template <typename _Tp, typename... _As, typename _Up>
1357 static inline void _S_masked_store(const _SimdTuple<_Tp, _As...>& __v,
1358 _Up* __mem,
1359 const _MaskMember __bits) noexcept
1360 {
1361 __for_each(__v, [&](auto __meta, auto __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1362 if (__meta._S_submask(__bits).any())
1363#pragma GCC diagnostic push
1364 // __mem + __meta._S_offset could be UB ([expr.add]/4.3), but the mask
1365 // shifts the responsibility for avoiding it to the caller of the masked
1366 // store. Consequently, the compiler may assume this branch is
1367 // unreachable if the pointer arithmetic is UB.
1368#pragma GCC diagnostic ignored "-Warray-bounds"
1369 __meta._S_masked_store(__native, __mem + __meta._S_offset,
1370 __meta._S_make_mask(__bits));
1371#pragma GCC diagnostic pop
1372 });
1373 }
1374
1375 // negation {{{2
1376 template <typename _Tp, typename... _As>
1377 static inline _MaskMember
1378 _S_negate(const _SimdTuple<_Tp, _As...>& __x) noexcept
1379 {
1380 _MaskMember __bits = 0;
1381 __for_each(
1382 __x, [&__bits](auto __meta, auto __native) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1383 __bits
1384 |= __meta._S_mask_to_shifted_ullong(__meta._S_negate(__native));
1385 });
1386 return __bits;
1387 }
1388
1389 // reductions {{{2
1390 template <typename _Tp, typename _BinaryOperation>
1391 static constexpr inline _Tp _S_reduce(const _Simd<_Tp>& __x,
1392 const _BinaryOperation& __binary_op)
1393 {
1394 using _Tup = _SimdMember<_Tp>;
1395 const _Tup& __tup = __data(__x);
1396 if constexpr (_Tup::_S_tuple_size == 1)
1397 return _Tup::_FirstAbi::_SimdImpl::_S_reduce(
1398 __tup.template _M_simd_at<0>(), __binary_op);
1399 else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 2
1400 && _Tup::_SecondType::_S_size() == 1)
1401 {
1402 return __binary_op(simd<_Tp, simd_abi::scalar>(
1403 reduce(__tup.template _M_simd_at<0>(),
1404 __binary_op)),
1405 __tup.template _M_simd_at<1>())[0];
1406 }
1407 else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 4
1408 && _Tup::_SecondType::_S_size() == 2)
1409 {
1410 return __binary_op(
1411 simd<_Tp, simd_abi::scalar>(
1412 reduce(__tup.template _M_simd_at<0>(), __binary_op)),
1413 simd<_Tp, simd_abi::scalar>(
1414 reduce(__tup.template _M_simd_at<1>(), __binary_op)))[0];
1415 }
1416 else
1417 {
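	  // General case: combine the tuple's chunks pairwise into a new
	  // (roughly half-size) fixed_size simd and recurse. When two
	  // neighbouring chunks differ in size, the narrower one is widened
	  // under a mask so that only its valid lanes enter __binary_op.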
1418 const auto& __x2 = __call_with_n_evaluations<
1419 __div_roundup(_Tup::_S_tuple_size, 2)>(
1420 [](auto __first_simd, auto... __remaining) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1421 if constexpr (sizeof...(__remaining) == 0)
1422 return __first_simd;
1423 else
1424 {
1425 using _Tup2
1426 = _SimdTuple<_Tp,
1427 typename decltype(__first_simd)::abi_type,
1428 typename decltype(__remaining)::abi_type...>;
1429 return fixed_size_simd<_Tp, _Tup2::_S_size()>(
1430 __private_init,
1431 __make_simd_tuple(__first_simd, __remaining...));
1432 }
1433 },
1434 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1435 auto __left = __tup.template _M_simd_at<2 * __i>();
1436 if constexpr (2 * __i + 1 == _Tup::_S_tuple_size)
1437 return __left;
1438 else
1439 {
1440 auto __right = __tup.template _M_simd_at<2 * __i + 1>();
1441 using _LT = decltype(__left);
1442 using _RT = decltype(__right);
1443 if constexpr (_LT::size() == _RT::size())
1444 return __binary_op(__left, __right);
1445 else
1446 {
1447 _GLIBCXX_SIMD_USE_CONSTEXPR_API
1448 typename _LT::mask_type __k(
1449 __private_init,
1450 [](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1451 return __j < _RT::size();
1452 });
1453 _LT __ext_right = __left;
1454 where(__k, __ext_right)
1455 = __proposed::resizing_simd_cast<_LT>(__right);
1456 where(__k, __left) = __binary_op(__left, __ext_right);
1457 return __left;
1458 }
1459 }
1460 });
1461 return reduce(__x2, __binary_op);
1462 }
1463 }
1464
1465 // _S_min, _S_max {{{2
1466 template <typename _Tp, typename... _As>
1467 static inline constexpr _SimdTuple<_Tp, _As...>
1468 _S_min(const _SimdTuple<_Tp, _As...>& __a,
1469 const _SimdTuple<_Tp, _As...>& __b)
1470 {
1471 return __a._M_apply_per_chunk(
1472 [](auto __impl, auto __aa, auto __bb) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1473 return __impl._S_min(__aa, __bb);
1474 },
1475 __b);
1476 }
1477
1478 template <typename _Tp, typename... _As>
1479 static inline constexpr _SimdTuple<_Tp, _As...>
1480 _S_max(const _SimdTuple<_Tp, _As...>& __a,
1481 const _SimdTuple<_Tp, _As...>& __b)
1482 {
1483 return __a._M_apply_per_chunk(
1484 [](auto __impl, auto __aa, auto __bb) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1485 return __impl._S_max(__aa, __bb);
1486 },
1487 __b);
1488 }
1489
1490 // _S_complement {{{2
1491 template <typename _Tp, typename... _As>
1492 static inline constexpr _SimdTuple<_Tp, _As...>
1493 _S_complement(const _SimdTuple<_Tp, _As...>& __x) noexcept
1494 {
1495 return __x._M_apply_per_chunk(
1496 [](auto __impl, auto __xx) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1497 return __impl._S_complement(__xx);
1498 });
1499 }
1500
1501 // _S_unary_minus {{{2
1502 template <typename _Tp, typename... _As>
1503 static inline constexpr _SimdTuple<_Tp, _As...>
1504 _S_unary_minus(const _SimdTuple<_Tp, _As...>& __x) noexcept
1505 {
1506 return __x._M_apply_per_chunk(
1507 [](auto __impl, auto __xx) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1508 return __impl._S_unary_minus(__xx);
1509 });
1510 }
1511
1512 // arithmetic operators {{{2
1513
1514#define _GLIBCXX_SIMD_FIXED_OP(name_, op_) \
1515 template <typename _Tp, typename... _As> \
1516 static inline constexpr _SimdTuple<_Tp, _As...> name_( \
1517 const _SimdTuple<_Tp, _As...>& __x, const _SimdTuple<_Tp, _As...>& __y) \
1518 { \
1519 return __x._M_apply_per_chunk( \
1520 [](auto __impl, auto __xx, auto __yy) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { \
1521 return __impl.name_(__xx, __yy); \
1522 }, \
1523 __y); \
1524 }
1525
1526 _GLIBCXX_SIMD_FIXED_OP(_S_plus, +)
1527 _GLIBCXX_SIMD_FIXED_OP(_S_minus, -)
1528 _GLIBCXX_SIMD_FIXED_OP(_S_multiplies, *)
1529 _GLIBCXX_SIMD_FIXED_OP(_S_divides, /)
1530 _GLIBCXX_SIMD_FIXED_OP(_S_modulus, %)
1531 _GLIBCXX_SIMD_FIXED_OP(_S_bit_and, &)
1532 _GLIBCXX_SIMD_FIXED_OP(_S_bit_or, |)
1533 _GLIBCXX_SIMD_FIXED_OP(_S_bit_xor, ^)
1534 _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_left, <<)
1535 _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_right, >>)
1536#undef _GLIBCXX_SIMD_FIXED_OP
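// For example, _GLIBCXX_SIMD_FIXED_OP(_S_plus, +) above expands to an _S_plus
// overload that applies each chunk's native _SimdImpl::_S_plus to the
// corresponding chunks of __x and __y and rebuilds the result tuple; the op_
// token itself serves only as documentation.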
1537
1538 template <typename _Tp, typename... _As>
1539 static inline constexpr _SimdTuple<_Tp, _As...>
1540 _S_bit_shift_left(const _SimdTuple<_Tp, _As...>& __x, int __y)
1541 {
1542 return __x._M_apply_per_chunk(
1543 [__y](auto __impl, auto __xx) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1544 return __impl._S_bit_shift_left(__xx, __y);
1545 });
1546 }
1547
1548 template <typename _Tp, typename... _As>
1549 static inline constexpr _SimdTuple<_Tp, _As...>
1550 _S_bit_shift_right(const _SimdTuple<_Tp, _As...>& __x, int __y)
1551 {
1552 return __x._M_apply_per_chunk(
1553 [__y](auto __impl, auto __xx) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1554 return __impl._S_bit_shift_right(__xx, __y);
1555 });
1556 }
1557
1558 // math {{{2
1559#define _GLIBCXX_SIMD_APPLY_ON_TUPLE(_RetTp, __name) \
1560 template <typename _Tp, typename... _As, typename... _More> \
1561 static inline __fixed_size_storage_t<_RetTp, _Np> \
1562 _S_##__name(const _SimdTuple<_Tp, _As...>& __x, \
1563 const _More&... __more) \
1564 { \
1565 if constexpr (sizeof...(_More) == 0) \
1566 { \
1567 if constexpr (is_same_v<_Tp, _RetTp>) \
1568 return __x._M_apply_per_chunk( \
1569 [](auto __impl, auto __xx) \
1570 constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
1571 { \
1572 using _V = typename decltype(__impl)::simd_type; \
1573 return __data(__name(_V(__private_init, __xx))); \
1574 }); \
1575 else \
1576 return __optimize_simd_tuple( \
1577 __x.template _M_apply_r<_RetTp>( \
1578 [](auto __impl, auto __xx) \
1579 _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
1580 { return __impl._S_##__name(__xx); })); \
1581 } \
1582 else if constexpr ( \
1583 is_same_v< \
1584 _Tp, \
1585 _RetTp> && (... && is_same_v<_SimdTuple<_Tp, _As...>, _More>) ) \
1586 return __x._M_apply_per_chunk( \
1587 [](auto __impl, auto __xx, auto... __pack) \
1588 constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
1589 { \
1590 using _V = typename decltype(__impl)::simd_type; \
1591 return __data(__name(_V(__private_init, __xx), \
1592 _V(__private_init, __pack)...)); \
1593 }, __more...); \
1594 else if constexpr (is_same_v<_Tp, _RetTp>) \
1595 return __x._M_apply_per_chunk( \
1596 [](auto __impl, auto __xx, auto... __pack) \
1597 constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
1598 { \
1599 using _V = typename decltype(__impl)::simd_type; \
1600 return __data(__name(_V(__private_init, __xx), \
1601 __autocvt_to_simd(__pack)...)); \
1602 }, __more...); \
1603 else \
1604 __assert_unreachable<_Tp>(); \
1605 }
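// For example, _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sin) defines _S_sin, which
// works chunk-wise: each chunk is wrapped in its native simd type, the
// corresponding sin overload is applied, and the per-chunk results are
// reassembled into __fixed_size_storage_t<_RetTp, _Np> (converting the element
// type where _RetTp differs from _Tp, e.g. ilogb -> int).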
1606
1607 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acos)
1608 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asin)
1609 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan)
1610 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan2)
1611 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cos)
1612 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sin)
1613 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tan)
1614 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acosh)
1615 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asinh)
1616 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atanh)
1617 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cosh)
1618 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sinh)
1619 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tanh)
1620 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp)
1621 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp2)
1622 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, expm1)
1623 _GLIBCXX_SIMD_APPLY_ON_TUPLE(int, ilogb)
1624 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log)
1625 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log10)
1626 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log1p)
1627 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log2)
1628 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, logb)
1629 // modf implemented in simd_math.h
1630 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp,
1631 scalbn) // double scalbn(double x, int exp);
1632 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, scalbln)
1633 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cbrt)
1634 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, abs)
1635 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fabs)
1636 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, pow)
1637 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sqrt)
1638 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erf)
1639 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erfc)
1640 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, lgamma)
1641 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tgamma)
1642 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, trunc)
1643 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ceil)
1644 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, floor)
1645 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nearbyint)
1646
1647 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, rint)
1648 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long, lrint)
1649 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long long, llrint)
1650
1651 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, round)
1652 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long, lround)
1653 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long long, llround)
1654
1655 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ldexp)
1656 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmod)
1657 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, remainder)
1658 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, copysign)
1659 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nextafter)
1660 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fdim)
1661 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmax)
1662 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmin)
1663 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fma)
1664 _GLIBCXX_SIMD_APPLY_ON_TUPLE(int, fpclassify)
1665#undef _GLIBCXX_SIMD_APPLY_ON_TUPLE
1666
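    // remquo is not covered by the macro above because its third argument is
    // an output parameter: the int quotient tuple is zipped chunk-wise with
    // __x and __y, and each chunk's int storage is passed by address to the
    // chunk implementation.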
1667 template <typename _Tp, typename... _Abis>
1668 static _SimdTuple<_Tp, _Abis...> _S_remquo(
1669 const _SimdTuple<_Tp, _Abis...>& __x,
1670 const _SimdTuple<_Tp, _Abis...>& __y,
1671 __fixed_size_storage_t<int, _SimdTuple<_Tp, _Abis...>::_S_size()>* __z)
1672 {
1673 return __x._M_apply_per_chunk(
1674 [](auto __impl, const auto __xx, const auto __yy, auto& __zz)
1675 _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
1676 { return __impl._S_remquo(__xx, __yy, &__zz); },
1677 __y, *__z);
1678 }
1679
1680 template <typename _Tp, typename... _As>
1681 static inline _SimdTuple<_Tp, _As...>
1682 _S_frexp(const _SimdTuple<_Tp, _As...>& __x,
1683 __fixed_size_storage_t<int, _Np>& __exp) noexcept
1684 {
1685 return __x._M_apply_per_chunk(
1686 [](auto __impl, const auto& __a, auto& __b) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1687 return __data(frexp(typename decltype(__impl)::simd_type(__private_init, __a),
1688 __autocvt_to_simd(__b)));
1689 }, __exp);
1690 }
1691
1692#define _GLIBCXX_SIMD_TEST_ON_TUPLE_(name_) \
1693 template <typename _Tp, typename... _As> \
1694 static inline _MaskMember \
1695 _S_##name_(const _SimdTuple<_Tp, _As...>& __x) noexcept \
1696 { \
1697 return _M_test([](auto __impl, \
1698 auto __xx) { return __impl._S_##name_(__xx); }, \
1699 __x); \
1700 }
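    // The macro above generates the floating-point classification predicates
    // (isinf, isnan, ...): _M_test applies the chunk implementation's
    // _S_<name> to every chunk and gathers the per-chunk masks into a single
    // _MaskMember bitmask.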
1701
1702 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isinf)
1703 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isfinite)
1704 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnan)
1705 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnormal)
1706 _GLIBCXX_SIMD_TEST_ON_TUPLE_(signbit)
1707#undef _GLIBCXX_SIMD_TEST_ON_TUPLE_
1708
1709 // _S_increment & _S_decrement{{{2
1710 template <typename... _Ts>
1711 _GLIBCXX_SIMD_INTRINSIC static constexpr void
1712 _S_increment(_SimdTuple<_Ts...>& __x)
1713 {
1714 __for_each(
1715 __x, [](auto __meta, auto& native) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1716 __meta._S_increment(native);
1717 });
1718 }
1719
1720 template <typename... _Ts>
1721 _GLIBCXX_SIMD_INTRINSIC static constexpr void
1722 _S_decrement(_SimdTuple<_Ts...>& __x)
1723 {
1724 __for_each(
1725 __x, [](auto __meta, auto& native) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1726 __meta._S_decrement(native);
1727 });
1728 }
1729
1730 // compares {{{2
1731#define _GLIBCXX_SIMD_CMP_OPERATIONS(__cmp) \
1732 template <typename _Tp, typename... _As> \
1733 _GLIBCXX_SIMD_INTRINSIC constexpr static _MaskMember \
1734 __cmp(const _SimdTuple<_Tp, _As...>& __x, \
1735 const _SimdTuple<_Tp, _As...>& __y) \
1736 { \
1737 return _M_test([](auto __impl, auto __xx, auto __yy) \
1738 constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
1739 { return __impl.__cmp(__xx, __yy); }, \
1740 __x, __y); \
1741 }
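    // As with the classification tests above, the comparison macro forwards
    // each pair of corresponding chunks to the chunk implementation and
    // merges the resulting per-chunk masks into one _MaskMember bitmask.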
1742
1743 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_equal_to)
1744 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_not_equal_to)
1745 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less)
1746 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less_equal)
1747 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isless)
1748 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessequal)
1749 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreater)
1750 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreaterequal)
1751 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessgreater)
1752 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isunordered)
1753#undef _GLIBCXX_SIMD_CMP_OPERATIONS
1754
1755 // smart_reference access {{{2
1756 template <typename _Tp, typename... _As, typename _Up>
1757 _GLIBCXX_SIMD_INTRINSIC static void _S_set(_SimdTuple<_Tp, _As...>& __v,
1758 int __i, _Up&& __x) noexcept
1759 { __v._M_set(__i, static_cast<_Up&&>(__x)); }
1760
1761 // _S_masked_assign {{{2
1762 template <typename _Tp, typename... _As>
1763 _GLIBCXX_SIMD_INTRINSIC static void
1764 _S_masked_assign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
1765 const __type_identity_t<_SimdTuple<_Tp, _As...>>& __rhs)
1766 {
1767 __for_each(__lhs, __rhs,
1768 [&](auto __meta, auto& __native_lhs, auto __native_rhs)
1769 constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
1770 {
1771 __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
1772 __native_rhs);
1773 });
1774 }
1775
1776 // Optimization for the case where the RHS is a scalar. No need to broadcast
1777 // the scalar to a simd first.
1778 template <typename _Tp, typename... _As>
1779 _GLIBCXX_SIMD_INTRINSIC static void
1780 _S_masked_assign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
1781 const __type_identity_t<_Tp> __rhs)
1782 {
1783 __for_each(
1784 __lhs, [&](auto __meta, auto& __native_lhs) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1785 __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
1786 __rhs);
1787 });
1788 }
1789
1790 // _S_masked_cassign {{{2
1791 template <typename _Op, typename _Tp, typename... _As>
1792 static inline void _S_masked_cassign(const _MaskMember __bits,
1793 _SimdTuple<_Tp, _As...>& __lhs,
1794 const _SimdTuple<_Tp, _As...>& __rhs,
1795 _Op __op)
1796 {
1797 __for_each(__lhs, __rhs,
1798 [&](auto __meta, auto& __native_lhs, auto __native_rhs)
1799 constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
1800 {
1801 __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
1802 __native_lhs, __native_rhs, __op);
1803 });
1804 }
1805
1806 // Optimization for the case where the RHS is a scalar. No need to broadcast
1807 // the scalar to a simd first.
1808 template <typename _Op, typename _Tp, typename... _As>
1809 static inline void _S_masked_cassign(const _MaskMember __bits,
1810 _SimdTuple<_Tp, _As...>& __lhs,
1811 const _Tp& __rhs, _Op __op)
1812 {
1813 __for_each(
1814 __lhs, [&](auto __meta, auto& __native_lhs) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1815 __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
1816 __native_lhs, __rhs, __op);
1817 });
1818 }
1819
1820 // _S_masked_unary {{{2
1821 template <template <typename> class _Op, typename _Tp, typename... _As>
1822 static inline _SimdTuple<_Tp, _As...>
1823 _S_masked_unary(const _MaskMember __bits, const _SimdTuple<_Tp, _As...>& __v)
1824 {
1825 return __v._M_apply_wrapped([&__bits](auto __meta,
1826 auto __native) constexpr {
1827 return __meta.template _S_masked_unary<_Op>(__meta._S_make_mask(
1828 __bits),
1829 __native);
1830 });
1831 }
1832
1833 // }}}2
1834 };
1835
1836// _MaskImplFixedSize {{{1
1837template <int _Np, typename>
1838 struct _MaskImplFixedSize
1839 {
1840 static_assert(
1841 sizeof(_ULLong) * __CHAR_BIT__ >= _Np,
1842 "The fixed_size implementation relies on one _ULLong being able to store "
1843 "all boolean elements."); // required in load & store
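    // Consequently this implementation supports at most
    // sizeof(_ULLong) * __CHAR_BIT__ mask elements; on a typical platform
    // with a 64-bit unsigned long long that bounds _Np at 64.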
1844
1845 // member types {{{
1846 using _Abi = simd_abi::fixed_size<_Np>;
1847
1848 using _MaskMember = _SanitizedBitMask<_Np>;
1849
1850 template <typename _Tp>
1851 using _FirstAbi = typename __fixed_size_storage_t<_Tp, _Np>::_FirstAbi;
1852
1853 template <typename _Tp>
1854 using _TypeTag = _Tp*;
1855
1856 // }}}
1857 // _S_broadcast {{{
1858 template <typename>
1859 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1860 _S_broadcast(bool __x)
1861 { return __x ? ~_MaskMember() : _MaskMember(); }
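    // Note: ~_MaskMember() above is a _SanitizedBitMask with all _Np bits
    // set, so broadcasting true marks every element.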
1862
1863 // }}}
1864 // _S_load {{{
1865 template <typename>
1866 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1867 _S_load(const bool* __mem)
1868 {
1869 using _Ip = __int_for_sizeof_t<bool>;
1870 // the following load uses element_aligned and relies on __mem already
1871 // carrying alignment information from when this load function was
1872 // called.
1873 const simd<_Ip, _Abi> __bools(reinterpret_cast<const __may_alias<_Ip>*>(
1874 __mem),
1875 element_aligned);
1876 return __data(__bools != 0);
1877 }
1878
1879 // }}}
1880 // _S_to_bits {{{
1881 template <bool _Sanitized>
1882 _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
1883 _S_to_bits(_BitMask<_Np, _Sanitized> __x)
1884 {
1885 if constexpr (_Sanitized)
1886 return __x;
1887 else
1888 return __x._M_sanitized();
1889 }
1890
1891 // }}}
1892 // _S_convert {{{
1893 template <typename _Tp, typename _Up, typename _UAbi>
1894 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1895 _S_convert(simd_mask<_Up, _UAbi> __x)
1896 {
1897 return _UAbi::_MaskImpl::_S_to_bits(__data(__x))
1898 .template _M_extract<0, _Np>();
1899 }
1900
1901 // }}}
1902 // _S_from_bitmask {{{2
1903 template <typename _Tp>
1904 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1905 _S_from_bitmask(_MaskMember __bits, _TypeTag<_Tp>) noexcept
1906 { return __bits; }
1907
1908 // _S_load {{{2
1909 static inline _MaskMember _S_load(const bool* __mem) noexcept
1910 {
1911 // TODO: _UChar is not necessarily the best type to use here. For smaller
1912      // _Np, _UShort, _UInt, _ULLong, float, and double can be more efficient.
1913 _ULLong __r = 0;
1914 using _Vs = __fixed_size_storage_t<_UChar, _Np>;
1915 __for_each(_Vs{}, [&](auto __meta, auto) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1916 __r |= __meta._S_mask_to_shifted_ullong(
1917 __meta._S_mask_impl._S_load(&__mem[__meta._S_offset],
1918 _SizeConstant<__meta._S_size()>()));
1919 });
1920 return __r;
1921 }
1922
1923 // _S_masked_load {{{2
1924 static inline _MaskMember _S_masked_load(_MaskMember __merge,
1925 _MaskMember __mask,
1926 const bool* __mem) noexcept
1927 {
1928 _BitOps::_S_bit_iteration(__mask.to_ullong(),
1929 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
1930 __merge.set(__i, __mem[__i]);
1931 });
1932 return __merge;
1933 }
1934
1935 // _S_store {{{2
1936 static inline void _S_store(const _MaskMember __bitmask,
1937 bool* __mem) noexcept
1938 {
1939 if constexpr (_Np == 1)
1940 __mem[0] = __bitmask[0];
1941 else
1942 _FirstAbi<_UChar>::_CommonImpl::_S_store_bool_array(__bitmask, __mem);
1943 }
1944
1945 // _S_masked_store {{{2
1946 static inline void _S_masked_store(const _MaskMember __v, bool* __mem,
1947 const _MaskMember __k) noexcept
1948 {
1949 _BitOps::_S_bit_iteration(
1950 __k, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { __mem[__i] = __v[__i]; });
1951 }
1952
1953 // logical and bitwise operators {{{2
1954 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1955 _S_logical_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
1956 { return __x & __y; }
1957
1958 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1959 _S_logical_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
1960 { return __x | __y; }
1961
1962 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1963 _S_bit_not(const _MaskMember& __x) noexcept
1964 { return ~__x; }
1965
1966 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1967 _S_bit_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
1968 { return __x & __y; }
1969
1970 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1971 _S_bit_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
1972 { return __x | __y; }
1973
1974 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1975 _S_bit_xor(const _MaskMember& __x, const _MaskMember& __y) noexcept
1976 { return __x ^ __y; }
1977
1978 // smart_reference access {{{2
1979 _GLIBCXX_SIMD_INTRINSIC static void _S_set(_MaskMember& __k, int __i,
1980 bool __x) noexcept
1981 { __k.set(__i, __x); }
1982
1983 // _S_masked_assign {{{2
1984 _GLIBCXX_SIMD_INTRINSIC static void
1985 _S_masked_assign(const _MaskMember __k, _MaskMember& __lhs,
1986 const _MaskMember __rhs)
1987 { __lhs = (__lhs & ~__k) | (__rhs & __k); }
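    // The line above is a plain bitwise blend: bits of __rhs are taken where
    // __k is set, bits of __lhs where it is clear. E.g. with __k = 0b0110,
    // __lhs = 0b1010 and __rhs = 0b0101 the result is 0b1100.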
1988
1989 // Optimization for the case where the RHS is a scalar.
1990 _GLIBCXX_SIMD_INTRINSIC static void _S_masked_assign(const _MaskMember __k,
1991 _MaskMember& __lhs,
1992 const bool __rhs)
1993 {
1994 if (__rhs)
1995 __lhs |= __k;
1996 else
1997 __lhs &= ~__k;
1998 }
1999
2000 // }}}2
2001 // _S_all_of {{{
2002 template <typename _Tp>
2003 _GLIBCXX_SIMD_INTRINSIC static bool _S_all_of(simd_mask<_Tp, _Abi> __k)
2004 { return __data(__k).all(); }
2005
2006 // }}}
2007 // _S_any_of {{{
2008 template <typename _Tp>
2009 _GLIBCXX_SIMD_INTRINSIC static bool _S_any_of(simd_mask<_Tp, _Abi> __k)
2010 { return __data(__k).any(); }
2011
2012 // }}}
2013 // _S_none_of {{{
2014 template <typename _Tp>
2015 _GLIBCXX_SIMD_INTRINSIC static bool _S_none_of(simd_mask<_Tp, _Abi> __k)
2016 { return __data(__k).none(); }
2017
2018 // }}}
2019 // _S_some_of {{{
2020 template <typename _Tp>
2021 _GLIBCXX_SIMD_INTRINSIC static bool
2022 _S_some_of([[maybe_unused]] simd_mask<_Tp, _Abi> __k)
2023 {
2024 if constexpr (_Np == 1)
2025 return false;
2026 else
2027 return __data(__k).any() && !__data(__k).all();
2028 }
2029
2030 // }}}
2031 // _S_popcount {{{
2032 template <typename _Tp>
2033 _GLIBCXX_SIMD_INTRINSIC static int _S_popcount(simd_mask<_Tp, _Abi> __k)
2034 { return __data(__k).count(); }
2035
2036 // }}}
2037 // _S_find_first_set {{{
2038 template <typename _Tp>
2039 _GLIBCXX_SIMD_INTRINSIC static int
2040 _S_find_first_set(simd_mask<_Tp, _Abi> __k)
2041 { return std::__countr_zero(__data(__k).to_ullong()); }
2042
2043 // }}}
2044 // _S_find_last_set {{{
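    // __bit_width(x) - 1 is the index of the most significant set bit, e.g.
    // for a mask with only bit 2 set (0b100), __bit_width returns 3 and the
    // result is element index 2. Callers are expected to pass a mask with at
    // least one set bit; an all-false mask would yield -1 here.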
2045 template <typename _Tp>
2046 _GLIBCXX_SIMD_INTRINSIC static int
2047 _S_find_last_set(simd_mask<_Tp, _Abi> __k)
2048 { return std::__bit_width(__data(__k).to_ullong()) - 1; }
2049
2050 // }}}
2051 };
2052// }}}1
2053
2054_GLIBCXX_SIMD_END_NAMESPACE
2055#endif // __cplusplus >= 201703L
2056#endif // _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
2057
2058// vim: foldmethod=marker sw=2 noet ts=8 sts=2 tw=80