This documentation is automatically generated by competitive-verifier/competitive-verifier
#include "cp-algo/util/simd.hpp"#ifndef CP_ALGO_UTIL_SIMD_HPP
#define CP_ALGO_UTIL_SIMD_HPP
#include <experimental/simd>
#include <cstdint>
#include <cstddef>
#include <cmath>
#include <memory>
#ifdef __x86_64__
#include <immintrin.h>
#endif
#if defined(__x86_64__) && !defined(CP_ALGO_DISABLE_AVX2)
#define CP_ALGO_SIMD_AVX2_TARGET _Pragma("GCC target(\"avx2\")")
#else
#define CP_ALGO_SIMD_AVX2_TARGET
#endif
#define CP_ALGO_SIMD_PRAGMA_PUSH \
_Pragma("GCC push_options") \
CP_ALGO_SIMD_AVX2_TARGET
CP_ALGO_SIMD_PRAGMA_PUSH
namespace cp_algo {
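// simd<T, len> is a GCC vector-extension type holding len elements of T.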
template<typename T, size_t len>
using simd [[gnu::vector_size(len * sizeof(T))]] = T;
using u64x8 = simd<uint64_t, 8>;
using u32x16 = simd<uint32_t, 16>;
using i64x4 = simd<int64_t, 4>;
using u64x4 = simd<uint64_t, 4>;
using u32x8 = simd<uint32_t, 8>;
using u16x16 = simd<uint16_t, 16>;
using i32x4 = simd<int32_t, 4>;
using u32x4 = simd<uint32_t, 4>;
using u16x8 = simd<uint16_t, 8>;
using u16x4 = simd<uint16_t, 4>;
using i16x4 = simd<int16_t, 4>;
using u8x32 = simd<uint8_t, 32>;
using u8x16 = simd<uint8_t, 16>;
using u8x8 = simd<uint8_t, 8>;
using u8x4 = simd<uint8_t, 4>;
using dx4 = simd<double, 4>;
inline dx4 abs(dx4 a) {
return dx4{
std::abs(a[0]),
std::abs(a[1]),
std::abs(a[2]),
std::abs(a[3])
};
}
// https://stackoverflow.com/a/77376595
// works for ints in (-2^51, 2^51)
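// Adding magic = 2^52 + 2^51 pins the exponent so that the rounded integer
// occupies the low mantissa bits; the vector casts below reinterpret bits
// rather than convert values.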
static constexpr dx4 magic = dx4() + (3ULL << 51);
inline i64x4 lround(dx4 x) {
return i64x4(x + magic) - i64x4(magic);
}
inline dx4 to_double(i64x4 x) {
return dx4(x + i64x4(magic)) - magic;
}
inline dx4 round(dx4 a) {
return dx4{
std::nearbyint(a[0]),
std::nearbyint(a[1]),
std::nearbyint(a[2]),
std::nearbyint(a[3])
};
}
inline u64x4 low32(u64x4 x) {
return x & uint32_t(-1);
}
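// Despite the name, swaps the two 32-bit halves of each 64-bit lane.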
inline auto swap_bytes(auto x) {
return decltype(x)(__builtin_shufflevector(u32x8(x), u32x8(x), 1, 0, 3, 2, 5, 4, 7, 6));
}
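// Montgomery reduction of each 64-bit lane: expects imod = -mod^{-1} (mod 2^32)
// and x < mod * 2^32; leaves x * 2^{-32} mod `mod`, in [0, 2 * mod), in the
// low 32 bits of each lane.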
inline u64x4 montgomery_reduce(u64x4 x, uint32_t mod, uint32_t imod) {
#ifdef __AVX2__
auto x_ninv = u64x4(_mm256_mul_epu32(__m256i(x), __m256i() + imod));
x += u64x4(_mm256_mul_epu32(__m256i(x_ninv), __m256i() + mod));
#else
auto x_ninv = u64x4(u32x8(low32(x)) * imod);
x += x_ninv * uint64_t(mod);
#endif
return swap_bytes(x);
}
inline u64x4 montgomery_mul(u64x4 x, u64x4 y, uint32_t mod, uint32_t imod) {
#ifdef __AVX2__
return montgomery_reduce(u64x4(_mm256_mul_epu32(__m256i(x), __m256i(y))), mod, imod);
#else
return montgomery_reduce(x * y, mod, imod);
#endif
}
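// 32-bit lane-wise Montgomery multiplication: even lanes are handled by the
// u64x4 overload directly, odd lanes by the same overload after swapping the
// 32-bit halves; the two results occupy disjoint halves and are OR-ed together.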
inline u32x8 montgomery_mul(u32x8 x, u32x8 y, uint32_t mod, uint32_t imod) {
return u32x8(montgomery_mul(u64x4(x), u64x4(y), mod, imod)) |
u32x8(swap_bytes(montgomery_mul(u64x4(swap_bytes(x)), u64x4(swap_bytes(y)), mod, imod)));
}
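// Cyclically rotates lanes right by one position: {a, b, c, d} -> {d, a, b, c}.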
inline dx4 rotate_right(dx4 x) {
static constexpr u64x4 shuffler = {3, 0, 1, 2};
return __builtin_shuffle(x, shuffler);
}
template<std::size_t Align = 32>
inline bool is_aligned(const auto* p) noexcept {
return (reinterpret_cast<std::uintptr_t>(p) % Align) == 0;
}
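// Reinterprets the referenced storage as Target; the caller must ensure
// alignof(Target) alignment (cf. is_aligned above).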
template<class Target>
inline Target& vector_cast(auto &&p) {
return *reinterpret_cast<Target*>(std::assume_aligned<alignof(Target)>(&p));
}
}
#pragma GCC pop_options
#endif // CP_ALGO_UTIL_SIMD_HPP
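
A minimal usage sketch of the Montgomery routines and the magic-constant rounding, assuming GCC on x86_64 and that the header above is saved as cp-algo/util/simd.hpp; the driver and its local names (inv, r2, a, b) are illustrative and not part of the library:

#include "cp-algo/util/simd.hpp"
#include <cstdio>

int main() {
    using namespace cp_algo;
    constexpr uint32_t mod = 998244353;
    // mod^{-1} (mod 2^32) by Newton iteration: each step doubles the number
    // of correct low bits, so four steps suffice for 32 bits.
    uint32_t inv = mod;
    for (int i = 0; i < 4; i++) {
        inv *= 2 - mod * inv;
    }
    uint32_t imod = -inv;                  // montgomery_reduce wants -mod^{-1} (mod 2^32)
    uint32_t r2 = uint64_t(-1) % mod + 1;  // 2^64 mod `mod`: converts into Montgomery form
    u64x4 a = {1, 2, 3, 4}, b = {10, 20, 30, 40};
    u64x4 am = montgomery_mul(a, u64x4() + r2, mod, imod);  // a * 2^32 (mod mod)
    u64x4 bm = montgomery_mul(b, u64x4() + r2, mod, imod);
    u64x4 pm = montgomery_mul(am, bm, mod, imod);           // product, Montgomery form
    u64x4 p = montgomery_mul(pm, u64x4() + 1, mod, imod);   // back to plain residues
    for (int i = 0; i < 4; i++) {
        printf("%u ", uint32_t(p[i] % mod));                // 10 40 90 160
    }
    // Magic-constant rounding: ties go to even under the default FP mode.
    i64x4 r = lround(dx4{1.4, -1.4, 2.5, -2.5});
    printf("\n%ld %ld %ld %ld\n", long(r[0]), long(r[1]), long(r[2]), long(r[3])); // 1 -1 2 -2
}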
#line 1 "cp-algo/util/simd.hpp"
#include <experimental/simd>
#include <cstdint>
#include <cstddef>
#include <memory>
#if defined(__x86_64__) && !defined(CP_ALGO_DISABLE_AVX2)
#define CP_ALGO_SIMD_AVX2_TARGET _Pragma("GCC target(\"avx2\")")
#else
#define CP_ALGO_SIMD_AVX2_TARGET
#endif
#define CP_ALGO_SIMD_PRAGMA_PUSH \
_Pragma("GCC push_options") \
CP_ALGO_SIMD_AVX2_TARGET
CP_ALGO_SIMD_PRAGMA_PUSH
namespace cp_algo {
template<typename T, size_t len>
using simd [[gnu::vector_size(len * sizeof(T))]] = T;
using u64x8 = simd<uint64_t, 8>;
using u32x16 = simd<uint32_t, 16>;
using i64x4 = simd<int64_t, 4>;
using u64x4 = simd<uint64_t, 4>;
using u32x8 = simd<uint32_t, 8>;
using u16x16 = simd<uint16_t, 16>;
using i32x4 = simd<int32_t, 4>;
using u32x4 = simd<uint32_t, 4>;
using u16x8 = simd<uint16_t, 8>;
using u16x4 = simd<uint16_t, 4>;
using i16x4 = simd<int16_t, 4>;
using u8x32 = simd<uint8_t, 32>;
using u8x16 = simd<uint8_t, 16>;
using u8x8 = simd<uint8_t, 8>;
using u8x4 = simd<uint8_t, 4>;
using dx4 = simd<double, 4>;
inline dx4 abs(dx4 a) {
return dx4{
std::abs(a[0]),
std::abs(a[1]),
std::abs(a[2]),
std::abs(a[3])
};
}
// https://stackoverflow.com/a/77376595
// works for ints in (-2^51, 2^51)
static constexpr dx4 magic = dx4() + (3ULL << 51);
inline i64x4 lround(dx4 x) {
return i64x4(x + magic) - i64x4(magic);
}
inline dx4 to_double(i64x4 x) {
return dx4(x + i64x4(magic)) - magic;
}
inline dx4 round(dx4 a) {
return dx4{
std::nearbyint(a[0]),
std::nearbyint(a[1]),
std::nearbyint(a[2]),
std::nearbyint(a[3])
};
}
inline u64x4 low32(u64x4 x) {
return x & uint32_t(-1);
}
inline auto swap_bytes(auto x) {
return decltype(x)(__builtin_shufflevector(u32x8(x), u32x8(x), 1, 0, 3, 2, 5, 4, 7, 6));
}
inline u64x4 montgomery_reduce(u64x4 x, uint32_t mod, uint32_t imod) {
#ifdef __AVX2__
auto x_ninv = u64x4(_mm256_mul_epu32(__m256i(x), __m256i() + imod));
x += u64x4(_mm256_mul_epu32(__m256i(x_ninv), __m256i() + mod));
#else
auto x_ninv = u64x4(u32x8(low32(x)) * imod);
x += x_ninv * uint64_t(mod);
#endif
return swap_bytes(x);
}
inline u64x4 montgomery_mul(u64x4 x, u64x4 y, uint32_t mod, uint32_t imod) {
#ifdef __AVX2__
return montgomery_reduce(u64x4(_mm256_mul_epu32(__m256i(x), __m256i(y))), mod, imod);
#else
return montgomery_reduce(x * y, mod, imod);
#endif
}
inline u32x8 montgomery_mul(u32x8 x, u32x8 y, uint32_t mod, uint32_t imod) {
return u32x8(montgomery_mul(u64x4(x), u64x4(y), mod, imod)) |
u32x8(swap_bytes(montgomery_mul(u64x4(swap_bytes(x)), u64x4(swap_bytes(y)), mod, imod)));
}
inline dx4 rotate_right(dx4 x) {
static constexpr u64x4 shuffler = {3, 0, 1, 2};
return __builtin_shuffle(x, shuffler);
}
template<std::size_t Align = 32>
inline bool is_aligned(const auto* p) noexcept {
return (reinterpret_cast<std::uintptr_t>(p) % Align) == 0;
}
template<class Target>
inline Target& vector_cast(auto &&p) {
return *reinterpret_cast<Target*>(std::assume_aligned<alignof(Target)>(&p));
}
}
#pragma GCC pop_options
#ifndef CP_ALGO_UTIL_SIMD_HPP
#define CP_ALGO_UTIL_SIMD_HPP
#include <experimental/simd>
#include <cstdint>
#include <cstddef>
#include <memory>
#if defined(__x86_64__) && !defined(CP_ALGO_DISABLE_AVX2)
#define CP_ALGO_SIMD_AVX2_TARGET _Pragma("GCC target(\"avx2\")")
#else
#define CP_ALGO_SIMD_AVX2_TARGET
#endif
#define CP_ALGO_SIMD_PRAGMA_PUSH _Pragma("GCC push_options") CP_ALGO_SIMD_AVX2_TARGET
CP_ALGO_SIMD_PRAGMA_PUSH
namespace cp_algo{template<typename T,size_t len>using simd[[gnu::vector_size(len*sizeof(T))]]=T;using u64x8=simd<uint64_t,8>;using u32x16=simd<uint32_t,16>;using i64x4=simd<int64_t,4>;using u64x4=simd<uint64_t,4>;using u32x8=simd<uint32_t,8>;using u16x16=simd<uint16_t,16>;using i32x4=simd<int32_t,4>;using u32x4=simd<uint32_t,4>;using u16x8=simd<uint16_t,8>;using u16x4=simd<uint16_t,4>;using i16x4=simd<int16_t,4>;using u8x32=simd<uint8_t,32>;using u8x16=simd<uint8_t,16>;using u8x8=simd<uint8_t,8>;using u8x4=simd<uint8_t,4>;using dx4=simd<double,4>;inline dx4 abs(dx4 a){return dx4{std::abs(a[0]),std::abs(a[1]),std::abs(a[2]),std::abs(a[3])};}static constexpr dx4 magic=dx4()+(3ULL<<51);inline i64x4 lround(dx4 x){return i64x4(x+magic)-i64x4(magic);}inline dx4 to_double(i64x4 x){return dx4(x+i64x4(magic))-magic;}inline dx4 round(dx4 a){return dx4{std::nearbyint(a[0]),std::nearbyint(a[1]),std::nearbyint(a[2]),std::nearbyint(a[3])};}inline u64x4 low32(u64x4 x){return x&uint32_t(-1);}inline auto swap_bytes(auto x){return decltype(x)(__builtin_shufflevector(u32x8(x),u32x8(x),1,0,3,2,5,4,7,6));}inline u64x4 montgomery_reduce(u64x4 x,uint32_t mod,uint32_t imod){
#ifdef __AVX2__
auto x_ninv=u64x4(_mm256_mul_epu32(__m256i(x),__m256i()+imod));x+=u64x4(_mm256_mul_epu32(__m256i(x_ninv),__m256i()+mod));
#else
auto x_ninv=u64x4(u32x8(low32(x))*imod);x+=x_ninv*uint64_t(mod);
#endif
return swap_bytes(x);}inline u64x4 montgomery_mul(u64x4 x,u64x4 y,uint32_t mod,uint32_t imod){
#ifdef __AVX2__
return montgomery_reduce(u64x4(_mm256_mul_epu32(__m256i(x),__m256i(y))),mod,imod);
#else
return montgomery_reduce(x*y,mod,imod);
#endif
}inline u32x8 montgomery_mul(u32x8 x,u32x8 y,uint32_t mod,uint32_t imod){return u32x8(montgomery_mul(u64x4(x),u64x4(y),mod,imod))|u32x8(swap_bytes(montgomery_mul(u64x4(swap_bytes(x)),u64x4(swap_bytes(y)),mod,imod)));}inline dx4 rotate_right(dx4 x){static constexpr u64x4 shuffler={3,0,1,2};return __builtin_shuffle(x,shuffler);}template<std::size_t Align=32>inline bool is_aligned(const auto*p)noexcept{return(reinterpret_cast<std::uintptr_t>(p)%Align)==0;}template<class Target>inline Target&vector_cast(auto&&p){return*reinterpret_cast<Target*>(std::assume_aligned<alignof(Target)>(&p));}}
#pragma GCC pop_options
#endif
#line 1 "cp-algo/util/simd.hpp"
#include <experimental/simd>
#include <cstdint>
#include <cstddef>
#include <memory>
#if defined(__x86_64__) && !defined(CP_ALGO_DISABLE_AVX2)
#define CP_ALGO_SIMD_AVX2_TARGET _Pragma("GCC target(\"avx2\")")
#else
#define CP_ALGO_SIMD_AVX2_TARGET
#endif
#define CP_ALGO_SIMD_PRAGMA_PUSH _Pragma("GCC push_options") CP_ALGO_SIMD_AVX2_TARGET
CP_ALGO_SIMD_PRAGMA_PUSH
namespace cp_algo{template<typename T,size_t len>using simd[[gnu::vector_size(len*sizeof(T))]]=T;using u64x8=simd<uint64_t,8>;using u32x16=simd<uint32_t,16>;using i64x4=simd<int64_t,4>;using u64x4=simd<uint64_t,4>;using u32x8=simd<uint32_t,8>;using u16x16=simd<uint16_t,16>;using i32x4=simd<int32_t,4>;using u32x4=simd<uint32_t,4>;using u16x8=simd<uint16_t,8>;using u16x4=simd<uint16_t,4>;using i16x4=simd<int16_t,4>;using u8x32=simd<uint8_t,32>;using u8x16=simd<uint8_t,16>;using u8x8=simd<uint8_t,8>;using u8x4=simd<uint8_t,4>;using dx4=simd<double,4>;inline dx4 abs(dx4 a){return dx4{std::abs(a[0]),std::abs(a[1]),std::abs(a[2]),std::abs(a[3])};}static constexpr dx4 magic=dx4()+(3ULL<<51);inline i64x4 lround(dx4 x){return i64x4(x+magic)-i64x4(magic);}inline dx4 to_double(i64x4 x){return dx4(x+i64x4(magic))-magic;}inline dx4 round(dx4 a){return dx4{std::nearbyint(a[0]),std::nearbyint(a[1]),std::nearbyint(a[2]),std::nearbyint(a[3])};}inline u64x4 low32(u64x4 x){return x&uint32_t(-1);}inline auto swap_bytes(auto x){return decltype(x)(__builtin_shufflevector(u32x8(x),u32x8(x),1,0,3,2,5,4,7,6));}inline u64x4 montgomery_reduce(u64x4 x,uint32_t mod,uint32_t imod){
#ifdef __AVX2__
auto x_ninv=u64x4(_mm256_mul_epu32(__m256i(x),__m256i()+imod));x+=u64x4(_mm256_mul_epu32(__m256i(x_ninv),__m256i()+mod));
#else
auto x_ninv=u64x4(u32x8(low32(x))*imod);x+=x_ninv*uint64_t(mod);
#endif
return swap_bytes(x);}inline u64x4 montgomery_mul(u64x4 x,u64x4 y,uint32_t mod,uint32_t imod){
#ifdef __AVX2__
return montgomery_reduce(u64x4(_mm256_mul_epu32(__m256i(x),__m256i(y))),mod,imod);
#else
return montgomery_reduce(x*y,mod,imod);
#endif
}inline u32x8 montgomery_mul(u32x8 x,u32x8 y,uint32_t mod,uint32_t imod){return u32x8(montgomery_mul(u64x4(x),u64x4(y),mod,imod))|u32x8(swap_bytes(montgomery_mul(u64x4(swap_bytes(x)),u64x4(swap_bytes(y)),mod,imod)));}inline dx4 rotate_right(dx4 x){static constexpr u64x4 shuffler={3,0,1,2};return __builtin_shuffle(x,shuffler);}template<std::size_t Align=32>inline bool is_aligned(const auto*p)noexcept{return(reinterpret_cast<std::uintptr_t>(p)%Align)==0;}template<class Target>inline Target&vector_cast(auto&&p){return*reinterpret_cast<Target*>(std::assume_aligned<alignof(Target)>(&p));}}
#pragma GCC pop_options