CP-Algorithms Library

This documentation is automatically generated by competitive-verifier/competitive-verifier

View the Project on GitHub cp-algorithms/cp-algorithms-aux

:heavy_check_mark: cp-algo/math/karatsuba.hpp

Depends on

cp-algo/number_theory/nimber.hpp
cp-algo/util/big_alloc.hpp
cp-algo/util/bit.hpp

Verified with

Code

#ifndef CP_ALGO_MATH_KARATSUBA_HPP
#define CP_ALGO_MATH_KARATSUBA_HPP
#include "../number_theory/nimber.hpp"
#include "../util/big_alloc.hpp"
#include "../util/bit.hpp"
#include <vector>
#include <bit>
#include <cstdint>
#include <cstring>
#include <span>

namespace cp_algo::math {
    constexpr size_t NN = 8;

    // Optimized base case for F2_64: uses 256-bit VPCLMULQDQ
    // Computes 4 products per iteration
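    // Accumulation scheme: pr0[k] collects the 128-bit products a[x] * b[y] with
    // x + y == k and y even, pr1[k] collects those with x + y == k + 1 and y odd,
    // so the final loop folds pr0[k] ^ pr1[k - 1] and reduces each sum once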
    template<size_t N>
    [[gnu::target("avx2,vpclmulqdq")]]
    void base_conv_f2_64(uint64_t const* a, uint64_t const* b, uint64_t* c) {
        alignas(32) __m128i pr0[2 * N] = {};
        alignas(32) __m128i pr1[2 * N] = {};
        
        for (size_t i = 0; i + 1 < N; i += 2) {
            auto va = (__m256i)u64x4{a[i], 0, a[i + 1], 0};
            for (size_t j = 0; j + 1 < N; j += 2) {
                auto vb = (__m256i)u64x4{b[j], b[j + 1], b[j], b[j + 1]};
                (__m256i&)pr0[i + j] ^= _mm256_clmulepi64_epi128(va, vb, 0);
                (__m256i&)pr1[i + j] ^= _mm256_clmulepi64_epi128(va, vb, 16);
            }
        }
        if constexpr (N % 2) {
            static_assert(N == 1);
            pr0[0] = nimber::clmul(a[0], b[0]);
        }
        c[0] = nimber::reduce_mod(pr0[0]);
        for (size_t i = 1; i < 2 * N - 1; i++) {
            c[i] ^= nimber::reduce_mod(pr0[i] ^ pr1[i - 1]);
        }
    }

    template<auto N, auto Add, auto Mul>
    void base_conv(auto &&_a, auto &&_b, auto &&_c) {
        auto a = std::assume_aligned<32>(&_a[0]);
        auto b = std::assume_aligned<32>(&_b[0]);
        auto c = std::assume_aligned<32>(&_c[0]);
        for (size_t i = 0; i < N; i++) {
            for (size_t j = 0; j < N; j++) {
                c[i + j] = Add(c[i + j], Mul(a[i], b[j]));
            }
        }
    }

    // Generic Karatsuba multiplication algorithm for polynomials
    // Template parameters:
    //   N - Size of input arrays (must be power of 2)
    //   Add, Sub, Mul - Operations for addition, subtraction, and coefficient multiplication
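    // Recursion (schematically): with a = a0 + a1 x^h, b = b0 + b1 x^h and h = N/2,
    //   a * b = a0*b0 + ((a0 + a1)*(b0 + b1) - a0*b0 - a1*b1) x^h + a1*b1 x^{2h},
    // so one multiplication of size N costs three of size h. The low and high products
    // are written straight into c, and the loop at the end folds the middle term in place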
    template<auto N, auto Add, auto Sub, auto Mul>
    void _karatsuba(auto &&_a, auto &&_b, auto &&_c) {
        auto a = std::assume_aligned<32>(&_a[0]);
        auto b = std::assume_aligned<32>(&_b[0]);
        auto c = std::assume_aligned<32>(&_c[0]);
        [[gnu::assume(N <= 1<<20)]];
        if constexpr (N <= NN) {
            if constexpr (Mul == nimber::f2_64_product) {
                base_conv_f2_64<N>(a, b, c);
            } else {
                base_conv<N, Add, Mul>(_a, _b, _c);
            }
        } else {
            constexpr auto h = N / 2;
            auto a0 = a, a1 = a + h, b0 = b, b1 = b + h;
            _karatsuba<h, Add, Sub, Mul>(a0, b0, c);
            _karatsuba<h, Add, Sub, Mul>(a1, b1, c + 2 * h);
            using base = std::decay_t<decltype(a[0])>;
            static big_vector<base> buf(4 * h);
            auto f = std::assume_aligned<32>(buf.data());
            auto sum_a = std::assume_aligned<32>(buf.data() + 2 * h);
            auto sum_b = std::assume_aligned<32>(buf.data() + 3 * h);
            for (size_t i = 0; i < h; i++) {
                sum_a[i] = Add(a0[i], a1[i]);
                sum_b[i] = Add(b0[i], b1[i]);
            }
            memset(f, 0, sizeof(base) * 2 * h);
            _karatsuba<h, Add, Sub, Mul>(sum_a, sum_b, f);
            auto c0 = std::assume_aligned<32>(c);
            auto c1 = c0 + h, c2 = c0 + 2 * h;
            for(size_t i = 0; i < h; i++) {
                auto &A = c0[i], &B = c1[i], &C = c2[i], &D = c2[i + h];
                auto BC = Sub(B, C);
                B = Sub(Add(BC, f[i]), A);
                C = Sub(f[i + h], Add(D, BC));
            }
        }
    }

    // Runtime wrapper: dispatches to a compile-time size via with_bit_ceil
    // Pads both inputs up to the next power of two and trims the result to n + m - 1
    template<typename Cont, auto Add, auto Sub, auto Mul>
    Cont karatsuba(auto &a, auto &b) {
        auto n = std::size(a);
        auto m = std::size(b);
        auto N = std::bit_ceil(std::max(n, m));
        a.resize(N);
        b.resize(N);
        Cont c(2 * N - 1);
        with_bit_ceil(N, [&]<auto NN>() {
            _karatsuba<NN, Add, Sub, Mul>(a, b, c);
        });
        c.resize(n + m - 1);
        return c;
    }

    // Specialization: Convolution over GF(2^64) using Karatsuba
    // Uses XOR for addition/subtraction and f2_64_product for coefficient multiplication
    template<typename Cont = big_vector<uint64_t>>
    Cont convolution_F2_64(auto &a, auto &b) {
        return karatsuba<Cont, std::bit_xor<>{}, std::bit_xor<>{}, nimber::f2_64_product>(a, b);
    }
}

#endif // CP_ALGO_MATH_KARATSUBA_HPP
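
A minimal usage sketch (not part of the header): it assumes a GCC-compatible compiler with PCLMUL enabled (e.g. -march=native) and, at run time, a CPU supporting AVX2 and VPCLMULQDQ for the fast base case. Coefficients are interpreted in the polynomial basis of GF(2^64); convert to and from nimbers with nimber::poly_to_nim / nimber::nim_to_poly if needed.

#include "cp-algo/math/karatsuba.hpp"
#include <cstdio>

int main() {
    // karatsuba() pads its inputs up to the next power of two, so pass mutable vectors
    cp_algo::big_vector<uint64_t> a = {1, 2, 3};
    cp_algo::big_vector<uint64_t> b = {4, 5};
    auto c = cp_algo::math::convolution_F2_64(a, b); // c.size() == 3 + 2 - 1 == 4
    for (auto x : c) {
        std::printf("%llu ", (unsigned long long)x);
    }
    std::printf("\n");
    // A single product in the nimber basis:
    std::printf("%llu\n", (unsigned long long)cp_algo::math::nimber::nim_product(123, 456));
}
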
#line 1 "cp-algo/math/karatsuba.hpp"


#line 1 "cp-algo/number_theory/nimber.hpp"


#include <array>
#include <bit>
#include <cstdint>
#include <immintrin.h>
// Ensure PCLMULQDQ is available at compile time
#if defined(__PCLMUL__)
static constexpr bool CP_ALGO_HAS_PCLMUL = true;
#else
static constexpr bool CP_ALGO_HAS_PCLMUL = false;
#endif
static_assert(CP_ALGO_HAS_PCLMUL,
    "PCLMULQDQ intrinsics not available. Enable it with '-mpclmul' or add '#pragma GCC target(\"pclmul\")' or compile with '-march=native' on supported CPUs.");

namespace cp_algo::math::nimber {
    inline constexpr std::array<uint64_t, 64> BASIS_COL = {
        0x0000000000000001ull, 0x5211145c804b6109ull, 0x7c8bc2cad259879full, 0x565854b4c60c1e0bull,
        0x4068acf7104c20c3ull, 0x662d2bd0f2739155ull, 0x7a90c83701fa8323ull, 0x21cfa750247e8755ull,
        0x67d1044e545abf47ull, 0x4d9d3b5a8568f839ull, 0x567a9d7331b6b3c6ull, 0x1ca54bfdd6d1ae59ull,
        0x454fa483275db25cull, 0x6766df6fec4e9d44ull, 0x35cb621cec1fe7f9ull, 0x4c606d3e52faf263ull,
        0x57640dc825a57954ull, 0x7aca87838b7f6315ull, 0x6d53c884ebf2b0edull, 0x3721d998bb50164bull,
        0x7aa7c62fd6cd53abull, 0x47cbb2c51f7c040full, 0x132063b7f5e42489ull, 0x0c1b36c8b2993f8aull,
        0x60119ecff680497aull, 0x5175da444cc11791ull, 0x5792ff4554765b09ull, 0x0c9fdb8a01334e82ull,
        0x2be0a763a68a4725ull, 0x3c2dc8260ad051f6ull, 0x6c4c9fed8816bb9cull, 0x630062753ffaf766ull,
        0x7b37d31b5d519225ull, 0x2364f7f79705691cull, 0x453eb8a83e2fec71ull, 0x7c0121b37e828666ull,
        0x59190d3250e66011ull, 0x103207f9dda18caeull, 0x28233dce01c69b76ull, 0x4fa519899227a5e7ull,
        0x4567ba46ee7bc6cdull, 0x0a284773d021afd5ull, 0x63894079bbe3a824ull, 0x11013c7fdfaaa5c2ull,
        0x1aa984f18574f3b0ull, 0x0cbaba126fd0c4dbull, 0x0b8797719e6dc725ull, 0x4a2845680aefaa72ull,
        0x536d2535f6934e15ull, 0x01db7a57effcd689ull, 0x7e1ed0ad01e2a5adull, 0x0aedc9b3cee826f6ull,
        0x7ba716eccf9f68e1ull, 0x5d5e23bc0f3dc38full, 0x0b5f2a3b88674d83ull, 0x2de9bafc2f00f8d4ull,
        0x3b56712ad419c7e0ull, 0x3ab4be8c30c19253ull, 0x2708522ffaa654b0ull, 0x2b8bca57bf643598ull,
        0x588825d1a5fa8e1cull, 0x86adf8bf4d45962full, 0x51b4c15d8719dd73ull, 0xe4a2b3b59783d0aaull
    };

    inline constexpr std::array<uint64_t, 64> INV_COL = {
        0x0000000000000001ull, 0x19c9369f278adc02ull, 0xa181e7d66f5ff795ull, 0x5db84357ce785d09ull,
        0xa0bae2f9d2430cc8ull, 0xb7ea5a9705b771c0ull, 0xba4f3cd82801769dull, 0x4886cde01b8241d0ull,
        0x0a6f43f2aaf612edull, 0xebd0142f98030a32ull, 0xa81f89cda43f3792ull, 0xe99aec6b66ccb814ull,
        0xa69d1ff025fc2f82ull, 0x48a81132d25db068ull, 0x4a900f9dcaa9644full, 0xe5ce4ea88259972aull,
        0xf7094c336029f04cull, 0xe191dde287bc9c6bull, 0xaacaff12bff239b8ull, 0x49bc5212be1bc1caull,
        0xfe57defb454446cfull, 0xa1dffcf944bdf6a7ull, 0xb9f1bdb5cee941eeull, 0x12e5e889275c22deull,
        0x5bcb6b117b77eeedull, 0x03eb1ab59d05ae4bull, 0x02a25d7076ddd386ull, 0x53164a606c612245ull,
        0xebb33f5822f66059ull, 0xe9be765f5747b93eull, 0x552a78df373a354full, 0xbcf5ac65f31fb8bfull,
        0xe411e728becdc77bull, 0xf35c26d7b57cdca6ull, 0x4499da83de4ca5f7ull, 0x40ab25bdca4ae226ull,
        0xee004b6f1dff7218ull, 0x0d122da9821c5b41ull, 0x51fbfcb058120efeull, 0xa148b1fa84905b22ull,
        0xbb8ed3e647604d8dull, 0xe2d93fef2472776full, 0x4c17a2541a10e6b5ull, 0x1d879e08903708e7ull,
        0x0fbe7d0d1934da90ull, 0x5bf977d9c6f61d30ull, 0x06832fc918260412ull, 0x0fe22e843ebf73e3ull,
        0x4d7ef4e4fa28d60dull, 0x402250d979afbed5ull, 0x067902b8c8ca2d4full, 0xf38d113fe1d6bb16ull,
        0x414f0248b02b5b7dull, 0xf041922915824ce9ull, 0x11a72fb5e30c93d9ull, 0x12e54f4d63102aeeull,
        0xbc46ac14b3141c6cull, 0x1f172b3c16c645bbull, 0x584b492ed4e8fa6cull, 0x00a852e9a32cc133ull,
        0xa180861bce00a45eull, 0xa194b6bcb4645fb9ull, 0x4509002ad808a4fbull, 0xc5172a0055602f69ull
    };

    template <const auto& COLS>
    consteval auto make_byte_tables() {
        std::array<std::array<uint64_t, 1 << 8>, 8> T{};
        for (int pos = 0; pos < 8; pos++) {
            for (int col = 0; col < 8; col++) {
                for (int mask = 0; mask < (1 << col); mask++) {
                    T[pos][mask | (1 << col)] = T[pos][mask] ^ COLS[pos * 8 + col];
                }
            }
        }
        return T;
    }

    inline constexpr auto INV_BYTE = make_byte_tables<INV_COL>();
    inline constexpr auto BASIS_BYTE = make_byte_tables<BASIS_COL>();

    [[gnu::always_inline]]
    inline uint64_t nim_to_poly(uint64_t x) {
        auto xb = std::bit_cast<std::array<uint8_t, 8>>(x);
        return INV_BYTE[0][xb[0]] ^ INV_BYTE[1][xb[1]]
             ^ INV_BYTE[2][xb[2]] ^ INV_BYTE[3][xb[3]]
             ^ INV_BYTE[4][xb[4]] ^ INV_BYTE[5][xb[5]]
             ^ INV_BYTE[6][xb[6]] ^ INV_BYTE[7][xb[7]];
    }

    [[gnu::always_inline]]
    inline uint64_t poly_to_nim(uint64_t c) {
        auto cb = std::bit_cast<std::array<uint8_t, 8>>(c);
        return BASIS_BYTE[0][cb[0]] ^ BASIS_BYTE[1][cb[1]]
             ^ BASIS_BYTE[2][cb[2]] ^ BASIS_BYTE[3][cb[3]]
             ^ BASIS_BYTE[4][cb[4]] ^ BASIS_BYTE[5][cb[5]]
             ^ BASIS_BYTE[6][cb[6]] ^ BASIS_BYTE[7][cb[7]];
    }

    // Carryless multiply over GF(2) using PCLMULQDQ
    [[gnu::always_inline]]
    inline __m128i clmul(int64_t a, int64_t b) {
        return _mm_clmulepi64_si128(__m128i{a, 0}, __m128i{b, 0}, 0);
    }

    // Reduction table for high bits overflow
    inline constexpr std::array<uint64_t, 16> RED_OVER = [] {
        std::array<uint64_t, 16> red{};
        for (int q = 0; q < 16; ++q) {
            uint64_t o = q ^ (q >> 1) ^ (q >> 3);
            red[q] = o ^ (o << 1) ^ (o << 3) ^ (o << 4);
        }
        return red;
    }();
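
    // Reduction sketch: the 128-bit carry-less product is v = lo + hi * x^64, and
    // x^64 == x^4 + x^3 + x + 1 (mod P), so hi folds into lo as hi ^ hi<<1 ^ hi<<3 ^ hi<<4.
    // That fold itself spills past bit 63 by at most 4 bits, determined by the top
    // 4 bits of hi; RED_OVER[hi >> 60] is that secondary overflow already reduced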

    // Reduce modulo x^64 + x^4 + x^3 + x + 1
    [[gnu::always_inline]]
    inline uint64_t reduce_mod(__m128i v) {
        uint64_t h = v[1];
        return v[0] ^ h ^ (h << 1) ^ (h << 3) ^ (h << 4) ^ RED_OVER[h >> 60];
    }

    [[gnu::always_inline]]
    inline uint64_t f2_64_product(uint64_t a, uint64_t b) {
        return reduce_mod(clmul(a, b));
    }

    // Public nimber product via isomorphism (no recursion, no Gauss at runtime)
    [[gnu::always_inline]]
    inline uint64_t nim_product(uint64_t a, uint64_t b) {
        return poly_to_nim(f2_64_product(
            nim_to_poly(a),
            nim_to_poly(b)
        ));
    }
}


#line 1 "cp-algo/util/big_alloc.hpp"



#include <set>
#include <map>
#include <deque>
#include <stack>
#include <queue>
#include <vector>
#include <string>
#include <cstddef>
#include <iostream>
#include <generator>
#include <forward_list>

// Single macro to detect POSIX platforms (Linux, Unix, macOS)
#if defined(__linux__) || defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
#  define CP_ALGO_USE_MMAP 1
#  include <sys/mman.h>
#else
#  define CP_ALGO_USE_MMAP 0
#endif

namespace cp_algo {
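    // Allocator with (at least) Align-byte alignment. On POSIX targets, allocations of
    // one MiB or more are served directly by mmap and advised to use huge pages and to be
    // pre-populated; smaller ones fall back to aligned operator new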
    template <typename T, size_t Align = 32>
    class big_alloc {
        static_assert(Align >= alignof(void*), "Align must be at least the alignment of a pointer");
        static_assert(std::popcount(Align) == 1, "Align must be a power of two");
    public:
        using value_type = T;
        template <class U> struct rebind { using other = big_alloc<U, Align>; };
        constexpr bool operator==(const big_alloc&) const = default;
        constexpr bool operator!=(const big_alloc&) const = default;

        big_alloc() noexcept = default;
        template <typename U, std::size_t A>
        big_alloc(const big_alloc<U, A>&) noexcept {}

        [[nodiscard]] T* allocate(std::size_t n) {
            std::size_t padded = round_up(n * sizeof(T));
            std::size_t align = std::max<std::size_t>(alignof(T),  Align);
#if CP_ALGO_USE_MMAP
            if (padded >= MEGABYTE) {
                void* raw = mmap(nullptr, padded,
                                PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                madvise(raw, padded, MADV_HUGEPAGE);
                madvise(raw, padded, MADV_POPULATE_WRITE);
                return static_cast<T*>(raw);
            }
#endif
            return static_cast<T*>(::operator new(padded, std::align_val_t(align)));
        }

        void deallocate(T* p, std::size_t n) noexcept {
            if (!p) return;
            std::size_t padded = round_up(n * sizeof(T));
            std::size_t align  = std::max<std::size_t>(alignof(T),  Align);
#if CP_ALGO_USE_MMAP
            if (padded >= MEGABYTE) { munmap(p, padded); return; }
#endif
            ::operator delete(p, padded, std::align_val_t(align));
        }

    private:
        static constexpr std::size_t MEGABYTE = 1 << 20;
        static constexpr std::size_t round_up(std::size_t x) noexcept {
            return (x + Align - 1) / Align * Align;
        }
    };

    template<typename T> using big_vector = std::vector<T, big_alloc<T>>;
    template<typename T> using big_basic_string = std::basic_string<T, std::char_traits<T>, big_alloc<T>>;
    template<typename T> using big_deque = std::deque<T, big_alloc<T>>;
    template<typename T> using big_stack = std::stack<T, big_deque<T>>;
    template<typename T> using big_queue = std::queue<T, big_deque<T>>;
    template<typename T> using big_priority_queue = std::priority_queue<T, big_vector<T>>;
    template<typename T> using big_forward_list = std::forward_list<T, big_alloc<T>>;
    using big_string = big_basic_string<char>;

    template<typename Key, typename Value, typename Compare = std::less<Key>>
    using big_map = std::map<Key, Value, Compare, big_alloc<std::pair<const Key, Value>>>;
    template<typename T, typename Compare = std::less<T>>
    using big_multiset = std::multiset<T, Compare, big_alloc<T>>;
    template<typename T, typename Compare = std::less<T>>
    using big_set = std::set<T, Compare, big_alloc<T>>;
    template<typename Ref, typename V = void>
    using big_generator = std::generator<Ref, V, big_alloc<std::byte>>;
}

// Deduction guide to make elements_of with big_generator default to big_alloc
namespace std::ranges {
    template<typename Ref, typename V>
    elements_of(cp_algo::big_generator<Ref, V>&&) -> elements_of<cp_algo::big_generator<Ref, V>&&, cp_algo::big_alloc<std::byte>>;
}


#line 1 "cp-algo/util/bit.hpp"


#line 1 "cp-algo/util/simd.hpp"


#include <experimental/simd>
#line 6 "cp-algo/util/simd.hpp"
#include <memory>

#if defined(__x86_64__) && !defined(CP_ALGO_DISABLE_AVX2)
#define CP_ALGO_SIMD_AVX2_TARGET _Pragma("GCC target(\"avx2\")")
#else
#define CP_ALGO_SIMD_AVX2_TARGET
#endif

#define CP_ALGO_SIMD_PRAGMA_PUSH \
    _Pragma("GCC push_options") \
    CP_ALGO_SIMD_AVX2_TARGET

CP_ALGO_SIMD_PRAGMA_PUSH
namespace cp_algo {
    template<typename T, size_t len>
    using simd [[gnu::vector_size(len * sizeof(T))]] = T;
    using u64x8 = simd<uint64_t, 8>;
    using u32x16 = simd<uint32_t, 16>;
    using i64x4 = simd<int64_t, 4>;
    using u64x4 = simd<uint64_t, 4>;
    using u32x8 = simd<uint32_t, 8>;
    using u16x16 = simd<uint16_t, 16>;
    using i32x4 = simd<int32_t, 4>;
    using u32x4 = simd<uint32_t, 4>;
    using u16x8 = simd<uint16_t, 8>;
    using u16x4 = simd<uint16_t, 4>;
    using i16x4 = simd<int16_t, 4>;
    using u8x32 = simd<uint8_t, 32>;
    using u8x16 = simd<uint8_t, 16>;
    using u8x8 = simd<uint8_t, 8>;
    using u8x4 = simd<uint8_t, 4>;
    using dx4 = simd<double, 4>;

    inline dx4 abs(dx4 a) {
        return dx4{
            std::abs(a[0]),
            std::abs(a[1]),
            std::abs(a[2]),
            std::abs(a[3])
        };
    }

    // https://stackoverflow.com/a/77376595
    // works for ints in (-2^51, 2^51)
    static constexpr dx4 magic = dx4() + (3ULL << 51);
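    // Adding magic = 1.5 * 2^52 leaves no mantissa bits for the fraction, so the floating
    // point addition itself rounds x to the nearest integer; converting the biased sum to
    // int64 and subtracting the integer value of magic recovers lround(x). to_double
    // applies the same bias in reverse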
    inline i64x4 lround(dx4 x) {
        return i64x4(x + magic) - i64x4(magic);
    }
    inline dx4 to_double(i64x4 x) {
        return dx4(x + i64x4(magic)) - magic;
    }

    inline dx4 round(dx4 a) {
        return dx4{
            std::nearbyint(a[0]),
            std::nearbyint(a[1]),
            std::nearbyint(a[2]),
            std::nearbyint(a[3])
        };
    }

    inline u64x4 low32(u64x4 x) {
        return x & uint32_t(-1);
    }
    inline auto swap_bytes(auto x) {
        return decltype(x)(__builtin_shufflevector(u32x8(x), u32x8(x), 1, 0, 3, 2, 5, 4, 7, 6));
    }
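    // Note: despite the name, swap_bytes swaps the two 32-bit halves of every 64-bit lane.
    // montgomery_reduce adds a multiple of mod chosen so that the low 32 bits of each lane
    // become zero, leaving the reduced value in the high half; the final swap moves it down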
    inline u64x4 montgomery_reduce(u64x4 x, uint32_t mod, uint32_t imod) {
#ifdef __AVX2__
        auto x_ninv = u64x4(_mm256_mul_epu32(__m256i(x), __m256i() + imod));
        x += u64x4(_mm256_mul_epu32(__m256i(x_ninv), __m256i() + mod));
#else
        auto x_ninv = u64x4(u32x8(low32(x)) * imod);
        x += x_ninv * uint64_t(mod);
#endif
        return swap_bytes(x);
    }

    inline u64x4 montgomery_mul(u64x4 x, u64x4 y, uint32_t mod, uint32_t imod) {
#ifdef __AVX2__
        return montgomery_reduce(u64x4(_mm256_mul_epu32(__m256i(x), __m256i(y))), mod, imod);
#else
        return montgomery_reduce(x * y, mod, imod);
#endif
    }
    inline u32x8 montgomery_mul(u32x8 x, u32x8 y, uint32_t mod, uint32_t imod) {
        return u32x8(montgomery_mul(u64x4(x), u64x4(y), mod, imod)) |
               u32x8(swap_bytes(montgomery_mul(u64x4(swap_bytes(x)), u64x4(swap_bytes(y)), mod, imod)));
    }
    inline dx4 rotate_right(dx4 x) {
        static constexpr u64x4 shuffler = {3, 0, 1, 2};
        return __builtin_shuffle(x, shuffler);
    }

    template<std::size_t Align = 32>
    inline bool is_aligned(const auto* p) noexcept {
        return (reinterpret_cast<std::uintptr_t>(p) % Align) == 0;
    }

    template<class Target>
    inline Target& vector_cast(auto &&p) {
        return *reinterpret_cast<Target*>(std::assume_aligned<alignof(Target)>(&p));
    }
}
#pragma GCC pop_options

#line 8 "cp-algo/util/bit.hpp"

#if defined(__x86_64__) && !defined(CP_ALGO_DISABLE_AVX2)
#define CP_ALGO_BIT_OPS_TARGET _Pragma("GCC target(\"avx2,bmi,bmi2,lzcnt,popcnt\")")
#else
#define CP_ALGO_BIT_OPS_TARGET _Pragma("GCC target(\"bmi,bmi2,lzcnt,popcnt\")")
#endif

#define CP_ALGO_BIT_PRAGMA_PUSH \
    _Pragma("GCC push_options") \
    CP_ALGO_BIT_OPS_TARGET

CP_ALGO_BIT_PRAGMA_PUSH
namespace cp_algo {
    template<typename Uint>
    constexpr size_t bit_width = sizeof(Uint) * 8;

    // n < 64
    uint64_t mask(size_t n) {
        return (1ULL << n) - 1;
    }
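    // Number of set bits of x strictly below position k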
    size_t order_of_bit(auto x, size_t k) {
        return k ? std::popcount(x << (bit_width<decltype(x)> - k)) : 0;
    }
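    // Position of the k-th (0-indexed) set bit: PDEP deposits 1 << k into x's set-bit positions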
    inline size_t kth_set_bit(uint64_t x, size_t k) {
        return std::countr_zero(_pdep_u64(1ULL << k, x));
    }
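    // Invokes callback with the largest power of two <= n as a compile-time constant
    // (template argument), recursing over candidate exponents; with_bit_ceil (below) rounds
    // up to the next power of two instead, and karatsuba() uses it to turn a runtime size
    // into the compile-time template parameter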
    template<int fl = 0>
    void with_bit_floor(size_t n, auto &&callback) {
        if constexpr (fl >= 63) {
            return;
        } else if (n >> (fl + 1)) {
            with_bit_floor<fl + 1>(n, callback);
        } else {
            callback.template operator()<1ULL << fl>();
        }
    }
    void with_bit_ceil(size_t n, auto &&callback) {
        with_bit_floor(n, [&]<size_t N>() {
            if(N == n) {
                callback.template operator()<N>();
            } else {
                callback.template operator()<N << 1>();
            }
        });
    }

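    // read_bits packs 32 ASCII '0'/'1' characters into a 32-bit mask: adding 127 - '0'
    // makes the top bit of each byte equal to the digit, and movemask collects those bits.
    // write_bits is the inverse, expanding a mask back into '0'/'1' characters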
    inline uint32_t read_bits(char const* p) {
        return _mm256_movemask_epi8(__m256i(vector_cast<u8x32 const>(p[0]) + (127 - '0')));
    }
    inline uint64_t read_bits64(char const* p) {
        return read_bits(p) | (uint64_t(read_bits(p + 32)) << 32);
    }

    inline void write_bits(char *p, uint32_t bits) {
        static constexpr u8x32 shuffler = {
            0, 0, 0, 0, 0, 0, 0, 0,
            1, 1, 1, 1, 1, 1, 1, 1,
            2, 2, 2, 2, 2, 2, 2, 2,
            3, 3, 3, 3, 3, 3, 3, 3
        };
        auto shuffled = u8x32(_mm256_shuffle_epi8(__m256i() + bits, __m256i(shuffler)));
        static constexpr u8x32 mask = {
            1, 2, 4, 8, 16, 32, 64, 128,
            1, 2, 4, 8, 16, 32, 64, 128,
            1, 2, 4, 8, 16, 32, 64, 128,
            1, 2, 4, 8, 16, 32, 64, 128
        };
        for(int z = 0; z < 32; z++) {
            p[z] = shuffled[z] & mask[z] ? '1' : '0';
        }
    }
    inline void write_bits64(char *p, uint64_t bits) {
        write_bits(p, uint32_t(bits));
        write_bits(p + 32, uint32_t(bits >> 32));
    }
}
#pragma GCC pop_options

#line 9 "cp-algo/math/karatsuba.hpp"
#include <span>

namespace cp_algo::math {
    constexpr size_t NN = 8;

    // Optimized base case for F2_64: uses 256-bit VPCLMULQDQ
    // Computes 4 products per iteration
    template<size_t N>
    [[gnu::target("avx2,vpclmulqdq")]]
    void base_conv_f2_64(uint64_t const* a, uint64_t const* b, uint64_t* c) {
        alignas(32) __m128i pr0[2 * N] = {};
        alignas(32) __m128i pr1[2 * N] = {};
        
        for (size_t i = 0; i + 1 < N; i += 2) {
            auto va = (__m256i)u64x4{a[i], 0, a[i + 1], 0};
            for (size_t j = 0; j + 1 < N; j += 2) {
                auto vb = (__m256i)u64x4{b[j], b[j + 1], b[j], b[j + 1]};
                (__m256i&)pr0[i + j] ^= _mm256_clmulepi64_epi128(va, vb, 0);
                (__m256i&)pr1[i + j] ^= _mm256_clmulepi64_epi128(va, vb, 16);
            }
        }
        if constexpr (N % 2) {
            static_assert(N == 1);
            pr0[0] = nimber::clmul(a[0], b[0]);
        }
        c[0] = nimber::reduce_mod(pr0[0]);
        for (size_t i = 1; i < 2 * N - 1; i++) {
            c[i] ^= nimber::reduce_mod(pr0[i] ^ pr1[i - 1]);
        }
    }

    template<auto N, auto Add, auto Mul>
    void base_conv(auto &&_a, auto &&_b, auto &&_c) {
        auto a = std::assume_aligned<32>(&_a[0]);
        auto b = std::assume_aligned<32>(&_b[0]);
        auto c = std::assume_aligned<32>(&_c[0]);
        for (size_t i = 0; i < N; i++) {
            for (size_t j = 0; j < N; j++) {
                c[i + j] = Add(c[i + j], Mul(a[i], b[j]));
            }
        }
    }

    // Generic Karatsuba multiplication algorithm for polynomials
    // Template parameters:
    //   N - Size of input arrays (must be power of 2)
    //   Add, Sub, Mul - Operations for addition, subtraction, and coefficient multiplication
    template<auto N, auto Add, auto Sub, auto Mul>
    void _karatsuba(auto &&_a, auto &&_b, auto &&_c) {
        auto a = std::assume_aligned<32>(&_a[0]);
        auto b = std::assume_aligned<32>(&_b[0]);
        auto c = std::assume_aligned<32>(&_c[0]);
        [[gnu::assume(N <= 1<<20)]];
        if constexpr (N <= NN) {
            if constexpr (Mul == nimber::f2_64_product) {
                base_conv_f2_64<N>(a, b, c);
            } else {
                base_conv<N, Add, Mul>(_a, _b, _c);
            }
        } else {
            constexpr auto h = N / 2;
            auto a0 = a, a1 = a + h, b0 = b, b1 = b + h;
            _karatsuba<h, Add, Sub, Mul>(a0, b0, c);
            _karatsuba<h, Add, Sub, Mul>(a1, b1, c + 2 * h);
            using base = std::decay_t<decltype(a[0])>;
            static big_vector<base> buf(4 * h);
            auto f = std::assume_aligned<32>(buf.data());
            auto sum_a = std::assume_aligned<32>(buf.data() + 2 * h);
            auto sum_b = std::assume_aligned<32>(buf.data() + 3 * h);
            for (size_t i = 0; i < h; i++) {
                sum_a[i] = Add(a0[i], a1[i]);
                sum_b[i] = Add(b0[i], b1[i]);
            }
            memset(f, 0, sizeof(base) * 2 * h);
            _karatsuba<h, Add, Sub, Mul>(sum_a, sum_b, f);
            auto c0 = std::assume_aligned<32>(c);
            auto c1 = c0 + h, c2 = c0 + 2 * h;
            for(size_t i = 0; i < h; i++) {
                auto &A = c0[i], &B = c1[i], &C = c2[i], &D = c2[i + h];
                auto BC = Sub(B, C);
                B = Sub(Add(BC, f[i]), A);
                C = Sub(f[i + h], Add(D, BC));
            }
        }
    }

    // Runtime wrapper that deduces N at compile time
    // Resizes inputs to the next power of 2 and result to n + m - 1
    template<typename Cont, auto Add, auto Sub, auto Mul>
    Cont karatsuba(auto &a, auto &b) {
        auto n = std::size(a);
        auto m = std::size(b);
        auto N = std::bit_ceil(std::max(n, m));
        a.resize(N);
        b.resize(N);
        Cont c(2 * N - 1);
        with_bit_ceil(N, [&]<auto NN>() {
            _karatsuba<NN, Add, Sub, Mul>(a, b, c);
        });
        c.resize(n + m - 1);
        return c;
    }

    // Specialization: Convolution over GF(2^64) using Karatsuba
    // Uses XOR for addition/subtraction and f2_64_product for coefficient multiplication
    template<typename Cont = big_vector<uint64_t>>
    Cont convolution_F2_64(auto &a, auto &b) {
        return karatsuba<Cont, std::bit_xor<>{}, std::bit_xor<>{}, nimber::f2_64_product>(a, b);
    }
}

