/*
 *  Copyright (c) 2015-2018 Jose Osvaldo Suarez Domingos
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 * 
 */

///////////////////////////////////////////////////////////////////////////////

//#ifndef BIG_UINT_X86_64_H
//#define BIG_UINT_X86_64_H

///////////////////////////////////////////////////////////////////////////////

#if !defined(REENTRANT_INCLUDE)

///////////////////////////////////////////////////////////////////////////////

template<>
inline
uint8_t addc(uint8_t a,
             uint8_t b,
             uint8_t & carry)
{
    // Add-with-carry: returns a + b + carry-in (mod 2^8) and stores the
    // carry-out (0 or 1) into `carry`.
    // Any non-zero carry-in is treated as 1: "add al, 0xff" sets CF iff
    // al != 0.  Intel-syntax inline asm (build needs -masm=intel).
    asm volatile(
        "add al, 0xff  \n\t" //CF = (carry != 0)
        "adc %[a], %[b]\n\t" //a += b + CF; CF = carry-out
        "mov eax, 0    \n\t" //mov does not touch CF
        "setc al       \n\t" //carry = CF, normalized to 0/1
        : [a]"+r"(a),
          [carry]"+a"(carry) //eax
        : [b]"r"(b)
        : "cc");
    return a;
}

///////////////////////////////////////////////////////////////////////////////

template<>
inline
uint16_t addc(uint16_t a,
              uint16_t b,
              uint16_t & carry)
{
    // Add-with-carry: returns a + b + carry-in (mod 2^16) and stores the
    // carry-out (0 or 1) into `carry`.
    // `carry` lives in ax ("+a") but only its LOW BYTE is tested by
    // "add al, 0xff", so callers must keep carry normalized to 0/1
    // (every routine in this file produces exactly that via setc).
    asm volatile(
        "add al, 0xff  \n\t" //CF = (low byte of carry != 0)
        "adc %[a], %[b]\n\t" //a += b + CF; CF = carry-out
        "mov eax, 0    \n\t" //mov does not touch CF
        "setc al       \n\t" //carry = CF
        : [a]"+r"(a),
          [carry]"+a"(carry) //eax
        : [b]"r"(b)
        : "cc");
    return a;
}

///////////////////////////////////////////////////////////////////////////////

template<>
inline
uint32_t addc(uint32_t a,
              uint32_t b,
              uint32_t & carry)
{
    // Add-with-carry: returns a + b + carry-in (mod 2^32) and stores the
    // carry-out (0 or 1) into `carry`.
    // `carry` lives in eax ("+a") but only its LOW BYTE is tested by
    // "add al, 0xff"; callers must keep carry normalized to 0/1.
    asm volatile(
        "add al, 0xff  \n\t" //CF = (low byte of carry != 0)
        "adc %[a], %[b]\n\t" //a += b + CF; CF = carry-out
        "mov eax, 0    \n\t" //mov does not touch CF
        "setc al       \n\t" //carry = CF
        : [a]"+r"(a),
          [carry]"+a"(carry) //eax
        : [b]"r"(b)
        : "cc");
    return a;
}

///////////////////////////////////////////////////////////////////////////////

template<>
inline
uint64_t addc(uint64_t a,
              uint64_t b,
              uint64_t & carry)
{
    // Add-with-carry: returns a + b + carry-in (mod 2^64) and stores the
    // carry-out (0 or 1) into `carry`.
    // `carry` lives in rax ("+a") but only its LOW BYTE is tested by
    // "add al, 0xff"; callers must keep carry normalized to 0/1.
    asm volatile(
        "add al, 0xff  \n\t" //CF = (low byte of carry != 0)
        "adc %[a], %[b]\n\t" //a += b + CF; CF = carry-out
        "mov eax, 0    \n\t" //zeroes all of rax; mov does not touch CF
        "setc al       \n\t" //carry = CF
        : [a]"+r"(a),
          [carry]"+a"(carry) //rax
        : [b]"r"(b)
        : "cc");

    return a;
}

///////////////////////////////////////////////////////////////////////////////

template<>
inline
void addc_blocks<uint64_t>(uint64_t * result,
                           const uint64_t * a,
                           const uint64_t * b,
                           uint64_t & carry,
                           const unsigned int blocks)
{
    // Multi-precision add: result[k] = a[k] + b[k] + running carry for
    // k in [0, blocks), little-endian 64-bit limbs; on exit `carry`
    // holds the final carry-out (0 or 1).
    // `carry` is pinned to rax ("+a"); inside the loop lea and dec are
    // used for the bookkeeping because neither touches CF, so the carry
    // chain survives across iterations.
    // Fix: the original entered the dec/jnz loop unconditionally, so
    // blocks == 0 wrapped ecx and spun ~2^32 iterations trampling
    // memory; a zero count now leaves memory and `carry` untouched
    // (carry simply propagates through zero limbs).
    asm volatile(
        "mov ecx, %[blocks]           \n\t"
        "test ecx, ecx                \n\t" //blocks == 0: nothing to add
        "jz OMITIR%=                  \n\t"
        "add al, 0xff                 \n\t" //CF = carry
        "mov edx, 0                   \n\t" //i = 0 (byte offset)
        "WHILE_I%=:                   \n\t"
        "mov rax, QWORD PTR [%[a]+rdx]\n\t" //a[i]
        "adc rax, QWORD PTR [%[b]+rdx]\n\t" //a[i] + b[i] + carry
        "mov QWORD PTR [%[r]+rdx], rax\n\t" //result[i] = a[i] + b[i] + carry
        "lea edx, [edx+8]             \n\t" //i += 8 (lea keeps CF)
        "dec ecx                      \n\t" //blocks-- (dec keeps CF)
        "jnz WHILE_I%=                \n\t"
        "mov eax, 0                   \n\t"
        "setc al                      \n\t" //carry = CF
        "OMITIR%=:                    \n\t"
        : [carry]"+a"(carry)
        : [blocks]"rm"(blocks),
          [r]"r"(result),
          [a]"r"(a),
          [b]"r"(b)
        : "cc", "memory", "rcx", "rdx");
}

///////////////////////////////////////////////////////////////////////////////

template<>
inline
void inc_blocks(uint64_t * result,
                uint64_t & carry,
                const unsigned int blocks)
{
    // Propagate an incoming carry through result[0..blocks): if
    // carry != 0, add 1 to result[0] and ripple upward, stopping as soon
    // as a limb does not overflow. On exit `carry` holds whatever carry
    // fell off the top (0 or 1).
    // Fix: with blocks == 0 the original jumped to the setc epilogue
    // with CF clear, forcing `carry` to 0 even when a carry came in and
    // thereby losing it; a zero count now leaves `carry` unchanged
    // (there is no limb to absorb it).
    asm volatile(
        "mov ecx, %[blocks]    \n\t"
        "test ecx, ecx         \n\t" //blocks == 0: keep carry as-is
        "jz OMITIR%=           \n\t"
        "add al, 0xff          \n\t" //CF = carry
        "jnc FIN%=             \n\t" //no carry-in: nothing to do
        "mov rax, %[result]    \n\t"
        "REPE%=:               \n\t"
        "add QWORD PTR [rax], 1\n\t"
        "jnc FIN%=             \n\t" //carry absorbed by this limb
        "lea rax, [rax+8]      \n\t" //next limb (lea keeps CF)
        "dec ecx               \n\t" //dec keeps CF
        "jnz REPE%=            \n\t"
        "FIN%=:                \n\t"
        "mov eax, 0            \n\t"
        "setc al               \n\t" //carry = CF
        "OMITIR%=:             \n\t"
        : [carry]"+a"(carry)
        : [blocks]"rm"(blocks),
          [result]"rm"(result)
        : "cc", "memory", "ecx");
}

///////////////////////////////////////////////////////////////////////////////

template<>
inline
void subc_blocks<uint64_t>(uint64_t * result,
                           const uint64_t * a,
                           const uint64_t * b,
                           uint64_t & carry,
                           const unsigned int blocks)
{
    // Multi-precision subtract implemented as a + ~b + carry (two's
    // complement), little-endian 64-bit limbs. The carry flag is the
    // x86-style "not borrow": carry-in 1 means no pending borrow
    // (a - b), carry-in 0 means borrow one (a - b - 1); carry-out
    // follows the same convention.
    // Fix: the original entered the dec/jnz loop unconditionally, so
    // blocks == 0 wrapped ecx and spun ~2^32 iterations trampling
    // memory; a zero count now leaves memory and `carry` untouched.
    // (Also corrected the stale "a[i] + b[i]" store comment.)
    asm volatile(
        "mov ecx, %[blocks]           \n\t"
        "test ecx, ecx                \n\t" //blocks == 0: nothing to do
        "jz OMITIR%=                  \n\t"
        "add al, 0xff                 \n\t" //CF = carry
        "mov edx, 0                   \n\t" //i = 0 (byte offset)
        "WHILE_I%=:                   \n\t"
        "mov rax, QWORD PTR [%[b]+rdx]\n\t" //b[i]
        "not rax                      \n\t" //~b[i]
        "adc rax, QWORD PTR [%[a]+rdx]\n\t" //a[i] + ~b[i] + carry
        "mov QWORD PTR [%[r]+rdx], rax\n\t" //result[i] = a[i] + ~b[i] + carry
        "lea edx, [edx+8]             \n\t" //i += 8 (lea keeps CF)
        "dec ecx                      \n\t" //blocks-- (dec keeps CF)
        "jnz WHILE_I%=                \n\t"
        "mov eax, 0                   \n\t"
        "setc al                      \n\t" //carry = CF
        "OMITIR%=:                    \n\t"
        : [carry]"+a"(carry)
        : [blocks]"rm"(blocks),
          [r]"r"(result),
          [a]"r"(a),
          [b]"r"(b)
        : "cc", "memory", "rcx", "rdx");
}

///////////////////////////////////////////////////////////////////////////////

template<>
inline
uint8_t mul<uint8_t>(uint8_t a,
                     uint8_t b,
                     uint8_t & high)
{
    // Full 8x8 -> 16-bit multiply: returns the low byte of a*b and
    // stores the high byte into `high`.
    // 8-bit mul leaves the whole product in ax, so the high half is
    // copied from ah into dl (the "=d" output) by hand.
    asm volatile(
        "mul %[b]  \n\t" //ax = al * b
        "mov dl, ah\n\t" //dl = high byte of the product
        : [high]"=d"(high), //dl
          [a]"+a"(a)        //al
        : [b]"r"(b)
        : "cc");
    return a;
}

///////////////////////////////////////////////////////////////////////////////

template<>
inline
uint16_t mul<uint16_t>(uint16_t a,
                       uint16_t b,
                       uint16_t & high)
{
    // Full 16x16 -> 32-bit multiply: returns the low 16 bits of a*b and
    // stores the high 16 bits into `high` (mul r16 writes dx:ax).
    asm volatile(
        "mul %[b]\n\t" //dx:ax = ax * b
        : [high]"=d"(high), //dx
          [a]"+a"(a)        //ax
        : [b]"r"(b)
        : "cc");
    return a;
}

///////////////////////////////////////////////////////////////////////////////

template<>
inline
uint32_t mul<uint32_t>(uint32_t a,
                       uint32_t b,
                       uint32_t & high)
{
    // Full 32x32 -> 64-bit multiply: returns the low 32 bits of a*b and
    // stores the high 32 bits into `high` (mul r32 writes edx:eax).
    asm volatile(
        "mul %[b]\n\t" //edx:eax = eax * b
        : [high]"=d"(high), //edx
          [a]"+a"(a)        //eax
        : [b]"r"(b)
        : "cc");
    return a;
}

///////////////////////////////////////////////////////////////////////////////

template<>
inline
uint64_t mul<uint64_t>(uint64_t a,
                       uint64_t b,
                       uint64_t & high)
{
    // Full 64x64 -> 128-bit multiply: returns the low 64 bits of a*b and
    // stores the high 64 bits into `high` (mul r64 writes rdx:rax).
    asm volatile(
        "mul %[b]\n\t" //rdx:rax = rax * b
        : [high]"=d"(high), //rdx
          [a]"+a"(a)        //rax
        : [b]"r"(b)
        : "cc");
    return a;
}

///////////////////////////////////////////////////////////////////////////////

// Multi-precision fused multiply-add: data += a * b, 64-bit limbs.
// The C++ loop walks `a` three limbs at a time (a0,a1,a2, zero-padded past
// blocks_a); the inner asm loop multiplies that triple against b_data,
// keeping three column accumulators in r8/r9/r10 (rbx catches top carries)
// rotated by a 3-way unrolled loop, then flushes the pending accumulators
// into `data` with carry propagation capped at data + blocks_data.
// NOTE(review): unlike the addc/mul specializations above this one is not
// declared `inline` -- ODR risk if this header is included from several
// translation units; confirm how the project includes it.
// Intel-syntax inline asm (build needs -masm=intel).
template<>
void fma8(uint64_t * data,
          const unsigned int blocks_data,
          const uint64_t * a_data,
          const unsigned int blocks_a,
          const uint64_t * b_data,
          const unsigned int blocks_b)
{
    using base_type = uint64_t;
    const base_type zero = static_cast<base_type>(0);

    // Local min: never read a_data, nor write data, past their sizes.
    auto min = [](unsigned int a, unsigned int b)
    {return a < b ? a : b;};

    const unsigned int limit = min(blocks_a, blocks_data);
    for (unsigned int i = 0; i < limit; i += 3)
    {
        // Current limb triple of `a`; out-of-range limbs read as zero.
        const base_type a0 = a_data[i + 0];
        const base_type a1 = i + 1 < blocks_a ? a_data[i + 1] : zero;
        const base_type a2 = i + 2 < blocks_a ? a_data[i + 2] : zero;

        asm volatile(
            "mov r8,  0             \n\t" //tmp0 = 0
            "mov r9,  r8            \n\t" //tmp1 = 0
            "mov r10, r8            \n\t" //tmp2 = 0
            //d = min(blocks_data - i, blocks_b): b limbs that fit in data
            "mov eax, %[blocks_data]\n\t"
            "mov edx, %[blocks_b]   \n\t" //blocks_b
            "sub eax, %[i]          \n\t" //blocks_data - i
            "cmp eax, edx           \n\t" //blocks_data - i < blocks_b ?
            "jle BLOCKS_DATA_MENOR%=\n\t"
            "mov ecx, edx           \n\t" //d = blocks_b
            "jmp D_CALCULADO%=      \n\t"
            "BLOCKS_DATA_MENOR%=:   \n\t"
            "mov ecx, eax           \n\t" //d = blocks_data - i
            "D_CALCULADO%=:         \n\t"
            "sal rcx, 3             \n\t" //d *= 8 (limbs -> bytes)
            "mov rdi, %[data]       \n\t"
            "mov eax, %[i]          \n\t"
            "sal rax, 3             \n\t"
            "add rdi, rax           \n\t"
            "add rdi, rcx           \n\t" //r = data + i + d
            "mov rsi, %[b_data]     \n\t"
            "add rsi, rcx           \n\t" //b = b_data + d
            "neg rcx                \n\t" //j = -d, counts up to 0

            "REPE%=:                \n\t"
//First limb of the 3-way unrolled loop: accumulators r8 (low), r9, r10, rbx
            "mov r11, QWORD PTR [rsi+rcx]\n\t" //b[j]
            "mov rax, %[a1]              \n\t"
            "xor ebx, ebx                \n\t" //top carry accumulator = 0
            "mul r11                     \n\t" //rdx:rax = a1 * b[j]
            "add r8, QWORD PTR [rdi+rcx] \n\t" //fold in existing data limb
            "adc r9, rax                 \n\t"
            "adc r10, rdx                \n\t"
            "adc ebx, ebx                \n\t" //capture the ripple carry
            "mov rax, %[a0]              \n\t"
            "mul r11                     \n\t" //rdx:rax = a0 * b[j]
            "mov r12, rax                \n\t"
            "mov r13, rdx                \n\t"
            "mov rax, %[a2]              \n\t"
            "mul r11                     \n\t" //rdx:rax = a2 * b[j]
            "add r8, r12                 \n\t"
            "mov QWORD PTR [rdi+rcx], r8 \n\t" //store finished low limb
            "adc r9, r13                 \n\t"
            "adc r10, rax                \n\t"
            "adc rbx, rdx                \n\t"
            //disabled non-rotating variant, kept for reference
#if 0
            "mov r8, r9                  \n\t"
            "mov r9, r10                 \n\t"
            "mov r10, rbx                \n\t"
            "add rcx, 8                  \n\t"
            "jz FIN%=                    \n\t"
            "jmp REPE%=                  \n\t"
#endif
            //rotate: rbx (collected carries) becomes the new low limb
            "mov r8, rbx                 \n\t"
            "add rcx, 8                  \n\t"
            "jz CAMBIO_1%=               \n\t" //ran out after 1 of 3 limbs

//Second limb: same body, accumulators rotated (r9 low, r10, r8, rbx)
            "mov r11, QWORD PTR [rsi+rcx]\n\t"
            "mov rax, %[a1]              \n\t"
            "xor ebx, ebx                \n\t"
            "mul r11                     \n\t"
            "add r9, QWORD PTR [rdi+rcx] \n\t"
            "adc r10, rax                \n\t"
            "adc r8, rdx                 \n\t"
            "adc ebx, ebx                \n\t"
            "mov rax, %[a0]              \n\t"
            "mul r11                     \n\t"
            "mov r12, rax                \n\t"
            "mov r13, rdx                \n\t"
            "mov rax, %[a2]              \n\t"
            "mul r11                     \n\t"
            "add r9, r12                 \n\t"
            "mov QWORD PTR [rdi+rcx], r9 \n\t"
            "adc r10, r13                \n\t"
            "adc r8, rax                 \n\t"
            "adc rbx, rdx                \n\t"
            "mov r9, rbx                 \n\t"
            "add rcx, 8                  \n\t"
            "jz CAMBIO_2%=               \n\t" //ran out after 2 of 3 limbs

//Third limb: accumulators rotated again (r10 low, r8, r9, rbx)
            "mov r11, QWORD PTR [rsi+rcx]\n\t"
            "mov rax, %[a1]              \n\t"
            "xor ebx, ebx                \n\t"
            "mul r11                     \n\t"
            "add r10, QWORD PTR [rdi+rcx]\n\t"
            "adc r8, rax                 \n\t"
            "adc r9, rdx                 \n\t"
            "adc ebx, ebx                \n\t"
            "mov rax, %[a0]              \n\t"
            "mul r11                     \n\t"
            "mov r12, rax                \n\t"
            "mov r13, rdx                \n\t"
            "mov rax, %[a2]              \n\t"
            "mul r11                     \n\t"
            "add r10, r12                \n\t"
            "mov QWORD PTR [rdi+rcx], r10\n\t"
            "adc r8, r13                 \n\t"
            "adc r9, rax                 \n\t"
            "adc rbx, rdx                \n\t"
            "mov r10, rbx                \n\t"
            "add rcx, 8                  \n\t"
            "jz FIN%=                    \n\t"
            "jmp REPE%=                  \n\t"

            //loop exited mid-rotation: permute r8/r9/r10 back into
            //canonical (low, mid, high) order before the flush
            "CAMBIO_1%=:                 \n\t"
            "mov rbx, r8                 \n\t"
            "mov r8, r9                  \n\t"
            "mov r9, r10                 \n\t"
            "mov r10, rbx                \n\t"
            "jmp FIN%=                   \n\t"

            "CAMBIO_2%=:                 \n\t"
            "mov rbx, r8                 \n\t"
            "mov r8, r10                 \n\t"
            "mov r10, r9                 \n\t"
            "mov r9, rbx                 \n\t"

            "FIN%=:                      \n\t"

            //flush pending accumulators r8/r9/r10 into data, rippling any
            //carry no further than data + blocks_data
            "mov ecx, %[blocks_data] \n\t"
            "sal ecx, 3              \n\t"
            "mov rdx, %[data]        \n\t"
            "add rdx, rcx            \n\t" //data + blocks_data
            "mov rax, rdx            \n\t"
            "sub rax, rdi            \n\t" //bytes of data still writable
            //NOTE(review): 32-bit compares of a 64-bit difference below;
            //fine while the arrays stay under 4 GiB -- confirm
            "cmp eax, 0              \n\t"
            "je HECHO%=              \n\t"
            "cmp eax, 8              \n\t"
            "je QUEDA_1%=            \n\t"
            "cmp eax, 16             \n\t"
            "je QUEDAN_2%=           \n\t"
            "cmp eax, 24             \n\t"
            "je QUEDAN_3%=           \n\t"

            //more than 3 limbs of room: add all three accumulators, then
            //ripple the carry word by word
            "QUEDAN_MAS%=:           \n\t"
            "add QWORD PTR [rdi], r8 \n\t"
            "lea rdi, [rdi+8]        \n\t"
            "adc QWORD PTR [rdi], r9 \n\t"
            "lea rdi, [rdi+8]        \n\t"
            "adc QWORD PTR [rdi], r10\n\t"
            "jnc HECHO%=             \n\t"
            "add rdi, 8              \n\t"
            "ACARREO%=:              \n\t"
            "add QWORD PTR [rdi], 1  \n\t"
            "jnc HECHO%=             \n\t"
            "add rdi, 8              \n\t"
            "cmp rdi, rdx            \n\t" //stop at data + blocks_data
            "jne ACARREO%=           \n\t"
            "jmp HECHO%=             \n\t"

            //exactly 3 limbs of room left: top carry falls off
            "QUEDAN_3%=:             \n\t"
            "add QWORD PTR [rdi], r8 \n\t"
            "lea rdi, [rdi+8]        \n\t"
            "adc QWORD PTR [rdi], r9 \n\t"
            "lea rdi, [rdi+8]        \n\t"
            "adc QWORD PTR [rdi], r10\n\t"
            "jmp HECHO%=             \n\t"

            //2 limbs left
            "QUEDAN_2%=:             \n\t"
            "add QWORD PTR [rdi], r8 \n\t"
            "lea rdi, [rdi+8]        \n\t"
            "adc QWORD PTR [rdi], r9 \n\t"
            "jmp HECHO%=             \n\t"

            //1 limb left
            "QUEDA_1%=:              \n\t"
            "add QWORD PTR [rdi], r8 \n\t"
            //"jmp HECHO%=             \n\t"

            "HECHO%=:                \n\t"
            :
            : [a0]"rm"(a0),
              [a1]"rm"(a1),
              [a2]"rm"(a2),
              [data]"rm"(data),
              [b_data]"rm"(b_data),
              [i]"rm"(i),
              [blocks_b]"rm"(blocks_b),
              [blocks_data]"rm"(blocks_data)
            : "cc", "memory",
              "rax", "rbx", "rcx", "rdx",
              "rdi", "rsi",
              "r8", "r9", "r10", "r11", "r12", "r13");
    }
}

///////////////////////////////////////////////////////////////////////////////

template<>
struct addmul_1<uint64_t, 1>
{
    using base_type = uint64_t;

    // One limb of schoolbook multiply-accumulate:
    //   data[0] += a0 * b[0] + tmp0
    // On exit tmp0 holds the outgoing carry limb (high half of the
    // product plus any carries), ready to feed the next limb.
    static void operate(base_type * data,
                        const base_type a0,
                        const base_type * b,
                        base_type & tmp0)
    {
// Snippet: rdx:rax = a0 * b[offset]; tmp0 += low, rdx absorbs the carry;
// data[offset] += tmp0; tmp0 = rdx + carry of the data add.
// (mov between adcs is safe: mov does not touch CF.)
#define ADDMUL_1_ASM_SNIPPET(offset) \
            "mov rax, %[a0]                  \n\t"\
            "mul QWORD PTR [%[b]+" #offset "]\n\t"\
            "add %[tmp0], rax                \n\t"\
            "adc rdx, 0                      \n\t"\
            "add QWORD PTR [%[data]+" #offset "], %[tmp0]\n\t"\
            "mov %[tmp0], rdx                \n\t"\
            "adc %[tmp0], 0                  \n\t"
        asm volatile(
            ADDMUL_1_ASM_SNIPPET(0)
            : [tmp0]"+r"(tmp0)
            : [data]"r"(data),
              [b]"r"(b),
              [a0]"r"(a0)
            : "cc", "memory", "rax", "rdx");
    }
};

///////////////////////////////////////////////////////////////////////////////

template<>
struct addmul_1<uint64_t, 2>
{
    using base_type = uint64_t;

    // Two limbs of schoolbook multiply-accumulate, unrolled:
    //   data[0..1] += a0 * b[0..1] + tmp0
    // On exit tmp0 holds the outgoing carry limb.
    static void operate(base_type * data,
                        const base_type a0,
                        const base_type * b,
                        base_type & tmp0)
    {
        asm volatile(
            "mov rax, %[a0]                    \n\t"
            "mul QWORD PTR [%[b]+0]            \n\t" //rdx:rax = a0*b[0]
            "add %[tmp0], rax                  \n\t" //tmp0 += low0
            "adc rdx, 0                        \n\t" //high0 absorbs carry
            "xchg rdx, rcx                     \n\t" //rcx = high0
            "mov rax, %[a0]                    \n\t"
            "mul QWORD PTR [%[b]+8]            \n\t" //rdx:rax = a0*b[1]
            "add rcx, rax                      \n\t" //high0 += low1
            "adc rdx, 0                        \n\t" //high1 absorbs carry
            "add QWORD PTR [%[data]+0], %[tmp0]\n\t" //data[0] += tmp0
            "mov %[tmp0], 0                    \n\t" //mov keeps CF intact
            "adc QWORD PTR [%[data]+8], rcx    \n\t" //data[1] += high0 + CF
            "adc %[tmp0], rdx                  \n\t" //tmp0 = high1 + CF
            : [tmp0]"+r"(tmp0)
            : [data]"r"(data),
              [b]"r"(b),
              [a0]"r"(a0)
            : "cc", "memory", "rax", "rdx", "rcx");
    }
};

#undef ADDMUL_1_ASM_SNIPPET

///////////////////////////////////////////////////////////////////////////////

template<>
struct addmul_2<uint64_t, 1>
{
    using base_type = uint64_t;
    // NOTE(review): `zero` appears unused in this specialization --
    // presumably kept for interface parity with other addmul_2 variants.
    static const base_type zero = static_cast<base_type>(0);

    // One column of a 2-row schoolbook multiply-accumulate:
    //   data[0] += (a1:a0) * b[0] + tmp0, carries into tmp0/tmp1
    // tmp0 becomes the next column's low carry limb, tmp1 the one above.
    static void operate(base_type * data,
                        const base_type a0,
                        const base_type a1,
                        const base_type * b,
                        base_type & tmp0,
                        base_type & tmp1)
    {
            // Disabled three-temporary reference implementation:
            /*
            "mov rax, %[a0]                  \n\t"
            "mov r8, QWORD PTR [%[b]]        \n\t" //b[0]
            "mul r8                          \n\t" //high0:low0 = a0 * b0
            "add %[tmp0], rax                \n\t" //tmp0 += low0
            "adc rdx, 0                      \n\t" //high0 += carry
            "mov rcx, rdx                    \n\t"
            "mov rax, %[a1]                  \n\t"
            "mul r8                          \n\t" //high1:low1 = a1 * b0
            "add rcx, rax                    \n\t" //high0 += low1
            "adc rdx, 0                      \n\t" //high1 += carry
            "add QWORD PTR [%[data]], %[tmp0]\n\t" //data[0] += tmp0
            "mov %[tmp0], %[tmp1]            \n\t" //tmp0 = tmp1
            "adc %[tmp0], rcx                \n\t" //tmp0 += high0 + carry
            "mov %[tmp1], %[tmp2]            \n\t" //tmp1 = tmp2
            "adc %[tmp1], rdx                \n\t" //tmp1 += high1 + carry
            "mov %[tmp2], 0                  \n\t" //tmp2 = 0
            "adc %[tmp2], 0                  \n\t" //tmp2 += carry
            */
// Snippet: rcx = b[offset]; a0*b and a1*b computed with mul (rdx:rax);
// data[offset] += tmp0; new tmp0 = tmp1 + high(a0*b) + carries;
// new tmp1 = high(a1*b) + final carry.
#define ADDMUL_2_ASM_SNIPPET(offset) \
            "mov rcx, QWORD PTR [%[b]+" #offset "]\n\t"\
            "mov rax, %[a0]                  \n\t"\
            "mul rcx                         \n\t"\
            "add %[tmp0], rax                \n\t"\
            "adc rdx, 0                      \n\t"\
            "xchg rcx, rdx                   \n\t"\
            "mov rax, %[a1]                  \n\t"\
            "mul rdx                         \n\t"\
            "add rcx, rax                    \n\t"\
            "adc rdx, 0                      \n\t"\
            "add QWORD PTR [%[data]+" #offset "], %[tmp0]\n\t"\
            "mov %[tmp0], %[tmp1]            \n\t"\
            "adc %[tmp0], rcx                \n\t"\
            "mov %[tmp1], 0                  \n\t"\
            "adc %[tmp1], rdx                \n\t"

        asm volatile(
            ADDMUL_2_ASM_SNIPPET(0)
            : [tmp0]"+r"(tmp0),
              [tmp1]"+r"(tmp1)
            : [data]"r"(data),
              [b]"r"(b),
              [a0]"r"(a0),
              [a1]"r"(a1)
            : "cc", "memory", "rax", "rcx", "rdx");
    }
};

///////////////////////////////////////////////////////////////////////////////

template<>
struct addmul_2<uint64_t, 2>
{
    using base_type = uint64_t;
    // NOTE(review): `zero` appears unused here -- interface parity?
    static const base_type zero = static_cast<base_type>(0);

    // Two columns of the 2-row multiply-accumulate, unrolled:
    //   data[0..1] += (a1:a0) * b[0..1] + incoming tmp0/tmp1 carries.
    // Uses ADDMUL_2_ASM_SNIPPET defined in addmul_2<uint64_t, 1> above
    // (#undef'd after the last unrolled variant).
    static void operate(base_type * data,
                        const base_type a0,
                        const base_type a1,
                        const base_type * b,
                        base_type & tmp0,
                        base_type & tmp1)
    {
        asm volatile(
            ADDMUL_2_ASM_SNIPPET(0)
            ADDMUL_2_ASM_SNIPPET(8)
            : [tmp0]"+r"(tmp0),
              [tmp1]"+r"(tmp1)
            : [data]"r"(data),
              [b]"r"(b),
              [a0]"r"(a0),
              [a1]"r"(a1)
            : "cc", "memory", "rax", "rcx", "rdx");
    }
};

///////////////////////////////////////////////////////////////////////////////

template<>
struct addmul_2<uint64_t, 4>
{
    using base_type = uint64_t;
    // NOTE(review): `zero` appears unused here -- interface parity?
    static const base_type zero = static_cast<base_type>(0);

    // Four columns of the 2-row multiply-accumulate, unrolled:
    //   data[0..3] += (a1:a0) * b[0..3] + incoming tmp0/tmp1 carries.
    // Uses ADDMUL_2_ASM_SNIPPET defined in addmul_2<uint64_t, 1> above.
    static void operate(base_type * data,
                        const base_type a0,
                        const base_type a1,
                        const base_type * b,
                        base_type & tmp0,
                        base_type & tmp1)
    {
        asm volatile(
            ADDMUL_2_ASM_SNIPPET(0)
            ADDMUL_2_ASM_SNIPPET(8)
            ADDMUL_2_ASM_SNIPPET(16)
            ADDMUL_2_ASM_SNIPPET(24)
            : [tmp0]"+r"(tmp0),
              [tmp1]"+r"(tmp1)
            : [data]"r"(data),
              [b]"r"(b),
              [a0]"r"(a0),
              [a1]"r"(a1)
            : "cc", "memory", "rax", "rcx", "rdx");
    }
};
#undef ADDMUL_2_ASM_SNIPPET

///////////////////////////////////////////////////////////////////////////////

#define REENTRANT_INCLUDE 1

// ---------------------------------------------------------------------------
// Intel-syntax asm text-building macros used by the fixed_size_multiply
// specializations generated through the re-entrant #include below.
// ---------------------------------------------------------------------------

// Move src -> dest. LOAD and MOVE are textually identical; the two names
// record intent at the call site (LOAD: memory operand, MOVE: register).
#define LOAD(dest, src) \
    "mov " dest ", " src "\n\t"

#define MOVE(dest, src) \
    "mov " dest ", " src "\n\t"

// Store reg into the result array at the given BYTE offset.
#define STORE(reg, offset) \
    "mov QWORD PTR [%[r]+" #offset "], " reg "\n\t"

// reg = 0 (xor self: shortest encoding; also clears CF).
#define SET_ZERO(reg) \
        "xor " reg ", " reg "\n\t"\

// rdx:rax = op1 * op2 (op1 goes through rax; mul widens into rdx:rax).
#define MUL(op1, op2) \
        "mov rax, " op1 "\n\t"\
        "mul " op2 "\n\t"

// Accumulator pair (op2:op1) += rdx:rax with carry propagation.
#define ADD2(op1, op2) \
        "add " op1 ", rax\n\t"\
        "adc " op2 ", rdx\n\t"

// reg1 += low 64 bits of op1 * op2 (high half and carry-out discarded).
#define MULADD1(op1, op2, reg1) \
        "mov rax, " op1 "\n\t"\
        "mul " op2 "\n\t"\
        "add " reg1 ", rax\n\t"

// (reg2:reg1) += op1 * op2; carry out of reg2 is discarded.
#define MULADD2(op1, op2, reg1, reg2) \
        "mov rax, " op1 "\n\t"\
        "mul " op2 "\n\t"\
        "add " reg1 ", rax\n\t"\
        "adc " reg2 ", rdx\n\t"

// (reg3:reg2:reg1) += op1 * op2, rippling the final carry into reg3.
#define MULADD3(op1, op2, reg1, reg2, reg3) \
        "mov rax, " op1 "\n\t"\
        "mul " op2 "\n\t"\
        "add " reg1 ", rax\n\t"\
        "adc " reg2 ", rdx\n\t"\
        "adc " reg3 ", 0\n\t"

// Memory operands for the k-th 64-bit limb of a and b (little-endian).
#define MEM_A0 "QWORD PTR[%[a]+0]"
#define MEM_A1 "QWORD PTR[%[a]+8]"
#define MEM_A2 "QWORD PTR[%[a]+16]"
#define MEM_A3 "QWORD PTR[%[a]+24]"
#define MEM_A4 "QWORD PTR[%[a]+32]"
#define MEM_A5 "QWORD PTR[%[a]+40]"
#define MEM_A6 "QWORD PTR[%[a]+48]"
#define MEM_A7 "QWORD PTR[%[a]+56]"
#define MEM_B0 "QWORD PTR[%[b]+0]"
#define MEM_B1 "QWORD PTR[%[b]+8]"
#define MEM_B2 "QWORD PTR[%[b]+16]"
#define MEM_B3 "QWORD PTR[%[b]+24]"
#define MEM_B4 "QWORD PTR[%[b]+32]"
#define MEM_B5 "QWORD PTR[%[b]+40]"
#define MEM_B6 "QWORD PTR[%[b]+48]"
#define MEM_B7 "QWORD PTR[%[b]+56]"

#define SIZE_OPERANDS 1
#define SIZE_RESULT 1
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 1
#define SIZE_RESULT 2
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 2
#define SIZE_RESULT 2
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 2
#define SIZE_RESULT 3
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 2
#define SIZE_RESULT 4
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 3
#define SIZE_RESULT 3
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 3
#define SIZE_RESULT 5
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 3
#define SIZE_RESULT 6
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 4
#define SIZE_RESULT 4
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 4
#define SIZE_RESULT 7
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 4
#define SIZE_RESULT 8
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 8
#define SIZE_RESULT 8
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 8
#define SIZE_RESULT 15
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

#define SIZE_OPERANDS 8
#define SIZE_RESULT 16
#include "big_uint_x86_64.h"
#undef SIZE_OPERANDS
#undef SIZE_RESULT

///////////////////////////////////////////////////////////////////////////////

#else //REENTRANT_INCLUDE

///////////////////////////////////////////////////////////////////////////////
#if SIZE_OPERANDS == 1
#if SIZE_RESULT == 1
// r[0] = low 64 bits of a[0] * b[0] (1x1 limbs, truncating product).
template<>
void fixed_size_multiply<uint64_t, 1, 1, 1>(uint64_t * r,
                                            const uint64_t * a,
                                            const uint64_t * b)
{
    asm volatile(
        MUL(MEM_A0, MEM_B0)
        STORE("rax", 0)
        :
        : [a]"r"(a),
          [b]"r"(b),
          [r]"r"(r)
        : "cc", "memory", "rax", "rdx");
}
#endif
///////////////////////////////////////////////////////////////////////////////
#if SIZE_RESULT == 2
// Full 64x64 -> 128-bit product: r[0] = low half, r[1] = high half of
// a[0] * b[0] (mul leaves the product in rdx:rax).
template<>
void fixed_size_multiply<uint64_t, 1, 1, 2>(uint64_t * r,
                                            const uint64_t * a,
                                            const uint64_t * b)
{
    asm volatile(
        MUL(MEM_A0, MEM_B0)
        STORE("rax", 0)
        STORE("rdx", 8)
        :
        : [a]"r"(a),
          [b]"r"(b),
          [r]"r"(r)
        : "cc", "memory",
          "rax", "rdx");
}
#endif
#endif
///////////////////////////////////////////////////////////////////////////////
#if SIZE_OPERANDS == 2
#if SIZE_RESULT == 2
// 2x2-limb multiply truncated to 2 limbs:
//   r[0] = low(a0*b0)
//   r[1] = high(a0*b0) + low(a0*b1) + low(a1*b0)   (mod 2^64)
template<>
void fixed_size_multiply<uint64_t, 2, 2, 2>(uint64_t * r,
                                            const uint64_t * a,
                                            const uint64_t * b)
{
#define REG_A0 "r8"
#define REG_B0 "r9"
#define ACC0 "r10"
    asm volatile(
        LOAD(REG_A0, MEM_A0)
        LOAD(REG_B0, MEM_B0)
        MUL(REG_A0, REG_B0)
        STORE("rax", 0)

        MOVE(ACC0, "rdx")
        MULADD1(REG_A0, MEM_B1, ACC0)
        MULADD1(MEM_A1, REG_B0, ACC0)
        STORE(ACC0, 8)
        :
        : [a]"r"(a),
          [b]"r"(b),
          [r]"r"(r)
        : "cc", "memory",
          "rax", "rdx",
          "r8", "r9", "r10");
#undef REG_A0
#undef REG_B0
#undef ACC0
}
#endif
///////////////////////////////////////////////////////////////////////////////
#if SIZE_RESULT == 3 || \
    SIZE_RESULT == 4
// 2x2-limb full multiply (compiled twice by the re-entrant include, with
// SIZE_RESULT == 3 and 4): column-wise schoolbook product into r[0..2]
// plus, when SIZE_RESULT == 4, the top carry limb into r[3].
template<>
void fixed_size_multiply<uint64_t,
                         2,
                         2,
                         SIZE_RESULT>(uint64_t * r,
                                      const uint64_t * a,
                                      const uint64_t * b)
{
#define REG_A0 "r8"
#define REG_A1 "r8" //shares r8 with REG_A0: a0 is dead before a1 is loaded
#define REG_B0 "r9"
#define REG_B1 "r12"
#define ACC0 "r10"
#define ACC1 "r11"
#define ACC2 "r13"
    asm volatile(
        LOAD(REG_A0, MEM_A0)
        LOAD(REG_B0, MEM_B0)
        LOAD(REG_B1, MEM_B1)
        MUL(REG_A0, REG_B0)
        STORE("rax", 0)

        //column 1: high(a0*b0) + a0*b1 + a1*b0, carries into ACC1/ACC2
        MOVE(ACC0, "rdx")
        SET_ZERO(ACC1)
        SET_ZERO(ACC2)
        MULADD2(REG_A0, REG_B1, ACC0, ACC1)
        LOAD(REG_A1, MEM_A1)
        MULADD3(REG_A1, REG_B0, ACC0, ACC1, ACC2)
        STORE(ACC0, 8)

        //column 2 (+ top carry limb in ACC2)
        MULADD2(REG_A1, REG_B1, ACC1, ACC2)
        STORE(ACC1, 16)
#if SIZE_RESULT == 4
        STORE(ACC2, 24)
#endif
        :
        : [a]"r"(a),
          [b]"r"(b),
          [r]"r"(r)
        : "cc", "memory",
          "rax", "rdx",
          "r8", "r9", "r10", "r11", "r12", "r13");
#undef REG_A0
#undef REG_A1
#undef REG_B0
#undef REG_B1
#undef ACC0
#undef ACC1
#undef ACC2
}
#endif
#endif
///////////////////////////////////////////////////////////////////////////////
#if SIZE_OPERANDS == 3
// 3x3-limb multiply (compiled for SIZE_RESULT == 3, 5 and 6 by the
// re-entrant include): SIZE_RESULT == 3 keeps only the low 3 limbs
// (MULADD1 drops the upper carries); otherwise the full product is
// produced, with the 6th limb stored only when SIZE_RESULT == 6.
template<>
void fixed_size_multiply<uint64_t,
                         3,
                         3,
                         SIZE_RESULT>(uint64_t * r,
                                      const uint64_t * a,
                                      const uint64_t * b)
{
#define REG_A0 "rcx"
#define REG_A1 "rbx"
#define REG_A2 "rcx" //shares rcx with REG_A0: a0 is dead before a2 loads
#define REG_B0 "rdi"
#define REG_B1 "rsi"
#define REG_B2 "r11"
#define ACC0 "r8"
#define ACC1 "r9"
#define ACC2 "r10"

    asm volatile(
        LOAD(REG_A0, MEM_A0)
        LOAD(REG_A1, MEM_A1)
        LOAD(REG_B0, MEM_B0)
        LOAD(REG_B1, MEM_B1)
        MUL(REG_A0, REG_B0)
        STORE("rax", 0)

        //column 1: high(a0*b0) + a0*b1 + a1*b0
        MOVE(ACC0, "rdx")
        SET_ZERO(ACC1)
        MULADD2(REG_A0, REG_B1, ACC0, ACC1)
#if SIZE_RESULT == 3
        MULADD2(REG_A1, REG_B0, ACC0, ACC1)
#else
        SET_ZERO(ACC2)
        MULADD3(REG_A1, REG_B0, ACC0, ACC1, ACC2)
#endif
        STORE(ACC0, 8)

#if SIZE_RESULT == 3
        //truncated: column 2 only needs the low words of the products
        MULADD1(REG_A0, MEM_B2, ACC1)
        MULADD1(REG_A1, REG_B1, ACC1)
        LOAD(REG_A2, MEM_A2)
        MULADD1(REG_A2, MEM_B0, ACC1)
        STORE(ACC1, 16)
#else
        //full product: columns 2..4 with three rotating accumulators
        SET_ZERO(ACC0)
        LOAD(REG_B2, MEM_B2)
        MULADD3(REG_A0, REG_B2, ACC1, ACC2, ACC0)
        MULADD3(REG_A1, REG_B1, ACC1, ACC2, ACC0)
        LOAD(REG_A2, MEM_A2)
        MULADD3(REG_A2, REG_B0, ACC1, ACC2, ACC0)
        STORE(ACC1, 16)

        SET_ZERO(ACC1)
        MULADD3(REG_A1, REG_B2, ACC2, ACC0, ACC1)
        MULADD3(REG_A2, REG_B1, ACC2, ACC0, ACC1)
        STORE(ACC2, 24)

        MULADD2(REG_A2, REG_B2, ACC0, ACC1)
        STORE(ACC0, 32)
#endif

#if SIZE_RESULT == 6
        STORE(ACC1, 40)
#endif
        :
        : [a]"r"(a),
          [b]"r"(b),
          [r]"r"(r)
        : "cc", "memory",
          "rax", "rdx",
          "rbx", "rcx", "rsi", "rdi",
          "r8", "r9", "r10", "r11");
#undef REG_A0
#undef REG_A1
#undef REG_A2
#undef REG_B0
#undef REG_B1
#undef REG_B2
#undef ACC0
#undef ACC1
#undef ACC2
}
#endif
///////////////////////////////////////////////////////////////////////////////
#if SIZE_OPERANDS == 4
#if SIZE_RESULT == 4
// 4x4-limb multiply truncated to the low 4 limbs: full carry tracking
// (MULADD2/MULADD3) only up to column 3; the last column uses MULADD1
// since its carries would fall past r[3] anyway.
template<>
void fixed_size_multiply<uint64_t, 4, 4, 4>(uint64_t * r,
                                            const uint64_t * a,
                                            const uint64_t * b)
{
//REG_A0/A1/A2 all share rcx: each a-limb is dead before the next loads
#define REG_A0 "rcx"
#define REG_A1 "rcx"
#define REG_A2 "rcx"
#define REG_B0 "rbx"
#define REG_B1 "rsi"
#define REG_B2 "rdi"
#define ACC0 "r8"
#define ACC1 "r9"
#define ACC2 "r10"
    asm volatile(
        LOAD(REG_A0, MEM_A0)
        LOAD(REG_B0, MEM_B0)
        MUL(REG_A0, REG_B0)
        STORE("rax", 0)

        //a0 row across all columns, then fold in a1*b0
        MOVE(ACC0, "rdx")
        LOAD(REG_B1, MEM_B1)
        SET_ZERO(ACC1)
        MULADD2(REG_A0, REG_B1, ACC0, ACC1)
        LOAD(REG_B2, MEM_B2)
        SET_ZERO(ACC2)
        MULADD2(REG_A0, REG_B2, ACC1, ACC2)
        MULADD1(REG_A0, MEM_B3, ACC2)
        LOAD(REG_A1, MEM_A1)
        MULADD3(REG_A1, REG_B0, ACC0, ACC1, ACC2)
        STORE(ACC0, 8)

        //a1 row (remaining columns) and a2*b0
        MULADD2(REG_A1, REG_B1, ACC1, ACC2)
        MULADD1(REG_A1, REG_B2, ACC2)
        LOAD(REG_A2, MEM_A2)
        MULADD2(REG_A2, REG_B0, ACC1, ACC2)
        STORE(ACC1, 16)

        //top column: carries past r[3] are discarded by design
        MULADD1(REG_A2, REG_B1, ACC2)
        MULADD1(MEM_A3, REG_B0, ACC2)
        STORE(ACC2, 24)
        :
        : [a]"r"(a),
          [b]"r"(b),
          [r]"r"(r)
        : "cc", "memory",
          "rax", "rdx",
          "rbx", "rcx", "rsi", "rdi",
          "r8", "r9", "r10");
#undef REG_A0
#undef REG_A1
#undef REG_A2
#undef REG_B0
#undef REG_B1
#undef REG_B2
#undef ACC0
#undef ACC1
#undef ACC2
}
#endif
///////////////////////////////////////////////////////////////////////////////
#if SIZE_RESULT == 7 || \
    SIZE_RESULT == 8
///
/// Full 4x4-limb (256x256-bit) schoolbook multiply producing a 7- or
/// 8-limb result.  SIZE_RESULT == 7 simply skips storing the top limb
/// — presumably the caller guarantees the product fits in 7 limbs;
/// TODO confirm at the call sites.
/// Column by column with three rotating accumulators (ACC0..ACC2);
/// the column being finished is stored while the other two carry into
/// the next columns.
///
template<>
void fixed_size_multiply<uint64_t,
                         4,
                         4,
                         SIZE_RESULT>(uint64_t * r,
                                      const uint64_t * a,
                                      const uint64_t * b)
{
#define REG_A0 "r8"
#define REG_A1 "r9"
#define REG_A2 "r10"
// REG_A3/REG_B3 alias the a0/b0 registers but are never referenced in
// this variant: the top limbs are read directly from memory (MEM_A3,
// MEM_B3) instead.  Kept for symmetry with the 8-limb kernels.
#define REG_A3 "r8"
#define REG_B0 "r11"
#define REG_B1 "r12"
#define REG_B2 "r13"
#define REG_B3 "r11"
#define ACC0 "rbx"
#define ACC1 "rcx"
#define ACC2 "rsi"

    asm volatile(
        // Cache the three low limbs of each operand in registers.
        LOAD(REG_A0, MEM_A0)
        LOAD(REG_A1, MEM_A1)
        LOAD(REG_A2, MEM_A2)
        LOAD(REG_B0, MEM_B0)
        LOAD(REG_B1, MEM_B1)
        LOAD(REG_B2, MEM_B2)

        // Column 0: a0*b0.
        SET_ZERO(ACC2)
        SET_ZERO(ACC0)
        SET_ZERO(ACC1)
        MULADD3(REG_A0, REG_B0, ACC2, ACC0, ACC1)
        STORE(ACC2, 0)

        // Column 1: a1*b0 + a0*b1.
        SET_ZERO(ACC2)
        MULADD3(REG_A1, REG_B0, ACC0, ACC1, ACC2)
        MULADD3(REG_A0, REG_B1, ACC0, ACC1, ACC2)
        STORE(ACC0, 8)

        // Column 2: a1*b1 + a0*b2 + a2*b0.
        SET_ZERO(ACC0)
        MULADD3(REG_A1, REG_B1, ACC1, ACC2, ACC0)
        MULADD3(REG_A0, REG_B2, ACC1, ACC2, ACC0)
        MULADD3(REG_A2, REG_B0, ACC1, ACC2, ACC0)
        STORE(ACC1, 16)

        // Column 3: a1*b2 + a2*b1 + a0*b3 + a3*b0 (a3/b3 from memory).
        SET_ZERO(ACC1)
        MULADD3(REG_A1, REG_B2, ACC2, ACC0, ACC1)
        MULADD3(REG_A2, REG_B1, ACC2, ACC0, ACC1)
        MULADD3(REG_A0, MEM_B3, ACC2, ACC0, ACC1)
        MULADD3(MEM_A3, REG_B0, ACC2, ACC0, ACC1)
        STORE(ACC2, 24)

        // Column 4: a2*b2 + a1*b3 + a3*b1.
        SET_ZERO(ACC2)
        MULADD3(REG_A2, REG_B2, ACC0, ACC1, ACC2)
        MULADD3(REG_A1, MEM_B3, ACC0, ACC1, ACC2)
        MULADD3(MEM_A3, REG_B1, ACC0, ACC1, ACC2)
        STORE(ACC0, 32)

        // Column 5: a2*b3 + a3*b2.
        SET_ZERO(ACC0)
        MULADD3(REG_A2, MEM_B3, ACC1, ACC2, ACC0)
        MULADD3(MEM_A3, REG_B2, ACC1, ACC2, ACC0)
        STORE(ACC1, 40)

        // Column 6: a3*b3; MULADD2 because no carry past limb 7 exists.
        MULADD2(MEM_A3, MEM_B3, ACC2, ACC0)
        STORE(ACC2, 48)
#if SIZE_RESULT == 8
        // Limb 7 is the final carry; omitted for the 7-limb variant.
        STORE(ACC0, 56)
#endif
        :
        : [a]"r"(a),
          [b]"r"(b),
          [r]"r"(r)
        : "cc", "memory",
          "rax", "rdx",
          "rbx", "rcx", "rsi",
          "r8", "r9", "r10",
          "r11", "r12", "r13");
#undef REG_A0
#undef REG_A1
#undef REG_A2
#undef REG_A3
#undef REG_B0
#undef REG_B1
#undef REG_B2
#undef REG_B3
#undef ACC0
#undef ACC1
#undef ACC2
}
#endif
#endif
///////////////////////////////////////////////////////////////////////////////
#if SIZE_OPERANDS == 8
#if SIZE_RESULT == 8
///
/// Truncated 8x8-limb multiply: r[0..7] = (a * b) mod 2^512.
/// Same column-oriented schoolbook scheme as the 4-limb kernels; the
/// extra terms needed beyond the 4x4 pattern are bracketed by the
/// "BEGIN/END 8 * 8 -> 8" comments.  Only a0..a2 / b0..b2 are cached
/// in registers; limbs 3..7 are always read from memory in this
/// variant (the REG_A3../REG_B3.. aliases below are never referenced).
///
template<>
void fixed_size_multiply<uint64_t, 8, 8, 8>(uint64_t * r,
                                            const uint64_t * a,
                                            const uint64_t * b)
{
#define REG_A0 "r8"
#define REG_A1 "r9"
#define REG_A2 "r10"
// Unused aliases in this variant — kept for symmetry with the
// widening 8x8->16 kernel, which recycles registers this way.
#define REG_A3 "r8"
#define REG_A4 "r9"
#define REG_A5 "r10"
#define REG_A6 "r8"
#define REG_A7 "r9"
#define REG_B0 "r11"
#define REG_B1 "r12"
#define REG_B2 "r13"
#define REG_B3 "r11"
#define REG_B4 "r12"
#define REG_B5 "r13"
#define REG_B6 "r11"
#define REG_B7 "r12"
#define ACC0 "rbx"
#define ACC1 "rcx"
#define ACC2 "rsi"

    asm volatile(
        LOAD(REG_A0, MEM_A0)
        LOAD(REG_A1, MEM_A1)
        LOAD(REG_A2, MEM_A2)
        LOAD(REG_B0, MEM_B0)
        LOAD(REG_B1, MEM_B1)
        LOAD(REG_B2, MEM_B2)

        // Column 0: a0*b0.
        SET_ZERO(ACC2)
        SET_ZERO(ACC0)
        SET_ZERO(ACC1)
        MULADD3(REG_A0, REG_B0, ACC2, ACC0, ACC1)
        STORE(ACC2, 0)

        // Column 1: a1*b0 + a0*b1.
        SET_ZERO(ACC2)
        MULADD3(REG_A1, REG_B0, ACC0, ACC1, ACC2)
        MULADD3(REG_A0, REG_B1, ACC0, ACC1, ACC2)
        STORE(ACC0, 8)

        // Column 2: a1*b1 + a0*b2 + a2*b0.
        SET_ZERO(ACC0)
        MULADD3(REG_A1, REG_B1, ACC1, ACC2, ACC0)
        MULADD3(REG_A0, REG_B2, ACC1, ACC2, ACC0)
        MULADD3(REG_A2, REG_B0, ACC1, ACC2, ACC0)
        STORE(ACC1, 16)

        // Column 3: a1*b2 + a2*b1 + a0*b3 + a3*b0.
        SET_ZERO(ACC1)
        MULADD3(REG_A1, REG_B2, ACC2, ACC0, ACC1)
        MULADD3(REG_A2, REG_B1, ACC2, ACC0, ACC1)
        MULADD3(REG_A0, MEM_B3, ACC2, ACC0, ACC1)
        MULADD3(MEM_A3, REG_B0, ACC2, ACC0, ACC1)
        STORE(ACC2, 24)

        // Column 4: a2*b2 + a1*b3 + a3*b1 + a0*b4 + a4*b0.
        SET_ZERO(ACC2)
        MULADD3(REG_A2, REG_B2, ACC0, ACC1, ACC2)
        MULADD3(REG_A1, MEM_B3, ACC0, ACC1, ACC2)
        MULADD3(MEM_A3, REG_B1, ACC0, ACC1, ACC2)
        //BEGIN 8 * 8 -> 8
        MULADD3(REG_A0, MEM_B4, ACC0, ACC1, ACC2)
        MULADD3(MEM_A4, REG_B0, ACC0, ACC1, ACC2)
        //END 8 * 8 -> 8
        STORE(ACC0, 32)

        // Column 5: a2*b3 + a3*b2 + a1*b4 + a4*b1 + a0*b5 + a5*b0.
        SET_ZERO(ACC0)
        MULADD3(REG_A2, MEM_B3, ACC1, ACC2, ACC0)
        MULADD3(MEM_A3, REG_B2, ACC1, ACC2, ACC0)
        //BEGIN 8 * 8 -> 8
        MULADD3(REG_A1, MEM_B4, ACC1, ACC2, ACC0)
        // NOTE(review): MEM_B1 / MEM_B0 below re-read limbs that still
        // live in r12 (REG_B1) and r11 (REG_B0) — the register forms
        // would likely avoid two memory loads.  Verify before changing.
        MULADD3(MEM_A4, MEM_B1, ACC1, ACC2, ACC0)
        MULADD3(REG_A0, MEM_B5, ACC1, ACC2, ACC0)
        MULADD3(MEM_A5, MEM_B0, ACC1, ACC2, ACC0)
        //END 8 * 8 -> 8
        STORE(ACC1, 40)

        // Column 6: a3*b3 + a2*b4 + a4*b2 + a1*b5 + a5*b1 + a0*b6 + a6*b0.
        // MULADD2: no carry past limb 7 can survive the truncation.
        MULADD2(MEM_A3, MEM_B3, ACC2, ACC0)
        //BEGIN 8 * 8 -> 8
        MULADD2(REG_A2, MEM_B4, ACC2, ACC0)
        MULADD2(MEM_A4, REG_B2, ACC2, ACC0)
        MULADD2(REG_A1, MEM_B5, ACC2, ACC0)
        MULADD2(MEM_A5, REG_B1, ACC2, ACC0)
        MULADD2(REG_A0, MEM_B6, ACC2, ACC0)
        MULADD2(MEM_A6, REG_B0, ACC2, ACC0)
        //END 8 * 8 -> 8
        STORE(ACC2, 48)

        // Column 7 (truncated top limb): all i+j == 7 products, low
        // halves only (MULADD1 folds into the single top accumulator).
        //BEGIN 8 * 8 -> 8
        MULADD1(MEM_A3, MEM_B4, ACC0)
        MULADD1(MEM_A4, MEM_B3, ACC0)
        MULADD1(REG_A2, MEM_B5, ACC0)
        MULADD1(MEM_A5, REG_B2, ACC0)
        MULADD1(REG_A1, MEM_B6, ACC0)
        MULADD1(MEM_A6, REG_B1, ACC0)
        MULADD1(REG_A0, MEM_B7, ACC0)
        MULADD1(MEM_A7, REG_B0, ACC0)
        //END 8 * 8 -> 8
        STORE(ACC0, 56)
        :
        : [a]"r"(a),
          [b]"r"(b),
          [r]"r"(r)
        : "cc", "memory",
          "rax", "rdx",
          "rbx", "rcx", "rsi",
          "r8", "r9", "r10",
          "r11", "r12", "r13");
#undef REG_A0
#undef REG_A1
#undef REG_A2
#undef REG_A3
#undef REG_A4
#undef REG_A5
#undef REG_A6
#undef REG_A7
#undef REG_B0
#undef REG_B1
#undef REG_B2
#undef REG_B3
#undef REG_B4
#undef REG_B5
#undef REG_B6
#undef REG_B7
#undef ACC0
#undef ACC1
#undef ACC2
}
#endif
///////////////////////////////////////////////////////////////////////////////
#if SIZE_RESULT == 15 || \
    SIZE_RESULT == 16
///
/// Full 8x8-limb (512x512-bit) schoolbook multiply producing a 15- or
/// 16-limb result.  SIZE_RESULT == 15 skips storing the top limb —
/// presumably the caller guarantees the product fits; TODO confirm.
/// Column by column with three rotating accumulators.  The six
/// registers cache a0..a2 / b0..b2 for the first half; once those
/// limbs are dead they are overwritten (via the REG_A3../REG_B3..
/// aliases, see the LOADs from limb 8/column 8 onward) with limbs
/// 3..7 for the second half.
///
template<>
void fixed_size_multiply<uint64_t,
                         8,
                         8,
                         SIZE_RESULT>(uint64_t * r,
                                      const uint64_t * a,
                                      const uint64_t * b)
{
// Each physical register serves several logical limbs over the course
// of the kernel; an alias is only LOADed after its predecessor's last
// use (e.g. r8 = a0 until column 7, then a3, then a6).
#define REG_A0 "r8"
#define REG_A1 "r9"
#define REG_A2 "r10"
#define REG_A3 "r8"
#define REG_A4 "r9"
#define REG_A5 "r10"
#define REG_A6 "r8"
#define REG_A7 "r9"
#define REG_B0 "r11"
#define REG_B1 "r12"
#define REG_B2 "r13"
#define REG_B3 "r11"
#define REG_B4 "r12"
#define REG_B5 "r13"
#define REG_B6 "r11"
#define REG_B7 "r12"
#define ACC0 "rbx"
#define ACC1 "rcx"
#define ACC2 "rsi"
    asm volatile(
        LOAD(REG_A0, MEM_A0)
        LOAD(REG_A1, MEM_A1)
        LOAD(REG_A2, MEM_A2)
        LOAD(REG_B0, MEM_B0)
        LOAD(REG_B1, MEM_B1)
        LOAD(REG_B2, MEM_B2)

        // Column 0: a0*b0.
        SET_ZERO(ACC2)
        SET_ZERO(ACC0)
        SET_ZERO(ACC1)
        MULADD3(REG_A0, REG_B0, ACC2, ACC0, ACC1)
        STORE(ACC2, 0)

        // Column 1: a1*b0 + a0*b1.
        SET_ZERO(ACC2)
        MULADD3(REG_A1, REG_B0, ACC0, ACC1, ACC2)
        MULADD3(REG_A0, REG_B1, ACC0, ACC1, ACC2)
        STORE(ACC0, 8)

        // Column 2: a1*b1 + a0*b2 + a2*b0.
        SET_ZERO(ACC0)
        MULADD3(REG_A1, REG_B1, ACC1, ACC2, ACC0)
        MULADD3(REG_A0, REG_B2, ACC1, ACC2, ACC0)
        MULADD3(REG_A2, REG_B0, ACC1, ACC2, ACC0)
        STORE(ACC1, 16)

        // Column 3: a1*b2 + a2*b1 + a0*b3 + a3*b0.
        SET_ZERO(ACC1)
        MULADD3(REG_A1, REG_B2, ACC2, ACC0, ACC1)
        MULADD3(REG_A2, REG_B1, ACC2, ACC0, ACC1)
        MULADD3(REG_A0, MEM_B3, ACC2, ACC0, ACC1)
        MULADD3(MEM_A3, REG_B0, ACC2, ACC0, ACC1)
        STORE(ACC2, 24)

        // Column 4: a2*b2 + a1*b3 + a3*b1 + a0*b4 + a4*b0.
        SET_ZERO(ACC2)
        MULADD3(REG_A2, REG_B2, ACC0, ACC1, ACC2)
        MULADD3(REG_A1, MEM_B3, ACC0, ACC1, ACC2)
        MULADD3(MEM_A3, REG_B1, ACC0, ACC1, ACC2)
        //BEGIN 8 * 8 -> 16
        MULADD3(REG_A0, MEM_B4, ACC0, ACC1, ACC2)
        MULADD3(MEM_A4, REG_B0, ACC0, ACC1, ACC2)
        //END 8 * 8 -> 16
        STORE(ACC0, 32)

        // Column 5: a2*b3 + a3*b2 + a1*b4 + a4*b1 + a0*b5 + a5*b0.
        SET_ZERO(ACC0)
        MULADD3(REG_A2, MEM_B3, ACC1, ACC2, ACC0)
        MULADD3(MEM_A3, REG_B2, ACC1, ACC2, ACC0)
        //BEGIN 8 * 8 -> 16
        MULADD3(REG_A1, MEM_B4, ACC1, ACC2, ACC0)
        MULADD3(MEM_A4, MEM_B1, ACC1, ACC2, ACC0)
        MULADD3(REG_A0, MEM_B5, ACC1, ACC2, ACC0)
        MULADD3(MEM_A5, MEM_B0, ACC1, ACC2, ACC0)
        //END 8 * 8 -> 16
        STORE(ACC1, 40)

        // Column 6: a3*b3 + a2*b4 + a4*b2 + a1*b5 + a5*b1 + a0*b6 + a6*b0.
        SET_ZERO(ACC1)
        MULADD3(MEM_A3, MEM_B3, ACC2, ACC0, ACC1)
        //BEGIN 8 * 8 -> 16
        MULADD3(REG_A2, MEM_B4, ACC2, ACC0, ACC1)
        MULADD3(MEM_A4, REG_B2, ACC2, ACC0, ACC1)
        MULADD3(REG_A1, MEM_B5, ACC2, ACC0, ACC1)
        MULADD3(MEM_A5, REG_B1, ACC2, ACC0, ACC1)
        MULADD3(REG_A0, MEM_B6, ACC2, ACC0, ACC1)
        MULADD3(MEM_A6, REG_B0, ACC2, ACC0, ACC1)
        //END 8 * 8 -> 16
        STORE(ACC2, 48)

        // Column 7: all eight i+j == 7 products.  Last column that
        // needs a0/b0: after this, r8/r11 are free for recycling.
        SET_ZERO(ACC2)
        //BEGIN 8 * 8 -> 16
        MULADD3(MEM_A3, MEM_B4, ACC0, ACC1, ACC2)
        MULADD3(MEM_A4, MEM_B3, ACC0, ACC1, ACC2)
        MULADD3(REG_A2, MEM_B5, ACC0, ACC1, ACC2)
        MULADD3(MEM_A5, REG_B2, ACC0, ACC1, ACC2)
        MULADD3(REG_A1, MEM_B6, ACC0, ACC1, ACC2)
        MULADD3(MEM_A6, REG_B1, ACC0, ACC1, ACC2)
        MULADD3(REG_A0, MEM_B7, ACC0, ACC1, ACC2)
        MULADD3(MEM_A7, REG_B0, ACC0, ACC1, ACC2)
        //END 8 * 8 -> 16
        STORE(ACC0, 56)

        // Column 8: a4*b4 + a3*b5 + a5*b3 + a2*b6 + a6*b2 + a1*b7 + a7*b1.
        // a3/b3 now replace the dead a0/b0 in r8/r11.
        SET_ZERO(ACC0)
        //BEGIN 8 * 8 -> 16
        LOAD(REG_A3, MEM_A3)
        LOAD(REG_B3, MEM_B3)
        MULADD3(MEM_A4, MEM_B4, ACC1, ACC2, ACC0)
        MULADD3(REG_A3, MEM_B5, ACC1, ACC2, ACC0)
        MULADD3(MEM_A5, REG_B3, ACC1, ACC2, ACC0)
        MULADD3(REG_A2, MEM_B6, ACC1, ACC2, ACC0)
        MULADD3(MEM_A6, REG_B2, ACC1, ACC2, ACC0)
        MULADD3(REG_A1, MEM_B7, ACC1, ACC2, ACC0)
        MULADD3(MEM_A7, REG_B1, ACC1, ACC2, ACC0)
        STORE(ACC1, 64)

        // Column 9: a4*b5 + a5*b4 + a3*b6 + a6*b3 + a2*b7 + a7*b2.
        SET_ZERO(ACC1)
        LOAD(REG_A4, MEM_A4)
        LOAD(REG_B4, MEM_B4)
        MULADD3(REG_A4, MEM_B5, ACC2, ACC0, ACC1)
        MULADD3(MEM_A5, REG_B4, ACC2, ACC0, ACC1)
        MULADD3(REG_A3, MEM_B6, ACC2, ACC0, ACC1)
        MULADD3(MEM_A6, REG_B3, ACC2, ACC0, ACC1)
        MULADD3(REG_A2, MEM_B7, ACC2, ACC0, ACC1)
        MULADD3(MEM_A7, REG_B2, ACC2, ACC0, ACC1)
        STORE(ACC2, 72)

        // Column 10: a5*b5 + a4*b6 + a6*b4 + a3*b7 + a7*b3.
        SET_ZERO(ACC2)
        LOAD(REG_A5, MEM_A5)
        LOAD(REG_B5, MEM_B5)
        MULADD3(REG_A5, REG_B5, ACC0, ACC1, ACC2)
        MULADD3(REG_A4, MEM_B6, ACC0, ACC1, ACC2)
        MULADD3(MEM_A6, REG_B4, ACC0, ACC1, ACC2)
        MULADD3(REG_A3, MEM_B7, ACC0, ACC1, ACC2)
        MULADD3(MEM_A7, REG_B3, ACC0, ACC1, ACC2)
        STORE(ACC0, 80)

        // Column 11: a5*b6 + a6*b5 + a4*b7 + a7*b4.
        SET_ZERO(ACC0)
        LOAD(REG_A6, MEM_A6)
        LOAD(REG_B6, MEM_B6)
        MULADD3(REG_A5, REG_B6, ACC1, ACC2, ACC0)
        MULADD3(REG_A6, REG_B5, ACC1, ACC2, ACC0)
        MULADD3(REG_A4, MEM_B7, ACC1, ACC2, ACC0)
        MULADD3(MEM_A7, REG_B4, ACC1, ACC2, ACC0)
        STORE(ACC1, 88)

        // Column 12: a6*b6 + a5*b7 + a7*b5.
        SET_ZERO(ACC1)
        LOAD(REG_A7, MEM_A7)
        LOAD(REG_B7, MEM_B7)
        MULADD3(REG_A6, REG_B6, ACC2, ACC0, ACC1)
        MULADD3(REG_A5, REG_B7, ACC2, ACC0, ACC1)
        MULADD3(REG_A7, REG_B5, ACC2, ACC0, ACC1)
        STORE(ACC2, 96)

        // Column 13: a6*b7 + a7*b6.
        SET_ZERO(ACC2)
        MULADD3(REG_A6, REG_B7, ACC0, ACC1, ACC2)
        MULADD3(REG_A7, REG_B6, ACC0, ACC1, ACC2)
        STORE(ACC0, 104)

        // Column 14: a7*b7; limb 15 is the final carry.
        MULADD3(REG_A7, REG_B7, ACC1, ACC2, ACC0)
        STORE(ACC1, 112)
#if SIZE_RESULT == 16
        STORE(ACC2, 120)
#endif
        //END 8 * 8 -> 16
        :
        : [a]"r"(a),
          [b]"r"(b),
          [r]"r"(r)
        : "cc", "memory",
          "rax", "rdx",
          "rbx", "rcx", "rsi",
          "r8", "r9", "r10",
          "r11", "r12", "r13");
#undef REG_A0
#undef REG_A1
#undef REG_A2
#undef REG_A3
#undef REG_A4
#undef REG_A5
#undef REG_A6
#undef REG_A7
#undef REG_B0
#undef REG_B1
#undef REG_B2
#undef REG_B3
#undef REG_B4
#undef REG_B5
#undef REG_B6
#undef REG_B7
#undef ACC0
#undef ACC1
#undef ACC2
}
#endif
#endif
///////////////////////////////////////////////////////////////////////////////
#if 0
///
/// Truncated 16x16-limb multiply built from the 8-limb kernels:
/// r[0..15] = (a * b) mod 2^1024, with N = 8 limbs per half.
/// Low half product lands directly in r; each cross product is
/// computed truncated to N limbs and folded into the upper half
/// (carries beyond limb 15 are irrelevant and discarded).
///
template<>
void fixed_size_multiply<uint64_t, 16, 16, 16>(uint64_t * r,
                                             const uint64_t * a,
                                             const uint64_t * b)
{
    const unsigned int N = 8;

    // r[0 .. 2N) = a_lo * b_lo (full 2N-limb product).
    fixed_size_multiply<uint64_t, N, N, N * 2>(r, a, b);

    uint64_t cross[N];

    // Fold in low N limbs of a_lo * b_hi at offset N.
    fixed_size_multiply<uint64_t, N, N, N>(cross, a, b + N);
    uint64_t c = 0;
    addc_blocks<uint64_t>(r + N, r + N, cross, c, N);

    // Fold in low N limbs of a_hi * b_lo at offset N.
    fixed_size_multiply<uint64_t, N, N, N>(cross, a + N, b);
    c = 0;
    addc_blocks<uint64_t>(r + N, r + N, cross, c, N);
}

///////////////////////////////////////////////////////////////////////////////

///
/// Full 16x16-limb multiply with a 31-limb result, composed from the
/// 8-limb kernels (N = 8):
///   r = a_lo*b_lo + (a_lo*b_hi + a_hi*b_lo)*2^(64N) + a_hi*b_hi*2^(128N)
/// The a_hi*b_hi partial is produced as 2N-1 limbs, i.e. the caller
/// presumably guarantees the product fits in 31 limbs — TODO confirm.
/// NOTE(review): this definition is compiled out by the surrounding
/// `#if 0`.
///
template<>
void fixed_size_multiply<uint64_t, 16, 16, 31>(uint64_t * r,
                                               const uint64_t * a,
                                               const uint64_t * b)
{
    const unsigned int N = 8;
    // Low and high partials land directly in the result:
    // r[0..15] = a_lo*b_lo, r[16..30] = a_hi*b_hi (top limb omitted).
    fixed_size_multiply<uint64_t, N, N, N * 2>    (r + 0,     a + 0, b + 0);
    fixed_size_multiply<uint64_t, N, N, N * 2 - 1>(r + N * 2, a + N, b + N);

    uint64_t tmp[N * 2];

    // Cross term a_lo*b_hi, added at limb offset N; a carry out of
    // limb 23 is propagated into the top limbs.
    // NOTE(review): inc_blocks over N limbs starting at r + 24 spans
    // limbs 24..31, but a 31-limb result ends at limb 30 — verify
    // inc_blocks cannot write the out-of-range limb (e.g. that it
    // stops as soon as the carry is absorbed).
    fixed_size_multiply<uint64_t, N, N, N * 2>(tmp, a + 0, b + N);
    uint64_t carry = 0;
    addc_blocks<uint64_t>(r + N, r + N, tmp, carry, N * 2);
    if (carry)
    inc_blocks<uint64_t>(r + N * 3, carry, N);
    
    // Cross term a_hi*b_lo, same placement and carry handling.
    fixed_size_multiply<uint64_t, N, N, N * 2>(tmp, a + N, b + 0);
    carry = 0;
    addc_blocks<uint64_t>(r + N, r + N, tmp, carry, N * 2);
    if (carry) inc_blocks<uint64_t>(r + N * 3, carry, N);
}

///////////////////////////////////////////////////////////////////////////////

///
/// Full 16x16-limb multiply, 32-limb result (N = 8).  The disabled
/// `#if 0` branch below is the plain 4-partial-product composition;
/// the active `#else` branch is a Karatsuba-style variant using the
/// identity  a_lo*b_hi + a_hi*b_lo = c0 + c2 + sign * |a_lo - a_hi| * |b_hi - b_lo|
/// where c0 = a_lo*b_lo and c2 = a_hi*b_hi.
/// NOTE(review): this whole definition is itself compiled out by an
/// enclosing `#if 0`, and the active branch looks unfinished — see
/// the inline notes.
///
template<>
void fixed_size_multiply<uint64_t, 16, 16, 32>(uint64_t * r,
                                               const uint64_t * a,
                                               const uint64_t * b)
{
    const unsigned int N = 8;

#if 0
    // Straightforward composition: two in-place partials plus two
    // cross products added at offset N with carry propagation.
    fixed_size_multiply<uint64_t, N, N, N * 2>(r + 0,     a + 0, b + 0);
    fixed_size_multiply<uint64_t, N, N, N * 2>(r + N * 2, a + N, b + N);

    uint64_t tmp[N * 2];

    fixed_size_multiply<uint64_t, N, N, N * 2>(tmp, a + 0, b + N);
    uint64_t carry = 0;
    addc_blocks<uint64_t>(r + N, r + N, tmp, carry, N * 2);
    if (carry)
    inc_blocks<uint64_t>(r + N * 3, carry, N);
    
    fixed_size_multiply<uint64_t, N, N, N * 2>(tmp, a + N, b + 0);
    carry = 0;
    addc_blocks<uint64_t>(r + N, r + N, tmp, carry, N * 2);
    if (carry) inc_blocks<uint64_t>(r + N * 3, carry, N);

#else
    // c0 = a_lo*b_lo in r[0..2N), c2 = a_hi*b_hi in r[2N..4N).
    fixed_size_multiply<uint64_t, N, N, N * 2>(r + 0,     a + 0, b + 0);
    fixed_size_multiply<uint64_t, N, N, N * 2>(r + N * 2, a + N, b + N);

    // Middle-term contribution c0 + c2, added at offset N.
    // NOTE(review): only 8 of the 2N = 16 limbs of sum_c0_c2 are
    // added here, and neither `carry` nor `sum_c0_c2_carry` is
    // propagated into r[N*3..] — this looks incomplete (expected
    // count N * 2 plus carry propagation).  Confirm before ever
    // enabling this code path.
    uint64_t sum_c0_c2_carry = 0;
    uint64_t sum_c0_c2[N * 2];
    addc_blocks<uint64_t>(sum_c0_c2, r + 0, r + N * 2, sum_c0_c2_carry, N * 2);
    uint64_t carry = 0;
    addc_blocks<uint64_t>(r + N, r + N, sum_c0_c2, carry, 8);

    // |a_lo - a_hi| and |b_hi - b_lo|, each with its sign tracked.
    // The seed carry of 1 is presumably the "no borrow" convention
    // for subc_blocks (two's-complement subtraction) — TODO confirm
    // against its definition.
    uint64_t diff_a[N * 2];
    uint64_t * diff_b = diff_a + N;

    uint64_t carry_a = 1;
    subc_blocks<uint64_t>(diff_a, a + 0, a + N, carry_a, N);
    bool sign_a = carry_a ^ 0x1;
    if (sign_a)
    {
        // Negative difference: negate to get the absolute value.
        carry_a = 1;
        neg_blocks<uint64_t>(diff_a, diff_a, carry_a, N);
    }

    uint64_t carry_b = 1;
    subc_blocks<uint64_t>(diff_b, b + N, b + 0, carry_b, N);
    bool sign_b = carry_b ^ 0x1;
    if (sign_b)
    {
        carry_b = 1;
        neg_blocks<uint64_t>(diff_b, diff_b, carry_b, N);
    }

    // Sign of the correction term |a_lo - a_hi| * |b_hi - b_lo|.
    bool sign = sign_a ^ sign_b;

    // Reuse sum_c0_c2's storage for the 2N-limb absolute product.
    uint64_t * diff_ab = sum_c0_c2;
    fixed_size_multiply<uint64_t, N, N, N * 2>(diff_ab, diff_a, diff_b);

    // Add or subtract the correction at offset N and propagate into
    // the top N limbs.
    // NOTE(review): the meaning of `carry` after subc_blocks (borrow
    // vs. no-borrow) and therefore the direction of the inc_blocks
    // adjustment cannot be confirmed from this file — verify against
    // the subc_blocks/inc_blocks definitions.
    carry = sign ? 1 : 0;
    if (!sign)
        addc_blocks(r + N, r + N, diff_ab, carry, N * 2);
    else
        subc_blocks(r + N, r + N, diff_ab, carry, N * 2);

    if (carry)
        inc_blocks<uint64_t>(r + N * 3, carry, N);
#endif
}
#endif
///////////////////////////////////////////////////////////////////////////////
#endif
