#include "rs.hpp"

//
//  rsfec_impl.cpp
//  rsfec_cpp
//
//  Created by Changqing Yan on 2018/11/27.
//  Copyright © 2018 Changqing Yan. All rights reserved.
//
/* fec.c -- forward error correction based on Vandermonde matrices
 * 980624
 * (C) 1997-98 Luigi Rizzo (luigi@iet.unipi.it)
 *
 * Portions derived from code by Phil Karn (karn@ka9q.ampr.org),
 * Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari
 * Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 */
#include <cmath>
#include <cstddef>
#include <memory>
#include <string>
#include <vector>

#ifdef _WIN32
    #include <intrin.h>
    #include <climits>
#endif

#if defined(WEBRTC_HAS_NEON)
    #include <arm_neon.h>
#endif
#if defined(WEBRTC_ARCH_X86_FAMILY)
    #include <immintrin.h>
#endif

#if defined(WEBRTC_ARCH_X86_FAMILY) && defined(_MSC_VER)
    #include <intrin.h>
#endif

// No CPU feature is available => straight C path.
// Fallback probe used when no CPU-specific detection is compiled in:
// reports every feature as unavailable so the plain C path is selected.
int GetCPUInfoNoASM(X86CpuFeature feature)
{
    static_cast<void>(feature); // intentionally unused
    return 0;
}

#if defined(WEBRTC_ARCH_X86_FAMILY)
    #ifndef _MSC_VER
        // Intrinsic for "cpuid". MSVC provides __cpuidex natively; these
        // GCC/Clang replacements mirror its signature.
        #if defined(__pic__) && defined(__i386__)
// 32-bit position-independent code reserves EBX for the GOT pointer, so
// cpuid's EBX result is staged through EDI and EBX is restored afterwards.
static inline void __cpuidex(int cpu_info[4], int info_type, int info_ext)
{
    __asm__ volatile(
        "mov %%ebx, %%edi\n"
        "cpuid\n"
        "xchg %%edi, %%ebx\n"
        : "=a"(cpu_info[0]),
          "=D"(cpu_info[1]),
          "=c"(cpu_info[2]),
          "=d"(cpu_info[3])
        : "a"(info_type), "c"(info_ext));
}
        #else
// Plain cpuid: EAX selects the leaf (info_type), ECX the subleaf
// (info_ext); results land in EAX/EBX/ECX/EDX in that order.
static inline void __cpuidex(int cpu_info[4], int info_type, int info_ext)
{
    __asm__ volatile("cpuid\n"
                     : "=a"(cpu_info[0]),
                       "=b"(cpu_info[1]),
                       "=c"(cpu_info[2]),
                       "=d"(cpu_info[3])
                     : "a"(info_type), "c"(info_ext));
}
        #endif
    #endif // _MSC_VER
#endif     // WEBRTC_ARCH_X86_FAMILY

#if defined(WEBRTC_ARCH_X86_FAMILY)
// Actual feature detection for x86.
static int GetCPUInfo(X86CpuFeature feature)
{
    int cpu_info[4];
    __cpuidex(cpu_info, 7, 0);
    if (feature == X86CpuFeature::kAVX2)
    {
        return 0 != (cpu_info[1] & (0x01 << 5));
    }

    __cpuidex(cpu_info, 1, 0);
    if (feature == X86CpuFeature::kSSE2)
    {
        return 0 != (cpu_info[3] & 0x04000000);
    }
    if (feature == X86CpuFeature::kSSE3)
    {
        return 0 != (cpu_info[2] & 0x00000001);
    }
    if (feature == X86CpuFeature::kSSSE3)
    {
        return 0 != (cpu_info[2] & (0x01 << 9));
    }
    if (feature == X86CpuFeature::kSSE4_1)
    {
        return 0 != (cpu_info[2] & (0x01 << 19));
    }
    if (feature == X86CpuFeature::kSSE4_2)
    {
        return 0 != (cpu_info[2] & (0x01 << 20));
    }
    if (feature == X86CpuFeature::kAVX)
    {
        return 0 != (cpu_info[2] & (0x01 << 28));
    }

    return 0;
}
#else
// Default to straight C for other platforms.
static int GetCPUInfo(CPUFeature feature)
{
    (void)feature;
    return 0;
}
#endif

// Signature shared by both feature probes: WebRtc_GetCPUInfo points at the
// real cpuid-backed detection, WebRtc_GetCPUInfoNoASM at the no-op stub.
using CpuInfo = int (*)(X86CpuFeature feature);

CpuInfo WebRtc_GetCPUInfo      = GetCPUInfo;
CpuInfo WebRtc_GetCPUInfoNoASM = GetCPUInfoNoASM;

// Non-owning pointer to the live singleton table; set and cleared by
// get_this()/~RsFecTable() (ownership lives in callers' shared_ptrs).
RsFecTable* RsFecTable::impl_ = nullptr;

std::mutex& RsFecTable::mutex()
{
    // Intentionally leaked: a plain function-local std::mutex could be
    // destroyed before the last user during static destruction at process
    // exit, causing a crash; a heap instance outlives everything.
    static std::mutex* const instance = new std::mutex;
    return *instance;
}

// Returns the process-wide table, creating it on first use. impl_ is a
// non-owning pointer to the instance owned by the callers' shared_ptrs;
// ~RsFecTable() resets it when the last reference dies.
std::shared_ptr<RsFecTable> RsFecTable::get_this()
{
    std::lock_guard<std::mutex> lock(mutex());
    if (impl_ == nullptr)
    {
        auto result = std::make_shared<RsFecTable>();
        impl_       = result.get();
        return result;
    }
    // NOTE(review): this assumes the destructor (which clears impl_) only
    // runs while this mutex is held by the releasing thread (as in
    // ~RsfecImpl) — confirm all shared_ptr owners release under the lock.
    return impl_->shared_from_this();
}

// Precomputed GF(2^8) exp/log tables for this code's generator polynomial.
// The exponent sequence in gf_exp is stored twice in a row (note the wrap
// back to 1, 2, 4, 8, ... midway through) so that exp lookups of summed
// logs need no extra modular reduction. gf_log is its inverse mapping;
// gf_log[0] is a 255 sentinel since the log of zero is undefined.
RsFecTable::RsFecTable()
    : gf_exp(
          {1,   2,   4,   8,   16,  32,  64,  128, 29,  58,  116, 232, 205, 135,
           19,  38,  76,  152, 45,  90,  180, 117, 234, 201, 143, 3,   6,   12,
           24,  48,  96,  192, 157, 39,  78,  156, 37,  74,  148, 53,  106, 212,
           181, 119, 238, 193, 159, 35,  70,  140, 5,   10,  20,  40,  80,  160,
           93,  186, 105, 210, 185, 111, 222, 161, 95,  190, 97,  194, 153, 47,
           94,  188, 101, 202, 137, 15,  30,  60,  120, 240, 253, 231, 211, 187,
           107, 214, 177, 127, 254, 225, 223, 163, 91,  182, 113, 226, 217, 175,
           67,  134, 17,  34,  68,  136, 13,  26,  52,  104, 208, 189, 103, 206,
           129, 31,  62,  124, 248, 237, 199, 147, 59,  118, 236, 197, 151, 51,
           102, 204, 133, 23,  46,  92,  184, 109, 218, 169, 79,  158, 33,  66,
           132, 21,  42,  84,  168, 77,  154, 41,  82,  164, 85,  170, 73,  146,
           57,  114, 228, 213, 183, 115, 230, 209, 191, 99,  198, 145, 63,  126,
           252, 229, 215, 179, 123, 246, 241, 255, 227, 219, 171, 75,  150, 49,
           98,  196, 149, 55,  110, 220, 165, 87,  174, 65,  130, 25,  50,  100,
           200, 141, 7,   14,  28,  56,  112, 224, 221, 167, 83,  166, 81,  162,
           89,  178, 121, 242, 249, 239, 195, 155, 43,  86,  172, 69,  138, 9,
           18,  36,  72,  144, 61,  122, 244, 245, 247, 243, 251, 235, 203, 139,
           11,  22,  44,  88,  176, 125, 250, 233, 207, 131, 27,  54,  108, 216,
           173, 71,  142, 1,   2,   4,   8,   16,  32,  64,  128, 29,  58,  116,
           232, 205, 135, 19,  38,  76,  152, 45,  90,  180, 117, 234, 201, 143,
           3,   6,   12,  24,  48,  96,  192, 157, 39,  78,  156, 37,  74,  148,
           53,  106, 212, 181, 119, 238, 193, 159, 35,  70,  140, 5,   10,  20,
           40,  80,  160, 93,  186, 105, 210, 185, 111, 222, 161, 95,  190, 97,
           194, 153, 47,  94,  188, 101, 202, 137, 15,  30,  60,  120, 240, 253,
           231, 211, 187, 107, 214, 177, 127, 254, 225, 223, 163, 91,  182, 113,
           226, 217, 175, 67,  134, 17,  34,  68,  136, 13,  26,  52,  104, 208,
           189, 103, 206, 129, 31,  62,  124, 248, 237, 199, 147, 59,  118, 236,
           197, 151, 51,  102, 204, 133, 23,  46,  92,  184, 109, 218, 169, 79,
           158, 33,  66,  132, 21,  42,  84,  168, 77,  154, 41,  82,  164, 85,
           170, 73,  146, 57,  114, 228, 213, 183, 115, 230, 209, 191, 99,  198,
           145, 63,  126, 252, 229, 215, 179, 123, 246, 241, 255, 227, 219, 171,
           75,  150, 49,  98,  196, 149, 55,  110, 220, 165, 87,  174, 65,  130,
           25,  50,  100, 200, 141, 7,   14,  28,  56,  112, 224, 221, 167, 83,
           166, 81,  162, 89,  178, 121, 242, 249, 239, 195, 155, 43,  86,  172,
           69,  138, 9,   18,  36,  72,  144, 61,  122, 244, 245, 247, 243, 251,
           235, 203, 139, 11,  22,  44,  88,  176, 125, 250, 233, 207, 131, 27,
           54,  108, 216, 173, 71,  142})
    // Discrete-log table: gf_exp[gf_log[x]] == x for x in [1, 255].
    , gf_log({255, 0,   1,   25,  2,   50,  26,  198, 3,   223, 51,  238, 27,
              104, 199, 75,  4,   100, 224, 14,  52,  141, 239, 129, 28,  193,
              105, 248, 200, 8,   76,  113, 5,   138, 101, 47,  225, 36,  15,
              33,  53,  147, 142, 218, 240, 18,  130, 69,  29,  181, 194, 125,
              106, 39,  249, 185, 201, 154, 9,   120, 77,  228, 114, 166, 6,
              191, 139, 98,  102, 221, 48,  253, 226, 152, 37,  179, 16,  145,
              34,  136, 54,  208, 148, 206, 143, 150, 219, 189, 241, 210, 19,
              92,  131, 56,  70,  64,  30,  66,  182, 163, 195, 72,  126, 110,
              107, 58,  40,  84,  250, 133, 186, 61,  202, 94,  155, 159, 10,
              21,  121, 43,  78,  212, 229, 172, 115, 243, 167, 87,  7,   112,
              192, 247, 140, 128, 99,  13,  103, 74,  222, 237, 49,  197, 254,
              24,  227, 165, 153, 119, 38,  184, 180, 124, 17,  68,  146, 217,
              35,  32,  137, 46,  55,  63,  209, 91,  149, 188, 207, 205, 144,
              135, 151, 178, 220, 252, 190, 97,  242, 86,  211, 171, 20,  42,
              93,  158, 132, 60,  57,  83,  71,  109, 65,  162, 31,  45,  67,
              216, 183, 123, 164, 118, 196, 23,  73,  236, 127, 12,  111, 246,
              108, 161, 59,  82,  41,  157, 85,  170, 251, 96,  134, 177, 187,
              204, 62,  90,  203, 89,  95,  176, 156, 169, 160, 81,  11,  245,
              22,  235, 122, 117, 44,  215, 79,  174, 213, 233, 230, 231, 173,
              232, 116, 214, 244, 234, 168, 80,  88,  175})
{
    // Builds the derived multiplication/inverse tables from exp/log.
    init_rscoding();
}

RsFecTable::~RsFecTable()
{
    // Free the multiplication tables and unpublish the singleton pointer.
    uninit_rscoding();
    // NOTE(review): impl_ is cleared without taking mutex(); this is safe
    // only when the last shared_ptr is released while the lock is held (as
    // ~RsfecImpl does) — confirm for any other owners.
    impl_ = nullptr;
}

// Upper bound (bytes) on a single packet payload accepted by the codec.
constexpr int MAX_PACKET_SIZE = 1460;

RsfecImpl::RsfecImpl()
{
    // Share the process-wide GF tables, then pick the fastest addmul
    // kernel that matches the detected CPU features.
    rsfec_table_ = RsFecTable::get_this();
    init_addmul_func();
}

RsfecImpl::~RsfecImpl()
{
    rs_map_clear();
    // Drop our table reference while holding the singleton lock so that,
    // if this was the last owner, ~RsFecTable() runs under the lock.
    std::lock_guard<std::mutex> lock(RsFecTable::mutex());
    rsfec_table_ = nullptr;
}

// Allocates the (GF_SIZE+1) x (GF_SIZE+1) multiplication table and the
// nibble-split table used by the SIMD addmul kernels, then fills them.
// Each row is value-initialized (all zeros) exactly once; the previous
// code cleared both tables a second time with redundant loops (and the
// first one a third time via memset).
void RsFecTable::init_rscoding()
{
    gf_mul_table = new gf*[GF_SIZE + 1];
    for (int i = 0; i < GF_SIZE + 1; i++)
    {
        gf_mul_table[i] = new gf[GF_SIZE + 1](); // zero-initialized
    }

    gf_mul_zip_table = new gf*[GF_SIZE + 1];
    for (int i = 0; i < GF_SIZE + 1; i++)
    {
        gf_mul_zip_table[i] = new gf[GF_ENTRY_LEN](); // zero-initialized
    }

    init_cpu_flag();
    generate_gf();
    init_mul_table();
}

void RsFecTable::uninit_rscoding()
{
    for (int i = 0; i < GF_SIZE + 1; i++)
    {
        delete[] gf_mul_table[i];
    }
    delete[] gf_mul_table;
    gf_mul_table = nullptr;

    for (int i = 0; i < GF_SIZE + 1; i++)
    {
        delete[] gf_mul_zip_table[i];
    }
    delete[] gf_mul_zip_table;
    gf_mul_zip_table = nullptr;
}

// Records the best SIMD level available on this CPU. The SSSE3 probe is
// now skipped once AVX2 has been detected (previously both cpuid queries
// always ran), and the throwaway has_* temporaries are gone.
void RsFecTable::init_cpu_flag()
{
#if defined(WEBRTC_HAS_NEON)
    m_cpu_flag = kNEON;
    return;
#elif defined(WEBRTC_ARCH_X86_FAMILY)
    if (WebRtc_GetCPUInfo(X86CpuFeature::kAVX2))
    {
        m_cpu_flag = X86CpuFeature::kAVX2;
        return;
    }
    if (WebRtc_GetCPUInfo(X86CpuFeature::kSSSE3))
    {
        m_cpu_flag = X86CpuFeature::kSSSE3;
        return;
    }
#endif
    m_cpu_flag = X86CpuFeature::kNone;
}

// Selects the fastest addmul kernel that is both compiled in and supported
// by the running CPU. Each kernel guard matches the guard around its
// definition below. Fix: the previous #elif chain hid the SSSE3 case in
// AVX2 builds, so an AVX2-compiled binary on an SSSE3-only CPU fell all
// the way back to the scalar path even though addmul_ssse3 was compiled.
void RsfecImpl::init_addmul_func()
{
    addmul = &RsfecImpl::addmul_normal;
#if defined(WEBRTC_ARCH_X86_FAMILY)
    #if defined(__SSSE3__)
    if (rsfec_table_->m_cpu_flag == kSSSE3)
        addmul = &RsfecImpl::addmul_ssse3;
    #endif
    #if defined(__AVX2__)
    if (rsfec_table_->m_cpu_flag == kAVX2)
        addmul = &RsfecImpl::addmul_avx2;
    #endif
#elif defined(WEBRTC_HAS_NEON)
    if (rsfec_table_->m_cpu_flag == kNEON)
        addmul = &RsfecImpl::addmul_neon;
#endif
}

// Fills gf_mul_table[i][j] = i*j in GF(2^8) via exp(log i + log j), then
// the nibble-split gf_mul_zip_table used by the SIMD kernels.
void RsFecTable::init_mul_table()
{
    int i, j;

    // init gf_mul_table
    for (i = 0; i < GF_SIZE + 1; i++)
    {
        for (j = 0; j < GF_SIZE + 1; j++)
        {
            gf_mul_table[i][j] = gf_exp[modnn(gf_log[i] + gf_log[j])];
        }
    }

    // Row 0 and column 0 must be zero (gf_log[0] is a sentinel, so the
    // exp/log identity above does not hold for zero). Hoisted out of the
    // outer loop: the previous code re-zeroed them once per iteration,
    // i.e. 256 redundant passes.
    for (j = 0; j < GF_SIZE + 1; j++)
    {
        gf_mul_table[0][j] = gf_mul_table[j][0] = 0;
    }

    // init gf_mul_zip_table: per multiplier i, products of the low-nibble
    // values in entries [1, len) and of the high-nibble values (j * len)
    // in entries [len + 1, 2 * len).
    int len = GF_ENTRY_LEN / 2;

    for (i = 1; i < GF_SIZE + 1; i++)
    {
        for (j = 1; j < len; j++)
        {
            gf_mul_zip_table[i][j] = gf_exp[modnn(gf_log[i] + gf_log[j])];
            gf_mul_zip_table[i][j + len] =
                gf_exp[modnn(gf_log[i] + gf_log[j * len])];
        }
    }

    // Multiplying by or with zero always yields zero.
    for (j = 0; j < GF_SIZE + 1; j++)
    {
        gf_mul_zip_table[j][0] = 0;
    }

    for (j = 0; j < GF_ENTRY_LEN; j++)
    {
        gf_mul_zip_table[0][j] = 0;
    }
}

/*
 * modnn(x) computes x % GF_SIZE, where GF_SIZE is 2**GF_BITS - 1,
 * without a slow divide.
 */
gf RsFecTable::modnn(int x)
{
    // Folding trick for the Mersenne-like modulus 2^GF_BITS - 1: subtract
    // the modulus once, then add the bits above GF_BITS back into the low
    // bits; repeat until the value is in range. Callers pass sums of two
    // log values, so only a couple of iterations ever run.
    while (x >= GF_SIZE)
    {
        x -= GF_SIZE;
        x = (x >> GF_BITS) + (x & GF_SIZE);
    }

    return x;
}

/*
 * Produces one repair packet: fec = XOR-sum over i of
 * enc_matrix[index][i] * src[i] in GF(2^8).
 * Returns 0 on success, -1 on any invalid argument.
 */
long RsfecImpl::fec_encode_packet(const fec_parms*     parms,
                                  unsigned char* const src[],
                                  const int            size[],
                                  int                  index,
                                  unsigned char*       fec)
{
    if (!fec || !parms)
    {
        return -1;
    }

    int       i, kk, nn;
    const gf* p;

    kk = parms->k;
    nn = parms->n;
    if (kk == 0 || kk >= nn || nn > (GF_SIZE + 1) || parms->enc_matrix.empty())
    {
        return -1;
    }
    // enc_matrix holds n rows of k coefficients; an unchecked index
    // previously allowed an out-of-bounds read below.
    if (index < 0 || index >= nn)
    {
        return -1;
    }

    int max_size = size[0];

    for (i = 1; i < kk; i++)
    {
        max_size = (max_size > size[i]) ? max_size : size[i];
    }
    if (max_size <= 0 || max_size > MAX_PACKET_SIZE)
    {
        return -1;
    }

    p = &(parms->enc_matrix[index * kk]);

    memset(fec, 0, max_size * sizeof(gf));

    for (i = 0; i < kk; i++)
    {
        if (!src[i])
        {
            return -1;
        }
        // Accumulate p[i] * src[i] into fec (xor is addition in the field).
        (this->*addmul)(fec, src[i], p[i], size[i]);
    }

    return 0;
}

long RsfecImpl::fec_encode_length(const fec_parms*     parms,
                                  unsigned char* const src[],
                                  int                  size,
                                  int                  index,
                                  unsigned char*       fec_length)
{
    if (!fec_length || !parms)
    {
        return -1;
    }

    int       i, kk, nn;
    const gf* p;

    kk = parms->k;
    nn = parms->n;
    if (kk == 0 || kk >= nn || nn > (GF_SIZE + 1) || parms->enc_matrix.empty())
    {
        return -1;
    }

    p = &(parms->enc_matrix[index * kk]);

    memset(fec_length, 0, size * sizeof(gf));

    for (i = 0; i < kk; i++)
    {
        if (!src[i])
        {
            return -1;
        }
        (this->*addmul)(fec_length, src[i], p[i], size);
    }

    return 0;
}

/*
 * Reconstructs dest_num missing packets. src[] holds k received packets
 * (data or repair) whose original positions are given by srcIndex[];
 * dest[row] receives the reconstruction of packet destIndex[row].
 * Returns 0 on success, -1 on bad arguments or a singular matrix.
 */
long RsfecImpl::decode_fec_packet(const fec_parms* parms,
                                  unsigned char*   src[],
                                  int              size[],
                                  int              srcIndex[],
                                  unsigned char*   dest[],
                                  int              destIndex[],
                                  int              dest_num)
{
    if (!parms)
    {
        return -1;
    }

    int i, row, col, kk = parms->k, nn = parms->n;
    if (kk == 0 || kk >= nn || nn > (GF_SIZE + 1))
    {
        return -1;
    }

    // NOTE: ~64 KB of stack in the worst case; callers must allow for it.
    gf   decMatrix[GF_SIZE * GF_SIZE];
    long ret = build_decode_matrix(parms, decMatrix, srcIndex);

    if (ret != 0)
    {
        return -1; /* error */
    }

    int max_size = size[0];

    for (i = 1; i < kk; i++)
    {
        max_size = (max_size > size[i]) ? max_size : size[i];
    }
    if (max_size <= 0 || max_size > MAX_PACKET_SIZE)
    {
        return -1;
    }

    for (row = 0; row < dest_num; row++)
    {
        // destIndex selects a row of the k*k decode matrix; an unchecked
        // value previously allowed an out-of-bounds read below.
        if (!dest[row] || destIndex[row] < 0 || destIndex[row] >= kk)
        {
            return -1;
        }
        memset(dest[row], 0, max_size * sizeof(gf));
        for (col = 0; col < kk; col++)
        {
            if (!src[col])
            {
                return -1;
            }
            (this->*addmul)(dest[row],
                            src[col],
                            decMatrix[destIndex[row] * kk + col],
                            size[col]);
        }
    }

    return 0;
}

long RsfecImpl::decode_fec_length(const fec_parms* parms,
                                  unsigned char*   src[],
                                  int              size,
                                  int              srcIndex[],
                                  unsigned char*   dest[],
                                  int              destIndex[],
                                  int              dest_num)
{
    if (!parms)
    {
        return -1;
    }

    int row, col, kk = parms->k, nn = parms->n;
    if (kk == 0 || kk >= nn || nn > (GF_SIZE + 1))
    {
        return -1;
    }

    gf   decMatrix[GF_SIZE * GF_SIZE];
    long ret = build_decode_matrix(parms, decMatrix, srcIndex);

    if (ret != 0)
    {
        return 1; /* error */
    }

    for (row = 0; row < dest_num; row++)
    {
        if (!dest[row])
        {
            return -1;
        }
        memset(dest[row], 0, size * sizeof(gf));

        for (col = 0; col < kk; col++)
        {
            if (!src[col])
            {
                return -1;
            }
            (this->*addmul)(dest[row],
                            src[col],
                            decMatrix[destIndex[row] * kk + col],
                            size);
        }
    }

    return 0;
}

// Builds the systematic (n, k) encoding matrix: the top k rows form the
// identity (data packets pass through unchanged) and the bottom n-k rows
// generate the repair packets. Returns nullptr for invalid (k, n).
std::unique_ptr<fec_parms> RsfecImpl::create_rsparms(int k, int n)
{
    if (k <= 0 || n > GF_SIZE + 1 || k >= n)
    {
        return nullptr;
    }

    auto retval = std::make_unique<fec_parms>();

    retval->k = k;
    retval->n = n;
    retval->enc_matrix.resize(n * k);

    std::vector<gf> tmp_m(n * k);

    /*
     * fill the matrix with powers of field elements, starting from 0.
     * The first row is special, cannot be computed with exp. table.
     */
    tmp_m[0] = 1;

    for (int col = 1; col < k; col++)
    {
        tmp_m[col] = 0;
    }

    // Row r (1-based here via p) holds alpha^(r*col) — a Vandermonde
    // matrix in the field elements. (signed/unsigned mix: row < n - 1
    // compares size_t with int; safe because n >= 2 is guaranteed above.)
    for (size_t p = k, row = 0; row < n - 1; row++, p += k)
    {
        for (size_t col = 0; col < k; col++)
        {
            tmp_m[p + col] =
                rsfec_table_->gf_exp[rsfec_table_->modnn(row * col)];
        }
    }

    /*
     * quick code to build systematic matrix: invert the top
     * k*k vandermonde matrix, multiply right the bottom n-k rows
     * by the inverse, and construct the identity matrix at the top.
     */
    invert_vdm(&tmp_m, k); /* much faster than invert_mat */
    matmul(&(*tmp_m.begin()) + k * k,
           &(*tmp_m.begin()),
           &(*retval->enc_matrix.begin()) + k * k,
           n - k,
           k,
           k);
    /*
     * the upper matrix is I so do not bother with a slow multiply
     */
    memset(&(*retval->enc_matrix.begin()), 0, k * k * sizeof(gf));
    int col = 0;
    // Walk the diagonal of the top k*k block, setting it to 1.
    for (auto p = &(*retval->enc_matrix.begin()); col < k; col++, p += k + 1)
    {
        *p = 1;
    }
    return retval;
}

/*
 * fast code for inverting a vandermonde matrix.
 * XXX NOTE: It assumes that the matrix
 * is not singular and _IS_ a vandermonde matrix. Only uses
 * the second column of the matrix, containing the p_i's.
 *
 * Algorithm borrowed from "Numerical recipes in C" -- sec.2.8, but
 * largely revised for my purposes.
 * p = coefficients of the matrix (p_i)
 * q = values of the polynomial (known)
 */
// In-place inversion of the k*k Vandermonde matrix stored in *src.
// Only the second column (the p_i's) is read; see the block comment above
// for the algorithm's provenance. Always returns 0.
int RsfecImpl::invert_vdm(std::vector<gf>* src, int k)
{
    int i, j, row, col;
    gf  b[GF_SIZE], c[GF_SIZE], p[GF_SIZE];
    gf  t, xx;

    if (k == 1)
    { /* degenerate case, matrix must be p^0 = 1 */
        return 0;
    }

    // Extract column 1: p[i] = src[i][1], the distinct evaluation points.
    for (j = 1, i = 0; i < k; i++, j += k)
    {
        c[i] = 0;
        p[i] = (*src)[j]; /* p[i] */
    }

    /*
     * construct coeffs. recursively. We know c[k] = 1 (implicit)
     * and start P_0 = x - p_0, then at each stage multiply by
     * x - p_i generating P_i = x P_{i-1} - p_i P_{i-1}
     * After k steps we are done.
     */
    c[k - 1] = p[0]; /* really -p(0), but x = -x in GF(2^m) */

    for (i = 1; i < k; i++)
    {
        gf p_i = p[i]; /* see above comment */

        for (j = k - 1 - (i - 1); j < k - 1; j++)
        {
            c[j] ^= rsfec_table_->gf_mul_table[p_i][c[j + 1]];
        }

        c[k - 1] ^= p_i;
    }

    for (row = 0; row < k; row++)
    {
        /*
         * synthetic division etc.
         */
        xx       = p[row];
        t        = 1;
        b[k - 1] = 1; /* this is in fact c[k] */

        // Divide the master polynomial by (x - p[row]); b accumulates the
        // quotient coefficients, t the derivative value at p[row].
        for (i = k - 2; i >= 0; i--)
        {
            b[i] = c[i + 1] ^ rsfec_table_->gf_mul_table[xx][b[i + 1]];
            t    = rsfec_table_->gf_mul_table[xx][t] ^ b[i];
        }

        // Scale by 1/t and scatter into column `row` of the inverse.
        for (col = 0; col < k; col++)
        {
            (*src)[col * k + row] =
                rsfec_table_->gf_mul_table[rsfec_table_->inverse[t]][b[col]];
        }
    }

    return 0;
}

/*
 * computes C = AB where A is n*k, B is k*m, C is n*m
 */
/*
 * Matrix product C = A * B over GF(2^8): A is n*k, B is k*m, C is n*m.
 * Element products come from the precomputed table and are accumulated
 * with xor (addition in the field).
 */
void RsfecImpl::matmul(gf* a, gf* b, gf* c, int n, int k, int m)
{
    for (int row = 0; row < n; row++)
    {
        const gf* a_row = &a[row * k];
        gf*       c_row = &c[row * m];

        for (int col = 0; col < m; col++)
        {
            gf sum = 0;

            for (int i = 0; i < k; i++)
            {
                sum ^= rsfec_table_->gf_mul_table[a_row[i]][b[i * m + col]];
            }

            c_row[col] = sum;
        }
    }
}

/*
 * Fills the multiplicative-inverse table: inverse[x] = gf_exp[255 - log x]
 * for x >= 2. Zero has no inverse and maps to zero; one is self-inverse.
 */
void RsFecTable::generate_gf()
{
    inverse[0] = 0;
    inverse[1] = 1;

    int x = 2;
    while (x <= GF_SIZE)
    {
        inverse[x] = gf_exp[GF_SIZE - gf_log[x]];
        ++x;
    }
}

/*
 * build_decode_matrix constructs the encoding matrix given the
 * indexes. The matrix must be already allocated as
 * a vector of k*k elements, in row-major order
 */
/*
 * build_decode_matrix assembles the k*k matrix describing the received
 * packets (identity rows for data packets, encoding rows for repair
 * packets) and inverts it in place. `matrix` must hold k*k elements,
 * row-major. Returns 0 on success, -1 on a bad index or singular matrix.
 */
long RsfecImpl::build_decode_matrix(const fec_parms* code,
                                    gf*              matrix,
                                    int              index[])
{
    int i, k = code->k;
    gf* p;

    for (i = 0, p = matrix; i < k; i++, p += k)
    {
        // Fix: a negative index previously wrote out of bounds via
        // p[index[i]] below; reject it along with indexes >= n.
        if (index[i] < 0)
        {
            return -1;
        }
        if (index[i] < k)
        {
            // Packet i is an original data packet: identity row.
            memset(p, 0, k * sizeof(gf));
            p[index[i]] = 1;
        }
        else if (index[i] < code->n)
        {
            // Packet i is a repair packet: copy its encoding row.
            memcpy(p, &(code->enc_matrix[index[i] * k]), k * sizeof(gf));
        }
        else
        {
            return -1;
        }
    }

    // The decode matrix is the inverse of the assembled matrix.
    if (invert_mat(matrix, k))
    {
        return -1;
    }

    return 0;
}

/*
 * invert_mat() takes a matrix and produces its inverse
 * k is the size of the matrix.
 * (Gauss-Jordan, adapted from Numerical Recipes in C)
 * Return non-zero if singular.
 */
int RsfecImpl::invert_mat(gf* src, int k)
{
    gf  c, *p;
    int irow, icol, row, col, i, ix;

    int error = 1;

    if (0 == src || k > GF_SIZE)
    {
        return error;
    }

    // Fixed-size scratch on the stack; k <= GF_SIZE is guaranteed above.
    // (The commented-out heap allocations are the original fec.c version.)
    //   int *indxc = new int[k];
    //  int *indxr = new int[k];
    //   int *ipiv =  new int[k];
    int indxc[GF_SIZE];
    int indxr[GF_SIZE];
    int ipiv[GF_SIZE];

    // Scratch identity row used to skip the elimination when the pivot
    // row already equals a unit vector.
    gf id_row[GF_SIZE];

    memset(id_row, 0, k * sizeof(gf));

    /*
     * ipiv marks elements already used as pivots.
     */
    for (i = 0; i < k; i++)
    {
        ipiv[i] = 0;
    }

    for (col = 0; col < k; col++)
    {
        gf* pivot_row;
        /*
         * Zeroing column 'col', look for a non-zero element.
         * First try on the diagonal, if it fails, look elsewhere.
         */
        irow = icol = -1;

        if (ipiv[col] != 1 && src[col * k + col] != 0)
        {
            irow = col;
            icol = col;
            goto found_piv;
        }

        for (row = 0; row < k; row++)
        {
            if (ipiv[row] != 1)
            {
                for (ix = 0; ix < k; ix++)
                {
                    if (ipiv[ix] == 0)
                    {
                        if (src[row * k + ix] != 0)
                        {
                            irow = row;
                            icol = ix;
                            goto found_piv;
                        }
                    }
                    else if (ipiv[ix] > 1)
                    {
                        // A column was picked twice: internal inconsistency.
                        return error;
                    }
                }
            }
        }

        // No usable pivot found: the matrix is singular.
        if (icol == -1)
        {
            return error;
        }

    found_piv:
        ++(ipiv[icol]);

        /*
         * swap rows irow and icol, so afterwards the diagonal
         * element will be correct. Rarely done, not worth
         * optimizing.
         */
        if (irow != icol)
        {
            for (ix = 0; ix < k; ix++)
            {
                std::swap(src[irow * k + ix], src[icol * k + ix]);
            }
        }

        indxr[col] = irow;
        indxc[col] = icol;
        pivot_row  = &src[icol * k];
        c          = pivot_row[icol];

        // Zero pivot after the swap means singular.
        if (c == 0)
        {
            return error;
        }

        if (c != 1)
        {
            /* otherwhise this is a NOP */
            /*
             * this is done often , but optimizing is not so
             * fruitful, at least in the obvious ways (unrolling)
             */
            // Normalize the pivot row: multiply by 1/c.
            c               = rsfec_table_->inverse[c];
            pivot_row[icol] = 1;

            for (ix = 0; ix < k; ix++)
            {
                pivot_row[ix] = rsfec_table_->gf_mul_table[c][pivot_row[ix]];
            }
        }

        /*
         * from all rows, remove multiples of the selected row
         * to zero the relevant entry (in fact, the entry is not zero
         * because we know it must be zero).
         * (Here, if we know that the pivot_row is the identity,
         * we can optimize the addmul).
         */
        id_row[icol] = 1;

        if (memcmp(pivot_row, id_row, k * sizeof(gf)) != 0)
        {
            for (p = src, ix = 0; ix < k; ix++, p += k)
            {
                if (ix != icol)
                {
                    c       = p[icol];
                    p[icol] = 0;
                    (this->*addmul)(p, pivot_row, c, k);
                }
            }
        }

        id_row[icol] = 0;
    } /* done all columns */

    // Undo the column permutation recorded during pivoting, in reverse.
    for (col = k - 1; col >= 0; col--)
    {
        if (indxr[col] < 0 || indxr[col] >= k || indxc[col] < 0 ||
            indxc[col] >= k)
        {
            return error;
        }
        else
        {
            if (indxr[col] != indxc[col])
            {
                for (row = 0; row < k; row++)
                {
                    std::swap(src[row * k + indxr[col]],
                              src[row * k + indxc[col]]);
                }
            }
        }
    }

    error = 0;

    return error;
}

// Packs (k, n) into one 32-bit cache key: k in the high 16 bits, n low.
#define GET_RSMODEL_INDEX(k, n) (((k) << 16) | (0x0000ffff & (n)))

// Drops every cached fec_parms instance.
void RsfecImpl::rs_map_clear()
{
    rsparms_map_.clear();
}

// Returns the cached parameter set for (k, n), creating and caching it on
// first request. May return nullptr when (k, n) is invalid (create_rsparms
// rejected it); that null result is cached too.
const fec_parms* RsfecImpl::get_rs_params(int k, int n)
{
    unsigned int index = GET_RSMODEL_INDEX((unsigned int)k, (unsigned int)n);
    auto         rsIt  = rsparms_map_.find(index);

    if (rsIt == rsparms_map_.end())
    {
        // Single insertion instead of the previous assign-then-find pair,
        // which looked the key up twice.
        rsIt = rsparms_map_.emplace(index, create_rsparms(k, n)).first;
    }

    return rsIt->second.get();
}

// xor the table-multiplied source byte into dst; __gf_mulc_ must point at
// the gf_mul_table row for the current multiplier c. Also used by the
// SIMD kernels' scalar tail loops.
#define GF_ADDMULC_NORMAL(dst, x) dst ^= __gf_mulc_[x]
#define UNROLL                    16 /* 1, 4, 8, 16 */
// Scalar kernel: dst1[i] ^= c * src1[i] in GF(2^8), for sz bytes.
void RsfecImpl::addmul_normal(gf* dst1, const gf* src1, gf c, int sz)
{
    // Multiplying by zero contributes nothing.
    if (c == 0)
    {
        return;
    }

    /*register*/ gf*       __gf_mulc_;
    /*register*/ gf*       dst = dst1;
    /*register*/ const gf* src = src1;

    // Last position from which a full UNROLL-byte step still fits.
    gf* lim = &dst[sz - UNROLL + 1];

    __gf_mulc_ = rsfec_table_->gf_mul_table[c];

    // Main loop, unrolled UNROLL bytes at a time.
    for (; dst < lim; dst += UNROLL, src += UNROLL)
    {
        GF_ADDMULC_NORMAL(dst[0], src[0]);
        GF_ADDMULC_NORMAL(dst[1], src[1]);
        GF_ADDMULC_NORMAL(dst[2], src[2]);
        GF_ADDMULC_NORMAL(dst[3], src[3]);
        GF_ADDMULC_NORMAL(dst[4], src[4]);
        GF_ADDMULC_NORMAL(dst[5], src[5]);
        GF_ADDMULC_NORMAL(dst[6], src[6]);
        GF_ADDMULC_NORMAL(dst[7], src[7]);
        GF_ADDMULC_NORMAL(dst[8], src[8]);
        GF_ADDMULC_NORMAL(dst[9], src[9]);
        GF_ADDMULC_NORMAL(dst[10], src[10]);
        GF_ADDMULC_NORMAL(dst[11], src[11]);
        GF_ADDMULC_NORMAL(dst[12], src[12]);
        GF_ADDMULC_NORMAL(dst[13], src[13]);
        GF_ADDMULC_NORMAL(dst[14], src[14]);
        GF_ADDMULC_NORMAL(dst[15], src[15]);
    }

    // Advance lim to the true end of the buffer and finish byte-by-byte.
    lim += UNROLL - 1;

    for (; dst < lim; dst++, src++)
    {
        GF_ADDMULC_NORMAL(*dst, *src);
    }
}

// Computation speed-up with SSSE3 registers: with a rigorous derivation,
// the addmul operation in a finite field can be transformed into register-friendly operations.
#if defined(WEBRTC_ARCH_X86_FAMILY)
    #if defined(__SSSE3__)
// SSSE3 kernel: dst1[i] ^= c * src1[i] in GF(2^8). Each source byte is
// split into low/high nibbles; gf_mul_zip_table[c] holds c*nibble for the
// low half and c*(nibble*16) for the high half, so two pshufb lookups plus
// two xors multiply 16 bytes at once.
void RsfecImpl::addmul_ssse3(gf* dst1, const gf* src1, gf c, int sz)
{
    // Multiplying by zero contributes nothing.
    if (c == 0)
    {
        return;
    }

    int unroll1 = 32, unroll2 = 16;

    /*register*/ gf* dst  = dst1;
    const gf*        src  = src1;
    gf*              lim1 = &dst[sz - unroll1 + 1];
    gf*              lim2 = &dst[sz - unroll2 + 1];

    __m128i tb0, tb1, arr, vs0, vs1, vs2, vs3, res, res1;

    // tb0/tb1: 16-entry lookup tables for low/high nibble products.
    tb0 = _mm_loadu_si128((__m128i*)&(rsfec_table_->gf_mul_zip_table[c][0]));
    tb1 = _mm_loadu_si128(
        (__m128i*)&(rsfec_table_->gf_mul_zip_table[c][GF_ENTRY_LEN / 2]));
    arr = _mm_set1_epi8(0x0f);

    // 32-byte main loop: two 16-byte lanes per iteration.
    for (; dst < lim1; dst += unroll1, src += unroll1)
    {
        vs0 = _mm_loadu_si128((__m128i*)src);
        vs2 = _mm_loadu_si128((__m128i*)(src + GF_ENTRY_LEN / 2));

        // High nibbles shifted down into lookup position.
        vs1 = _mm_srli_epi64(vs0, 4);
        vs3 = _mm_srli_epi64(vs2, 4);

        vs0 = _mm_and_si128(vs0, arr);
        vs1 = _mm_and_si128(vs1, arr);
        vs2 = _mm_and_si128(vs2, arr);
        vs3 = _mm_and_si128(vs3, arr);

        // Table lookups: product of c with each nibble's contribution.
        vs0 = _mm_shuffle_epi8(tb0, vs0);
        vs1 = _mm_shuffle_epi8(tb1, vs1);
        vs2 = _mm_shuffle_epi8(tb0, vs2);
        vs3 = _mm_shuffle_epi8(tb1, vs3);

        res  = _mm_loadu_si128((__m128i*)dst);
        res1 = _mm_loadu_si128((__m128i*)(dst + GF_ENTRY_LEN / 2));

        res = _mm_xor_si128(res, vs0);
        res = _mm_xor_si128(res, vs1);

        res1 = _mm_xor_si128(res1, vs2);
        res1 = _mm_xor_si128(res1, vs3);

        _mm_storeu_si128((__m128i*)dst, res);
        _mm_storeu_si128((__m128i*)(dst + GF_ENTRY_LEN / 2), res1);
    }

    // 16-byte loop for the remainder of a full vector.
    for (; dst < lim2; dst += unroll2, src += unroll2)
    {
        vs0 = _mm_loadu_si128((__m128i*)src);
        vs1 = _mm_srli_epi64(vs0, 4);

        vs0 = _mm_and_si128(vs0, arr);
        vs1 = _mm_and_si128(vs1, arr);

        vs0 = _mm_shuffle_epi8(tb0, vs0);
        vs1 = _mm_shuffle_epi8(tb1, vs1);

        res = _mm_loadu_si128((__m128i*)dst);
        res = _mm_xor_si128(res, vs0);
        res = _mm_xor_si128(res, vs1);

        _mm_storeu_si128((__m128i*)dst, res);
    }

    // Scalar tail: fewer than 16 bytes remain.
    lim2 += unroll2 - 1;

    /*register*/ gf* __gf_mulc_;
    __gf_mulc_ = rsfec_table_->gf_mul_table[c];

    for (; dst < lim2; dst++, src++)
    {
        GF_ADDMULC_NORMAL(*dst, *src);
    }
}
    #endif

    // Computation speed-up with AVX2 registers: with a rigorous derivation,
    // the addmul operation in a finite field can be transformed into register-friendly operations.
    #if defined(__AVX2__)
// Computes dst1[i] ^= gf_mul(c, src1[i]) for i in [0, sz), i.e. dst += c*src
// in GF(2^8), using AVX2. The multiply-by-constant is done with two 16-entry
// nibble lookup tables (rsfec_table_->gf_mul_zip_table[c]: low-nibble table at
// offset 0, high-nibble table at offset GF_ENTRY_LEN/2) applied via
// _mm256_shuffle_epi8; the two partial products are XORed together.
void RsfecImpl::addmul_avx2(gf* dst1, const gf* src1, gf c, int sz)
{
    if (c == 0 || sz <= 0)
    {
        // c == 0 contributes nothing; sz <= 0 is rejected so the limit
        // arithmetic below stays well-defined.
        return;
    }

    const int unroll1 = 64;  // bytes per iteration of the 2x-unrolled loop
    const int unroll2 = 32;  // bytes per iteration of the single-vector loop

    /*register*/ gf* dst = dst1;
    const gf*        src = src1;
    gf* const        end = dst1 + sz;
    // "dst < limN" holds exactly while at least unrollN bytes remain. Clamp to
    // dst1 so we never form a pointer before the buffer when sz < unrollN
    // (the previous &dst[sz - unrollN + 1] was UB in that case).
    gf* const lim1 = (sz >= unroll1) ? end - unroll1 + 1 : dst1;
    gf* const lim2 = (sz >= unroll2) ? end - unroll2 + 1 : dst1;

    __m256i tb, tb0, tb1, arr, vs0, vs1, vs2, vs3, res, res1;
    tb  = _mm256_loadu_si256((__m256i*)&(rsfec_table_->gf_mul_zip_table[c][0]));
    tb0 = _mm256_permute2x128_si256(tb, tb, 0);   // low-nibble table in both lanes
    tb1 = _mm256_permute2x128_si256(tb, tb, 17);  // high-nibble table in both lanes
    arr = _mm256_set1_epi8(0x0f);                 // nibble mask

    // Main loop: two 32-byte vectors (64 bytes) per iteration.
    for (; dst < lim1; dst += unroll1, src += unroll1)
    {
        vs0 = _mm256_loadu_si256((__m256i*)src);
        vs2 = _mm256_loadu_si256((__m256i*)(src + GF_ENTRY_LEN));
        // 64-bit shift leaks bits across bytes, but the 0x0f mask below
        // discards them before the table lookup.
        vs1 = _mm256_srli_epi64(vs0, 4);
        vs3 = _mm256_srli_epi64(vs2, 4);

        vs0 = _mm256_and_si256(vs0, arr);
        vs1 = _mm256_and_si256(vs1, arr);
        vs2 = _mm256_and_si256(vs2, arr);
        vs3 = _mm256_and_si256(vs3, arr);

        // Per-nibble table lookup; XOR of the two halves is the GF product.
        vs0 = _mm256_shuffle_epi8(tb0, vs0);
        vs1 = _mm256_shuffle_epi8(tb1, vs1);
        vs2 = _mm256_shuffle_epi8(tb0, vs2);
        vs3 = _mm256_shuffle_epi8(tb1, vs3);

        res  = _mm256_loadu_si256((__m256i*)dst);
        res1 = _mm256_loadu_si256((__m256i*)(dst + GF_ENTRY_LEN));

        res  = _mm256_xor_si256(res, vs0);
        res  = _mm256_xor_si256(res, vs1);
        res1 = _mm256_xor_si256(res1, vs2);
        res1 = _mm256_xor_si256(res1, vs3);

        _mm256_storeu_si256((__m256i*)dst, res);
        _mm256_storeu_si256((__m256i*)(dst + GF_ENTRY_LEN), res1);
    }

    // Single-vector loop: 32 bytes per iteration.
    for (; dst < lim2; src += unroll2, dst += unroll2)
    {
        vs0 = _mm256_loadu_si256((__m256i*)src);
        vs1 = _mm256_srli_epi64(vs0, 4);

        vs0 = _mm256_and_si256(vs0, arr);
        vs1 = _mm256_and_si256(vs1, arr);

        vs0 = _mm256_shuffle_epi8(tb0, vs0);
        vs1 = _mm256_shuffle_epi8(tb1, vs1);

        res = _mm256_loadu_si256((__m256i*)dst);
        res = _mm256_xor_si256(res, vs0);
        res = _mm256_xor_si256(res, vs1);

        _mm256_storeu_si256((__m256i*)dst, res);
    }

    // Scalar tail: the remaining (sz % unroll2) bytes via the plain
    // multiplication table. 'end' equals the old 'lim2 + unroll2 - 1'.
    /*register*/ gf* __gf_mulc_;
    __gf_mulc_ = rsfec_table_->gf_mul_table[c];

    for (; dst < end; dst++, src++)
    {
        GF_ADDMULC_NORMAL(*dst, *src);
    }
}
    #endif

#elif defined(WEBRTC_HAS_NEON)
// Computes dst1[i] ^= gf_mul(c, src1[i]) for i in [0, sz), i.e. dst += c*src
// in GF(2^8), using NEON. The multiply-by-constant is done with two 16-entry
// nibble lookup tables (rsfec_table_->gf_mul_zip_table[c]: low-nibble table at
// offset 0, high-nibble table at offset GF_ENTRY_LEN/2) applied via vtbl2_u8;
// the two partial products are XORed together.
void RsfecImpl::addmul_neon(gf* dst1, const gf* src1, gf c, int sz)
{
    if (c == 0 || sz <= 0)
    {
        // c == 0 contributes nothing; sz <= 0 is rejected so the limit
        // arithmetic below stays well-defined.
        return;
    }

    const int unroll1 = 32;  // bytes per iteration of the 2x-unrolled loop
    const int unroll2 = 16;  // bytes per iteration of the single-vector loop

    /*register*/ gf* dst = dst1;
    const gf*        src = src1;
    gf* const        end = dst1 + sz;
    // "dst < limN" holds exactly while at least unrollN bytes remain. Clamp to
    // dst1 so we never form a pointer before the buffer when sz < unrollN
    // (the previous &dst[sz - unrollN + 1] was UB in that case).
    gf* const lim1 = (sz >= unroll1) ? end - unroll1 + 1 : dst1;
    gf* const lim2 = (sz >= unroll2) ? end - unroll2 + 1 : dst1;

    uint8x16_t  arr, vs0, vs1, vs2, vs3, res, res1;
    uint8x8x2_t tb0, tb1;

    // vtbl2_u8 takes its table as a pair of 8-byte halves.
    #define uint8x16_to_8x8x2(x) \
        ((uint8x8x2_t){{vget_low_u8(x), vget_high_u8(x)}})
    tb0 = uint8x16_to_8x8x2(vld1q_u8(&(rsfec_table_->gf_mul_zip_table[c][0])));
    tb1 = uint8x16_to_8x8x2(
        vld1q_u8(&(rsfec_table_->gf_mul_zip_table[c][GF_ENTRY_LEN / 2])));

    arr = vdupq_n_u8(0x0f);  // nibble mask

    // Main loop: two 16-byte vectors (32 bytes) per iteration.
    for (; dst < lim1; dst += unroll1, src += unroll1)
    {
        vs0 = vld1q_u8(src);
        vs2 = vld1q_u8((src + GF_ENTRY_LEN / 2));

        vs1 = vshrq_n_u8(vs0, 4);  // high nibbles
        vs3 = vshrq_n_u8(vs2, 4);

        vs0 = vandq_u8(vs0, arr);
        vs1 = vandq_u8(vs1, arr);
        vs2 = vandq_u8(vs2, arr);
        vs3 = vandq_u8(vs3, arr);

        // Per-nibble table lookup; XOR of the two halves is the GF product.
        vs0 = vcombine_u8(vtbl2_u8(tb0, vget_low_u8(vs0)),
                          vtbl2_u8(tb0, vget_high_u8(vs0)));
        vs1 = vcombine_u8(vtbl2_u8(tb1, vget_low_u8(vs1)),
                          vtbl2_u8(tb1, vget_high_u8(vs1)));

        vs2 = vcombine_u8(vtbl2_u8(tb0, vget_low_u8(vs2)),
                          vtbl2_u8(tb0, vget_high_u8(vs2)));
        vs3 = vcombine_u8(vtbl2_u8(tb1, vget_low_u8(vs3)),
                          vtbl2_u8(tb1, vget_high_u8(vs3)));

        res  = vld1q_u8(dst);
        res1 = vld1q_u8(dst + GF_ENTRY_LEN / 2);
        res  = veorq_u8(res, vs0);
        res  = veorq_u8(res, vs1);

        res1 = veorq_u8(res1, vs2);
        res1 = veorq_u8(res1, vs3);

        vst1q_u8(dst, res);
        vst1q_u8((dst + GF_ENTRY_LEN / 2), res1);
    }

    // Single-vector loop: 16 bytes per iteration.
    for (; dst < lim2; dst += unroll2, src += unroll2)
    {
        vs0 = vld1q_u8(src);
        vs1 = vshrq_n_u8(vs0, 4);

        vs0 = vandq_u8(vs0, arr);
        vs1 = vandq_u8(vs1, arr);

        vs0 = vcombine_u8(vtbl2_u8(tb0, vget_low_u8(vs0)),
                          vtbl2_u8(tb0, vget_high_u8(vs0)));
        vs1 = vcombine_u8(vtbl2_u8(tb1, vget_low_u8(vs1)),
                          vtbl2_u8(tb1, vget_high_u8(vs1)));

        res = vld1q_u8(dst);
        res = veorq_u8(res, vs0);
        res = veorq_u8(res, vs1);

        vst1q_u8(dst, res);
    }

    // Scalar tail: the remaining (sz % unroll2) bytes via the plain
    // multiplication table. 'end' equals the old 'lim2 + unroll2 - 1'.
    /*register*/ gf* __gf_mulc_;
    __gf_mulc_ = rsfec_table_->gf_mul_table[c];

    for (; dst < end; dst++, src++)
    {
        GF_ADDMULC_NORMAL(*dst, *src);
    }
}
#endif
