/*
 * gf_w16.c
 *
 * Routines for 16-bit Galois fields
 */

#include "gf_int.h"
#include <stdio.h>

#define GF_FIELD_WIDTH      16
#define GF_FIELD_SIZE       (1 << GF_FIELD_WIDTH)

#ifdef  INTEL_SSE4

struct gf_data {
    gf_val_16_t      alpha;
    uint32_t        prim_poly;
    gf_val_16_t      log_tbl[GF_FIELD_SIZE];
    gf_val_16_t      antilog_tbl[GF_FIELD_SIZE * 2];
    gf_val_16_t      inv_tbl[GF_FIELD_SIZE];
    void            *extra;
};

struct __attribute__ ((aligned (16))) gf_half_tables {
    gf_val_16_t  lo_mul[GF_FIELD_SIZE][1 << (GF_FIELD_WIDTH/2)];
    gf_val_16_t  hi_mul[GF_FIELD_SIZE][1 << (GF_FIELD_WIDTH/2)];
};

#endif

/*
 * Basic Galois field routines
 */

/* Multiply uses log-antilog tables.
   The antilog table is "duplicated" to avoid the extra test for overflow on addition.
 */
static
inline
gf_val_16_t
multiply_log (const galois_field_16_t *gf, gf_val_16_t a, gf_val_16_t b)
{
    unsigned    idx;

    /* Zero has no logarithm; anything times zero is zero. */
    if (a == 0 || b == 0) {
        return ((gf_val_16_t)0);
    }
    /* a*b = antilog(log a + log b).  The antilog table is doubled, so the
       sum never needs to be reduced mod (2^16 - 1) before indexing. */
    idx = (unsigned)gf->data->log_tbl[a] + (unsigned)gf->data->log_tbl[b];
    return (gf->data->antilog_tbl[idx]);
}

static
inline
gf_val_16_t
multiply_htbl (const galois_field_16_t *gf, gf_val_16_t a, gf_val_16_t b)
{
    /* Split a into its low and high bytes and combine the two partial
       products (addition in GF(2^16) is XOR) from the half tables for b. */
    const struct gf_half_tables *ht = (const struct gf_half_tables *)gf->data->extra;

    return ((gf_val_16_t)(ht->lo_mul[b][a & 0xff] ^
                          ht->hi_mul[b][a >> (GF_FIELD_WIDTH/2)]));
}

static
inline
gf_val_16_t
multiply_ftbl (const galois_field_16_t *gf, gf_val_16_t a, gf_val_16_t b)
{
    /* Look the product up in a flat 64K x 64K table: row b, column a. */
    const gf_val_16_t *row = (gf_val_16_t *)gf->data->extra + (uint32_t)b * GF_FIELD_SIZE;

    return (row[(uint32_t)a]);
}

static
inline
gf_val_16_t
divide (const galois_field_16_t *gf, gf_val_16_t a, gf_val_16_t b)
{
  int diff;

  /* 0/x is 0.  x/0 is mathematically undefined but historically returns 0
     here too; callers rely on never faulting. */
  if (a == 0 || b == 0) {
    return ((gf_val_16_t)0);
  }
  /* a/b = antilog(log a - log b), with the difference wrapped back into
     [0, 2^16 - 2]. */
  diff = (int)gf->data->log_tbl[a] - (int)gf->data->log_tbl[b];
  if (diff < 0) {
    diff += GF_FIELD_SIZE - 1;
  }
  return (gf->data->antilog_tbl[diff]);
}

/***************************************/

/* a raised to the b-th power: antilog((log a * b) mod (2^16 - 1)).
 * 0^b is 0 (including 0^0, kept for backward compatibility), a^0 is 1.
 *
 * The old code multiplied log_tbl[a] (up to 65534) by (b & 0xfffff) in
 * 32-bit arithmetic, which can wrap before the reduction, and the 20-bit
 * mask silently truncated large exponents.  Do the product in 64 bits.
 */
static
gf_val_16_t
gf_w16_pow (const galois_field_16_t *gf, gf_val_16_t a, uint32_t b)
{
  if (a == 0) {
    return (a);
  } else if (b == 0) {
    return ((gf_val_16_t)1);
  } else {
    uint64_t e = ((uint64_t)gf->data->log_tbl[a] * (uint64_t)b) % (GF_FIELD_SIZE - 1);
    return (gf->data->antilog_tbl[e]);
  }
}

/* Discrete logarithm of a, base alpha.  log(0) is undefined; the table
   holds a placeholder 0 for that entry. */
static
gf_val_16_t
gf_w16_log (const galois_field_16_t *gf, gf_val_16_t a)
{
    return gf->data->log_tbl[a];
}

/* alpha raised to the a-th power, straight from the table. */
static
gf_val_16_t
gf_w16_antilog (const galois_field_16_t *gf, gf_val_16_t a)
{
    return gf->data->antilog_tbl[a];
}

/* Multiplicative inverse from the precomputed table (inv_tbl[0] is a
   placeholder; 0 has no inverse). */
static
gf_val_16_t
gf_w16_invert (const galois_field_16_t *gf, gf_val_16_t a)
{
    return gf->data->inv_tbl[a];
}

/* Table-driven multiply, exported through the gf->mult function pointer. */
static
gf_val_16_t
gf_w16_mult_log (const galois_field_16_t *gf, gf_val_16_t a, gf_val_16_t b)
{
    return multiply_log (gf, a, b);
}

/* Half-table multiply, exported through the gf->mult function pointer. */
static
gf_val_16_t
gf_w16_mult_htbl (const galois_field_16_t *gf, gf_val_16_t a, gf_val_16_t b)
{
    return multiply_htbl (gf, a, b);
}

/* Full-table multiply, exported through the gf->mult function pointer. */
static
gf_val_16_t
gf_w16_mult_ftbl (const galois_field_16_t *gf, gf_val_16_t a, gf_val_16_t b)
{
    return multiply_ftbl (gf, a, b);
}

/* Division wrapper, exported through the gf->divide function pointer. */
static
gf_val_16_t
gf_w16_divide (const galois_field_16_t *gf, gf_val_16_t a, gf_val_16_t b)
{
    return divide (gf, a, b);
}

/* Compute 1/a without the inverse table, as a / (a * a) = a^-1.
 * Used at init time to fill inv_tbl.  0 has no inverse; returns 0. */
static
gf_val_16_t
gf_w16_invert_slow (galois_field_16_t *gf, gf_val_16_t a)
{
    if (a <= 1) {
        return (a);     /* 0 -> 0 (placeholder), 1 -> 1 (self-inverse) */
    }
    return (divide (gf, a, multiply_log (gf, a, a)));
}

/* dst[i] = a[i] * b for len elements, via the half tables for constant b.
 * b == 0 needs no special case: row 0 of both tables is all zeros. */
static
void
gf_w16_mult_buf_const_htbl (const galois_field_16_t *gf, const gf_val_16_t *a, gf_val_16_t b, gf_val_16_t *dst, int len)
{
    const struct gf_half_tables *ht = (const struct gf_half_tables *)gf->data->extra;
    const gf_val_16_t  *lo = ht->lo_mul[b];
    const gf_val_16_t  *hi = ht->hi_mul[b];
    int                 n;

    for (n = 0; n < len; n++) {
        gf_val_16_t src = a[n];
        dst[n] = (gf_val_16_t)(lo[src & 0xff] ^ hi[src >> (GF_FIELD_WIDTH/2)]);
    }
}

/* dst[i] = a[i] * b for len elements.  Builds the 64K-entry product row for
 * b on the fly (lazy full table), uses it, and throws it away.
 *
 * Fixes: b == 0 previously returned without writing dst at all, leaving
 * stale data where the log variant writes zeros; malloc was unchecked.
 */
static
void
gf_w16_mult_buf_const_ftbl_lazy (const galois_field_16_t *gf, const gf_val_16_t *a, gf_val_16_t b, gf_val_16_t *dst, int len)
{
    int         i;
    int         lb;
    gf_val_16_t  *tbl;

    if (b == 0) {
        /* Anything times zero is zero; dst must still be filled. */
        memset (dst, 0, len * sizeof (gf_val_16_t));
        return;
    }
    tbl = (gf_val_16_t *) malloc (sizeof (gf_val_16_t) * GF_FIELD_SIZE);
    if (tbl == NULL) {
        /* Can't build the row; fall back to log/antilog multiplication. */
        for (i = 0; i < len; i++) {
            dst[i] = multiply_log (gf, a[i], b);
        }
        return;
    }
    tbl[0] = 0;

    /* Enumerate x = alpha^i; tbl[x] = x * b = antilog(i + log b).  The
       doubled antilog table absorbs the i + log b overflow past 2^16-2. */
    lb = gf->data->log_tbl[b];
    for (i = 0; i < GF_FIELD_SIZE-1; i++) {
      tbl[gf->data->antilog_tbl[i]] = gf->data->antilog_tbl[lb];
      lb++;
    }
    for (i = 0; i < len; i++) {
        dst[i] = tbl[a[i]];
    }
    free (tbl);
}

/* Element-wise product of two buffers via the half tables:
 * dst[i] = a[i] * b[i] for len elements. */
static
void
gf_w16_mult_buf_buf_htbl (const galois_field_16_t *gf, const gf_val_16_t *a, const gf_val_16_t *b, gf_val_16_t *dst, int len)
{
    int     n;

    for (n = 0; n < len; n++) {
        dst[n] = multiply_htbl (gf, a[n], b[n]);
    }
}

/* dst[i] = a[i] * b for len elements, via the log/antilog tables.
 *
 * Fix: len counts 16-bit elements, but the old b == 0 path cleared only
 * len BYTES, leaving the second half of dst stale.
 */
static
void
gf_w16_mult_buf_const_log (const galois_field_16_t *gf, const gf_val_16_t *a, gf_val_16_t b, gf_val_16_t *dst, int len)
{
    int         i;
    uint32_t    log_b;

    if (b == 0) {
        memset (dst, 0, len * sizeof (gf_val_16_t));
        return;
    }

    log_b = gf->data->log_tbl[b];

    for (i = 0; i < len; i++) {
        /* The doubled antilog table makes the mod (2^16-1) reduction of
           log a + log b unnecessary. */
        dst[i] = (a[i] == 0) ? 0 : gf->data->antilog_tbl[(unsigned)(gf->data->log_tbl[a[i]] + log_b)];
    }
}

/* Element-wise product of two buffers via the log/antilog tables:
 * dst[i] = a[i] * b[i] for len elements. */
static
void
gf_w16_mult_buf_buf_log (const galois_field_16_t *gf, const gf_val_16_t *a, const gf_val_16_t *b, gf_val_16_t *dst, int len)
{
    int     n;

    for (n = 0; n < len; n++) {
        dst[n] = multiply_log (gf, a[n], b[n]);
    }
}

#if 0
/* NOTE(review): dead code (inside #if 0), apparently carried over from the
 * 8-bit implementation — gf_w8_ prefix on 16-bit types.  dst[i] = a[i] * b
 * via one row of a full product table.  If ever revived: GF_FIELD_SIZE * b
 * is evaluated in int and overflows for b > 32767 — widen before use. */
static
void
gf_w8_mult_buf_const_ftbl (const galois_field_16_t *gf, const gf_val_16_t *a, gf_val_16_t b, gf_val_16_t *dst, int len)
{
    int         i;
    gf_val_16_t  *tbl;

    tbl = (gf_val_16_t *)gf->data->extra + GF_FIELD_SIZE * b;
    for (i = 0; i < len; i++) {
        dst[i] = tbl[a[i]];
    }
}

/* NOTE(review): dead code (inside #if 0), carried over from the 8-bit
 * implementation.  Element-wise dst[i] = a[i] * b[i] via the full table. */
static
void
gf_w8_mult_buf_buf_ftbl (const galois_field_16_t *gf, const gf_val_16_t *a, const gf_val_16_t *b, gf_val_16_t *dst, int len)
{
    int         i;

    for (i = 0; i < len; i++) {
        dst[i] = multiply_ftbl (gf, a[i], b[i]);
    }
}
#endif

#if 0
/*
 * Multiply a single input buffer by a set of constants, and accumulate the result into output buffers.
 * The input is chunked into pieces to leverage cache locality and amortize the cost of loading the
 * half tables.
 */
/* NOTE(review): dead code (inside #if 0), written against the 8-bit field
 * type.  Intended behavior per the comment above: chunk the input and
 * accumulate factor products into each output.  If ever revived, verify:
 * (a) the inner call always reads from `buf`, never `buf + i` — every chunk
 *     appears to multiply the FIRST chunk again;
 * (b) the length passed is sizeof(xbuf) (bytes), while other buf routines
 *     take element counts — confirm the unit convention;
 * (c) the tail path reuses `i` after the main loop and subtracts one step —
 *     confirm the offset arithmetic against an odd-length buffer. */
void
gf_w8_mult_acc_buf_htbl (const struct galois_field_8 *gf, const gf_val_16_t *buf,
                         gf_val_16_t factors[], gf_val_16_t *accs[], int len, int naccs)
{
    int         i, j, k;
    const int   step = 32;
    uint64_t    xbuf[step / sizeof (uint64_t)];

    for (i = 0; i < len; i += step) {
        for (j = 0; j < naccs; j++) {
            gf_w8_mult_buf_const_htbl(gf, buf, factors[j], (gf_val_16_t *)xbuf, sizeof (xbuf));
            for (k = 0; k < sizeof (xbuf) / sizeof (xbuf[0]); k++) {
                ((uint64_t *)(accs[j] + i))[k] ^= xbuf[k];
            }
        }
    }
    if (len % step != 0) {
        i -= step;
        for (j = 0; j < naccs; j++) {
            gf_w8_mult_buf_const_htbl(gf, buf, factors[j], (gf_val_16_t *)xbuf, len % step);
            for (k = 0; k < len % step; k++) {
                *(accs[j] + i + k) ^= ((gf_val_16_t *)xbuf)[k];
            }
        }
    }
}
#endif

#ifdef  INTEL_SSE4

/*
 * Multiply eight 16-bit field elements (va) by eight elements (vb) in
 * parallel using shift-and-add with polynomial reduction; no tables.
 * acc accumulates sum over j of bit_j(vb) * va * x^j, with va doubled
 * (multiplied by x and reduced) once per round.
 */
static
inline
__m128i
gf_w16_mult_vec (__m128i va, __m128i vb, uint32_t prim_poly)
{
    __m128i     acc, t1;
    __m128i     poly = _mm_set1_epi16 ( (int16_t)(prim_poly & 0xffff) );
    __m128i     zero = _mm_setzero_si128 ();
    int         i;

    acc = zero;
    for (i = 15; i > 0; i--) {
        /* Add va into the accumulator where bit (15-i) of the multiplicand
           is SET: the shift moves that bit into the sign position, and
           cmpgt(zero, x) selects the lanes whose sign bit is set.  The
           32-bit shift is safe even though elements are 16-bit: only bit 15
           of each element is inspected, and for i <= 15 that bit still
           originates inside the same element. */
        t1 = _mm_and_si128 (_mm_cmpgt_epi16 (zero, _mm_slli_epi32 (vb, i)), va);
        acc = _mm_xor_si128 (acc, t1);
        /* Multiply va by x: shift left; where the high bit was set, reduce
           by XORing in the primitive polynomial. */
        t1 = _mm_and_si128 (_mm_cmpgt_epi16 (zero, va), poly);
        va = _mm_xor_si128 (_mm_add_epi16 (va, va), t1);
    }
    /* Final round: bit 15 of vb (no shift needed), without doubling va. */
    t1 = _mm_and_si128 (_mm_cmpgt_epi16 (zero, vb), va);
    acc = _mm_xor_si128 (acc, t1);

    return (acc);
}

/* dst[i] = a[i] * b for len elements, eight at a time with SSE.
 *
 * Fix: the old loop condition (i < len, step 8) processed one full vector
 * past the end of a and dst whenever len was not a multiple of 8 — an
 * out-of-bounds load AND store.  Process only complete 8-element chunks
 * vectorized, then finish the tail with the scalar half-table routine.
 */
static
void
gf_w16_mult_buf_const_vec (const galois_field_16_t *gf, const gf_val_16_t *a, gf_val_16_t b, gf_val_16_t *dst, int len)
{
    __m128i     va;
    __m128i     vconst;
    uint32_t    prim_poly = gf->data->prim_poly;
    int         i;

    vconst = _mm_set1_epi16 ( (int16_t)b);
    for (i = 0; i + 8 <= len; i += 8) {
        va = _mm_loadu_si128 ((__m128i *)(a+i));
        _mm_storeu_si128 ((__m128i *)(dst+i), gf_w16_mult_vec (va, vconst, prim_poly));
    }
    /* Finish off the last few elements scalar-wise */
    if (i < len) {
        gf_w16_mult_buf_const_htbl (gf, a+i, b, dst+i, len - i);
    }
}

/* dst[i] = a[i] * b[i] for len elements, eight at a time with SSE.
 *
 * Fix: same overrun as gf_w16_mult_buf_const_vec — the old loop ran one
 * full vector past the end when len was not a multiple of 8.  Vectorize
 * complete chunks only; hand the tail to the scalar half-table routine.
 */
static
void
gf_w16_mult_buf_buf_vec (const galois_field_16_t *gf, const gf_val_16_t *a, const gf_val_16_t *b, gf_val_16_t *dst, int len)
{
    int         i;
    __m128i     va, vb, vr;
    uint32_t    prim_poly = gf->data->prim_poly;

    for (i = 0; i + 8 <= len; i += 8) {
        va = _mm_loadu_si128 ((__m128i *)(a + i));
        vb = _mm_loadu_si128 ((__m128i *)(b + i));
        vr = gf_w16_mult_vec (va, vb, prim_poly);
        _mm_storeu_si128 ((__m128i *)(dst + i), vr);
    }
    /* Finish off the last few elements scalar-wise */
    if (i < len) {
        gf_w16_mult_buf_buf_htbl (gf, a+i, b+i, dst+i, len - i);
    }
}

#endif

/* Initialize a GF(2^16) context: build the log/antilog/inverse tables and
 * wire up the multiplication strategy selected by flags.
 * Returns 1 on success, 0 on failure (allocation failure or an unsupported
 * strategy); on failure gf->data is left NULL so gf_w16_free is safe.
 *
 * Fixes: malloc results are now checked; data->extra is initialized on
 * EVERY path (the FULL_TABLE|LAZY path previously left it uninitialized,
 * so gf_w16_free would free a garbage pointer); the FULL_TABLE failure
 * path no longer leaks gf->data; unused locals removed.
 */
int
gf_w16_init (galois_field_16_t *gf, uint32_t flags)
{
    uint32_t    b;
    int         i, j;

    gf->flags = flags;
    gf->data = (struct gf_data *)malloc (sizeof (struct gf_data));
    if (gf->data == NULL) {
        return (0);
    }
    gf->data->alpha = 1;
    gf->data->prim_poly = 0x1100b;  /* standard primitive polynomial for GF(2^16) */
    gf->data->extra = NULL;         /* so gf_w16_free never sees garbage */

    /* Walk the powers of alpha to fill log/antilog.  The antilog table is
       doubled so log(a)+log(b) can index it without a mod reduction. */
    gf->data->log_tbl[0] = 0;       /* log(0) is undefined; placeholder */
    for (b = gf->data->alpha, i = 0; i < GF_FIELD_SIZE-1; i++) {
        gf->data->log_tbl[b] = (gf_val_16_t)i;
        gf->data->antilog_tbl[i] = (gf_val_16_t)b;
        gf->data->antilog_tbl[i+GF_FIELD_SIZE-1] = (gf_val_16_t)b;
        b <<= 1;
        if (b & GF_FIELD_SIZE) {    /* carried past bit 15: reduce by the polynomial */
            b ^= gf->data->prim_poly;
        }
    }
    gf->data->inv_tbl[0] = 0;  /* Not really, but we need to fill it with something */
    for (i = 0; i < GF_FIELD_SIZE-1; i++) {
        gf->data->inv_tbl[gf->data->antilog_tbl[i]] = gf_w16_invert_slow (gf, gf->data->antilog_tbl[i]);
    }
    gf->log = gf_w16_log;
    gf->antilog = gf_w16_antilog;
    gf->invert = gf_w16_invert;
    gf->divide = gf_w16_divide;
    if (flags & GALOIS_MULT_LOG) {
        gf->mult_buf_const = gf_w16_mult_buf_const_log;
        gf->mult_buf_buf = gf_w16_mult_buf_buf_log;
        gf->mult = gf_w16_mult_log;
    } else if ((flags & GALOIS_MULT_FULL_TABLE) && (flags & GALOIS_MULT_LAZY)) {
        gf->mult_buf_const = gf_w16_mult_buf_const_ftbl_lazy;
        gf->mult_buf_buf = gf_w16_mult_buf_buf_log;
        gf->mult = gf_w16_mult_log;
    } else if (flags & GALOIS_MULT_FULL_TABLE) {
        /* A full 64K x 64K product table (8 GB) isn't practical. */
        free (gf->data);
        gf->data = NULL;
        return (0);
    } else if (flags & GALOIS_MULT_HALF_TABLE) {
        gf->data->extra = (void *)malloc (sizeof (struct gf_half_tables));
        if (gf->data->extra == NULL) {
            free (gf->data);
            gf->data = NULL;
            return (0);
        }
        for (i = 0; i < GF_FIELD_SIZE; i++) {
            for (j = 0; j < (1 << (GF_FIELD_WIDTH/2)); j++) {
                ((struct gf_half_tables *)gf->data->extra)->lo_mul[i][j] = multiply_log (gf, (gf_val_16_t)i, (gf_val_16_t)j);
                ((struct gf_half_tables *)gf->data->extra)->hi_mul[i][j] = multiply_log (gf, (gf_val_16_t)i, (gf_val_16_t)(j << (GF_FIELD_WIDTH/2)) );
            }
        }
#ifdef  INTEL_SSE4
        gf->mult_buf_const = gf_w16_mult_buf_const_vec;
        gf->mult_buf_buf = gf_w16_mult_buf_buf_vec;
#else
        gf->mult_buf_const = gf_w16_mult_buf_const_htbl;
        gf->mult_buf_buf = gf_w16_mult_buf_buf_htbl;
#endif
        gf->mult = gf_w16_mult_htbl;
    }
    /* NOTE(review): if none of the mult flags is set, the mult function
       pointers are left untouched — matches historical behavior; callers
       must pass a valid strategy flag. */
    return (1);
}

/* Release all tables owned by the field context.  Idempotent: gf->data is
 * cleared, and a second call becomes a no-op. */
void
gf_w16_free (galois_field_16_t *gf)
{
    struct gf_data  *d = gf->data;

    gf->data = NULL;
    if (d == NULL) {
        return;
    }
    free (d->extra);    /* free(NULL) is a no-op, so no guard needed */
    free (d);
}

/* Run the self-test suite for one multiplication strategy.
 * Returns 0 on success, or (base + stage) identifying the failing stage.
 *
 * Fixes: `flags | GALOIS_MULT_LAZY` was always true, so the offset sweep in
 * stage 21 ran only once for EVERY strategy (now `&`); the stage-22 memset
 * cleared j bytes instead of j elements; unused __m128i locals (which broke
 * non-SSE builds) and the unused `l` are removed; the init result is now
 * checked (stage 0).
 *
 * NOTE(review): gf is deliberately never freed here — calling gf_w16_free
 * is only safe once gf_w16_init initializes data->extra on every path;
 * confirm before adding cleanup.
 */
static
int
unit_test (uint32_t flags, int base)
{
    int counts[0x10000];            /* used by the disabled exhaustive test below */
    unsigned int i, j, k, r;
    gf_val_16_t r1, r2;             /* r2 used only by the disabled tests */
    gf_val_16_t  *p1, *p2;
    uint64_t  testbuf[0x80000];     /* 4 MB scratch; NOTE(review): large stack frame */
    int     stage;
    galois_field_16_t    gf;
    int         buf_len;

    stage = 0;
    if (gf_w16_init (&gf, flags) == 0) {
        goto fail;
    }

#if 0
    printf ("%5s %5s %5s %5s\n", "N", "log", "alog", "inv");
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        printf ("%5d %5d %5d %5d\n", i, gf.data->log_tbl[i], gf.data->antilog_tbl[i], gf.data->inv_tbl[i]);
    }
    exit (1);
#endif

    /* Check that antilog (log (x)) == x
     * Note that we have to start at 1, since log (0) is undefined.
     */
    stage = 1;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        r = gf.antilog (&gf, gf.log (&gf, (gf_val_16_t)i));
        if (r != (gf_val_16_t)i) {
            goto fail;
        }
    }

    /* Check that log (antilog (x)) == x
     * Note that we have to end at 65534, since antilog (65535) is undefined.
     */
    stage = 2;
    for (i = 0; i < GF_FIELD_SIZE-1; i++) {
        r = gf.log (&gf, gf.antilog (&gf, (gf_val_16_t)i));
        if (r != (gf_val_16_t)i) {
            goto fail;
        }
    }

    /* Check that inverses really are */
    stage = 3;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        r1 = gf.mult (&gf, gf.invert (&gf, (gf_val_16_t)i), (gf_val_16_t)i);
        if (r1 != (gf_val_16_t)1) {
            goto fail;
        }
    }

    /* Check multiplicative identity */
    stage = 4;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        r1 = gf.mult (&gf, (gf_val_16_t)i, (gf_val_16_t)1);
        if (r1 != (gf_val_16_t)i) {
            goto fail;
        }
    }

#if 0
    /*
     * Test all multiplications to ensure 1-1 mapping and commutativity.
     */
    stage = 11;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            counts[j] = 0;
        }
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            r1 = gf.mult (&gf, (gf_val_16_t)i, (gf_val_16_t)j);
            counts[r1] += 1;
            r2 = gf.mult (&gf, (gf_val_16_t)j, (gf_val_16_t)i);
            if (r1 != r2) {
                printf ("FAIL at %d, %d.\n", i, j);
                goto fail;
            }
        }
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            if (counts[j] != 1) {
                printf ("FAIL at %d, %d.\n", i, j);
                goto fail;
            }
        }
    }
#endif

#if 0
    /* Check that divide and multiply work */
    stage = 4;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        for (j = 1; j < GF_FIELD_SIZE; j++) {
            r1 = gf.divide (&gf, (gf_val_16_t)i, (gf_val_16_t)j);
            r2 = gf.mult (&gf, (gf_val_16_t)i, gf.invert (&gf, (gf_val_16_t)j));
            if (r1 != r2) {
                goto fail;
            }
        }
    }
#endif

#if 0
    /* Check distribution property */
    stage = 6;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            for (k = 0; k < GF_FIELD_SIZE; k++) {
                r1 = gf.mult (&gf, (gf_val_16_t)i, (gf_val_16_t)k) ^ gf.mult (&gf, (gf_val_16_t)j, (gf_val_16_t)k);
                r2 = gf.mult (&gf, (gf_val_16_t)i ^ (gf_val_16_t)j, (gf_val_16_t)k);
                if (r1 != r2) {
                    goto fail;
                }
            }
        }
    }

    /* Check powers */
    stage = 7;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        r1 = (gf_val_16_t)(i > 0 ? 1 : 0);
        for (j = 0; j < GF_FIELD_SIZE*2; j++) {
            r2 = pow ((gf_val_16_t)i, j);
            if (r1 != r2) {
                goto fail;
            }
            r1 = mult (r1, (gf_val_16_t)i);
        }
    }
#endif

    /* Check buffer-constant multiplication */
    stage = 21;
    buf_len = 1024;
    for (p1 = (gf_val_16_t *)testbuf, i = 0; i < buf_len; i++) {
        p1[i] = (gf_val_16_t)((i * 0x47f3) % 0x10000);
        p1[i+buf_len] = (gf_val_16_t)((i * 0x5899) % 0x10000);
    }
    p2 = (gf_val_16_t *)testbuf + buf_len * 3;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        /* Test odd offsets and lengths - it can give vectorization fits */
        for (j = 0; j < 8; j += 1) {
            /* Zero out the result buffer, just in case */
            memset (p2, 0, (buf_len + j) * sizeof (gf_val_16_t));
            gf.mult_buf_const (&gf, p1+j, (gf_val_16_t)i, p2, buf_len + j);
            for (k = 0; k < buf_len+j; k++) {
                r1 = gf.mult (&gf, p1[j+k], (gf_val_16_t)i);
                if (r1 != p2[k]) {
                    printf ("i=%u j=%u k=%u\n", i, j, k);
                    printf ("r1=0x%04x p1[j+k]=%04x p2[k]=0x%04x\n", r1, p1[j+k], p2[k]);
                    goto fail;
                }
            }
            /* Doing 64K * 8 is overkill for lazy: one offset per constant.
               (Was `flags | ...`, which short-circuited the sweep for ALL
               strategies.) */
            if (flags & GALOIS_MULT_LAZY) break;
        }
    }

    /* Check buffer-buffer multiplication */
    stage = 22;
    buf_len = 1024;
    for (p1 = (gf_val_16_t *)testbuf, i = 0; i < buf_len; i++) {
        p1[i] = (gf_val_16_t)((i * 0x47f3) % 0x10000);
        p1[i+buf_len] = (gf_val_16_t)((i * 0x5889) % 0x10000);
    }
    p2 = (gf_val_16_t *)testbuf + buf_len * 3;
    /* Test odd offsets and buffer lengths to make sure we don't run into alignment issues */
    for (i = 0; i < 20; i++) {
        for (j = 248; j < 270; j++) {
            /* j counts elements; clear the whole result region (the old
               memset cleared only j bytes = half the elements). */
            memset (p2, 0, j * sizeof (gf_val_16_t));
            gf.mult_buf_buf (&gf, p1, p1+i, p2, j);
            for (k = 0; k < j; k++) {
                if (p2[k] != gf.mult (&gf, p1[k], p1[i+k])) {
                    goto fail;
                }
            }
        }
    }

#if 0
    stage = 100;
    // p1 is the x values (sequence numbers)
    p1 = (gf_val_16_t *)testbuf;
    for (i = 0; i < 60; i++) {
        p1[i] = (gf_val_16_t)(i+4);
    }
    // p2 is the y values (shares)
    p2 = (gf_val_16_t *)(&testbuf[1000]);

    // We always assume 40 total shares so we only need to generate once per
    // number of shares needed to rebuild.
    for (i = 2; i < 20; i++) {
    }
#endif
    return (0);
fail:
    return (stage + base);
}


/* Exercise every supported multiplication strategy.  Returns 0 if all pass,
 * otherwise base + stage of the first failure (base identifies which
 * configuration: 1000 = log, 2000 = half table, 3000 = lazy full table). */
int
gf_w16_unit_test ()
{
    int     r;

    r = unit_test (GALOIS_MULT_LOG, 1000);
    if (r > 0) {
        return (r);
    }
    r = unit_test (GALOIS_MULT_HALF_TABLE, 2000);
    if (r > 0) {
        return (r);
    }
    r = unit_test (GALOIS_MULT_FULL_TABLE | GALOIS_MULT_LAZY, 3000);
    if (r > 0) {
        return (r);
    }
    return (0);
}
