/*
 * gf_w8.c
 *
 * Routines for 8-bit Galois fields
 */

#include "gf_int.h"
#include <stdio.h>

#define GF_FIELD_WIDTH      8
#define GF_FIELD_SIZE       (1 << GF_FIELD_WIDTH)


/* Per-field state shared by every multiplication strategy.  The log and
 * antilog tables are always built by gf_w8_init; `extra' points at
 * strategy-specific data (full table, half tables, bytwo masks) or is NULL. */
struct gf_data {
    gf_val_8_t      alpha;          /* starting element for table generation (gf_w8_init sets 1) */
    uint32_t        prim_poly;      /* primitive polynomial used for reduction */
    gf_val_8_t      log_tbl[GF_FIELD_SIZE];         /* discrete logs; entry 0 is a placeholder (log(0) undefined) */
    gf_val_8_t      antilog_tbl[GF_FIELD_SIZE * 2]; /* duplicated so a sum of two logs needs no mod (2^8-1) */
    gf_val_8_t      inv_tbl[GF_FIELD_SIZE];         /* multiplicative inverses; inv_tbl[0] = 0 placeholder */
    void            *extra;         /* strategy-specific tables, or NULL */
};

#ifdef  INTEL_SSE4
struct __attribute__ ((aligned (16))) gf_half_tables {
    gf_val_8_t  lo_mul[GF_FIELD_SIZE][1 << (GF_FIELD_WIDTH/2)];
    gf_val_8_t  hi_mul[GF_FIELD_SIZE][1 << (GF_FIELD_WIDTH/2)];
};

#endif

/* Precomputed 64-bit SIMD-within-a-register constants for the "bytwo"
 * strategy: eight 8-bit lanes packed in each uint64_t (filled by gf_w8_init). */
struct gf_bytwo_info {
  uint64_t poly;   /* low byte of the primitive polynomial replicated in every lane */
  uint64_t mask1;  /* 0xfe in every lane: stops a lane's left shift leaking into its neighbor */
  uint64_t mask2;  /* 0x80 in every lane: selects lanes whose high bit means "reduce by poly" */
};

/*
 * Basic Galois field routines
 */

/* Multiply uses log-antilog tables.
   The antilog table is "duplicated" to avoid the extra test for overflow on addition.
 */

static
inline
gf_val_8_t
multiply_log (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t b)
{
    const struct gf_data *d = gf->data;

    /* log(0) is undefined, so zero operands are handled up front. */
    if (a == 0 || b == 0) {
        return ((gf_val_8_t)0);
    }
    /* The antilog table is duplicated, so the sum of two logs never needs
       an explicit mod (2^8 - 1) reduction. */
    return (d->antilog_tbl[(unsigned)(d->log_tbl[a] + d->log_tbl[b])]);
}

static
inline
gf_val_8_t
multiply_logc (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t logb)
{
    const struct gf_data *d = gf->data;

    /* Same as multiply_log, but the second operand is supplied already in
       log form (handy when one factor is reused across a whole buffer). */
    if (a == 0) {
        return ((gf_val_8_t)0);
    }
    return (d->antilog_tbl[(unsigned)(d->log_tbl[a] + logb)]);
}

static
inline
gf_val_8_t
multiply_htbl (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t b)
{
    const struct gf_half_tables *ht = (const struct gf_half_tables *)gf->data->extra;

    /* Split a into nibbles and XOR the two partial products:
       b*(a & 0xf) ^ b*((a >> 4) << 4) == b*a by distributivity. */
    return (ht->lo_mul[b][a & 0xf] ^ ht->hi_mul[b][a >> 4]);
}

static
inline
gf_val_8_t
multiply_ftbl (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t b)
{
    const gf_val_8_t *tbl = (const gf_val_8_t *)gf->data->extra;

    /* Full 256x256 product table: row b, column a. */
    return (tbl[((uint32_t)b * GF_FIELD_SIZE) + (uint32_t)a]);
}

static
inline
gf_val_8_t
divide (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t b)
{
  int diff;

  /* 0/x == 0; x/0 is undefined and returns 0 by this module's convention. */
  if (a == 0 || b == 0) {
    return ((gf_val_8_t)0);
  }

  /* a/b = antilog(log a - log b), wrapped into [0, 2^8 - 2]. */
  diff = gf->data->log_tbl[a] - gf->data->log_tbl[b];
  if (diff < 0) {
    diff += GF_FIELD_SIZE - 1;
  }
  return (gf->data->antilog_tbl[diff]);
}

/***************************************/

/* Raise a to the b-th power: a^b = antilog((log a * b) mod (2^8 - 1)).
 * The multiplicative group has order 2^8 - 1 = 255, so the exponent is
 * reduced mod 255 first; this is exact for every 32-bit b.  (The old code
 * masked b with 0xfffff, which is NOT congruent mod 255 and gave wrong
 * answers for b >= 2^20.)  By this module's convention pow(0, b) == 0,
 * including pow(0, 0), and pow(a, 0) == 1 for a != 0. */
static
gf_val_8_t
gf_w8_pow (const galois_field_8_t *gf, gf_val_8_t a, uint32_t b)
{
  if (a == 0) {
    return (a);
  } else if (b == 0) {
    return (gf_val_8_t)1;
  } else {
    /* log_tbl[a] <= 254 and b % 255 <= 254, so the product fits easily. */
    return (gf->data->antilog_tbl[((uint32_t)gf->data->log_tbl[a] * (b % (GF_FIELD_SIZE-1))) % (GF_FIELD_SIZE-1)]);
  }
}

static
gf_val_8_t
gf_w8_log (const galois_field_8_t *gf, gf_val_8_t a)
{
    /* Discrete log lookup; log(0) is undefined and returns the table's
       placeholder value 0. */
    return gf->data->log_tbl[a];
}

static
gf_val_8_t
gf_w8_antilog (const galois_field_8_t *gf, gf_val_8_t a)
{
    /* Inverse of gf_w8_log: alpha^a via table lookup. */
    return gf->data->antilog_tbl[a];
}

static
gf_val_8_t
gf_w8_invert (const galois_field_8_t *gf, gf_val_8_t a)
{
    /* Multiplicative inverse via the precomputed table; inv(0) returns
       the placeholder 0. */
    return gf->data->inv_tbl[a];
}

static
gf_val_8_t
gf_w8_mult_log (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t b)
{
    /* Exported handler: log/antilog table multiply. */
    return multiply_log (gf, a, b);
}

static
gf_val_8_t
gf_w8_mult_shift (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t b)
{
  uint16_t  acc = 0;
  int       bit;

  /* Schoolbook carry-less multiply: acc = a(x) * b(x) over GF(2). */
  for (bit = 0; bit < GF_FIELD_WIDTH; bit++) {
    if (a & (1 << bit)) {
      acc ^= (uint16_t)(b << bit);
    }
  }

  /* Reduce the up-to-15-bit product modulo the primitive polynomial,
     clearing the high bits from the top down. */
  for (bit = GF_FIELD_WIDTH*2 - 1; bit >= GF_FIELD_WIDTH; bit--) {
    if (acc & (1 << bit)) {
      acc ^= (uint16_t)(gf->data->prim_poly << (bit - GF_FIELD_WIDTH));
    }
  }

  return (gf_val_8_t)acc;
}

static
gf_val_8_t
gf_w8_mult_bytwo (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t b)
{
  uint8_t acc = 0;

  /* Shift-and-add ("by two") multiply: consume bits of a from the bottom
     while repeatedly doubling b in the field. */
  for (;;) {
    if (a & 1) {
      acc ^= b;
    }
    a >>= 1;
    if (a == 0) {
      break;
    }
    /* b *= x, folding in the primitive polynomial on overflow. */
    if (b & (1 << (GF_FIELD_WIDTH-1))) {
      b = (gf_val_8_t)((b << 1) ^ gf->data->prim_poly);
    } else {
      b = (gf_val_8_t)(b << 1);
    }
  }
  return acc;
}

static
gf_val_8_t
gf_w8_mult_htbl (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t b)
{
    /* Exported handler: half-table (nibble) multiply. */
    return multiply_htbl (gf, a, b);
}

static
gf_val_8_t
gf_w8_mult_ftbl (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t b)
{
    /* Exported handler: full 256x256 table multiply. */
    return multiply_ftbl (gf, a, b);
}

static
gf_val_8_t
gf_w8_divide (const galois_field_8_t *gf, gf_val_8_t a, gf_val_8_t b)
{
    /* Exported handler: log-table divide (x/0 yields 0 by convention). */
    return divide (gf, a, b);
}

static
gf_val_8_t
gf_w8_invert_slow (galois_field_8_t *gf, gf_val_8_t a)
{
    gf_val_8_t a_sq;

    /* Used only while gf_w8_init builds inv_tbl.  0 maps to the
       placeholder 0 and 1 is its own inverse. */
    if (a <= 1) {
        return (a);
    }
    /* a / a^2 == a^-1. */
    a_sq = multiply_log (gf, a, a);
    return (divide (gf, a, a_sq));
}

static
void
gf_w8_mult_buf_const_htbl (const galois_field_8_t *gf, const gf_val_8_t *a, gf_val_8_t b, gf_val_8_t *dst, int len)
{
    const struct gf_half_tables *ht = (const struct gf_half_tables *)gf->data->extra;
    const gf_val_8_t    *lo = ht->lo_mul[b];
    const gf_val_8_t    *hi = ht->hi_mul[b];
    int                 n;

    /* dst[n] = a[n] * b, one nibble-table lookup per half byte. */
    for (n = 0; n < len; n++) {
        gf_val_8_t v = a[n];
        dst[n] = lo[v & 0xf] ^ hi[v >> 4];
    }
}

static
void
gf_w8_mult_buf_buf_htbl (const galois_field_8_t *gf, const gf_val_8_t *a, const gf_val_8_t *b, gf_val_8_t *dst, int len)
{
    int n;

    /* Element-wise product of two buffers via the half tables. */
    for (n = 0; n < len; n++) {
        dst[n] = multiply_htbl (gf, a[n], b[n]);
    }
}

/*
 * Multiply a single input buffer by a set of constants, and accumulate the
 * result into output buffers.  The input is chunked into pieces to leverage
 * cache locality and amortize the cost of loading the half tables.
 *
 * BUG FIXES: the chunk loop previously multiplied from `buf' instead of
 * `buf + i', so every chunk after the first used the wrong input; and it ran
 * a full `step'-sized chunk past `len' when len was not a multiple of step,
 * reading past buf and XORing garbage into accs before the tail re-did the
 * remainder.  The loop now processes whole chunks only, with an exact tail.
 */
static
void
gf_w8_mult_acc_buf_htbl (const struct galois_field_8 *gf, const gf_val_8_t *buf,
                         gf_val_8_t factors[], gf_val_8_t *accs[], int len, int naccs)
{
    int         i, j, k;
    const int   step = 32;
    uint64_t    xbuf[step / sizeof (uint64_t)];

    /* Whole chunks: products land in xbuf, then get XORed 64 bits at a time.
       NOTE(review): assumes accs[j] + i can be read/written 8 bytes at a
       time; unaligned access is fine on x86 — confirm for other targets. */
    for (i = 0; i + step <= len; i += step) {
        for (j = 0; j < naccs; j++) {
            gf_w8_mult_buf_const_htbl(gf, buf + i, factors[j], (gf_val_8_t *)xbuf, step);
            for (k = 0; k < (int)(sizeof (xbuf) / sizeof (xbuf[0])); k++) {
                ((uint64_t *)(accs[j] + i))[k] ^= xbuf[k];
            }
        }
    }
    /* Remaining len % step bytes, XORed one byte at a time. */
    if (i < len) {
        for (j = 0; j < naccs; j++) {
            gf_w8_mult_buf_const_htbl(gf, buf + i, factors[j], (gf_val_8_t *)xbuf, len - i);
            for (k = 0; k < len - i; k++) {
                accs[j][i + k] ^= ((gf_val_8_t *)xbuf)[k];
            }
        }
    }
}

/*
 * Multiply a buffer by a constant with the "bytwo" SWAR technique, eight
 * bytes per 64-bit word, XOR-accumulating into dst (note: unlike the other
 * mult_buf_const handlers this one ACCUMULATES — dst ^= a*b — which callers
 * rely on only after zeroing dst; kept as-is for compatibility).
 *
 * BUG FIX: the word loop ran while i < len, overrunning both buffers by up
 * to 7 bytes when len was not a multiple of 8, and leaving the scalar tail
 * loop unreachable.  It now stops at the last whole word and the tail loop
 * really handles the remainder.
 */
static
void
gf_w8_mult_buf_const_bytwo (const galois_field_8_t *gf, const gf_val_8_t *a, gf_val_8_t b, gf_val_8_t *dst, int len)
{
  const struct gf_bytwo_info *info = (const struct gf_bytwo_info *) gf->data->extra;
  uint64_t prod;
  uint64_t ta;
  uint64_t tmp1;
  uint64_t tmp2;
  uint64_t *dptr;
  uint64_t *aptr;
  int i;
  uint8_t tb;

  /* NOTE(review): assumes a/dst may be accessed 8 bytes at a time even when
     unaligned; fine on x86 — confirm on strict-alignment targets. */
  dptr = (uint64_t *) dst;
  aptr = (uint64_t *) a;
  for (i = 0; i + (64/GF_FIELD_WIDTH) <= len; i += (64/GF_FIELD_WIDTH)) {
    tb = b;
    prod = 0;
    ta = *aptr;
    while (1) {
      if (tb & 1) prod ^= ta;
      tb >>= 1;
      if (tb == 0) break;
      /* Double all eight lanes of ta at once: shift each lane left,
         then fold the polynomial into lanes whose high bit was set. */
      tmp1 = (ta << 1) & info->mask1;
      tmp2 = ta & info->mask2;
      tmp2 = ((tmp2 << 1) - (tmp2 >> (GF_FIELD_WIDTH-1)));
      ta = (tmp1 ^ (tmp2 & info->poly));
    }
    *dptr ^= prod;
    dptr++;
    aptr++;
  }

  /* Scalar tail for the last len % 8 bytes. */
  for (; i < len; i++) {
    dst[i] ^= gf_w8_mult_bytwo (gf, a[i], b);
  }
}

static
void
gf_w8_mult_buf_const_log (const galois_field_8_t *gf, const gf_val_8_t *a, gf_val_8_t b, gf_val_8_t *dst, int len)
{
    const struct gf_data    *d = gf->data;
    uint32_t                lb;
    int                     n;

    /* Multiplying by zero just clears the output. */
    if (b == 0) {
        memset (dst, 0, len);
        return;
    }

    /* Hoist log(b); each element then costs one log and one antilog lookup. */
    lb = d->log_tbl[b];
    for (n = 0; n < len; n++) {
        gf_val_8_t v = a[n];
        dst[n] = (v == 0) ? 0 : d->antilog_tbl[(unsigned)(d->log_tbl[v] + lb)];
    }
}

static
void
gf_w8_mult_buf_buf_log (const galois_field_8_t *gf, const gf_val_8_t *a, const gf_val_8_t *b, gf_val_8_t *dst, int len)
{
    int n;

    /* Element-wise product of two buffers via the log tables. */
    for (n = 0; n < len; n++) {
        dst[n] = multiply_log (gf, a[n], b[n]);
    }
}

/*
 * Multiply a single input buffer by a set of constants, and accumulate the
 * result into output buffers, using the log tables.  The input is chunked
 * to keep the working set cache-resident.
 *
 * BUG FIXES (same as the half-table variant): the chunk loop multiplied
 * from `buf' instead of `buf + i', and ran a full chunk past `len' when
 * len was not a multiple of step.  Whole chunks only now, with an exact
 * tail for the remainder.
 */
static
void
gf_w8_mult_acc_buf_log (const struct galois_field_8 *gf, const gf_val_8_t *buf,
                        gf_val_8_t factors[], gf_val_8_t *accs[], int len, int naccs)
{
    int         i, j, k;
    const int   step = 64;
    uint64_t    xbuf[step / sizeof (uint64_t)];

    /* NOTE(review): assumes accs[j] + i can be XORed 8 bytes at a time;
       fine on x86 — confirm for strict-alignment targets. */
    for (i = 0; i + step <= len; i += step) {
        for (j = 0; j < naccs; j++) {
            gf_w8_mult_buf_const_log(gf, buf + i, factors[j], (gf_val_8_t *)xbuf, step);
            for (k = 0; k < (int)(sizeof (xbuf) / sizeof (xbuf[0])); k++) {
                ((uint64_t *)(accs[j] + i))[k] ^= xbuf[k];
            }
        }
    }
    /* Remaining len % step bytes, XORed one byte at a time. */
    if (i < len) {
        for (j = 0; j < naccs; j++) {
            gf_w8_mult_buf_const_log(gf, buf + i, factors[j], (gf_val_8_t *)xbuf, len - i);
            for (k = 0; k < len - i; k++) {
                accs[j][i + k] ^= ((gf_val_8_t *)xbuf)[k];
            }
        }
    }
}

static
void
gf_w8_mult_acc_sig_log (const struct galois_field_8 *gf, const gf_val_8_t *buf, gf_val_8_t alpha,
                        gf_val_8_t *result, int sig_width, int len)
{
    gf_val_8_t  la;
    int         n, col;

    /* Horner-style signature: the buffer is split into sig_width interleaved
       columns and each column accumulates result = result*alpha ^ byte. */
    memset (result, 0, sig_width);
    la = gf_w8_log (gf, alpha);
    for (n = 0; n < len; n++) {
        col = n % sig_width;
        result[col] = multiply_logc (gf, result[col], la) ^ buf[n];
    }
}

static
void
gf_w8_mult_buf_const_ftbl (const galois_field_8_t *gf, const gf_val_8_t *a, gf_val_8_t b, gf_val_8_t *dst, int len)
{
    const gf_val_8_t    *row;
    int                 n;

    /* Row b of the full product table maps any byte x to b*x. */
    row = (const gf_val_8_t *)gf->data->extra + (size_t)GF_FIELD_SIZE * b;
    for (n = 0; n < len; n++) {
        dst[n] = row[a[n]];
    }
}

/*
 * Multiply a buffer by a constant by lazily building a 2-byte-at-a-time
 * lookup table for this b: tbl[(x<<8)|y] = ((b*x)<<8)|(b*y), so one 16-bit
 * load+lookup+store handles two input bytes.  The 128KB table is built and
 * freed on every call.
 *
 * BUG FIXES: b == 0 previously returned without touching dst, unlike every
 * other mult_buf_const handler (they all write dst = a*b); and the malloc
 * result was never checked.  On allocation failure we now fall back to the
 * plain log multiply.
 */
static
void
gf_w8_mult_buf_const_ftbl_lazy (const galois_field_8_t *gf, const gf_val_8_t *a, gf_val_8_t b, gf_val_8_t *dst, int len)
{
    int          i, j;
    int          lgb;
    gf_val_16_t  *tbl, high, hi;
    gf_val_16_t  *pa, *pd;

    if (b == 0) {
        memset (dst, 0, len);
        return;
    }
    lgb = gf->data->log_tbl[b];
    tbl = (gf_val_16_t *) malloc(sizeof(gf_val_16_t)*(GF_FIELD_SIZE*GF_FIELD_SIZE));
    if (tbl == NULL) {
        /* Out of memory: degrade gracefully to log multiplication. */
        for (i = 0; i < len; i++) {
            dst[i] = multiply_log(gf, a[i], b);
        }
        return;
    }

    /* Row 0: high byte zero, low byte b*j. */
    tbl[0] = 0;
    for (j = 1; j < GF_FIELD_SIZE; j++) {
      tbl[j] = gf->data->antilog_tbl[lgb+gf->data->log_tbl[j]];
    }
    /* Rows 1..255: high byte b*i, low byte b*j. */
    for (i = 1; i < GF_FIELD_SIZE; i++) {
      high = (gf->data->antilog_tbl[lgb+gf->data->log_tbl[i]])<<GF_FIELD_WIDTH;
      hi = (i << GF_FIELD_WIDTH);
      tbl[hi] = high;
      for (j = 1; j < GF_FIELD_SIZE; j++) {
        tbl[hi|j] = high | (gf->data->antilog_tbl[lgb+gf->data->log_tbl[j]]);
      }
    }

    /* NOTE(review): assumes a/dst can be accessed 16 bits at a time even
       when unaligned; fine on x86 — confirm for strict-alignment targets.
       The table construction is byte-order symmetric, so this works on
       either endianness. */
    pa = (gf_val_16_t *) a;
    pd = (gf_val_16_t *) dst;
    for (i = 0; i+1 < len; i+=2) {
        *pd = tbl[*pa];
        pd++;
        pa++;
    }
    free(tbl);

    /* Odd trailing byte, if any. */
    for (; i < len; i++) {
      dst[i] = multiply_log(gf, a[i], b);
    }
}

static
void
gf_w8_mult_buf_buf_ftbl (const galois_field_8_t *gf, const gf_val_8_t *a, const gf_val_8_t *b, gf_val_8_t *dst, int len)
{
    int n;

    /* Element-wise product of two buffers via the full table. */
    for (n = 0; n < len; n++) {
        dst[n] = multiply_ftbl (gf, a[n], b[n]);
    }
}

static
void
gf_w8_mult_acc_sig_ftbl (const struct galois_field_8 *gf, const gf_val_8_t *buf, gf_val_8_t alpha,
                         gf_val_8_t *result, int sig_width, int len)
{
    int n, col;

    /* Horner-style column signatures using the full product table. */
    memset (result, 0, sig_width);
    for (n = 0; n < len; n++) {
        col = n % sig_width;
        result[col] = multiply_ftbl (gf, result[col], alpha) ^ buf[n];
    }
}


#ifdef  INTEL_SSE4

/* Multiply 16 field elements in v by the constant whose nibble product
 * tables are preloaded in ltbl/htbl, using PSHUFB as a 16-entry lookup.
 * loset must be _mm_set1_epi8(0x0f). */
static
inline
__m128i
gf_w8_mult_const_vec (__m128i v, __m128i htbl, __m128i ltbl, __m128i loset)
{
  __m128i t1, r, h4;

  h4 = _mm_srli_epi32 (v, 4);   /* high nibbles (neighbor bits masked off below) */
  t1 = _mm_and_si128 (loset, v);                 /* low nibble of each byte */
  r = _mm_shuffle_epi8 (ltbl, t1);               /* c * low nibble */
  t1 = _mm_shuffle_epi8 (htbl, _mm_and_si128 (loset, h4));   /* c * (high nibble << 4) */
  r = _mm_xor_si128 (r, t1);                     /* combine the partial products */

  return (r);
}

/* Byte-wise GF(2^8) multiply of va by vb — 16 independent products — via
 * shift-and-add.  PBLENDVB keys off the high (sign) bit of each byte, so
 * each bit of vb is shifted into the sign position in turn. */
static
inline
__m128i
gf_w8_mult_vec (__m128i va, __m128i vb, uint32_t prim_poly)
{
    __m128i     acc;
    __m128i     poly = _mm_set1_epi8 ( (int8_t)(prim_poly & 0xff) );
    __m128i     t1;
    int64_t     i;

    acc = _mm_setzero_si128 ();
    for (i = 7; i > 0; i--) {
        /* Move bit (7-i) of each byte of vb into that byte's sign position.
           (The 32-bit shift is safe: for i <= 7 the sign bit of each byte
           still comes from the same byte.) */
        t1 = _mm_slli_epi32 (vb, i);
        /* Add va into the accumulator in the lanes where that bit of vb is 1
           (blendv selects va where the mask byte's high bit is set). */
        acc = _mm_xor_si128 (acc, _mm_blendv_epi8 (_mm_setzero_si128 (), va, t1));
        /* Multiply va by 2, folding the polynomial into lanes that overflow. */
        t1 = _mm_blendv_epi8 (_mm_setzero_si128 (), poly, va);
        va = _mm_add_epi8 (va, va);
        va = _mm_xor_si128 (va, t1);
    }
    /* Add in the last value (bit 7 of each byte of vb) */
    acc = _mm_xor_si128 (acc, _mm_blendv_epi8 (_mm_setzero_si128 (), va, vb));

    return (acc);
}

/*
 * Buffer-by-constant multiply, 16 bytes per iteration with PSHUFB tables.
 *
 * BUG FIX: the vector loop previously ran while i < len, reading and —
 * worse — storing up to 15 bytes past the end of the buffers when len was
 * not a multiple of 16, before the tail re-did the last chunk.  It now
 * processes whole 16-byte chunks only and hands the exact remainder to the
 * scalar half-table routine.
 */
static
void
gf_w8_mult_buf_const_vec (const galois_field_8_t *gf, const gf_val_8_t *a, gf_val_8_t b, gf_val_8_t *dst, int len)
{
    __m128i     ltbl, htbl, loset;
    __m128i     va;
    int         i;

    ltbl = *(__m128i *)(&((struct gf_half_tables *)gf->data->extra)->lo_mul[b]);
    htbl = *(__m128i *)(&((struct gf_half_tables *)gf->data->extra)->hi_mul[b]);
    loset = _mm_set1_epi8 (0x0f);

    for (i = 0; i + 16 <= len; i += 16) {
        va = _mm_loadu_si128 ((__m128i *)(a+i));
        _mm_storeu_si128 ((__m128i *)(dst+i), gf_w8_mult_const_vec(va, htbl, ltbl, loset));
    }
    /* Finish off the last len % 16 bytes */
    if (i != len) {
        gf_w8_mult_buf_const_htbl (gf, a+i, b, dst+i, len - i);
    }
}

/*
 * Buffer-by-buffer multiply, 16 products per iteration.
 *
 * BUG FIX: as with gf_w8_mult_buf_const_vec, the loop used to overrun all
 * three buffers by up to 15 bytes when len was not a multiple of 16.  Whole
 * chunks only now; the scalar half-table routine handles the remainder.
 */
static
void
gf_w8_mult_buf_buf_vec (const galois_field_8_t *gf, const gf_val_8_t *a, const gf_val_8_t *b, gf_val_8_t *dst, int len)
{
    int     i;
    __m128i va, vb, vr;

    for (i = 0; i + 16 <= len; i += 16) {
        va = _mm_loadu_si128 ((__m128i *)(a + i));
        vb = _mm_loadu_si128 ((__m128i *)(b + i));
        vr = gf_w8_mult_vec (va, vb, gf->data->prim_poly);
        _mm_storeu_si128 ((__m128i *)(dst + i), vr);
    }
    /* Finish off the last len % 16 bytes */
    if (i != len) {
        gf_w8_mult_buf_buf_htbl (gf, a+i, b+i, dst+i, len-i);
    }
}


/*
 * Multiply one input buffer by a set of constants and XOR-accumulate into
 * the corresponding output buffers, one accumulator at a time.
 *
 * BUG FIXES: the vector loop overran the buffers when len was not a
 * multiple of 16, and the tail test used len % 8 (the vector width is 16)
 * with the wrong remainder length.  The tail is now an exact scalar loop,
 * independent of the chunked helpers.
 */
static
void
gf_w8_mult_acc_buf_vec_v0 (const struct galois_field_8 *gf, const gf_val_8_t *buf,
                           gf_val_8_t factors[], gf_val_8_t *accs[], int len, int naccs)
{
    __m128i     ltbl, htbl, loset;
    __m128i     va, vb;
    int         i, j, acc;

    loset = _mm_set1_epi8 (0x0f);
    for (acc = 0; acc < naccs; acc++) {
        ltbl = *(__m128i *)(&((struct gf_half_tables *)gf->data->extra)->lo_mul[factors[acc]]);
        htbl = *(__m128i *)(&((struct gf_half_tables *)gf->data->extra)->hi_mul[factors[acc]]);
        for (i = 0; i + 16 <= len; i += 16) {
            va = _mm_loadu_si128 ((__m128i *)(buf + i));
            vb = _mm_loadu_si128 ((__m128i *)(accs[acc] + i));
            vb = _mm_xor_si128 (vb, gf_w8_mult_const_vec(va, htbl, ltbl, loset));
            _mm_storeu_si128 ((__m128i *)(accs[acc] + i), vb);
        }
    }

    /* Scalar tail for the last len % 16 bytes of every accumulator. */
    for (i = len - (len % 16); i < len; i++) {
        for (j = 0; j < naccs; j++) {
            accs[j][i] ^= multiply_htbl (gf, buf[i], factors[j]);
        }
    }
}

/*
 * This routine is about the same speed as v0, though it's a lot nicer to the
 * cache because it doesn't run through the big buffer multiple times.
 *
 * BUG FIXES (as in v0): the vector loop overran the buffers when len was not
 * a multiple of 16, and the tail test used len % 8 with the wrong remainder
 * length.  The tail is now an exact scalar loop.
 */
static
void
gf_w8_mult_acc_buf_vec_v1 (const struct galois_field_8 *gf, const gf_val_8_t *buf,
                           gf_val_8_t factors[], gf_val_8_t *accs[], int len, int naccs)
{
    __m128i     ltbl, htbl, loset;
    __m128i     va, vb;
    int         i, j;

    loset = _mm_set1_epi8 (0x0f);
    for (i = 0; i + 16 <= len; i += 16) {
        va = _mm_loadu_si128 ((__m128i *)(buf + i));
        for (j = 0; j < naccs; j++) {
            vb = _mm_loadu_si128 ((__m128i *)(accs[j] + i));
            ltbl = *(__m128i *)(&((struct gf_half_tables *)gf->data->extra)->lo_mul[factors[j]]);
            htbl = *(__m128i *)(&((struct gf_half_tables *)gf->data->extra)->hi_mul[factors[j]]);
            vb = _mm_xor_si128 (vb, gf_w8_mult_const_vec(va, htbl, ltbl, loset));
            _mm_storeu_si128 ((__m128i *)(accs[j] + i), vb);
        }
    }

    /* Scalar tail for the last len % 16 bytes of every accumulator. */
    for (; i < len; i++) {
        for (j = 0; j < naccs; j++) {
            accs[j][i] ^= multiply_htbl (gf, buf[i], factors[j]);
        }
    }
}

/*
 * This is slower than half-table vector multiplication, so we don't use it.
 *
 * BUG FIXES (as in v0/v1): the vector loop overran the buffers when len was
 * not a multiple of 16, and the tail test used len % 8 with the wrong
 * remainder length.  The tail is now an exact scalar loop.
 */
static
void
gf_w8_mult_acc_buf_vec_v2 (const struct galois_field_8 *gf, const gf_val_8_t *buf,
                           gf_val_8_t factors[], gf_val_8_t *accs[], int len, int naccs)
{
    __m128i     va, vb, vr;
    int         i, j;

    for (i = 0; i + 16 <= len; i += 16) {
        va = _mm_loadu_si128 ((__m128i *)(buf + i));
        for (j = 0; j < naccs; j++) {
            vb = _mm_loadu_si128 ((__m128i *)(accs[j] + i));
            vr = gf_w8_mult_vec (va, _mm_set1_epi8 (factors[j]), gf->data->prim_poly);
            _mm_storeu_si128 ((__m128i *)(accs[j] + i), _mm_xor_si128 (vb, vr));
        }
    }

    /* Scalar tail for the last len % 16 bytes of every accumulator. */
    for (; i < len; i++) {
        for (j = 0; j < naccs; j++) {
            accs[j][i] ^= multiply_htbl (gf, buf[i], factors[j]);
        }
    }
}

/*
 * Vectorized column signature: 16 columns are accumulated in one register
 * (each vector step multiplies all lanes by alpha^(16/sig_width), which is
 * alpha once per element that lands in each real column), then folded down
 * to sig_width columns, then any trailing bytes are processed scalar.
 *
 * BUG FIX: the vector loop previously ran past len when len was not a
 * multiple of 16, folding garbage into the signature before the tail redid
 * the chunk.  It now consumes whole chunks only, and the scalar tail
 * continues exactly where the vector loop stopped.
 */
static
void
gf_w8_mult_acc_sig_vec (const struct galois_field_8 *gf, const gf_val_8_t *buf, gf_val_8_t alpha, gf_val_8_t *result,
                        int sig_width, int len)
{
    __m128i     ltbl, htbl, loset;
    __m128i     va, sig;
    gf_val_8_t  alpha_exp;
    gf_val_8_t  wide_sig[16];
    int         i, j;

    alpha_exp = gf_w8_pow (gf, alpha, 16 / sig_width);
    ltbl = *(__m128i *)(&((struct gf_half_tables *)gf->data->extra)->lo_mul[alpha_exp]);
    htbl = *(__m128i *)(&((struct gf_half_tables *)gf->data->extra)->hi_mul[alpha_exp]);
    loset = _mm_set1_epi8 (0x0f);
    sig = _mm_setzero_si128 ();
    for (i = 0; i + 16 <= len; i += 16) {
        va = _mm_loadu_si128 ((__m128i *)(buf+i));
        sig = _mm_xor_si128 (gf_w8_mult_const_vec(sig, htbl, ltbl, loset), va);
    }
    /* Fold the 16 lanes together, multiplying by alpha as needed
       (sig_width divides 16, so column phases line up). */
    _mm_storeu_si128 ((__m128i *)wide_sig, sig);
    for (j = sig_width; j < 16; j++) {
        wide_sig[j % sig_width] = multiply_log (gf, wide_sig[j % sig_width], alpha) ^ wide_sig[j];
    }

    /* Scalar tail for the last len % 16 bytes */
    for (; i < len; i++) {
        wide_sig[i % sig_width] = multiply_log (gf, wide_sig[i % sig_width], alpha) ^ buf[i];
    }
    memcpy (result, wide_sig, sig_width);
}

#endif

/*
 * Initialize an 8-bit Galois field context.  Always builds the log/antilog
 * and inverse tables, then installs the multiplication handlers selected by
 * `flags'.  Returns 1 on success, 0 on allocation failure.
 *
 * BUG FIXES: the non-SSE half-table branch assigned gf_w8_mult_acc_sig_htbl,
 * a function that does not exist (link failure); the full-table branch left
 * mult_acc_buf unset; data->extra was left uninitialized when no strategy
 * flag matched, so gf_w8_free could free a garbage pointer; and malloc
 * results were never checked.  Log-based handlers are now installed as
 * defaults before the strategy branch, so no pointer is ever left unset.
 */
int
gf_w8_init (galois_field_8_t *gf, uint32_t flags)
{
    uint32_t    b;
    int         i, j;
    gf_val_8_t  v;
    gf_val_8_t  *gtbl;
    struct gf_bytwo_info *gbt;

    gf->flags = flags;
    gf->data = (struct gf_data *)malloc (sizeof (struct gf_data));
    if (gf->data == NULL) {
        return (0);
    }
    gf->data->extra = NULL;
    gf->data->alpha = 1;
    gf->data->prim_poly = 0435;     /* octal: x^8 + x^4 + x^3 + x^2 + 1 (0x11d) */

    /* Build the log/antilog tables by repeated multiplication by x.
       The antilog table is duplicated so log sums need no mod reduction. */
    gf->data->log_tbl[0] = 0;       /* log(0) undefined; placeholder */
    for (b = gf->data->alpha, i = 0; i < GF_FIELD_SIZE-1; i++) {
        gf->data->log_tbl[b] = (gf_val_8_t)i;
        gf->data->antilog_tbl[i] = (gf_val_8_t)b;
        gf->data->antilog_tbl[i+GF_FIELD_SIZE-1] = (gf_val_8_t)b;
        b <<= 1;
        if (b & GF_FIELD_SIZE) {
            b = b ^ gf->data->prim_poly;
        }
    }
    gf->data->inv_tbl[0] = 0;  /* Not really, but we need to fill it with something */
    for (i = 0; i < GF_FIELD_SIZE-1; i++) {
        gf->data->inv_tbl[gf->data->antilog_tbl[i]] = gf_w8_invert_slow (gf, gf->data->antilog_tbl[i]);
    }

    gf->log = gf_w8_log;
    gf->antilog = gf_w8_antilog;
    gf->invert = gf_w8_invert;
    gf->divide = gf_w8_divide;
    gf->pow = gf_w8_pow;

    /* Log-based handlers work in every configuration; install them as
       defaults so no function pointer is ever left uninitialized. */
    gf->mult = gf_w8_mult_log;
    gf->mult_buf_const = gf_w8_mult_buf_const_log;
    gf->mult_buf_buf = gf_w8_mult_buf_buf_log;
    gf->mult_acc_buf = gf_w8_mult_acc_buf_log;
    gf->mult_acc_sig = gf_w8_mult_acc_sig_log;

    if (flags & GALOIS_MULT_LOG) {
        /* Defaults above are exactly the log configuration. */
    } else if (flags & GALOIS_MULT_BYTWO) {
        gbt = (struct gf_bytwo_info *) malloc(sizeof(struct gf_bytwo_info));
        if (gbt == NULL) {
            free (gf->data);
            gf->data = NULL;
            return (0);
        }
        gf->data->extra = (void *) gbt;
        v = (gf->data->prim_poly & 0xff);
        gbt->poly = 0;
        gbt->mask1 = 0;
        gbt->mask2 = 0;
        /* Replicate the per-lane constants into all eight 8-bit lanes. */
        for (i = 0; i < 64; i += GF_FIELD_WIDTH) {
          gbt->poly = (gbt->poly << GF_FIELD_WIDTH) | v;
          gbt->mask1 = (gbt->mask1 << GF_FIELD_WIDTH) | (GF_FIELD_SIZE-2);   /* 0xfe */
          gbt->mask2 = (gbt->mask2 << GF_FIELD_WIDTH) | (GF_FIELD_SIZE>>1);  /* 0x80 */
        }
        gf->mult = gf_w8_mult_bytwo;
        gf->mult_buf_const = gf_w8_mult_buf_const_bytwo;
    } else if ((flags & GALOIS_MULT_FULL_TABLE) && (flags & GALOIS_MULT_LAZY)) {
        gf->mult_buf_const = gf_w8_mult_buf_const_ftbl_lazy;
    } else if (flags & GALOIS_MULT_SHIFT) {
        gf->mult = gf_w8_mult_shift;
    } else if (flags & GALOIS_MULT_FULL_TABLE) {
        gf->data->extra = malloc (GF_FIELD_SIZE * GF_FIELD_SIZE);
        if (gf->data->extra == NULL) {
            free (gf->data);
            gf->data = NULL;
            return (0);
        }
        gtbl = (gf_val_8_t *)gf->data->extra;
        for (i = 0; i < GF_FIELD_SIZE; i++) {
            for (j = 0; j < GF_FIELD_SIZE; j++) {
                gtbl[i * GF_FIELD_SIZE + j] = multiply_log (gf, (gf_val_8_t)i, (gf_val_8_t)j);
            }
        }
        gf->mult_buf_const = gf_w8_mult_buf_const_ftbl;
        gf->mult_buf_buf = gf_w8_mult_buf_buf_ftbl;
        gf->mult_acc_sig = gf_w8_mult_acc_sig_ftbl;
        gf->mult = gf_w8_mult_ftbl;
        /* mult_acc_buf keeps the log implementation (was left unset before). */
    } else if (flags & GALOIS_MULT_HALF_TABLE) {
        struct gf_half_tables *ght;

        ght = (struct gf_half_tables *)malloc (sizeof (struct gf_half_tables));
        if (ght == NULL) {
            free (gf->data);
            gf->data = NULL;
            return (0);
        }
        gf->data->extra = (void *)ght;
        for (i = 0; i < GF_FIELD_SIZE; i++) {
            for (j = 0; j < (1 << (GF_FIELD_WIDTH/2)); j++) {
                ght->lo_mul[i][j] = multiply_log (gf, (gf_val_8_t)i, (gf_val_8_t)j);
                ght->hi_mul[i][j] = multiply_log (gf, (gf_val_8_t)i, (gf_val_8_t)(j << 4));
            }
        }
#ifdef  INTEL_SSE4
        gf->mult_buf_const = gf_w8_mult_buf_const_vec;
        gf->mult_buf_buf = gf_w8_mult_buf_buf_vec;
        gf->mult_acc_buf = gf_w8_mult_acc_buf_vec_v1;
        gf->mult_acc_sig = gf_w8_mult_acc_sig_vec;
#else
        gf->mult_buf_const = gf_w8_mult_buf_const_htbl;
        gf->mult_buf_buf = gf_w8_mult_buf_buf_htbl;
        gf->mult_acc_buf = gf_w8_mult_acc_buf_htbl;
        /* BUG FIX: previously assigned gf_w8_mult_acc_sig_htbl, which does
           not exist; the log-based signature handler is correct here. */
        gf->mult_acc_sig = gf_w8_mult_acc_sig_log;
#endif
        gf->mult = gf_w8_mult_htbl;
    }
    return (1);
}

void
gf_w8_free (galois_field_8_t *gf)
{
    /* Release the table state set up by gf_w8_init; safe to call when
       gf->data is already NULL.  free(NULL) is a no-op, so the extra
       pointer needs no guard. */
    if (gf->data != NULL) {
        free (gf->data->extra);
        free (gf->data);
    }
    gf->data = NULL;
}

/*
 * Self-test for one flag configuration.  Returns 0 on success, or
 * base + stage for the first failing check.
 *
 * FIXES: removed an unconditional `__m128i' declaration (broke builds
 * without SSE headers) and unused locals; the gf tables are now freed on
 * every exit path (they leaked before); stray gf_val_16_t casts and a
 * "65535" comment left over from the 16-bit version were corrected.
 */
static
int
unit_test (uint32_t flags, int base)
{
    int counts[GF_FIELD_SIZE];
    uint32_t    i, j, k, r;
    gf_val_8_t  r1, r2;
    gf_val_8_t  *p1, *p2;
    uint64_t    testbuf[8192];
    int         testlen = 8192;
    gf_val_8_t  smallbuf[256];
    gf_val_8_t  smallbuf2[256];
    int         stage;
    galois_field_8_t    gf;

    gf_w8_init (&gf, flags);

    /* Check that antilog (log (x)) == x.
     * Start at 1, since log (0) is undefined.
     */
    stage = 1;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        r = gf.antilog (&gf, gf.log (&gf, (gf_val_8_t)i));
        if (r != i) {
            goto fail;
        }
    }

    /* Check that log (antilog (x)) == x.
     * End at 253, since antilog (254) wraps to antilog (0).
     */
    stage = 2;
    for (i = 0; i < GF_FIELD_SIZE-1; i++) {
        r = gf.log (&gf, gf.antilog (&gf, (gf_val_8_t)i));
        if (r != i) {
            goto fail;
        }
    }

    /* Check that inverses really are */
    stage = 3;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        r1 = gf.mult (&gf, gf.invert (&gf, (gf_val_8_t)i), (gf_val_8_t)i);
        if (r1 != (gf_val_8_t)1) {
            goto fail;
        }
    }

    /* Check multiplicative identity */
    stage = 4;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        r1 = gf.mult (&gf, (gf_val_8_t)i, (gf_val_8_t)1);
        if (r1 != (gf_val_8_t)i) {
            goto fail;
        }
    }

    /* Check inverses again via the divide handler's tables */
    stage = 5;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        r1 = gf.mult (&gf, gf.invert (&gf, (gf_val_8_t)i), (gf_val_8_t)i);
        if (r1 != (gf_val_8_t)1) {
            goto fail;
        }
    }

    /* Check powers against repeated multiplication */
    stage = 6;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        r1 = (gf_val_8_t)(i > 0 ? 1 : 0);
        for (j = 0; j < GF_FIELD_SIZE*2; j++) {
            r2 = gf.pow (&gf, (gf_val_8_t)i, j);
            if (r1 != r2) {
                goto fail;
            }
            r1 = gf.mult (&gf, r1, (gf_val_8_t)i);
        }
    }

    /*
     * Test all multiplications to ensure 1-1 mapping and commutativity.
     */
    stage = 11;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            counts[j] = 0;
        }
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            r1 = gf.mult (&gf, (gf_val_8_t)i, (gf_val_8_t)j);
            counts[r1] += 1;
            r2 = gf.mult (&gf, (gf_val_8_t)j, (gf_val_8_t)i);
            if (r1 != r2) {
                goto fail;
            }
        }
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            if (counts[j] != 1) {
                goto fail;
            }
        }
    }

    /* Check that divide and multiply work */
    stage = 12;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        for (j = 1; j < GF_FIELD_SIZE; j++) {
            r1 = gf.divide (&gf, (gf_val_8_t)i, (gf_val_8_t)j);
            r2 = gf.mult (&gf, (gf_val_8_t)i, gf.invert (&gf, (gf_val_8_t)j));
            if (r1 != r2) {
                goto fail;
            }
        }
    }

    /* Check distribution property */
    stage = 13;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            for (k = 0; k < GF_FIELD_SIZE; k++) {
                r1 = gf.mult (&gf, (gf_val_8_t)i, (gf_val_8_t)k) ^ gf.mult (&gf, (gf_val_8_t)j, (gf_val_8_t)k);
                r2 = gf.mult (&gf, (gf_val_8_t)i ^ (gf_val_8_t)j, (gf_val_8_t)k);
                if (r1 != r2) {
                    goto fail;
                }
            }
        }
    }

    /* Check buffer-constant multiplication */
    stage = 21;
    p1 = (gf_val_8_t *)testbuf;
    galois_fill_random (testbuf, testlen * 2, 0);
    p2 = (gf_val_8_t *)testbuf + testlen * 2 + 16;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        /* Test odd offsets and lengths - it can give vectorization fits */
        for (j = 0; j < 8; j += 1) {
            /* Zero out the result buffer - some handlers accumulate */
            memset (p2, 0, testlen + j);
            gf.mult_buf_const (&gf, p1+j, (gf_val_8_t)i, p2, testlen + j);
            for (k = 0; k < testlen+j; k++) {
                r1 = gf.mult (&gf, p1[j+k], (gf_val_8_t)i);
                if (r1 != p2[k]) {
                    goto fail;
                }
            }
        }
    }

    /* Check buffer-buffer multiplication */
    stage = 22;
    p1 = (gf_val_8_t *)testbuf;
    galois_fill_random (p1, testlen * 2, 0);
    p2 = (gf_val_8_t *)testbuf + testlen * 2 + 16;
    /* Test odd offsets and buffer lengths to make sure we don't run into alignment issues */
    for (i = 0; i < 20; i++) {
        for (j = testlen; j < testlen + 32; j++) {
            memset (p2, 0, j);
            gf.mult_buf_buf (&gf, p1, p1+i, p2, j);
            for (k = 0; k < j; k++) {
                if (p2[k] != gf.mult (&gf, p1[k], p1[i+k])) {
                    goto fail;
                }
            }
        }
    }

    /* NOTE(review): this stage repeats stage 22 verbatim; it was probably
       meant to exercise mult_acc_buf, which is currently untested. */
    stage = 23;
    p1 = (gf_val_8_t *)testbuf;
    galois_fill_random (p1, testlen * 2, 0);
    p2 = (gf_val_8_t *)testbuf + testlen * 2 + 16;
    for (i = 0; i < 20; i++) {
        for (j = testlen; j < testlen + 32; j++) {
            memset (p2, 0, j);
            gf.mult_buf_buf (&gf, p1, p1+i, p2, j);
            for (k = 0; k < j; k++) {
                if (p2[k] != gf.mult (&gf, p1[k], p1[i+k])) {
                    goto fail;
                }
            }
        }
    }

    /*
     * Test signature code.  Since there's no simple way to do this, we split
     * the buffer into columns, as the signature code does, and do the
     * signatures manually.
     */
    stage = 24;
    testlen = 4096;
    p1 = (gf_val_8_t *)testbuf;
    galois_fill_random (p1, testlen, 4);
    for (i = 8; i >= 1; i /= 2) {
        for (j = 2; j < GF_FIELD_SIZE - 1; j++) {
            memset (smallbuf, 0, sizeof (smallbuf));
            for (k = 0; k < testlen; k++) {
                smallbuf[k % i] = gf.mult (&gf, smallbuf[k % i], (gf_val_8_t)j) ^ p1[k];
            }
            gf.mult_acc_sig (&gf, p1, (gf_val_8_t)j, smallbuf2, i, testlen);
            for (k = 0; k < i; k++) {
                if (smallbuf2[k] != smallbuf[k]) {
                    printf ("Fail at i=%d j=%02x, sig1=%x sig2=%x\n", i, j, *(uint32_t *)smallbuf, *(uint32_t *)smallbuf2);
                    goto fail;
                }
            }
        }
    }

    gf_w8_free (&gf);
    return (0);
fail:
    gf_w8_free (&gf);
    return (stage + base);
}


int
gf_w8_unit_test ()
{
    int     r;

    if ((r = unit_test (GALOIS_MULT_HALF_TABLE, 1000)) > 0) {
        return (r);
    } else if ((r = unit_test (GALOIS_MULT_FULL_TABLE, 2000)) > 0) {
        return (r);
    } else if ((r = unit_test (GALOIS_MULT_LOG, 3000)) > 0) {
        return (r);
    } else if ((r = unit_test (GALOIS_MULT_BYTWO, 4000)) > 0) {
        return (r);
    } else if ((r = unit_test (GALOIS_MULT_SHIFT, 5000)) > 0) {
        return (r);
    } else if ((r = unit_test (GALOIS_MULT_FULL_TABLE | GALOIS_MULT_LAZY, 6000)) > 0) {
        return (r);
    }
    return (0);
}




#if 0


#include <stdio.h>

void
split_secret (const uint8_t key[], uint8_t *shares[], int key_len, int n_needed, int n_shares, int x_offset = 1)
{
    int         i, j;
    gf_val_8_t    x[256], y[256];
    gf_state_t  gfs (0435, 1);

    for (i = 0; i < n_shares; i++) {
        x[i] = (gf_val_8_t)(i + x_offset);
    }

    for (i = 0; i < key_len; i++) {
        gfs.gen_secret_shares (x, y, (gf_val_8_t)key[i], n_needed, n_shares);
        for (j = 0; j < n_shares; j++) {
            shares[j][i] = (uint8_t)y[j];
        }
    }
}

void
rebuild_secret (uint8_t key[], uint8_t *shares[], const int share_num[],
                int key_len, int n_provided, int x_offset = 1)
{
    int         i, j;
    gf_val_8_t    x[256], y[256];
    gf_state_t  gfs (0435, 1);

    for (i = 0; i < n_provided; i++) {
        x[i] = (gf_val_8_t)(share_num[i] + x_offset);
    }
    for (i = 0; i < key_len; i++) {
        for (j = 0; j < n_provided; j++) {
            y[j] = (gf_val_8_t)shares[j][i];
        }
        key[i] = (uint8_t)gfs.interpolate (x, y, n_provided);
    }
}

double
test_multiply_gf8 (const gf_val_8_t *buf, int len)
{
    gf_val_8_t    dest[65536];
    int         i;


    for (i = 0; i < len; i += sizeof (dest)) {
        
    }
}

int
main (int argc, char *argv[])
{
    gf_state_t      gfs (0435, 1);
    int             v;
    int             i, j;
    int             n_needed, n_shares;
    uint32_t        secret, r_secret;
    uint32_t        shares[100];
    uint8_t *       shares_ptr[100];
    int             share_num[100];

    if ((v = gfs.sanity_check ()) > 0) {
        fprintf (stderr, "%s: sanity check failed (%d)!\n", argv[0], v);
    } else {
        fprintf (stderr, "%s: sanity check succeeded!\n", argv[0]);
    }

    if (argc > 3) {
        secret = strtol (argv[1], 0, 0);
        n_needed = strtol (argv[2], 0, 0);
        n_shares = strtol (argv[3], 0, 0);
        if (n_needed > n_shares || n_needed < 2 || n_shares > 23 || n_shares < 3) {
            printf ("Usage: %s <secret> <n_needed> <n_shares>\n", argv[0]);
            exit (-1);
        }
        for (i = 0; i < 100; i++) {
            shares_ptr[i] = (uint8_t *)&shares[i];
        }
        split_secret ((uint8_t *)(&secret), shares_ptr, sizeof (secret), n_needed, n_shares);

        for (i = 0; i < n_shares; i++) {
            printf ("Share %d is %08x\n", i, shares[i]);
        }

        for (i = 0; i < n_shares - n_needed; i++) {
            for (j = 0; j < n_needed; j++) {
                shares_ptr[j] = (uint8_t *)(&shares[i + j]);
                share_num[j] = i + j;
            }
            rebuild_secret ((uint8_t *)(&r_secret), shares_ptr, share_num, sizeof (r_secret), n_needed);
            printf ("i=%d.  secret=%08x recovered=%08x.\n", i, secret, r_secret);
        }
    }
}
#endif
