/*
 * gf_w4.c
 *
 * Routines for 4-bit Galois fields
 */

#include "gf_int.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GF_FIELD_WIDTH      4
#define GF_FIELD_SIZE       (1 << GF_FIELD_WIDTH)


struct gf_data {
    gf_val_8_t      alpha;
    uint32_t        prim_poly;
    gf_val_8_t      log_tbl[GF_FIELD_SIZE];
    gf_val_8_t      antilog_tbl[GF_FIELD_SIZE * 2];
    gf_val_8_t      inv_tbl[GF_FIELD_SIZE];
    void            *extra;
};

struct gf_bytwo_info {
  uint64_t poly;
  uint64_t mask1;
  uint64_t mask2;
};

/*
 * Basic Galois field routines
 */

/* Multiply uses log-antilog tables.
   The antilog table is "duplicated" to avoid the extra test for overflow on addition.
 */

static
inline
gf_val_8_t
multiply_log (const galois_field_4_t *gf, gf_val_8_t a, gf_val_8_t b)
{
    gf_val_8_t  result;
    
    result = (a == 0 || b == 0) ? 0 : gf->data->antilog_tbl[(unsigned)(gf->data->log_tbl[a] + gf->data->log_tbl[b])];
    return (result);
}

static
inline
gf_val_8_t
multiply_logc (const galois_field_4_t *gf, gf_val_8_t a, gf_val_8_t logb)
{
    gf_val_8_t  result;
    
    result = (a == 0) ? 0 : gf->data->antilog_tbl[(unsigned)(gf->data->log_tbl[a] + logb)];
    return (result);
}

static
inline
gf_val_8_t
divide (const galois_field_4_t *gf, gf_val_8_t a, gf_val_8_t b)
{
  int log_sum = 0;

  if (a == 0) {
    return ((gf_val_8_t)0);
  } else if (b == 0) {
    return ((gf_val_8_t)0);
  }
  log_sum = gf->data->log_tbl[a] - gf->data->log_tbl[b];
  if (log_sum < 0) {
    log_sum += GF_FIELD_SIZE-1;
  }
  return (gf->data->antilog_tbl[log_sum]);
}

/***************************************/

static
gf_val_8_t
gf_w4_pow (const galois_field_4_t *gf, gf_val_8_t a, uint32_t b)
{
  if (a == 0) {
    return (a);
  } else if (b == 0) {
    return (gf_val_8_t)1;
  } else {
    return (gf->data->antilog_tbl[(gf_val_8_t)((gf->data->log_tbl[a] * (b & 0xfffff)) % (GF_FIELD_SIZE -1))]);
  }
}

static
gf_val_8_t
gf_w4_log (const galois_field_4_t *gf, gf_val_8_t a)
{
    return (gf->data->log_tbl[a]);
}

static
gf_val_8_t
gf_w4_antilog (const galois_field_4_t *gf, gf_val_8_t a)
{
    return (gf->data->antilog_tbl[a]);
}

static
gf_val_8_t
gf_w4_invert (const galois_field_4_t *gf, gf_val_8_t a)
{
    return (gf->data->inv_tbl[a]);
}

static
gf_val_8_t
gf_w4_mult_log (const galois_field_4_t *gf, gf_val_8_t a, gf_val_8_t b)
{
    return (multiply_log (gf, a, b));
}

static
gf_val_8_t
gf_w4_mult_shift (const galois_field_4_t *gf, gf_val_8_t a, gf_val_8_t b)
{
  uint8_t product, i;

  product = 0;

  for (i = 0; i < GF_FIELD_WIDTH; i++) { 
    if (a & (1 << i)) product ^= (b << i);
  }
  for (i = (GF_FIELD_WIDTH*2-1); i >= GF_FIELD_WIDTH; i--) {
    if (product & (1 << i)) product ^= (gf->data->prim_poly << (i-GF_FIELD_WIDTH)); 
  }

  return product;
}

static
gf_val_8_t
gf_w4_mult_bytwo (const galois_field_4_t *gf, gf_val_8_t a, gf_val_8_t b)
{
  uint8_t prod;

  prod = 0;

  while (1) {
    if (a & 1) prod ^= b;
    a >>= 1;
    if (a == 0) return prod;
    if (b & (1 << (GF_FIELD_WIDTH-1))) {
      b = (b << 1) ^ gf->data->prim_poly;
    } else {
      b <<= 1;
    }
  }
}

static
gf_val_8_t
gf_w4_divide (const galois_field_4_t *gf, gf_val_8_t a, gf_val_8_t b)
{
    return (divide (gf, a, b));
}

static
gf_val_8_t
gf_w4_invert_slow (galois_field_4_t *gf, gf_val_8_t a)
{
    if (a == 0) {
        return (0);
    } else if (a == 1) {
        return (1);
    } else {
        return (divide (gf, a, multiply_log (gf, a, a)));
    }
}

static
void
gf_w4_mult_buf_const_bytwo (const galois_field_4_t *gf, const gf_val_8_t *a, gf_val_8_t b, gf_val_8_t *dst, int len)
{
  uint64_t prod;
  uint64_t ta;
  uint64_t tmp1;
  uint64_t tmp2;
  uint64_t *dptr;
  uint64_t *aptr;
  int i;
  uint8_t tb;

  dptr = (uint64_t *) dst;
  aptr = (uint64_t *) a;
  for (i = 0; i < len; i += (sizeof(uint64_t)/sizeof(uint8_t))) {
    tb = b;
    prod = 0;
    ta = *aptr;
    while (1) {
      if (tb & 1) prod ^= ta;
      tb >>= 1;
      if (tb == 0) break;
      tmp1 = (ta << 1) & ((struct gf_bytwo_info *) gf->data->extra)->mask1;
      tmp2 = ta & ((struct gf_bytwo_info *) gf->data->extra)->mask2;
      tmp2 = ((tmp2 << 1) - (tmp2 >> (GF_FIELD_WIDTH-1)));
      ta = (tmp1 ^ (tmp2 & ((struct gf_bytwo_info *) gf->data->extra)->poly));
    }
    *dptr ^= prod;
    dptr++;
    aptr++;
  }

  for (; i < len; i++) {
    tb = gf_w4_mult_bytwo(gf, a[i]&0xf, b);
    tb |= (gf_w4_mult_bytwo(gf, a[i]>>4, b)<<4);
    dst[i] ^= tb;
  }
}

int
gf_w4_init (galois_field_4_t *gf, uint32_t flags)
{
    uint32_t    b;
    int         i, j;
    gf_val_8_t  v;
    gf_val_8_t  *gtbl;
    struct gf_bytwo_info *gbt;

    gf->flags = flags;
    gf->data = (struct gf_data *)malloc (sizeof (struct gf_data));
    gf->data->alpha = 1;
    gf->data->prim_poly = 0x13;
    gf->data->log_tbl[0] = 0;
    for (b = gf->data->alpha, i = 0; i < GF_FIELD_SIZE-1; i++) {
        gf->data->log_tbl[b] = (gf_val_8_t)i;
        gf->data->antilog_tbl[i] = (gf_val_8_t)b;
        gf->data->antilog_tbl[i+GF_FIELD_SIZE-1] = (gf_val_8_t)b;
        b <<= 1;
        if (b & GF_FIELD_SIZE) {
            b = b ^ gf->data->prim_poly;
        }
    }
    gf->data->inv_tbl[0] = 0;  /* Not really, but we need to fill it with something */
    for (i = 0; i < GF_FIELD_SIZE-1; i++) {
        gf->data->inv_tbl[gf->data->antilog_tbl[i]] = gf_w4_invert_slow (gf, gf->data->antilog_tbl[i]);
    }
    gf->log = gf_w4_log;
    gf->antilog = gf_w4_antilog;
    gf->invert = gf_w4_invert;
    gf->divide = gf_w4_divide;
    gf->pow = gf_w4_pow;
    /* I'm ignoring flags for now */
        gbt = (struct gf_bytwo_info *) malloc(sizeof(struct gf_bytwo_info));
        gf->data->extra = (void *) gbt;
        v = (gf->data->prim_poly & 0xff);
        gbt->poly = 0;
        gbt->mask1 = 0;
        gbt->mask2 = 0;
        for (i = 0; i < 64; i += GF_FIELD_WIDTH) {
          gbt->poly <<= GF_FIELD_WIDTH;
          gbt->poly |= v;
          gbt->mask1 <<= GF_FIELD_WIDTH;
          gbt->mask1 |= (GF_FIELD_SIZE-2);
          gbt->mask2 <<= GF_FIELD_WIDTH;
          gbt->mask2 |= (GF_FIELD_SIZE>>1);
        }
        gf->mult = gf_w4_mult_log;
        gf->mult_buf_const = gf_w4_mult_buf_const_bytwo;
        gf->mult_buf_buf = NULL;
        gf->mult_acc_buf = NULL;
        gf->mult_acc_sig = NULL;
    return (1);
}

void
gf_w4_free (galois_field_4_t *gf)
{
    if (gf->data != NULL) {
        if (gf->data->extra != NULL) {
            free (gf->data->extra);
        }
        free (gf->data);
    }
    gf->data = NULL;
}

static
int
unit_test (uint32_t flags, int base)
{
    int counts[256];
    uint32_t    i, j, k, l, r;
    gf_val_8_t  r1, r2, r3;
    gf_val_8_t  *p1, *p2;
    uint64_t    testbuf[8192];
    gf_val_8_t  bufs[256];
    int         testlen = 8192;
    gf_val_8_t  smallbuf[256];
    gf_val_8_t  smallbuf2[256];
    __m128i     va, vb, vr;
    int         stage;
    galois_field_4_t    gf;

    gf_w4_init (&gf, flags);

    /* Check that antilog (log (x)) == x
     * Note that we have to start at 1, since log (0) is undefined.
     */
    stage = 1;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        r = gf.antilog (&gf, gf.log (&gf, (gf_val_16_t)i));
        if (r != (gf_val_16_t)i) {
            goto fail;
        }
    }

    /* Check that log (antilog (x)) == x
     * Note that we have to end at 65534, since antilog (65535) is undefined.
     */
    stage = 2;
    for (i = 0; i < GF_FIELD_SIZE-1; i++) {
        r = gf.log (&gf, gf.antilog (&gf, (gf_val_16_t)i));
        if (r != (gf_val_16_t)i) {
            goto fail;
        }
    }

     /* Check that inverses really are */
    stage = 3;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        r1 = gf.mult (&gf, gf.invert (&gf, (gf_val_16_t)i), (gf_val_16_t)i);
        if (r1 != (gf_val_16_t)1) {
            goto fail;
        }
    }

    /* Check multiplicative identity */
    stage = 4;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        r1 = gf.mult (&gf, (gf_val_16_t)i, (gf_val_16_t)1); 
        if (r1 != (gf_val_16_t)i) {
            goto fail;
        }
    }

    /* Check that inverses really are */
    stage = 5;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        r1 = gf.mult (&gf, gf.invert (&gf, (gf_val_8_t)i), (gf_val_8_t)i);
        if (r1 != (gf_val_8_t)1) {
            goto fail;
        }
    }

    /* Check powers */
    stage = 6;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        r1 = (gf_val_8_t)(i > 0 ? 1 : 0);
        for (j = 0; j < GF_FIELD_SIZE*2; j++) {
            r2 = gf.pow (&gf, (gf_val_8_t)i, j);
            if (r1 != r2) {
                goto fail;
            }
            r1 = gf.mult (&gf, r1, (gf_val_8_t)i);
        }
    }

    /*
     * Test all multiplications to ensure 1-1 mapping and commutativity.
     */
    stage = 11;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            counts[j] = 0;
        }
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            r1 = gf.mult (&gf, (gf_val_16_t)i, (gf_val_16_t)j);
            counts[r1] += 1;
            r2 = gf.mult (&gf, (gf_val_16_t)j, (gf_val_16_t)i);
            if (r1 != r2) {
                goto fail;
            }
        }
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            if (counts[j] != 1) {
                goto fail;
            }
        }
    }

    /* Check that divide and multiply work */
    stage = 12;
    for (i = 1; i < GF_FIELD_SIZE; i++) {
        for (j = 1; j < GF_FIELD_SIZE; j++) {
            r1 = gf.divide (&gf, (gf_val_8_t)i, (gf_val_8_t)j);
            r2 = gf.mult (&gf, (gf_val_8_t)i, gf.invert (&gf, (gf_val_8_t)j));
            if (r1 != r2) {
                goto fail;
            }
        }
    }

    /* Check distribution property */
    stage = 13;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        for (j = 0; j < GF_FIELD_SIZE; j++) {
            for (k = 0; k < GF_FIELD_SIZE; k++) {
                r1 = gf.mult (&gf, (gf_val_8_t)i, (gf_val_8_t)k) ^ gf.mult (&gf, (gf_val_8_t)j, (gf_val_8_t)k);
                r2 = gf.mult (&gf, (gf_val_8_t)i ^ (gf_val_8_t)j, (gf_val_8_t)k);
                if (r1 != r2) {
                    goto fail;
                }
            }
        }
    }


    /* Check buffer-constant multiplication */    
    stage = 21;
    p1 = (gf_val_8_t *)testbuf;
    galois_fill_random (testbuf, testlen * 2, 0);
    p2 = (gf_val_8_t *)testbuf + testlen * 2 + 16;
    for (i = 0; i < GF_FIELD_SIZE; i++) {
        /* Test odd offsets and lengths - it can give vectorization fits */
        for (j = 0; j < 8; j += 1) {
            /* Zero out the result buffer, just in case */
            memset (p2, 0, testlen + j);
            gf.mult_buf_const (&gf, p1+j, (gf_val_8_t)i, p2, testlen+j);
            for (k = 0; k < testlen+j; k ++) {
                r1 = p1[j+k]&0xf;
                r1 = gf.mult (&gf, r1, (gf_val_8_t)i);
                if (r1 != (p2[k]&0xf)) {
                    printf("Problem widh k=%d.  p1[%d] = %d.  p2[%d] = %d.  b = %d\n",
                        k, k+j, p1[j+k], k+j, p2[j+k], i);
                    printf("First byte is %d.  Multiplied: %d.  P2: %d\n",
                       p1[j+k]&0xf, r1, p2[k]&0xf);
                    goto fail;
                }
                r1 = p1[j+k]>>4;
                r1 = gf.mult (&gf, r1, (gf_val_8_t)i);
                if (r1 != (p2[k]>>4)) {
                    goto fail;
                }
            }
        }
    }



    return (0);
fail:
    return (stage + base);
}


int
gf_w4_unit_test ()
{
    int     r;

    if ((r = unit_test (GALOIS_MULT_BYTWO, 2000)) > 0) {
        return (r);
    }
    return (0);
}




