\begin{tiny}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "gpu_galois.h"

/* Globals for regions on device 
   to avoid reallocating and freeing region 
   during every function call.
   One src/dst pair per word size: c_* for w=8 (bytes), s_* for w=16
   (shorts), l_* for w=32 (ints).  NULL until the first call of the
   corresponding region-multiply function allocates them. */
unsigned char *c_ur1_d = NULL;
unsigned char *c_ur2_d = NULL;
unsigned short *s_ur1_d = NULL;
unsigned short *s_ur2_d = NULL;
unsigned int *l_ur1_d = NULL;
unsigned int *l_ur2_d = NULL;

/* Primitive polynomial global.
   prim_poly[w] is the primitive polynomial for GF(2^w), written in
   octal; products are reduced modulo this polynomial.  Index 0 is
   unused. */
static int prim_poly[33] = 
{ 0, 
	/*  1 */     1, 
	/*  2 */    07,
	/*  3 */    013,
	/*  4 */    023,
	/*  5 */    045,
	/*  6 */    0103,
	/*  7 */    0211,
	/*  8 */    0435,
	/*  9 */    01021,
	/* 10 */    02011,
	/* 11 */    04005,
	/* 12 */    010123,
	/* 13 */    020033,
	/* 14 */    042103,
	/* 15 */    0100003,
	/* 16 */    0210013,
	/* 17 */    0400011,
	/* 18 */    01000201,
	/* 19 */    02000047,
	/* 20 */    04000011,
	/* 21 */    010000005,
	/* 22 */    020000003,
	/* 23 */    040000041,
	/* 24 */    0100000207,
	/* 25 */    0200000011,
	/* 26 */    0400000107,
	/* 27 */    01000000047,
	/* 28 */    02000000011,
	/* 29 */    04000000005,
	/* 30 */    010040000007,
	/* 31 */    020000000011, 
	/* 32 */    00020000007 };  
	/* Really 40020000007, but we're omitting the high order bit
	   (it would not fit in a 32-bit int). */

/* Power of 2 globals: nw[w] = 2^w, the number of elements in GF(2^w).
   Index 0 is unused.
   NOTE(review): (1 << 31) overflows signed int, which is formally
   undefined behavior in C; this table relies on the common
   two's-complement result.  nw[32] is the sentinel -1. */
static int nw[33] = { 0, (1 << 1), (1 << 2), (1 << 3), (1 << 4), 
	(1 << 5), (1 << 6), (1 << 7), (1 << 8), (1 << 9), (1 << 10),
	(1 << 11), (1 << 12), (1 << 13), (1 << 14), (1 << 15), (1 << 16),
	(1 << 17), (1 << 18), (1 << 19), (1 << 20), (1 << 21), (1 << 22),
	(1 << 23), (1 << 24), (1 << 25), (1 << 26), (1 << 27), (1 << 28),
	(1 << 29), (1 << 30), (1 << 31), -1 };

/* Power of 2 - 1 globals: nwm1[w] = 2^w - 1, the largest element of
   GF(2^w) and the order of its multiplicative group.  Index 0 is
   unused.  nwm1[32] = 0xffffffff, which stored in an int is -1. */
static int nwm1[33] = { 0, (1 << 1)-1, (1 << 2)-1, (1 << 3)-1, (1 << 4)-1, 
	(1 << 5)-1, (1 << 6)-1, (1 << 7)-1, (1 << 8)-1, (1 << 9)-1, (1 << 10)-1,
	(1 << 11)-1, (1 << 12)-1, (1 << 13)-1, (1 << 14)-1, (1 << 15)-1, (1 << 16)-1,
	(1 << 17)-1, (1 << 18)-1, (1 << 19)-1, (1 << 20)-1, (1 << 21)-1, (1 << 22)-1,
	(1 << 23)-1, (1 << 24)-1, (1 << 25)-1, (1 << 26)-1, (1 << 27)-1, (1 << 28)-1,
	(1 << 29)-1, (1 << 30)-1, 0x7fffffff, 0xffffffff };

/* Global tables on device and host
   Logarithm, inverse logarithm, multiplication, and division.
   The *_d arrays hold device pointers (cached after the first build);
   the static arrays hold the host copies.  Entry [w] is the table for
   GF(2^w); all start NULL and are filled lazily by the create
   functions below. */
int *galois_log_tables_d[33] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

static int *galois_log_tables[33] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

int *galois_ilog_tables_d[33] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

/* NOTE: after galois_create_log_tables(w), galois_ilog_tables[w] points
   nwm1[w] entries INTO its allocation so it accepts negative exponents. */
static int *galois_ilog_tables[33] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

int *galois_mult_tables_d[33] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

static int *galois_mult_tables[33] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

int *galois_div_tables_d[33] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

static int *galois_div_tables[33] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

/* Tables for special case for w = 32:
   seven 2^16-entry split tables (see galois_create_split_w8_tables).
   galois_split_w8_d is one contiguous device block holding all seven. */

int *galois_split_w8_d = NULL; 

static int *galois_split_w8[7] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL };

/* Kernel: fill every entry of the log table with the sentinel value
   *nwm1_d (= 2^w - 1).  The host later overwrites each reachable
   entry; any slot still holding the sentinel flags an error.
   One thread per table entry; the launch covers the table exactly. */
__global__ void init_log_tables(int *nwm1_d, int *log_d) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	log_d[idx] = *nwm1_d;
}

/* Kernel: zero every entry of the inverse-log table.
   One thread per table entry; the launch covers the table exactly. */
__global__ void init_ilog_tables(int *ilog_d) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	ilog_d[idx] = 0;
}

/* Kernel that continues initializing inverse log table:
   replicates the base ilog values (indices 0..nw-1) into the second
   and third regions of the triple-sized table, at offsets +nwm1 and
   +2*nwm1, so the table can later be indexed with exponents in
   [-nwm1, 2*nwm1] via a pointer pre-offset by nwm1.
   NOTE(review): the write range [nwm1, ...] overlaps the read range
   at index nwm1 (thread 0 writes ilog_d[nwm1] while thread nwm1 reads
   it), so across blocks this looks like a potential race -- confirm
   against the host-side copy that precedes this launch. */
__global__ void another_init_ilog_tables(int *nwm1_d, int *ilog_d) {
	ilog_d[blockIdx.x*blockDim.x+threadIdx.x+(*nwm1_d)] =  ilog_d[blockIdx.x*blockDim.x+threadIdx.x];
	ilog_d[blockIdx.x*blockDim.x+threadIdx.x+(*nwm1_d)*2] =  ilog_d[blockIdx.x*blockDim.x+threadIdx.x];
}

/* Function called from benchmarks to initialize log and inverse log
   tables for GF(2^w), on both the host and the device.
   The device copies are cached in galois_log_tables_d[w] and
   galois_ilog_tables_d[w]; the host ilog pointer is offset by nwm1[w]
   so callers may index it with negative exponents.
   Returns 0 on success (or if already built), -1 on failure.
   Only w <= 30 is supported.
   Fix vs. original: the error fprintf used a string literal broken
   across source lines (a compile error); nwm1_d was leaked. */
int galois_create_log_tables(int w)
{
	int j, b;
	int *log_d;
	int *ilog_d;
	int *nwm1_d;
	cudaError_t ierr;

	if (w > 30) return -1;
	if (galois_log_tables[w] != NULL) return 0;  /* already built */

	/* Host tables first, so a malloc failure leaks no device memory. */
	galois_log_tables[w] = (int *) malloc(sizeof(int)*nw[w]);
	if (galois_log_tables[w] == NULL) return -1;

	/* The ilog table is tripled so it can be indexed at x +/- nwm1[w]
	   without wrapping (see another_init_ilog_tables). */
	galois_ilog_tables[w] = (int *) malloc(sizeof(int)*nw[w]*3);
	if (galois_ilog_tables[w] == NULL) { 
		free(galois_log_tables[w]);
		galois_log_tables[w] = NULL;
		return -1;
	}

	ierr = cudaMalloc((void **)&(log_d), sizeof(int)*nw[w]);
	ierr = cudaMalloc((void **)&(ilog_d), sizeof(int)*nw[w]*3);
	ierr = cudaMalloc((void **)&(nwm1_d), sizeof(int));
	cudaMemcpy(nwm1_d, &(nwm1[w]), sizeof(int), cudaMemcpyHostToDevice);

	/* First round of kernels: fill log with the sentinel nwm1[w] and
	   zero the ilog table.  nw[w] is a power of two, so the grid
	   covers the tables exactly. */
	if (nw[w] <= 32) { 
		init_log_tables<<< 1, nw[w] >>>(nwm1_d, log_d);
		init_ilog_tables<<< 1, nw[w] >>>(ilog_d);
	}
	else {
		init_log_tables<<< nw[w]/32, 32 >>>(nwm1_d, log_d);
		init_ilog_tables<<< nw[w]/32, 32 >>>(ilog_d);
	}
	cudaMemcpy(galois_log_tables[w], log_d, sizeof(int)*nw[w], 
		cudaMemcpyDeviceToHost);
	cudaMemcpy(galois_ilog_tables[w], ilog_d, sizeof(int)*nw[w]*3, 
		cudaMemcpyDeviceToHost);

	/* Discrete-log walk: b runs through 2^j mod prim_poly[w].  This
	   section cannot be placed on the GPU because it is not
	   parallelizable -- later elements depend on earlier ones. */
	b = 1;
	for (j = 0; j < nwm1[w]; j++) {
		if (galois_log_tables[w][b] != nwm1[w]) {
			/* b was revisited before all exponents were used. */
			fprintf(stderr,
				"Galois_create_log_tables Error: j=%d, b=%d, B->J[b]=%d, J->B[j]=%d (0%o)\n",
				j, b, galois_log_tables[w][b], galois_ilog_tables[w][j],
				(b << 1) ^ prim_poly[w]);
			exit(1);
		}
		galois_log_tables[w][b] = j;
		galois_ilog_tables[w][j] = b;
		b = b << 1;
		if (b & nw[w]) b = (b ^ prim_poly[w]) & nwm1[w];
	}

	/* Push the finished ilog data back to the device and replicate it
	   into the second and third thirds of the triple-sized table. */
	cudaMemcpy(ilog_d, galois_ilog_tables[w], sizeof(int)*nw[w]*3, 
		cudaMemcpyHostToDevice);
	cudaMemcpy(nwm1_d, &(nwm1[w]), sizeof(int), cudaMemcpyHostToDevice); 

	if (nw[w] <= 32) { 
		another_init_ilog_tables<<< 1, nw[w] >>>(nwm1_d, ilog_d);
	}
	else {
		another_init_ilog_tables<<< nw[w]/32, 32 >>>(nwm1_d, ilog_d);
	}
	cudaMemcpy(galois_ilog_tables[w], ilog_d, sizeof(int)*nw[w]*3, 
		cudaMemcpyDeviceToHost);
	cudaMemcpy(log_d, galois_log_tables[w], sizeof(int)*nw[w], 
		cudaMemcpyHostToDevice);

	/* Cache the device tables; nwm1_d was only a kernel argument
	   (previously leaked). */
	galois_log_tables_d[w] = log_d;
	galois_ilog_tables_d[w] = ilog_d;
	cudaFree(nwm1_d);

	/* Offset the host ilog pointer so it accepts negative exponents. */
	galois_ilog_tables[w] += nwm1[w];
	return 0;
}

/* Kernel for initializing multiplication and division table.
   One thread per (x, y) pair: j is the flattened index, x = j / nw,
   y = j % nw.  Products and quotients are computed through the
   log/ilog tables: x*y = ilog[log x + log y], x/y = ilog[log x - log y].
   The ilog pointer passed in is pre-offset by nwm1, so the possibly
   negative index (logx - log[y]) is still in bounds.
   Conventions: anything times 0 is 0; anything divided by 0 is -1. */
__global__ void init_mult_div_tables(int *nw, int *mult, int *div, int *log, int *ilog) {
	int j, x, y, logx;
	j = blockIdx.x*blockDim.x+threadIdx.x;
	x = j/(*nw);
	y = j%(*nw);
	logx = log[x];

	// Calculate using inverse log and log tables.
	// (x*y > 0) means both operands are nonzero, since x, y >= 0.
	mult[j] =  (x*y > 0) ? ilog[logx+log[y]] : 0;
	div[j] = (x*y > 0) ? ilog[logx-log[y]] : 0;
	div[j] = (y > 0) ? div[j] : -1;

}

/* Function called by benchmarks for creating the full nw[w] x nw[w]
   multiplication and division tables for GF(2^w), on host and device.
   Device copies are cached in galois_mult_tables_d[w] and
   galois_div_tables_d[w].  Returns 0 on success (or if already
   built), -1 on failure.  Only w < 14 is supported, since the tables
   are quadratic in the field size.
   Fix vs. original: the temporary device copies of the log/ilog/nw
   data were leaked; unused locals removed. */
int galois_create_mult_tables(int w)
{
	cudaError_t ierr;

	if (w >= 14) return -1;
	if (galois_mult_tables[w] != NULL) return 0;  /* already built */

	/* Host tables. */
	galois_mult_tables[w] = (int *) malloc(sizeof(int) * nw[w] * nw[w]);
	if (galois_mult_tables[w] == NULL) return -1;

	galois_div_tables[w] = (int *) malloc(sizeof(int) * nw[w] * nw[w]);
	if (galois_div_tables[w] == NULL) {
		free(galois_mult_tables[w]);
		galois_mult_tables[w] = NULL;
		return -1;
	}

	/* The kernel computes entries through the log/ilog tables, so
	   make sure those exist first. */
	if (galois_log_tables[w] == NULL) {
		if (galois_create_log_tables(w) < 0) {
			free(galois_mult_tables[w]);
			free(galois_div_tables[w]);
			galois_mult_tables[w] = NULL;
			galois_div_tables[w] = NULL;
			return -1;
		}
	}

	int *mult_d, *div_d, *log_d, *ilog_d, *nw_d;
	/* Device space.  mult_d/div_d are kept (cached below); the
	   log/ilog/nw copies are temporaries freed at the end. */
	ierr = cudaMalloc((void **)&(mult_d), sizeof(int)*nw[w]*nw[w]);
	ierr = cudaMalloc((void **)&(div_d), sizeof(int)*nw[w]*nw[w]); 
	ierr = cudaMalloc((void **)&(log_d), sizeof(int)*nw[w]); 
	ierr = cudaMalloc((void **)&(ilog_d), sizeof(int)*nw[w]*3); 
	ierr = cudaMalloc((void **)&(nw_d), sizeof(int)); 

	/* galois_ilog_tables[w] was offset by nwm1[w] when built, so back
	   up to the true start of the allocation before copying. */
	cudaMemcpy(ilog_d, (galois_ilog_tables[w]-nwm1[w]), sizeof(int)*nw[w]*3, 
			cudaMemcpyHostToDevice);
	cudaMemcpy(log_d, galois_log_tables[w], sizeof(int)*nw[w], 
			cudaMemcpyHostToDevice);
	cudaMemcpy(nw_d, &nw[w], sizeof(int), cudaMemcpyHostToDevice);

	/* One thread per (x, y) pair.  The ilog pointer is pre-offset by
	   nwm1[w] so negative log differences stay in bounds. */
	if (nw[w]*nw[w] > 32) {
		init_mult_div_tables<<<(nw[w]*nw[w])/32, 32 >>>(nw_d, mult_d, div_d, log_d, ilog_d+nwm1[w]);
	}
	else { 
		init_mult_div_tables<<<1, nw[w]*nw[w]>>>(nw_d, mult_d, div_d, log_d, ilog_d+nwm1[w]);
	}
	cudaMemcpy(galois_mult_tables[w], mult_d, sizeof(int)*nw[w]*nw[w], 
			cudaMemcpyDeviceToHost);
	cudaMemcpy(galois_div_tables[w], div_d, sizeof(int)*nw[w]*nw[w], 
			cudaMemcpyDeviceToHost);
	galois_mult_tables_d[w] = mult_d;
	galois_div_tables_d[w] = div_d;

	/* Release the temporary device copies (previously leaked). */
	cudaFree(log_d);
	cudaFree(ilog_d);
	cudaFree(nw_d);

	return 0;
}

/* Kernel for w = 8, always called: one thread per byte, each doing a
   row lookup in the 256x256 multiplication table (*srow_d is the
   precomputed row offset multby * 256).  Threads past *nbytes_d
   (padding) write 0.  Adapted from Dr. Plank's Fast Galois Field
   library. */
__global__ void mult_w8_null(int *nbytes_d, 
unsigned char *ur1_d, 
unsigned char* ur2_d, 
int *mult_d, 
int *srow_d) {

	int idx = blockIdx.x*blockDim.x + threadIdx.x;
	if (idx < *nbytes_d)
		ur2_d[idx] = mult_d[*srow_d + ur1_d[idx]];
	else
		ur2_d[idx] = 0;
}

/* Kernel for w = 8 that completes the add (XOR) pass: folds l_d into
   orig_d one unsigned long per thread.  Also used by the w = 16
   region multiply.
   NOTE(review): there is no bounds guard; when the element count is
   not a multiple of 512 the caller rounds the grid up, so the final
   block's threads run past the logical end of the data -- confirm
   the padded device buffers are large enough to absorb this. */
__global__ void mult_w8_notnull(unsigned long *l_d, unsigned long *orig_d) {
	int idx = blockIdx.x*blockDim.x + threadIdx.x;
	orig_d[idx] ^= l_d[idx];
}

/* Function called by benchmark for w = 8 region multiply.
   Multiplies every byte of `region` by `multby` in GF(2^8) on the
   GPU.  If r2 is NULL the result overwrites `region`; otherwise it
   goes to r2, and if `add` is nonzero it is XORed into r2's current
   contents instead.
   Fixes vs. original: srow_d and nbytes_d were leaked on every call;
   the cached device buffers were sized by the FIRST call only, so a
   later, larger region overran them -- they are now reallocated when
   too small; unused locals removed. */
void galois_w08_region_multiply(char *region,      /* Region to multiply */
		int multby,       /* Number to multiply by */
		int nbytes,        /* Number of bytes in region */
		char *r2,          /* If r2 != NULL, products go here */
		int add)
{
	unsigned char *ur1, *ur2;
	int srow;
	cudaError_t ierr;
	/* Capacity (in bytes) of the cached device buffers c_ur?_d. */
	static int c_capacity = 0;

	ur1 = (unsigned char *) region;
	ur2 = (r2 == NULL) ? ur1 : (unsigned char *) r2;

	/* Create tables if needed. */
	if (galois_mult_tables[8] == NULL) {
		if (galois_create_mult_tables(8) < 0) {
			fprintf(stderr, "galois_08_region_multiply -- couldn't make multiplication tables\n");
			exit(1);
		}
	}
	srow = multby * nw[8];  /* row offset into the 256x256 table */

	unsigned char * ur1_d;
	unsigned char * ur2_d;
	int * mult_d;
	int *nbytes_d;
	int *srow_d;
	ierr = cudaMalloc((void **)&(srow_d),sizeof(int));
	ierr = cudaMalloc((void **)&(nbytes_d),sizeof(int));

	// Pad region to a multiple of 512 to maximize thread usage.
	int total = nbytes+512-nbytes%512;

	/* (Re)allocate the cached device buffers when absent or too small
	   for this call; otherwise reuse the previous allocation. */
	if (c_ur1_d == NULL || total > c_capacity) {
		if (c_ur1_d != NULL) {
			cudaFree(c_ur1_d);
			cudaFree(c_ur2_d);
		}
		ierr = cudaMalloc((void **)&(ur1_d),sizeof(unsigned char)*(total));
		ierr = cudaMalloc((void **)&(ur2_d),sizeof(unsigned char)*(total));
		c_ur1_d = ur1_d;
		c_ur2_d = ur2_d;
		c_capacity = total;
	}
	else {
		ur1_d = c_ur1_d;
		ur2_d = c_ur2_d;
	}

	// Copy relevant data from host to device.
	cudaMemcpy(nbytes_d,&nbytes,sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(ur1_d,ur1,sizeof(unsigned char)*nbytes,cudaMemcpyHostToDevice);
	mult_d = galois_mult_tables_d[8];
	cudaMemcpy(srow_d, &srow, sizeof(int), cudaMemcpyHostToDevice);

	// Call kernels, error check, and copy result from device to host.
	if(r2 == NULL || !add){
		/* Plain product: ur2 = multby * ur1. */
		mult_w8_null<<<total/512,512 >>>(nbytes_d, ur1_d, ur2_d, mult_d, srow_d);
		ierr = cudaGetLastError();
		if (ierr != cudaSuccess) {
			fprintf(stderr, "Error (%d): %s\n",total/512, cudaGetErrorString(ierr));
		}
		cudaMemcpy(ur2, ur2_d, sizeof(unsigned char)*nbytes, 
				cudaMemcpyDeviceToHost); 
	}
	else{
		/* ur2 ^= multby * ur1: compute products into ur2_d, load the
		   current r2 contents into ur1_d, then XOR long-at-a-time. */
		mult_w8_null<<<total/512,512 >>>(nbytes_d, ur1_d, ur2_d, mult_d, srow_d);

		ierr = cudaGetLastError();
		if (ierr != cudaSuccess) {
			fprintf(stderr, "Error (%d): %s\n", total/512, cudaGetErrorString(ierr));
		}
		cudaMemcpy(ur1_d, ur2, sizeof(unsigned char)*nbytes, 
				cudaMemcpyHostToDevice); 

		/* Round the grid up when the long count is not a multiple of
		   512 (mult_w8_notnull has no bounds guard; the padded
		   buffers absorb the overrun). */
		if ((nbytes/sizeof(long))%512) 
			mult_w8_notnull<<<(nbytes/sizeof(long))/512+1, 512 >>>((unsigned long *)ur2_d, (unsigned long *)ur1_d);	
		else
			mult_w8_notnull<<<(nbytes/sizeof(long))/512, 512 >>>((unsigned long *)ur2_d, (unsigned long *)ur1_d);

		ierr = cudaGetLastError();
		if (ierr != cudaSuccess) {
			fprintf(stderr, "Error %d: %s\n", nbytes/(sizeof(long))/32, 
				cudaGetErrorString(ierr));
		}

		cudaMemcpy(ur2, ur1_d, sizeof(unsigned char)*nbytes, 
			cudaMemcpyDeviceToHost); 
	}

	/* Release the per-call scalars (previously leaked every call). */
	cudaFree(srow_d);
	cudaFree(nbytes_d);
	return;
}

/* Kernel for the w = 16 multiply-by-zero special case: zeroes the
   destination, one unsigned long per thread. */
__global__ void init_w16_multby_0(unsigned long *lp2_d) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	lp2_d[idx] = 0;
}

/* Kernel for the w = 16 product: one thread per 16-bit word, computed
   through the log/ilog tables as ilog[log(src) + log(multby)], where
   *log1_d = log(multby) is precomputed on the host and ilog_d is
   pre-offset by nwm1[16].  Zero source words map to zero. */
__global__ void mult_w16_null(unsigned short *ur1_d, 
		unsigned short *ur2_d, 
		int *log_d, 
		int *ilog_d, 
		int *log1_d) {
	int idx = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned short src = ur1_d[idx];
	if (src == 0) {
		ur2_d[idx] = 0;
	}
	else {
		ur2_d[idx] = ilog_d[log_d[src] + *log1_d];
	}
}

/* Function called by benchmark for w = 16 region multiply.
   Multiplies every 16-bit word of `region` by `multby` in GF(2^16)
   on the GPU.  If r2 is NULL the result overwrites `region`;
   otherwise it goes to r2, XORed in when `add` is nonzero.
   Fixes vs. original: lp2_d (multiply-by-zero path) and log1_d were
   leaked on every call; the cached device buffers were sized by the
   FIRST call only and are now reallocated when too small; unused
   locals removed; the inner `total` no longer shadows the outer. */
void galois_w16_region_multiply(char *region,      /* Region to multiply */
		int multby,       /* Number to multiply by */
		int nbytes,        /* Number of bytes in region */
		char *r2,          /* If r2 != NULL, products go here */
		int add)
{
	unsigned short *ur1, *ur2;
	int log1;
	unsigned long *lp2;
	cudaError_t ierr;
	/* Capacity (in shorts) of the cached device buffers s_ur?_d. */
	static int s_capacity = 0;

	ur1 = (unsigned short *) region;
	ur2 = (r2 == NULL) ? ur1 : (unsigned short *) r2;
	nbytes /= 2;  /* nbytes is now a count of 16-bit words */

	unsigned long *lp2_d;

	// Multiply a region by 0 -- special case: zero the destination
	// (unless we are adding, in which case nothing changes).
	if (multby == 0) {
		if (!add) {
			lp2 = (unsigned long *) ur2;
			int tot = (nbytes*2)/sizeof(unsigned long);
			int ltotal = tot+512-tot%512;
			ierr = cudaMalloc((void **)&(lp2_d), ltotal*sizeof(unsigned long));
			init_w16_multby_0<<<ltotal/512, 512>>>(lp2_d);
			ierr = cudaGetLastError();
			if (ierr != cudaSuccess) {
				fprintf(stderr, "Error %s\n", cudaGetErrorString(ierr));
			}
			cudaMemcpy(lp2, lp2_d, nbytes*2, cudaMemcpyDeviceToHost);
			cudaFree(lp2_d);  /* previously leaked */
		}
		return;
	}

	// Create tables if needed.
	if (galois_log_tables[16] == NULL) {
		if (galois_create_log_tables(16) < 0) {
			fprintf(stderr, "galois_16_region_multiply -- couldn't make log tables\n");
			exit(1);
		}
	}
	log1 = galois_log_tables[16][multby];  /* log(multby), reused per word */

	unsigned short *ur1_d, *ur2_d;
	int *log_d, *ilog_d;
	int *log1_d;
	/* Pad to a multiple of 512 words to fill every thread block. */
	int total = nbytes+512-nbytes%512;

	/* (Re)allocate the cached device buffers when absent or too small
	   for this call; otherwise reuse the previous allocation. */
	if (s_ur1_d == NULL || total > s_capacity) {
		if (s_ur1_d != NULL) {
			cudaFree(s_ur1_d);
			cudaFree(s_ur2_d);
		}
		ierr = cudaMalloc((void **)&(ur1_d), sizeof(unsigned short)*total);
		ierr = cudaMalloc((void **)&(ur2_d), sizeof(unsigned short)*total);
		s_ur1_d = ur1_d;
		s_ur2_d = ur2_d;
		s_capacity = total;
	}
	else {
		ur1_d = s_ur1_d;
		ur2_d = s_ur2_d;
	}
	ierr = cudaMalloc((void **)&(log1_d), sizeof(int));

	// Copy relevant data to device.
	cudaMemcpy(ur1_d, ur1, sizeof(unsigned short)*nbytes, cudaMemcpyHostToDevice);
	cudaMemcpy(log1_d, &log1, sizeof(int), cudaMemcpyHostToDevice);
	log_d = galois_log_tables_d[16];
	ilog_d = galois_ilog_tables_d[16];

	// Call the kernel, error check, and copy result from device to host.
	if (r2 == NULL || !add) {
		/* Plain product: ur2 = multby * ur1. */
		mult_w16_null<<<total/512, 512>>>(ur1_d, ur2_d, log_d, ilog_d+nwm1[16], log1_d);
		ierr = cudaGetLastError();
		if (ierr != cudaSuccess) {
			fprintf(stderr, "Error(%d) %s\n", total/512, cudaGetErrorString(ierr));
		}
		cudaMemcpy(ur2, ur2_d, sizeof(unsigned short)*nbytes, 
			cudaMemcpyDeviceToHost);
	}
	else {
		/* ur2 ^= multby * ur1: products into ur2_d, current r2
		   contents into ur1_d, then XOR long-at-a-time. */
		mult_w16_null<<<total/512, 512>>>(ur1_d, ur2_d, log_d, ilog_d+nwm1[16], log1_d);
		ierr = cudaGetLastError();
		if (ierr != cudaSuccess) {
			fprintf(stderr, "Error %s\n", cudaGetErrorString(ierr));
		}
		cudaMemcpy(ur1_d, ur2, sizeof(unsigned short)*nbytes, 
			cudaMemcpyHostToDevice); 

		int ltotal = (nbytes*2)/sizeof(long);
		if (ltotal%512) 
			mult_w8_notnull<<<ltotal/512+1, 512 >>>((unsigned long *)ur2_d, (unsigned long *)ur1_d);	
		else
			mult_w8_notnull<<<ltotal/512, 512 >>>((unsigned long *)ur2_d, (unsigned long *)ur1_d);	

		cudaMemcpy(ur2, ur1_d, sizeof(unsigned short)*nbytes, 
			cudaMemcpyDeviceToHost); 
	}

	/* Release the per-call scalar (previously leaked every call). */
	cudaFree(log1_d);
	return; 
}

/* Kernel for the w = 32 product (overwrite variant), adapted from
   Plank's Fast Galois Arithmetic library.  Each thread handles one
   32-bit word.  The product of byte i of multby and byte j of the
   source word lives in split table i+j; acache_d[i] holds byte i of
   multby pre-shifted into bits 8..15, so ORing it with source byte b
   (bits 0..7) forms the table index.  The original hand-unrolled the
   4x4 loop; #pragma unroll preserves that while keeping the loop
   structure readable.  (The original indexed as (T + a) | b; since a
   occupies bits 8..15, b bits 0..7, and T is a multiple of 2^16, this
   equals T + (a | b) used below.) */
__global__ void	mult_w32_null(unsigned int *ur1_d, 
		unsigned int *ur2_d, 
		int *split_w8_d, 
		int *acache_d) {
	const int tsize = 1 << 16;  /* entries per split table */
	int k = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int word = ur1_d[k];
	int acc = 0;
	int i, j;

#pragma unroll
	for (i = 0; i < 4; i++) {
		int a = acache_d[i];
#pragma unroll
		for (j = 0; j < 4; j++) {
			int b = (word >> (8*j)) & 255;
			acc ^= split_w8_d[(i+j)*tsize + (a | b)];
		}
	}

	// Place result in proper place.
	ur2_d[k] = acc;
}

/* Kernel for the w = 32 product (XOR-accumulate variant), adapted
   from Plank's Fast Galois Arithmetic library.  Identical to
   mult_w32_null except the final store XORs the product into the
   existing destination word instead of overwriting it.  See
   mult_w32_null for the table-indexing scheme; the original's
   hand-unrolled 4x4 loop is reproduced here with #pragma unroll. */
__global__ void	mult_w32_notnull(unsigned int *ur1_d, 
		unsigned int *ur2_d, 
		int *split_w8_d, 
		int *acache_d) {
	const int tsize = 1 << 16;  /* entries per split table */
	int k = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int word = ur1_d[k];
	int acc = 0;
	int i, j;

#pragma unroll
	for (i = 0; i < 4; i++) {
		int a = acache_d[i];
#pragma unroll
		for (j = 0; j < 4; j++) {
			int b = (word >> (8*j)) & 255;
			acc ^= split_w8_d[(i+j)*tsize + (a | b)];
		}
	}

	// XOR current value with product result
	// and put in proper place in region.
	ur2_d[k] = ur2_d[k] ^ acc;
}

/* Function called by benchmarks for w = 32 region multiply.
   Multiplies every 32-bit word of `region` by `multby` in GF(2^32)
   on the GPU using the seven split-w8 tables.  If r2 is NULL the
   result overwrites `region`; otherwise it goes to r2, XORed in when
   `add` is nonzero.
   Fixes vs. original: acache_d was leaked on every call; the cached
   device buffers were sized by the FIRST call only and are now
   reallocated when too small; unused locals removed. */
void galois_w32_region_multiply(char *region,      /* Region to multiply */
		int multby,       /* Number to multiply by */
		int nbytes,        /* Number of bytes in region */
		char *r2,          /* If r2 != NULL, products go here */
		int add)
{
	unsigned int *ur1, *ur2;
	int i, i8;
	int acache[4];
	/* Capacity (in ints) of the cached device buffers l_ur?_d. */
	static int l_capacity = 0;

	ur1 = (unsigned int *) region;
	ur2 = (r2 == NULL) ? ur1 : (unsigned int *) r2;
	nbytes /= sizeof(int);  /* nbytes is now a count of 32-bit words */

	// Create tables if needed.
	if (galois_split_w8[0]== NULL) {
		if (galois_create_split_w8_tables() < 0) {
			fprintf(stderr, "galois_32_region_multiply -- couldn't make split multiplication tables\n");
			exit(1);
		}
	}

	/* Pre-shift each byte of multby into bits 8..15 so the kernels
	   can OR it with a source byte to index the split tables. */
	i8 = 0;
	for (i = 0; i < 4; i++) {
		acache[i] = (((multby >> i8) & 255) << 8);
		i8 += 8;
	}

	cudaError_t ierr;
	int *acache_d;
	unsigned int *ur1_d;
	unsigned int *ur2_d;
	int *split_w8_d;
	/* Pad to a multiple of 512 words to fill every thread block. */
	int total = nbytes+512-nbytes%512;

	/* (Re)allocate the cached device buffers when absent or too small
	   for this call; otherwise reuse the previous allocation. */
	if (l_ur1_d == NULL || total > l_capacity) {
		if (l_ur1_d != NULL) {
			cudaFree(l_ur1_d);
			cudaFree(l_ur2_d);
		}
		ierr = cudaMalloc((void **)&(ur1_d), sizeof(int)*total);
		if (ierr != cudaSuccess) {
			fprintf(stderr, "error at ur1_d\n");
		}
		ierr = cudaMalloc((void **)&(ur2_d), sizeof(int)*total);
		l_ur1_d = ur1_d;
		l_ur2_d = ur2_d;
		l_capacity = total;
	}
	else {
		ur1_d = l_ur1_d;
		ur2_d = l_ur2_d;
	}
	ierr = cudaMalloc((void **)&(acache_d), sizeof(int)*4);

	// Copy data to device.
	cudaMemcpy(ur1_d, ur1, sizeof(int)*nbytes, cudaMemcpyHostToDevice);
	cudaMemcpy(acache_d, acache, sizeof(int)*4, cudaMemcpyHostToDevice);

	split_w8_d = galois_split_w8_d;
	// Call kernel, error check, copy result from device to host.
	if (!add) {
		mult_w32_null<<<total/512, 512>>>(ur1_d, ur2_d, split_w8_d, acache_d);
		ierr = cudaGetLastError();
		if (ierr != cudaSuccess) {
			fprintf(stderr, "Error %s\n", cudaGetErrorString(ierr));
		}
		cudaMemcpy(ur2, ur2_d, sizeof(int)*nbytes, cudaMemcpyDeviceToHost);
	}
	else {
		/* XOR variant needs the current r2 contents on the device. */
		cudaMemcpy(ur2_d, ur2, sizeof(int)*nbytes, cudaMemcpyHostToDevice);
		mult_w32_notnull<<<total/512,512>>>(ur1_d, ur2_d, split_w8_d, acache_d);
		ierr = cudaGetLastError();
		if (ierr != cudaSuccess) {
			fprintf(stderr, "Error\n");
		}

		cudaMemcpy(ur2, ur2_d, sizeof(int)*nbytes, cudaMemcpyDeviceToHost);
	}

	/* Release the per-call scratch (previously leaked every call). */
	cudaFree(acache_d);
	return;
}

/* Function called from benchmarks to create w = 32 
   special case tables. Some functions referenced here are 
   omitted for space.  They do not make use of the GPU, 
   and are the same as in Plank's original library.
   Builds seven 2^16-entry "split" tables: table i+j holds the GF(2^32)
   product of byte i of one operand and byte j of the other, indexed by
   (p1 << 8) | p2.  Host tables are kept in galois_split_w8[]; all
   seven are also copied into one contiguous device allocation,
   galois_split_w8_d.  Returns 0 on success, -1 on allocation failure. */
int galois_create_split_w8_tables()
{
	int p1, p2, i, j, p1elt, p2elt, index, ishift, jshift, *table;

	if (galois_split_w8[0] != NULL) return 0;  /* already built */

	/* The w = 8 tables are a prerequisite elsewhere in the library. */
	if (galois_create_mult_tables(8) < 0) return -1;

	for (i = 0; i < 7; i++) {
		galois_split_w8[i] = (int *) malloc(sizeof(int) * (1 << 16));
		if (galois_split_w8[i] == NULL) {
			/* Unwind the allocations made so far. */
			for (i--; i >= 0; i--) free(galois_split_w8[i]);
			return -1;
		}
	}

	/* i takes only 0 and 3; with j running 0..3 (i == 0) or 1..3
	   (i == 3), the sums i+j cover table indices 0..6 exactly once. */
	for (i = 0; i < 4; i += 3) {
		ishift = i * 8;
		for (j = ((i == 0) ? 0 : 1) ; j < 4; j++) {
			jshift = j * 8;
			table = galois_split_w8[i+j];
			index = 0;
			/* Every (byte, byte) pair, each shifted into its lane.
			   galois_shift_multiply is the CPU-side GF(2^32)
			   multiply (definition omitted from this listing). */
			for (p1 = 0; p1 < 256; p1++) {
				p1elt = (p1 << ishift);
				for (p2 = 0; p2 < 256; p2++) {
					p2elt = (p2 << jshift);
					table[index] = galois_shift_multiply(p1elt, p2elt, 32);
					index++;
				}
			}
		}
	}
	/* Allocate space for split tables on device
	   and copy them over, packed back-to-back in table order. */
	cudaError_t ierr;
	ierr = cudaMalloc((void **)&(galois_split_w8_d), sizeof(int)*7*(1 << 16));
	for (i = 0; i < 7;i++) {
		cudaMemcpy(galois_split_w8_d+i*(1 << 16), galois_split_w8[i], 
			sizeof(int)*(1 << 16), cudaMemcpyHostToDevice);
	}
	return 0;
}
\end{verbatim}
\end{tiny}
