#include "jerasure.h"
#include "galois.h"

/********* Constructors ************/

/* This constructor lets the user set ptrs.  If "data_only",
	 then it assumes that the specified coding drives exist, but
	 they are in a failed state.  In a vertical code, it assumes
	 that the coding areas are properly allocated and that the data
	 is in the correct place, but the coding areas are not initialized.
	 That may be yucky. */

/* Wraps caller-supplied slice pointers without copying the data.
	 If "data_only" is non-zero, every drive starts out marked failed
	 (the coding areas exist but have not been initialized); otherwise
	 all drives start out healthy. */
JER_Slices::JER_Slices(int n, int k, int ps, int pps, JER_Gen_T *g, vector <unsigned char *> ptrs, int data_only){

	N = n;
	K = k;
	PacketSize = ps;
	PacketsPerSlice = pps;
	G = g;
	DataOnly = data_only;

	//reset the operation counters
	XORs = 0;
	GF_Mults = 0;
	Memcpys = 0;

	MultiThreadMethod = "";
	NumberOfCores = 1;

	Ptrs = ptrs;

	//state 0 = up, 1 = down; data_only means every drive begins down
	States.resize(n, data_only ? 1 : 0);

}

/* This constructor creates new pointers from ptrs, and
	 performs encoding at the same time */
/* Wraps caller-supplied slice pointers and immediately encodes the
	 coding drives from the data drives. */
JER_Slices::JER_Slices(int n, int k, int ps, int pps, JER_Gen_T *g, vector <unsigned char *> ptrs){

	N = n;
	K = k;
	PacketSize = ps;
	PacketsPerSlice = pps;
	G = g;

	//reset the operation counters
	XORs = 0;
	GF_Mults = 0;
	Memcpys = 0;

	MultiThreadMethod = "";
	NumberOfCores = 1;

	Ptrs = ptrs;

	//data drives (0..k-1) start up (0); coding drives (k..n-1) start
	//down (1) until Encode() succeeds and flips them back up
	States.resize(n, 0);
	for(int d = k; d < n; d++){
		States[d] = 1;
	}

	//if encode is successful, it will set the coding drives to up
	Encode();

}

/* This constructor allocates and creates empty slices */
/* This constructor creates empty slices.
	 NOTE(review): only the pointer vector is sized here -- no slice
	 buffers are visibly allocated; confirm who allocates Ptrs[i]. */
JER_Slices::JER_Slices(int n, int k, int ps, int pps, JER_Gen_T *g){

	int i;
	N = n;
	K = k;
	PacketSize = ps;
	PacketsPerSlice = pps;
	G = g;

	//setup stats
	XORs = 0;
	GF_Mults = 0;
	Memcpys = 0;

	//initialize explicitly, matching the other constructors
	//(was commented out; a default-constructed string is "" anyway)
	MultiThreadMethod = "";
	NumberOfCores = 1;

	Ptrs.resize(n);
	States.resize(n,0);

	//set coding States to down 
	for(i=k;i<n;i++){
		States[i] = 1;
	}

}

/********* Manage failures and drives ************/

/* Records a partial failure of [start, start+size) bytes on "drive".
	 The damaged range is widened outward to 8-byte boundaries before it
	 is stored, since the region primitives work on 8-byte-aligned data. */
void JER_Slices::Add_Partial_Failure(int drive, int start, int size){

	JER_Region * jer_reg;
	int end;

	//round the end of the range up to the next multiple of 8
	end = start + size;
	end = ( ( ( end - 1 ) / 8 ) + 1 ) * 8;
	jer_reg = new JER_Region;
	jer_reg->drive = drive;
	//round the start of the range down to a multiple of 8
	jer_reg->start = 8 * (start / 8);
	jer_reg->size = end - jer_reg->start;
	//NOTE(review): this region is heap-allocated, but
	//Recover_Partial_Failures() erases Pfs entries without delete --
	//looks like a leak; confirm intended ownership.
	Pfs.push_back(jer_reg);
}

/* Records a partial failure described by a caller-owned region.
	 NOTE(review): the region's address is stored directly, so it must
	 outlive the Pfs list -- confirm callers guarantee this. */
void JER_Slices::Add_Partial_Failure(JER_Region &r){
	Pfs.push_back(&r);
}

/* Marks an entire drive as failed (state 1 = down). */
void JER_Slices::Add_Drive_Failure(int drive){
	States[drive] = 1;	
}

/* Permanently removes a drive from the array, shrinking N (and K if it
	 was a data drive) and trimming the generator matrix to match. */
void JER_Slices::Remove_Drive(int drive){

	Ptrs.erase( Ptrs.begin() + drive );
	States.erase( States.begin() + drive );
	if(G == NULL){
		//no generator to maintain -- just shrink the counts
		N--;
		if(drive < K){
			K--;
		}
		return;
	}
	//if the matrix has one super-row per drive, every drive has a row to
	//drop; otherwise only coding drives have rows (row index = drive - K)
	//NOTE(review): 'drive > K' skips row deletion when drive == K in the
	//second case -- presumably intentional (parity-drive special case,
	//see G->PDrive below); confirm it should not be '>='.
	if( G->M->R == N * G->WPD )
		G->M->Delete_Row( drive );
	else if( drive > K )
		G->M->Delete_Row( drive - K );
	N--;
	G->N--;
	if( drive == K )
		G->PDrive = false;
	if( drive < K )
	{
		//a data drive also owns a generator column
		G->M->Delete_Col( drive );
		K--;
		G->K--;
	}

}
/* Removes a drive from the array and then rebuilds the coding drives. */
void JER_Slices::Remove_Drive_And_Re_Encode(int drive){

	Remove_Drive( drive );

	Encode();
}
/* Overwrites [start, start+size) on "drive" with new_data and keeps the
	 coding drives consistent.  Returns the number of partial failures
	 that could not be repaired (0 = fully consistent). */
int JER_Slices::Update_Region(int drive, int start, int size, unsigned char *new_data){

	//a coding drive is simply overwritten -- nothing depends on it
	if(drive >= K){
		memcpy( Ptrs[drive] + start, new_data, size );
		return 0;
	}

	//bring the slices to a consistent state before touching the data
	Recover_Partial_Failures();

	memcpy( Ptrs[drive] + start, new_data, size );

	//the matching region of every coding drive is now stale: mark each
	//one as a partial failure...
	for(int d = K; d < N; d++){
		Add_Partial_Failure( d, start, size );
	}

	//...and rebuild them
	return Recover_Partial_Failures();
}
/* Region-struct variant of Update_Region(): overwrites the bytes that r
	 describes with new_data and re-encodes the coding drives.  Returns
	 the number of partial failures left unrepaired (0 = consistent). */
int JER_Slices::Update_Region(JER_Region &r, unsigned char *new_data){

	//a coding drive is simply overwritten -- nothing depends on it
	if(r.drive >= K){
		memcpy( Ptrs[r.drive] + r.start, new_data, r.size );
		return 0;
	}

	//repair any outstanding damage before applying the update
	Recover_Partial_Failures();

	memcpy( Ptrs[r.drive] + r.start, new_data, r.size );

	//mark the same region stale on every coding drive...
	for(int d = K; d < N; d++){
		Add_Partial_Failure( d, r.start, r.size );
	}
	//...and rebuild them
	return Recover_Partial_Failures();
}

/********* Encode functions ************/

/* Repeatedly tries to repair every outstanding partial failure in Pfs.
	 For each region, every drive holding an overlapping damaged region is
	 temporarily marked failed, a decoding matrix is built from the
	 remaining drives, and the region is recomputed with a dot product.
	 Passes repeat until one completes without shrinking Pfs.
	 Returns the number of partial failures still outstanding (0 = all
	 recovered). */
int JER_Slices::Recover_Partial_Failures()
{
	int prevsize;
	int start, stop;
	int dmids[K];
	int i, j;
	vector<int> tempstates;
	vector<int> drive;
	vector<int> drive2;
	JER_Matrix *decode_matrix, *prod;

	//seed prevsize so the while loop body runs at least once
	//(BUGFIX: this line previously used '==', a no-op comparison that
	// left prevsize uninitialized)
	prevsize = Pfs.size() + 1;

	drive.resize(1);
	drive2.resize(1);
	while( prevsize > Pfs.size() ){
		prevsize = Pfs.size();
		for( i = 0; i < Pfs.size(); i++ )
		{
			//we will try to recover from the ith partial failure
			start = Pfs[i]->start;
			stop = start + Pfs[i]->size;

			//save drive states so they can be restored after this attempt
			tempstates = States;

			for( j = 0; j < Pfs.size(); j++ )
			{
				if( start < Pfs[j]->start + Pfs[j]->size && 
						stop > Pfs[j]->start )
					//this partial failure overlaps the ith partial failure,
					//so we cannot use this drive to recover
					States[Pfs[j]->drive] = 1;
			}
			decode_matrix = Make_Decoding_Matrix( dmids );

			if( decode_matrix != NULL )
			{
				prod = Prod( G->M, decode_matrix );
			}
			else
				prod = NULL;

			drive[0] = Pfs[i]->drive;

			//NOTE(review): decode_matrix/prod and the erased Pfs entries are
			//never freed here -- looks like a leak; confirm ownership.
			if( Pfs[i]->drive < K && G->Systematic )
			{
				//systematic data drive: decode straight from the decoding matrix
				if( decode_matrix != NULL && !Dotprod( decode_matrix, 
							drive, drive, dmids, Pfs[i] ) )
				{
					Pfs.erase( Pfs.begin() + i );
					i--;
				}
				else
					fprintf(stderr,"Dotprod failed\n" );
			}
			else if( prod != NULL )
			{
				//coding drive: use generator x decoding-matrix product
				drive2[0] = drive[0] - K;
				if( !Dotprod( prod, drive2, drive, dmids, Pfs[i] ) )
				{
					Pfs.erase( Pfs.begin() + i );
					i--;
				}
				else
					fprintf(stderr,"Dotprod failed\n" );
			}
			else
				fprintf(stderr,"Dotprod failed\n" );
			States = tempstates;
		}
	}
	return prevsize;
}

/* Rebuilds all coding drives from the data drives.  Picks a bitmatrix
	 schedule when one is available, Anvin's RAID-6 shortcut for m == 2
	 Reed-Solomon, or a plain matrix dot product otherwise.  On success
	 (0) all coding drives are marked up; on failure (-1) they are marked
	 down. */
int JER_Slices::Encode(){

	int i;
	int result;

	//need a generator with a matrix before anything can be encoded
	if(G == NULL || G->M == NULL){
		return -1;
	}

	if(States.size()!=N){
		//the number of states does not match the number of drives
		return -1;	
	}

	//any unrepaired partial failure means the data is not trustworthy
	if( Recover_Partial_Failures() )
		return -1;

	if(G->M->W == 1){
		//bitmatrix encode: look for a precomputed schedule keyed by the
		//failure pattern "data drives up, coding drives down",
		//e.g. '0101' means the 2nd and 4th drives failed
		string schedule_key(K, '0');
		schedule_key.append(N - K, '1');

		map <string, JER_Schedule* >::iterator it_scheds = G->Schedules.find(schedule_key);
		if(it_scheds != G->Schedules.end()){
			//we have a schedule to encode with
			result = encode_schedule(it_scheds->second);
		}else{
			//encode the bitmatrix without a schedule
			result = encode_matrix();
		}
	}else if(N-K == 2 && G->rs_r6 == true){
		//reed sol r6 encode using Anvin's optimization (m = 2)
		result = encode_rs_r6();
	}else{
		//plain matrix encode
		result = encode_matrix();
	}

	//success (result 0) -> coding drives up (0);
	//failure (result -1) -> coding drives down (1)
	for(i=K;i<N;i++){
		States[i] = -result;
	}

	return result;

}

int JER_Slices::encode_rs_r6(){

	int i, j, k;
	int pstarted;
	int sindex;
	unsigned char *dptr, *pptr, *bdptr, *bpptr;
	int size;

	/* First, put the XOR into coding region 0 of all slices */
	size = PacketSize * PacketsPerSlice;
	bdptr = Ptrs[0];
	bpptr = Ptrs[K];
	memcpy(bpptr, bdptr, size);

	for (i = 1; i < K; i++) galois_region_xor(bpptr, Ptrs[i], bpptr, size);
	XORs += size;

	/* Next, put the sum of (2^j)*Dj into coding region 1 */
	bpptr = Ptrs[K+1];
	for (sindex = 0; sindex < size; sindex+=PacketSize) {
		pptr = bpptr + sindex;
		dptr = Ptrs[K-1] + sindex;
		memcpy(pptr, dptr, PacketSize);

		for (i = K-2; i >= 0; i--) {
			dptr = Ptrs[i] + sindex;
			switch(G->M->W) {
				case 8 : galois_w08_region_multby_2(pptr, PacketSize); break;
				case 16: galois_w16_region_multby_2(pptr, PacketSize); break;
				case 32: galois_w32_region_multby_2(pptr, PacketSize); break;
				default: return -1;
			}
			GF_Mults+=PacketSize;

			galois_region_xor(pptr, dptr, pptr, PacketSize);
			XORs+=PacketSize;
		}

	}
	return 0;
}

/* Replays one schedule over the current packet column.  Each schedule
	 element is <src_disk, src_packet, dst_disk, dst_packet, is_xor>:
	 is_xor != 0 XORs the source packet into the destination packet,
	 otherwise the source packet is copied over it. */
void JER_Slices::do_scheduled_operations(vector <unsigned char *> &ptrs, JER_Schedule *sched){

	for (size_t op = 0; op < sched->Elts.size(); op++) {
		int src_disk   = (sched->Elts)[op][0];
		int src_packet = (sched->Elts)[op][1];
		int dst_disk   = (sched->Elts)[op][2];
		int dst_packet = (sched->Elts)[op][3];

		unsigned char *src = ptrs[src_disk] + src_packet*PacketSize;
		unsigned char *dst = ptrs[dst_disk] + dst_packet*PacketSize;

		if ((sched->Elts)[op][4]) {
			//XOR the source packet into the destination
			galois_region_xor(src, dst, dst, PacketSize);
			XORs += PacketSize;
		} else {
			//straight copy
			memcpy(dst, src, PacketSize);
			Memcpys += PacketSize;
		}
	}
}
/* Encodes the coding drives by replaying a precomputed schedule over
	 every packet column (PacketSize*WPD bytes) of the slices.
	 Returns 0 on success, -1 if sched or the generator is missing. */
int JER_Slices::encode_schedule(JER_Schedule *sched){

	vector <unsigned char*> ptr_copy;
	int i, tdone;
	int size;
	int wpd;

	if(sched == NULL){
		return -1;
	}

	if(G==NULL){
		return -1;
	}

	wpd = G->WPD;

	ptr_copy.resize(N);
	size = PacketSize * PacketsPerSlice;  

	//walk a private copy of the pointers one packet column at a time,
	//replaying the schedule on each column
	for (i = 0; i < N; i++) ptr_copy[i] = Ptrs[i];
	for (tdone = 0; tdone < size; tdone += PacketSize*wpd) {
		do_scheduled_operations(ptr_copy, sched);
		for (i = 0; i < N; i++) ptr_copy[i] += (PacketSize*wpd);
	}

	return 0;
}
/* Encodes all coding drives with a single call to Dotprod(): generator
	 super-row i produces coding drive K+i.  Returns Dotprod()'s result
	 (0 on success, -1 on failure). */
int JER_Slices::encode_matrix(){

	int i, m;
	int r;
	JER_Matrix * jm;
	vector <int> jm_super_row_ids;
	vector <int> dest_disk_ids;

	//all other error checking is in Encode() or Dotprod()
	jm = G->M;
	if(jm == NULL){
		return -1;
	}

	m = N-K;

	//one dot product per coding drive
	for(i = 0;i<m;i++){
		jm_super_row_ids.push_back(i);
		dest_disk_ids.push_back(K+i);
	}
	r = Dotprod(jm,jm_super_row_ids,dest_disk_ids,NULL);
	
	return r;

}

//Each thread calls dot product on different areas of the matrix depending upon the method used.
/* pthread entry point for multi-threaded dot products.  Each thread
	 calls dotprod on different areas of the matrix depending upon the
	 method chosen in Dotprod(); thread t of t_count handles every
	 t_count-th work unit (disk, packet row, packet column, or packet).
	 Always returns NULL; results are written through This->Ptrs. */
void *JER_Slices::dotprod_thread(void *v){

	int jm_super_row_id,jm_row_id;
	int dest_disk_id, dest_row_id, dest_packet_id;

	int k,i;
	int m,wpd,size, pps, ps;
	JER_Gen_T *gen;
	int num_disks, num_packet_rows, num_packet_cols, num_packets;

	//arguments passed in:
	pt_dotprod_args *pta;
	JER_Slices *This;
	JER_Region *region;
	int t_count;
	int t_id;
	string *method;

	JER_Matrix *jm;
	int *dm_ids;

	//unpack the argument struct built by Dotprod()
	pta = (pt_dotprod_args*)v;
	This = pta->slices;
	region = pta->region;
	t_id = pta->id;
	t_count = pta->thread_count;
	method = pta->method;
	jm = pta->jm;
	vector <int> & jm_super_row_ids = *pta->jm_super_row_ids;
	vector <int> & dest_disk_ids = *pta->dest_disk_ids;
	dm_ids = pta->dm_ids;

	//get info about disk/packet sizes so that we can split up the work among threads
	gen = This->G;
	k = This->K;
	m = This->N - k;
	wpd = gen->WPD;
	pps = This->PacketsPerSlice;
	ps = This->PacketSize;

	//work-unit counts for each splitting method
	num_disks = jm_super_row_ids.size();
	num_packet_rows = num_disks*wpd; 
	size = ps*pps;
	num_packet_cols = size/(ps*wpd);
	num_packets = pps*num_disks;

	//t_count = total number of threads doing this encoding
	//t_id = id of this thread (0 to t_count-1)
	if(*method == "disks"){
		//each work unit is a whole destination disk
		for (i = t_id; i < num_disks; i+=t_count) {
			This->dotprod_disk(jm, jm_super_row_ids[i], dest_disk_ids[i],dm_ids, region);
		}
	}else if(*method == "packet_rows"){
		for (i = t_id; i < num_packet_rows; i+=t_count) {
			//translate i into a packet row id
			//jm_super_row_ids contains id's of disks
			//each disk contains wpd packet rows
			jm_super_row_id = jm_super_row_ids[i/wpd];
			dest_disk_id = dest_disk_ids[i/wpd];

			jm_row_id = jm_super_row_id * wpd;
			jm_row_id += i%wpd;

			dest_row_id = dest_disk_id * wpd;
			dest_row_id += i%wpd;

			This->dotprod_row(jm,jm_row_id,dest_row_id,dm_ids, region);
		}
	}else if(*method == "packet_cols"){
		for (i = t_id; i < num_packet_cols; i+=t_count) {
			//Unlike the others, this encodes the same column in all of the disks. Therefore, we just pass all of the disk ids at once.
			This->dotprod_col(jm,jm_super_row_ids,dest_disk_ids,i,dm_ids, region); 
		}
	}else if(*method == "packets"){
		for (i = t_id; i < num_packets; i+=t_count) {
			
			//translate i into a packet id
			
			//get drive that the packet is in
			jm_super_row_id = jm_super_row_ids[i/pps];
			dest_disk_id = dest_disk_ids[i/pps];
			
			//get first packet id in the drive
			dest_packet_id=dest_disk_id*pps;
			//add the remaining
			dest_packet_id=dest_packet_id+i%pps;

			//convert the super-row to the actual row
			jm_row_id = (jm_super_row_id*wpd) + dest_packet_id%wpd;

			This->dotprod_packet(jm,jm_row_id,dest_packet_id,dm_ids, region);
		}
	}

	return NULL; 

}

/* Performs the dot products that build each destination disk in
	 dest_disk_ids from the generator (or decoding) matrix super-rows in
	 jm_super_row_ids.  dm_ids, when non-NULL, remaps the source drive
	 indices (used during decoding); region, when non-NULL, limits the
	 work to a byte range.  With NumberOfCores > 1 the work is split
	 across pthreads according to MultiThreadMethod.
	 Returns 0 on success, -1 on bad arguments. */
int JER_Slices::Dotprod(JER_Matrix *jm, vector <int> & jm_super_row_ids, vector <int> & dest_disk_ids, int *dm_ids, JER_Region *region){

	int number_of_cores;
	int wpd;
	int w;
	string thread_method;
	unsigned long size;
	int num_disks, num_packet_rows, num_packet_cols, num_packets;
	int threads_to_make;
	int i;

	//thread variables:
	pthread_t *pt;
	vector <pthread_t *> pts;
	pt_dotprod_args * pta; 
	vector <pt_dotprod_args *> ptas;

	//first, do error checking

	if(G == NULL){
		return -1;
	}
	wpd = G->WPD;

	if(jm == NULL){
		return -1;
	}
	w = jm->W;
	size = PacketSize*PacketsPerSlice;

	if(wpd == 1){
		//matrix
		if (w != 1 && w != 8 && w != 16 && w != 32) {
			fprintf(stderr,"ERROR Dotprod() matrix: w (%d) not 1, 8, 16, or 32\n",w);
			return -1;
		}
		if(size%8 != 0){
			fprintf(stderr,"ERROR Dotprod() matrix: size (%lu) %% 8 != 0\n",size);
			return -1;
		}
	}else{
		//bitmatrix
		if (PacketSize%8 != 0) {
			fprintf(stderr, "ERROR Dotprod() bitmatrix: PacketSize(%d) %% 8 != 0\n", PacketSize);
			return -1;
		}
		if (size%(PacketSize*w) != 0) {
			fprintf(stderr, "ERROR Dotprod() bitmatrix: size(%lu) %% (PacketSize(%d)*w(%d))) != 0\n",size, PacketSize, w);
			return -1;
		}
	}

	num_disks = jm_super_row_ids.size();
	if(num_disks != dest_disk_ids.size()){
		return -1;
	}

	//work-unit counts for the threading methods
	//(BUGFIX: these must be computed BEFORE the method auto-selection
	// below; previously they were read while still uninitialized)
	num_packet_rows = num_disks*wpd; 
	num_packet_cols = size/(PacketSize*wpd);
	num_packets = PacketsPerSlice*num_disks;

	number_of_cores = NumberOfCores;
	if(number_of_cores <= 1){

		//not multi-threaded, just encode disk by disk
		for (i = 0; i < num_disks; i++) {
			dotprod_disk(jm, jm_super_row_ids[i], dest_disk_ids[i],dm_ids, region);
		}

		return 0;
	}else{
		//multi-threaded

		thread_method = MultiThreadMethod;

		if(w==1){
			/*
			bitmatrix

			Valid multi-threading methods:
			disks,packet_rows,packet_cols,packets
			*/
			
			if(thread_method != "disks" && thread_method != "packet_rows" && thread_method != "packet_cols" && thread_method != "packets"){

				/*
					 If the threading method is anything else, we 
					 Try to choose the best threading method for the user
					 Choose the encoding method supporting at least 'number_of_cores' threads.
					 If multiple methods support this many threads, use the method supporting the smaller number.

Example: with m=2, NumberOfCores=2, PacketsPerSlice = 16
Encoding individual packets supports 16 threads.
Encoding disks supports 2 threads.
We will have each thread encode one disk, as opposed to each thread encoding 8 packets.
				 */
				if(num_disks >= number_of_cores){
					//each thread can work on its own coding device
					thread_method = "disks";
				}else if(num_packet_cols >= number_of_cores){
					//One thread encodes the first wpd packets of all disks, another thread encodes the next wpd packets of all disks, etc...
					thread_method = "packet_cols";
				}else if(num_packet_rows >= number_of_cores){
					//If a coding disk is viewed as a super-row, each thread makes one of the individual rows in the super-row.
					thread_method = "packet_rows";
				}else{
					//A thread creates a single packet for the coding drives.
					//(the thread-count cap is applied uniformly below)
					thread_method = "packets";
				}
			}

		}else{
			/*
			matrix

			Valid multi-thread methods:
			disks
			*/

			if(thread_method != "disks"){
				thread_method = "disks";
			}

		}

		//assume we can make a thread for each core...
		threads_to_make = number_of_cores; 

		//...then cap the thread count by the number of work units the
		//chosen method actually provides
		if(thread_method == "disks"){
			//threads create entire coding drives
			if(num_disks < number_of_cores){
				threads_to_make = num_disks; 
			}
		}else if(thread_method == "packet_rows"){
			//If a coding disk is viewed as a super-row, each thread makes one of the individual rows in the super-row.
			if(num_packet_rows < number_of_cores){
				threads_to_make = num_packet_rows;
			}
		}else if(thread_method == "packet_cols"){
			//One thread encodes the first wpd packets of all disks, another thread encodes the next wpd packets of all disks, etc...
			if(num_packet_cols < number_of_cores){
				threads_to_make = num_packet_cols;
			}
		}else if(thread_method == "packets"){
			//A thread creates a single packet for the coding drives.
			if(num_packets < number_of_cores){
				//The number of packets to make is less than the number of cores the PC has. Not all of the cores will be utilized.
				threads_to_make = num_packets;
			}
		}

		//create the threads
		for (i = 0; i < threads_to_make; i++) {

			pt = (pthread_t *)malloc(sizeof(pthread_t));

			//create args
			pta = (pt_dotprod_args *)malloc(sizeof(pt_dotprod_args));
			pta->slices = this;
			pta->region = region;
			pta->id = i;
			pta->thread_count = threads_to_make;
			pta->method = &thread_method;
			pta->jm_super_row_ids = &jm_super_row_ids;
			pta->dest_disk_ids = &dest_disk_ids;
			pta->dm_ids = dm_ids;
			pta->jm = jm;

			pts.push_back(pt);
			ptas.push_back(pta);

			if(pthread_create(pt,NULL,&JER_Slices::dotprod_thread,(void *)pta) != 0){
				perror("Dotprod, pthread_create");
				exit(1);
			}
		}

		//wait for all threads to complete
		for (i = 0; i < threads_to_make; i++) {
			if(pthread_join(*pts[i],NULL) != 0){
				perror("Dotprod, pthread_join");
				exit(1);
			}
		}

		//free all the arguments and pthreads
		for(i=0;i<threads_to_make;i++){
			free(ptas[i]); //args
			free(pts[i]); //threads
		}
	}

	return 0;

}

/* Builds one entire destination disk (dest_disk_id) from the generator
	 super-row jm_super_row_id.  dm_ids, when non-NULL, remaps the source
	 drive indices (decoding); region, when non-NULL, limits work to a
	 byte range.  Matrix elements are read by walking the packed Elts
	 words directly instead of calling JER_Matrix::Get(), for speed. */
void JER_Slices::dotprod_disk(JER_Matrix *jm, int jm_super_row_id, 
				int dest_disk_id, int *dm_ids, JER_Region *region ){

	unsigned char *dptr, *sptr;
	int size, w,wpd;
	char first,add;
	int i, j, sindex;
	int begin, end;

	size = PacketSize * PacketsPerSlice;
	wpd = G->WPD;
	w = jm->W;
	uint64_t val;

	//The following variables are used to Get elements
	//this is faster than calling JER_Matrix::Get()
	uint64_t *cur_elt;
	uint64_t new_elt_mask; //mask for reading a number from a new elt, all but the last bits are 0. ex for w=1: 00000.....00001
	uint64_t cur_mask; //this is a left shifted version of new_elt_mask, to get a different number from cur_elt
	char bits_per_num; //how many bits are in each number (sometimes > w)
	char num_per_elt; //how many numbers are in each elt
	char shift;
	int C; //number of columns in the generator matrix
	int r,c;

	//setup variables needed to Get elements from the padded Elts vector
	//NOTE(review): no default case -- bits_per_num stays uninitialized
	//for w < 1 or w > 32; presumably callers guarantee the range. Confirm.
	switch(w){
		case 1:
			bits_per_num = 1;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:
			bits_per_num = 8;
			break;
		case 9:
		case 10:
		case 11:
		case 12:
		case 13:
		case 14:
		case 15:
		case 16:
			bits_per_num = 16;
			break;
		case 17:
		case 18:
		case 19:
		case 20:
		case 21:
		case 22:
		case 23:
		case 24:
		case 25:
		case 26:
		case 27:
		case 28:
		case 29:
		case 30:
		case 31:
		case 32:
			bits_per_num = 32;
			break;
	}
	new_elt_mask = ((uint64_t)1<<bits_per_num)-1;
	num_per_elt = 64/bits_per_num;
	C = jm->C;

	dptr =  Ptrs[dest_disk_id];
	if(w == 1){

		//bitmatrix

		//create each packet column in the destination disk
		for (sindex = 0; sindex < size; sindex += (PacketSize*wpd)) {
			//create each packet row in the destination disk
			for( i = 0; i < wpd; i++ )
			{
				//skip this packet row entirely unless it intersects region
				if( region == NULL || ( sindex + i * PacketSize <= 
										region->start + region->size &&
										sindex + PacketSize * (i+1) >= 
										region->start ) )
				{
					//clip [begin, begin+end) to the region's byte range
					if( region != NULL && sindex + i * PacketSize < region->start )
						begin = region->start;
					else
						begin = sindex + i * PacketSize;
					if( region != NULL && sindex + PacketSize * (i+1) > 
										  region->start + region->size )
						end = region->start + region->size - begin;
					else
						end = sindex + PacketSize * (i + 1) - begin;
					first = 1;

					//setup the get variables to get the first number
					r = (jm_super_row_id*wpd+i);
					cur_elt = &(jm->Elts[(r*C)/64]);
					cur_mask = new_elt_mask << (r*C%64);

					//create the packet row/col by multiplying K*wpd elements
					for( j = 0; j < K * wpd; j++ ){
						/*
						Previously, we just called Get
						if( jm->Get(jm_super_row_id*wpd+i, j)==1 )
						*/
						if((*cur_elt)&cur_mask){
							if( first ){
								//first contributing packet: copy, don't XOR
								//NOTE(review): the dm_ids==NULL branch writes at
								//dptr+begin while the others write at
								//dptr+i*PacketSize+sindex -- these only agree when
								//region is NULL; confirm which is intended.
								if( dm_ids == NULL )
									memcpy( dptr + begin,
											Ptrs[j/wpd]+
											((j%wpd)-i)*PacketSize + begin, end );
								else
									memcpy( dptr + i*PacketSize + sindex,
											Ptrs[dm_ids[j/wpd]]+
											((j%wpd)-i)*PacketSize + begin, end );
								Memcpys += end;
								first = 0;
							}
							else
							{
								//subsequent contributors are XORed in
								if( dm_ids == NULL )
									galois_region_xor( dptr + i*PacketSize + sindex,
											Ptrs[j/wpd]+
											((j%wpd)-i)*PacketSize + begin,
											dptr + i*PacketSize + sindex,
											end );
								else
									galois_region_xor( dptr + i*PacketSize + sindex,
											Ptrs[dm_ids[j/wpd]]+
											((j%wpd)-i)*PacketSize + begin,
											dptr + i*PacketSize + sindex,
											end );
								XORs += end;
							}
						}
						//setup to get the next number
						cur_mask <<= 1;
						if(cur_mask==0){
							cur_mask = new_elt_mask;
							cur_elt++;
						}
					}
				}
			}
		}

	}else{

		//matrix
		first = 0;

		// First copy or xor any data that does not need to be multiplied by a factor 
		for (i = 0; i < K; i++) {

			//for each column in our generator matrix row
			r = jm_super_row_id;
			c= i;
			cur_elt = &(jm->Elts[(r*C+c)/num_per_elt]);
			shift = bits_per_num*((r*C+c)%num_per_elt);
			cur_mask = new_elt_mask << shift;
			val = ((*cur_elt)&cur_mask) >> shift;

			if(val==1){
				//if (jm->Get(jm_super_row_id,i) == 1) 
				//the element in column i of the generator matrix is 1
				if( dm_ids == NULL ){
					sptr = Ptrs[i]; //get the row of our original data
				}
				else{
					sptr = Ptrs[dm_ids[i]];
				}
				if (first == 0) {
					//first contributor: plain copy
					if( region != NULL )
					{
						memcpy(dptr + region->start, sptr + region->start, region->size);
						Memcpys += region->size;
					} else {
						memcpy(dptr, sptr, size);
						Memcpys += size;
					}
					first = 1;
				} else {
					//later contributor with factor 1: XOR
					if( region != NULL )
					{
						galois_region_xor(sptr + region->start, dptr + region->start, 
								dptr + region->start, region->size);
						XORs += region->size;
					} else {
						galois_region_xor(sptr, dptr, dptr, size);
						XORs += size;
					}
				}
			}else if(val != 0){
				//general factor: GF(2^w) region multiply; the trailing
				//'first' argument tells galois whether to add (XOR) or set
				if (dm_ids == NULL)
					sptr = Ptrs[i];
				else
					sptr = Ptrs[dm_ids[i]];
				if( region != NULL ){
					switch (w) {
						case 8:  galois_w08_region_multiply(sptr + region->start, 
										 val, region->size, dptr + region->start, first);
								 break;
						case 16: galois_w16_region_multiply(sptr + region->start,
										 val, region->size, dptr + region->start, first);
								 break;
						case 32: galois_w32_region_multiply(sptr + region->start,
										 val, region->size, dptr + region->start, first);
								 break;
					}
					GF_Mults += region->size;
				} else {
					switch (w) {
						case 8:  galois_w08_region_multiply(sptr, val,
										 size, dptr, first);
								 break;
						case 16: galois_w16_region_multiply(sptr, val,
										 size, dptr, first);
								 break;
						case 32: galois_w32_region_multiply(sptr, val,
										 size, dptr, first);
								 break;
					}
					GF_Mults += size;
				}
				first = 1;

			}

			//setup to get the next number
			cur_mask <<= bits_per_num;
			shift += bits_per_num;
			if(cur_mask==0){
				shift = 0;
				cur_mask = new_elt_mask;
				cur_elt++;
			}

		}
	}
}

/* Builds one packet row of a destination disk from one bitmatrix row.
	 For bitmatrices, each disk contains wpd smaller rows; this creates
	 one of the wpd smaller rows (dest_row_id) from matrix row jm_row_id.
	 dm_ids remaps source drives when decoding; region limits the byte
	 range touched. */
void JER_Slices::dotprod_row(JER_Matrix *jm, int jm_row_id, int dest_row_id, int *dm_ids, JER_Region *region){

	//For bitmatrices, each disk contains wpd smaller rows. 
	//This creates one of the wpd smaller rows.
	unsigned char *dest_disk_ptr, *src_ptr;
	int size, w,wpd;
	char first;
	int sindex;
	int jm_col_id;
	int dest_disk;
	int begin, end;

	//The following variables are used to Get elements
	//this is faster than calling JER_Matrix::Get()
	uint64_t *cur_elt;
	uint64_t new_elt_mask;
	uint64_t cur_mask; //this is a left shifted version of new_elt_mask, to get a different number from cur_elt
	int C; //number of columns in the generator matrix
	int r;

	new_elt_mask = 1;
	C = jm->C;
	size = PacketSize * PacketsPerSlice;
	wpd = G->WPD;

	//each destination disk is wpd consecutive rows
	dest_disk = dest_row_id/wpd;
	dest_disk_ptr = Ptrs[dest_disk];

	//for each packet column in the destination row
	for (sindex = 0; sindex < size; sindex += (PacketSize*wpd)) {
		first = 1;
		//skip this packet unless it intersects region
		if( region == NULL || ( region->start < sindex + PacketSize*((dest_row_id%wpd) + 1) &&
					region->start + region->size > sindex + PacketSize*(dest_row_id%wpd) ) )
		{
			//clip [begin, begin+end) to region
			if( region != NULL && region->start > sindex + PacketSize*(dest_row_id%wpd) )
				begin = region->start;
			else
				begin = sindex + PacketSize*(dest_row_id%wpd);
			if( region != NULL && region->start + region->size < sindex + PacketSize*((dest_row_id%wpd)+1) )
				end = region->start + region->size - begin;
			else
				end = sindex + PacketSize*((dest_row_id%wpd)+1) - begin;

			//position the fast element reader at the start of matrix row r
			r = jm_row_id;
			cur_elt = &(jm->Elts[(r*C)/64]);
			cur_mask = new_elt_mask << (r*C%64);

			//for each number in the source row
			for( jm_col_id = 0; jm_col_id < K * wpd; jm_col_id++ ){

				if(((*cur_elt)&cur_mask) != 0){
				//if( jm->Get(jm_row_id, jm_col_id)==1 )
					if( first ){
						//first contributor: copy instead of XOR
						if(dm_ids == NULL){
							//adding sindex offsets a number of packet columns
							memcpy( dest_disk_ptr + begin,
									Ptrs[jm_col_id/wpd]+
									((jm_col_id%wpd) - (dest_row_id%wpd))*PacketSize + begin,
									end );
						}else{
							memcpy( dest_disk_ptr + begin,
									Ptrs[dm_ids[jm_col_id/wpd]]+
									((jm_col_id%wpd) - (dest_row_id%wpd))*PacketSize + begin,
									end );
						}
						Memcpys += end;
						first = 0;
					}else{
						//NOTE(review): the XOR's destination pointer adds
						//PacketSize*((jm_row_id%wpd) - (dest_row_id%wpd)) while its
						//first source operand does not -- asymmetric with the
						//memcpy branch; confirm this offset is intended.
						if(dm_ids == NULL){
							galois_region_xor( dest_disk_ptr + begin,
									Ptrs[jm_col_id/wpd]+((jm_col_id%wpd) - (dest_row_id%wpd))*PacketSize + begin,
									dest_disk_ptr + PacketSize*((jm_row_id%wpd) - (dest_row_id%wpd)) + begin,
									end );
						}else{
							galois_region_xor( dest_disk_ptr + begin,
									Ptrs[dm_ids[jm_col_id/wpd]]+((jm_col_id%wpd) - (dest_row_id%wpd))*PacketSize + begin,
									dest_disk_ptr + PacketSize*((jm_row_id%wpd) - (dest_row_id%wpd)) + begin,
									end );
						}
						XORs += end;
					}
				}
				//setup to get the next number
				cur_mask <<= 1;
				if(cur_mask==0){
					cur_mask = new_elt_mask;
					cur_elt++;
				}
			}
		}
	}

}

//col is a number 0 to size/(PacketSize*WPD)
//Each drive can be viewed as having size/(PacketSize*WPD) columns of data
//This will encode the same column of every coding device.
/* Encodes one packet column (dest_col_id) of EVERY destination disk in
	 dest_disk_ids.  col is a number 0 to size/(PacketSize*WPD); each
	 drive can be viewed as having that many columns of data.  dm_ids
	 remaps source drives when decoding; region limits the bytes touched. */
void JER_Slices::dotprod_col(JER_Matrix *jm, vector <int> & jm_super_row_ids, vector <int> & dest_disk_ids, int dest_col_id, int *dm_ids, JER_Region *region){

	int d;
	int jm_row_id,jm_col_id;
	int m,wpd;
	int dest_drive,src_drive;
	unsigned char *dest_drive_ptr, *src_drive_ptr;
	char first;
	int gen_row;
	int begin, end;

	//The following variables are used to Get elements
	//this is faster than calling JER_Matrix::Get()
	uint64_t *cur_elt;
	uint64_t new_elt_mask;
	uint64_t cur_mask; //this is a left shifted version of new_elt_mask, to get a different number from cur_elt
	int C; //number of columns in the generator matrix
	int r;

	new_elt_mask = 1;
	C = jm->C;

	wpd = G->WPD;
	m = N-K;

	//for all the disks we want to dotprod
	for(d=0;d<dest_disk_ids.size();d++){
		dest_drive = dest_disk_ids[d];
		dest_drive_ptr = Ptrs[dest_drive];

		first = 1;

		//first actual matrix row of this disk's super-row
		gen_row = jm_super_row_ids[d]*wpd;

		//for all rows in this generator super-row
		for(jm_row_id = gen_row;jm_row_id<gen_row+wpd;jm_row_id++){

			//position the fast element reader at the start of this row
			r = jm_row_id;
			cur_elt = &(jm->Elts[(r*C)/64]);
			cur_mask = new_elt_mask << (r*C%64);

			for(jm_col_id=0;jm_col_id<K*wpd;jm_col_id++){

				//the region test only depends on jm_row_id/dest_col_id, so it is
				//loop-invariant here: either all columns run or none do
				if( region == NULL || ( region->start < PacketSize*wpd*dest_col_id + ((jm_row_id%wpd)+1)*PacketSize &&
							region->start + region->size > PacketSize*wpd*dest_col_id + (jm_row_id%wpd)*PacketSize ) ){

					//clip [begin, begin+end) to region
					if( region != NULL && region->start > PacketSize*wpd*dest_col_id + (jm_row_id%wpd)*PacketSize )
						begin = region->start;
					else
						begin = PacketSize*wpd*dest_col_id + (jm_row_id%wpd)*PacketSize;
					if( region != NULL && region->start + region->size < PacketSize*wpd*dest_col_id + ((jm_row_id%wpd)+1)*PacketSize )
						end = region->start + region->size - begin;
					else
						end = PacketSize*wpd*dest_col_id + ((jm_row_id%wpd)+1)*PacketSize - begin;
					src_drive = jm_col_id/wpd;
					if(dm_ids == NULL){
						src_drive_ptr = Ptrs[src_drive];
					}else{
						src_drive_ptr = Ptrs[dm_ids[src_drive]];
					}

					if(((*cur_elt)&cur_mask) != 0){
					//if(jm->Get(jm_row_id,jm_col_id)==1)
						if(first){
							//first contributor: copy instead of XOR
							memcpy( dest_drive_ptr + begin,
									src_drive_ptr + begin + ((jm_col_id%wpd) - (jm_row_id%wpd))*PacketSize,
									end);
							Memcpys += end;
							first = 0;
						}else{
							galois_region_xor( dest_drive_ptr + begin,
									src_drive_ptr + begin + ((jm_col_id%wpd) - (jm_row_id%wpd))*PacketSize,
									dest_drive_ptr + begin,
									end );
							XORs += end;
						}
					}
					//setup to get the next number
					cur_mask <<= 1;
					if(cur_mask==0){
						cur_mask = new_elt_mask;
						cur_elt++;
					}
				}
			}
		}
	}
}

/*
Only generates one packet
The following shows how packets are numbered:
Coding drive 0 
				P0 P3 P6
				P1 P4 P7
				P2 P5 P8
Coding drive 1
				P9 P12 P15
			   P10 P13 P16
			   P11 P14 P17
*/
/* Generates exactly one packet (dest_packet_id, numbered column-major
	 within each coding drive -- see the diagram above) from matrix row
	 jm_row_id.  dm_ids remaps source drives when decoding; region limits
	 the bytes touched. */
void JER_Slices::dotprod_packet(JER_Matrix *jm, int jm_row_id, int dest_packet_id, int *dm_ids, JER_Region *region){

	char first;
	int wpd;
	int jm_col_id;
	int src_disk_id;
	unsigned char *src_disk_ptr;
	int dest_disk_id;
	unsigned char *dest_packet_ptr;
	int dest_sub_row, dest_sub_col;
	int begin, end;
	//The following variables are used to Get elements
	//this is faster than calling JER_Matrix::Get()
	uint64_t *cur_elt;
	uint64_t new_elt_mask;
	uint64_t cur_mask; //this is a left shifted version of new_elt_mask, to get a different number from cur_elt
	int C; //number of columns in the generator matrix
	int r;

	new_elt_mask = 1;
	C = jm->C;

	wpd = G->WPD;

	//set a pointer to the location where we will write a packet of data
	dest_disk_id = dest_packet_id/PacketsPerSlice;
	dest_sub_row = dest_packet_id%wpd;
	dest_sub_col = (dest_packet_id-(PacketsPerSlice*dest_disk_id))/wpd; 
	dest_packet_ptr = Ptrs[dest_disk_id];	
	dest_packet_ptr += dest_sub_row*PacketSize;
	dest_packet_ptr += dest_sub_col*PacketSize*wpd;

	//skip the packet entirely unless it intersects region
	if( region == NULL || ( region->start < dest_packet_ptr - Ptrs[dest_disk_id] + PacketSize && 
				region->start + region->size > dest_packet_ptr - Ptrs[dest_disk_id] ) )
	{
		//clip [begin, begin+end) to the region's byte range
		if( region != NULL && region->start > dest_packet_ptr - Ptrs[dest_disk_id] )
			begin = region->start;
		else
			begin = dest_packet_ptr - Ptrs[dest_disk_id];
		if( region != NULL && region->start + region->size < 
				dest_packet_ptr - Ptrs[dest_disk_id] + PacketSize )
			end = region->start + region->size - begin;
		else
			end = dest_packet_ptr - Ptrs[dest_disk_id] + PacketSize - begin;
		//loop through all elements in the jm_row of the generator matrix
		first = 1;

		//position the fast element reader at the start of this row
		r = jm_row_id;
		cur_elt = &(jm->Elts[(r*C)/64]);
		cur_mask = new_elt_mask << (r*C%64);

		for(jm_col_id = 0; jm_col_id<K*wpd;jm_col_id++){

			if(((*cur_elt)&cur_mask) != 0){
			//if(jm->Get(jm_row_id,jm_col_id)==1)
				src_disk_id = jm_col_id/wpd;
				if(dm_ids == NULL){
					src_disk_ptr = Ptrs[src_disk_id];
				}else{
					src_disk_ptr = Ptrs[dm_ids[src_disk_id]];
				}
				if(first){
					//first contributor: copy instead of XOR
					memcpy( Ptrs[dest_disk_id] + begin,
							src_disk_ptr + begin + ((jm_col_id%wpd) - dest_sub_row)*PacketSize,
							end);
					Memcpys += end;
					first = 0;
				}else{
					galois_region_xor( Ptrs[dest_disk_id] + begin,
							src_disk_ptr + begin + ((jm_col_id%wpd) - dest_sub_row)*PacketSize,
							Ptrs[dest_disk_id] + begin, end );
					XORs += end;
				}
			}
			//setup to get the next number
			cur_mask <<= 1;
			if(cur_mask==0){
				cur_mask = new_elt_mask;
				cur_elt++;
			}
		}		
	}

}

/* Computes the plain XOR parity of the K data slices into parity_ptr.
	 parity_ptr must reference at least PacketSize*PacketsPerSlice bytes.
	 Updates the Memcpys and XORs statistics counters. */
void JER_Slices::Do_Parity( unsigned char *parity_ptr )
{
	const int region_bytes = PacketSize*PacketsPerSlice;

	//seed the parity region with a copy of the first data slice...
	memcpy(parity_ptr, Ptrs[0], region_bytes);
	Memcpys += region_bytes;

	//...then fold each remaining data slice in with a region XOR
	for (int drive = 1; drive < K; drive++) {
		galois_region_xor(Ptrs[drive], parity_ptr, parity_ptr, region_bytes);
		XORs += region_bytes;
	}
}

/********* Decode functions ************/
/* Decodes the slices by running the bitmatrix schedule sched over each
	 sub-stripe of the slice.  Returns 0 on success, -1 if there is no
	 generator or the pointer setup fails. */
int JER_Slices::decode_schedule(JER_Schedule * sched){

	//fail early when no generator has been attached
	if(G == NULL){
		return -1;
	}

	const int wpd = G->WPD;
	const int drives = N;                          //data + coding drives
	const int slice_bytes = PacketSize*PacketsPerSlice;
	const int stride = PacketSize*wpd;             //bytes consumed per pass

	//map each drive to the pointer slot the schedule expects
	vector <unsigned char *> *ptrs = set_up_ptrs_for_scheduled_decoding();
	if (ptrs == NULL) return -1;

	//execute the schedule once per sub-stripe, advancing all pointers
	for (int done = 0; done < slice_bytes; done += stride) {
		do_scheduled_operations(*ptrs, sched);
		for (int i = 0; i < drives; i++) (*ptrs)[i] += stride;
	}

	delete ptrs;

	return 0;
}

/* Builds the pointer array used by scheduled decoding.

	 Layout of the returned vector (size k+m):

	 - If data drive i has not failed, then ptrs[i] = data_ptrs[i].
	 - If data drive i has failed, then ptrs[i] = coding_ptrs[j], where j is the
	 lowest unused non-failed coding drive.
	 - Elements k to k+ddf-1 are data_ptrs[] of the failed data drives.
	 - Elements k+ddf to k+ddf+cdf-1 are coding_ptrs[] of the failed coding
	 drives.

	 The array row_ids contains the ids of ptrs.
	 The array ind_to_row_ids contains the row_id of drive i.

	 However, we're going to set row_ids and ind_to_row in a different procedure.

	 Returns NULL on allocation failure.  The caller owns the vector. */
vector <unsigned char *> *JER_Slices::set_up_ptrs_for_scheduled_decoding(){
	vector <unsigned char *> *ptrs;
	int i, j, x;

	int k = K;
	int m = N-K;

	ptrs = new vector <unsigned char *>;
	try{ ptrs->resize(k+m);}
	catch(...){
		delete(ptrs);
		return NULL;
	}

	j = k;	//next candidate surviving coding drive
	x = k;	//next free slot for a failed drive's pointer
	for (i = 0; i < k; i++) {
		if (States[i] == 0) {
			(*ptrs)[i] = Ptrs[i];
		} else {
			//substitute the lowest unused surviving coding drive...
			while (States[j]) j++;
			(*ptrs)[i] = Ptrs[j];
			j++;
			//...and stash the failed data drive's pointer after the data slots
			(*ptrs)[x] = Ptrs[i];
			x++;
		}
	}
	//finally, append the pointers of the failed coding drives
	for (i = k; i < k+m; i++) {
		if (States[i]) {
			(*ptrs)[x] = Ptrs[i];
			x++;
		}
	}
	return ptrs;
}

// Not responsible if m, k, and w in slices and jm don't match.
//decodes both bitmatrices and matrices
int JER_Slices::decode_matrix(){

	int row_k_ones;
	int i, j; 
	JER_Matrix *decoding_matrix, *jm;
	int m, w, wpd;
	int dm_ids[K];
	int ddf, lastdrive;
	vector <int> jm_super_row_ids;
	vector <int> dest_disk_ids;

	if(G == NULL){
		return -1;
	}

	row_k_ones = G->PDrive;
	wpd = G->WPD;
	jm=G->M;

	if(jm == NULL){
		return -1;
	}

	w = jm->W;
	m = N-K;

	if(w != 1){
		//non-bitmatrix
		/* Only works for 8,16,32. Return with error otherwise. */
		if (w != 8 && w != 16 && w != 32) return -1;
	}

	// Find the number of data drives failed
	lastdrive = K;

	ddf = 0;
	for (i = 0; i < K; i++) {
		if (States[i]==1) {
			ddf++;
			lastdrive = i;
		}
	}

	/*
	You only need to create the decoding matrix in the following cases:

	1. ddf > 0 and row_k_ones is false.
	2. ddf > 0 and row_k_ones is true and coding device 0 has been erased.
	3. ddf > 1

	We're going to use lastdrive to denote when to stop decoding data.
	At this point in the code, it is equal to the last erased data device.
	However, if we can't use the parity row to decode it (i.e. row_k_ones=0
	or States[k] = 1, we're going to set it to k so that the decoding 
	pass will decode all data.

	If the first coding drive is not all ones or if it was erased, lastdrive equals k and we have to decode all drives with a decoding matrix.
	*/
	if (row_k_ones != 1 || States[K] == 1) lastdrive = K;

	decoding_matrix = NULL;

	//If we have to make a decoding matrix, allocate the data first and call the make_decoding_matrix method.
	if (ddf > 1 || (ddf > 0 && (row_k_ones != 1 || States[K] == 1))) {

		decoding_matrix = Make_Decoding_Matrix(dm_ids);
		if ( decoding_matrix == NULL) {
			return -1;
		}
	}

	/*
	Decode the data drives.  
	If row_k_ones is true and coding device 0 is intact, then only decode ddf-1 drives.
	This is done by stopping at lastdrive.
	We test whether ddf > 0 so that we can exit the loop early if we're done.
	*/
	for (i = 0; ddf > 0 && i < lastdrive; i++) {
		if (States[i] == 1) {
			jm_super_row_ids.push_back(i);
			dest_disk_ids.push_back(i);
			ddf--;
		}
	}
	if(jm_super_row_ids.size() > 0){
		Dotprod(decoding_matrix,jm_super_row_ids,dest_disk_ids,dm_ids);
	}
	jm_super_row_ids.clear();
	dest_disk_ids.clear();

	//Then if necessary, decode drive lastdrive
	if (ddf > 0) {
		for (i = 0; i < K; i++) {
			//first K-1 data drives survived (skipping lastdrive)
			//also, drive K survived (row_k_ones)
			dm_ids[i] = (i < lastdrive) ? i : i+1;
		}
		jm_super_row_ids.push_back(0);
		dest_disk_ids.push_back(lastdrive);
		Dotprod(jm, jm_super_row_ids,dest_disk_ids, dm_ids);
		jm_super_row_ids.clear();
		dest_disk_ids.clear();
	}

	//Finally, re-encode any erased coding devices
	for (i = 0; i < m; i++) {
		if (States[K+i]==1) {
			jm_super_row_ids.push_back(i);
			dest_disk_ids.push_back(K+i);
		}
	}
	if(jm_super_row_ids.size() > 0){
		Dotprod(jm, jm_super_row_ids, dest_disk_ids, NULL);
	}

	if (decoding_matrix != NULL) delete decoding_matrix;

	return 0;
}
/* Builds and inverts the survivor matrix used for decoding.  On return,
	 dm_ids[0..K-1] holds the ids of the first K surviving drives (the caller
	 supplies the array).  Returns the inverted decoding matrix, owned by the
	 caller, or NULL on error (no generator, no matrix, or unsupported w). */
JER_Matrix * JER_Slices::Make_Decoding_Matrix(int *dm_ids){

	int i, j, l;
	JER_Matrix * jm;
	JER_Matrix *jm_survivors;
	JER_Gen_T * gen;
	int wpd;
	int k, w;
	JER_Matrix * jm_decoding;

	//The following variables are used to Set elements by hand.
	//This is faster than calling JER_Matrix::Set()
	uint64_t *elt;
	uint64_t new_elt_mask; //mask for reading a number from a new elt, all but the last bits are 0. ex for w=1: 00000.....00001
	uint64_t mask; //this is a left shifted version of new_elt_mask, to get a different number from cur_elt
	char bits_per_num; //how many bits are in each number (sometimes > w)
	char num_per_elt; //how many numbers are in each elt
	char shift;
	int C; //number of columns in the generator matrix
	int r,c;

	gen = G;
	if(gen == NULL){
		return NULL;
	}

	jm = G->M;
	if(jm == NULL){
		return NULL;
	}
	w = jm->W;

	wpd = gen->WPD;

	//setup variables needed to address the padded Elts vector:
	//numbers are stored in 1-, 8-, 16- or 32-bit fields inside 64-bit elts.
	//Reject any other word size up front so bits_per_num is never undefined.
	if(w == 1){
		bits_per_num = 1;
	}else if(w >= 2 && w <= 8){
		bits_per_num = 8;
	}else if(w >= 9 && w <= 16){
		bits_per_num = 16;
	}else if(w >= 17 && w <= 32){
		bits_per_num = 32;
	}else{
		return NULL;
	}

	jm_survivors = new JER_Matrix( K*wpd, K*wpd, w );
	if (jm_survivors == NULL) {
		return NULL;
	}

	new_elt_mask = ((uint64_t)1<<bits_per_num)-1;
	num_per_elt = 64/bits_per_num;
	C = jm->C;

	k = K;

	/* Determine first k surviving drives. */
	j = 0;
	for (i = 0; j < k; i++) {
		if (States[i] == 0) {
			dm_ids[j] = i;
			j++;
		}
	}

	/* If the ith drive is a data drive, create the dm_ids[i]th super row of the
		 Identity matrix. Otherwise use the dm_ids[i]-kth super row of the generator
		 matrix. */
	for (i = 0; i < k; i++) {
		if (dm_ids[i] < k) {

			//manually Set() using bitshifting (faster than Set())
			for( l = 0; l < wpd; l++ ){
				r = i*wpd+l;
				c = dm_ids[i]*wpd+l;

				elt = &(jm_survivors->Elts[(r*C+c)/num_per_elt]);
				shift = bits_per_num*((r*C+c)%num_per_elt);
				mask = new_elt_mask << shift;

				//first set previous value to 0 
				(*elt) &= (~mask);
				//now set the new value
				(*elt) ^= (((uint64_t)1)<<shift);

				//jm_survivors->Set(i*wpd+l, dm_ids[i]*wpd+l, 1);
			}
		} else {

			//decoding matrix super row i = generator matrix super row dm_ids[i]-k
			jm_survivors->Copy_Panel(jm,(dm_ids[i]-k)*wpd,0,i*wpd,0,wpd,jm->C);
		}
	}


	/* Invert the current decoding matrix. */
	jm_decoding = Inverse(jm_survivors);
	delete jm_survivors;

	return jm_decoding;

}

/* Top-level decode entry point.  Repairs partial failures first, then
	 decodes with a cached schedule when one matches the current failure
	 pattern (bitmatrix codes with m == 2 only), falling back to matrix
	 decoding otherwise.  On success every drive state is reset to up (0);
	 on failure the states are left unchanged.  Returns 0 or -1. */
int JER_Slices::Decode(){

	if(G == NULL){
		return -1;
	}

	if(G->M == NULL){
		return -1;
	}

	if( Recover_Partial_Failures() )
		return -1;

	JER_Schedule *sched = NULL;

	//a schedule cache is only consulted for bitmatrix codes, and we only
	//keep cached schedules when m = 2
	if(G->M->W == 1 && N-K == 2 && G->Schedules.size() != 0){

		//build the failure key, one char per drive: '1' = failed, '0' = up
		//e.g. '0101' means the 2nd and 4th drives failed
		string schedule_key = "";
		for(int i=0;i<N;i++){
			if(States[i] == 2){
				//TODO: handle unusable state
				return -1;
			}
			schedule_key += (States[i] == 1) ? "1" : "0";
		}

		//use the cached schedule for this failure pattern, if there is one
		map <string, JER_Schedule * >::iterator it_scheds = G->Schedules.find(schedule_key);
		if(it_scheds != G->Schedules.end()){
			sched = it_scheds->second;
		}
	}

	//schedule decode when we found one, otherwise plain (bit)matrix decode
	int result = (sched != NULL) ? decode_schedule(sched) : decode_matrix();

	//if decode failed, result = -1, don't change the state of the drives;
	//if decode succeeded, result = 0, mark every device as up (0)
	if(result == 0){
		for(int i=0;i<N;i++){
			States[i] = 0;
		}
	}

	return result;
}

/* Bitmatrix-only decode that builds a schedule on demand.  Looks up a
	 cached schedule for the current failure pattern; when none exists, a
	 single-use schedule is created via Create_Single_Decode_Schedule and
	 freed after decoding.  "smart" is forwarded to the schedule creator.
	 On success every drive state is reset to up (0).  Returns 0 or -1. */
int JER_Slices::Decode_Schedule_Lazy(int smart){

	if(G == NULL){
		return -1;
	}

	if(G->M == NULL){
		return -1;
	}

	//lazy scheduling only applies to bitmatrix codes
	if(G->M->W != 1){
		return -1;
	}

	//build the failure key ('0101' means the 2nd and 4th drives failed)
	//and detect the nothing-failed case along the way
	string schedule_key = "";
	bool any_failed = false;
	for(int i=0;i<N;i++){
		if(States[i] == 2){
			//TODO, handle unusable
			return -1;
		}
		if(States[i] == 1){
			schedule_key += "1";
			any_failed = true;
		}else{
			schedule_key += "0";
		}
	}
	if(!any_failed){
		//all drives working, nothing to decode
		return 0;
	}

	//prefer a cached schedule; otherwise build a throwaway one
	JER_Schedule *sched;
	bool owns_sched;
	map <string, JER_Schedule * >::iterator it_scheds = G->Schedules.find(schedule_key);
	if(it_scheds != G->Schedules.end()){
		sched = it_scheds->second;
		owns_sched = false;
	}else{
		sched = G->Create_Single_Decode_Schedule(States, smart);
		owns_sched = true;
	}

	if(sched == NULL){
		return -1;
	}

	int result = decode_schedule(sched);

	//if decode failed, result = -1, don't change the state of the drives;
	//if decode succeeded, result = 0, mark every device as up (0)
	if(result == 0){
		for(int i=0;i<N;i++){
			States[i] = 0;
		}
	}

	//only schedules we created ourselves are ours to free
	if(owns_sched){
		delete sched;
	}

	return result;
}

