#include <stdio.h>

typedef unsigned int sector_t;

/* Array geometry for the exhaustive test.  The commented-out values are
 * larger "real" configurations; the small ones keep the test fast.
 */
#define SECPERCHUNK	4
#define MAX_SECPERCHUNK	 4//1024	// for testing diff configurations.
#define MIN_RAID_DISKS	4			// this is the min that a RAID6 can have
#define MIN_DATA_DISKS	(MIN_RAID_DISKS-1)	// parenthesized so the macro expands safely inside expressions
#define MAX_RAID_DISKS  6//16 		// change this to configure the number of disks in the RAID.
#define FIRST_LOGICAL_SEC 0
#define MAX_LOGICAL_SEC		100//(1<<16)

/* Rotating-parity layouts, numbered as in Linux md. */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms.  These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */


/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of datablocks over which the Q syndrome is computed
 * is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	6// 8 /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	7// 9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	8//10 /*DDF PRL=6 RLQ=3 */


/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	9//16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	10//17
#define ALGORITHM_LEFT_SYMMETRIC_6	11//18
#define ALGORITHM_RIGHT_SYMMETRIC_6	12//19
#define ALGORITHM_PARITY_0_6		13//20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N

#define RAID4		4
#define RAID5		5
#define RAID6		6


/* sector_div(n, b): divide n by b in place and evaluate to the
 * remainder — mirrors the kernel helper of the same name.
 * NOTE: this is a macro using a GCC statement expression; it MODIFIES
 * its first argument and evaluates both arguments more than once, so
 * pass only side-effect-free expressions.
 */
#define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)

/* Per-call geometry and result slots for the mapping functions.
 * chunk_size, raid_disks and data_disks are inputs set by the caller;
 * ddisk, pdisk and qdisk are outputs filled in by raid5_log2phy
 * (device indices for the data, P-parity and Q-parity blocks of the
 * mapped sector; pdisk/qdisk are -1 when the level/algorithm has no
 * such block).
 */
struct diskinfo{
	int chunk_size;	/* in: sectors per chunk */
	int raid_disks;	/* in: total devices, parity included */
	int data_disks;	/* in: devices holding data (raid_disks minus parity count) */
	int ddisk;	/* out: data device index */
	int pdisk;	/* out: P parity device index, or -1 */
	int qdisk;	/* out: Q parity device index, or -1 */
};

/* Map logical sector -> per-device physical sector; fills disks->ddisk/pdisk/qdisk. */
sector_t raid5_log2phy(sector_t r_sector, struct diskinfo *disks, int algo, int raid_level);
/* Inverse mapping: physical sector (plus disks.ddisk) -> logical sector. */
sector_t raid5_phy2log(sector_t r_sector, struct diskinfo disks, int algo, int raid_level);


int main () {
	int log_sec = 0;
	int phy_sec = 0;
	int temp = 0;
	struct diskinfo disks;
	int algo;
	int error = 0;
	int chunk_size, raid_disks, data_disks;
	int counter=0;
	int raid_level;
	int max_algo = 0;

	for (raid_level = RAID4; raid_level <= RAID6; raid_level++) {
		max_algo = (raid_level==RAID5) ? ALGORITHM_PARITY_N : ALGORITHM_PARITY_0_6;
		max_algo = (raid_level==4) ? 0 : max_algo;
		for (algo = 0; (algo <= max_algo) && !error; algo++) {
			raid_disks = MIN_RAID_DISKS;
			data_disks = MIN_DATA_DISKS;
			if (raid_level == RAID6)
				data_disks -= 1;
			for (; (raid_disks <= MAX_RAID_DISKS) && !error; raid_disks++, data_disks++) {
				disks.raid_disks = raid_disks;
				disks.data_disks = data_disks;
				for (chunk_size = SECPERCHUNK; (chunk_size <= MAX_SECPERCHUNK) && !error; chunk_size*=2) {
					disks.chunk_size = chunk_size;
					for (log_sec = FIRST_LOGICAL_SEC; (log_sec <= MAX_LOGICAL_SEC) && !error; log_sec ++) {
						counter++;
						phy_sec = raid5_log2phy(log_sec, &disks, algo, raid_level);
						temp = raid5_phy2log(phy_sec, disks, algo, raid_level);
//						printf("log_sec:%d, phy_sec:%d, ddisk:%d, pdisk:%d, qdisk:%d, rev_map:%d, algo:%d, csize:%d ddisks: %d rdisks:%d level:%d\n", log_sec, phy_sec, disks.ddisk, disks.pdisk, disks.qdisk, temp, algo, chunk_size,data_disks, raid_disks, raid_level);
					if (temp != log_sec) {
							error = 1;
							printf("\n*****ERROR: log_sec:%d, phy_sec:%d, ddisk:%d, pdisk:%d, qdisk:%d, rev_map:%d, algo:%d, chunk_size:%d raid_disks:%d raid_level:%d***\n", log_sec, phy_sec, disks.ddisk, disks.pdisk, disks.qdisk, temp, algo, chunk_size, raid_disks, raid_level);
						}
					}
				}
			}
			if (!error)
				printf("Success for algo %d\n", algo);
		}
		printf ("Success for raid level %d\n", raid_level);
	}
	printf("counter: %d\n", counter);
	return 0;
}

/* Map logical sector r_sector to the physical sector offset on the
 * device that holds it.  The layouts mirror Linux md's raid5/raid6
 * algorithms, plus the DDF RAID6 variants.
 *
 * Outputs via *disks: ddisk = data device index for this sector,
 * pdisk = P parity device, qdisk = Q parity device (-1 where the
 * level/algorithm has no such block).
 * Returns the per-device sector number.
 */
sector_t raid5_log2phy(sector_t r_sector, struct diskinfo *disks, int algo, int raid_level)
{
	int *dd_idx = &disks->ddisk;
	sector_t stripe, stripe2;
	sector_t chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	sector_t new_sector;
	/* Was hard-coded to SECPERCHUNK, silently ignoring the chunk size
	 * the caller configured in disks->chunk_size.  Benign while
	 * MAX_SECPERCHUNK == SECPERCHUNK, but it defeated the chunk-size
	 * loop in main(). */
	int chunk_size = disks->chunk_size;
	int raid_disks = disks->raid_disks;
	int data_disks = disks->data_disks;

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, chunk_size);
	chunk_number = r_sector;

	/*
	 * Compute the stripe number and the raw data-device index within it.
	 */
	stripe = chunk_number;
	*dd_idx = sector_div(stripe, data_disks);
	stripe2 = stripe;

	/*
	 * Select the parity disk based on the user selected algorithm,
	 * shifting *dd_idx around the parity block(s) where necessary.
	 */
	pd_idx = qd_idx = -1;
	switch (raid_level) {
		case RAID4:
			/* Fixed layout: parity on the last device; data index unchanged. */
			break;

		case RAID5:
			switch (algo) {
				case ALGORITHM_LEFT_ASYMMETRIC:
					pd_idx = data_disks - sector_div(stripe2, raid_disks);
					if (*dd_idx >= pd_idx)
						(*dd_idx)++;
					break;
				case ALGORITHM_RIGHT_ASYMMETRIC:
					pd_idx = sector_div(stripe2, raid_disks);
					if (*dd_idx >= pd_idx)
						(*dd_idx)++;
					break;
				case ALGORITHM_LEFT_SYMMETRIC:
					pd_idx = data_disks - sector_div(stripe2, raid_disks);
					*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
					break;
				case ALGORITHM_RIGHT_SYMMETRIC:
					pd_idx = sector_div(stripe2, raid_disks);
					*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
					break;
				case ALGORITHM_PARITY_0:
					pd_idx = 0;
					(*dd_idx)++;
					break;
				case ALGORITHM_PARITY_N:
					pd_idx = data_disks;
					break;
				default:
					printf("******invalid algo******\n");
			}
			break;

		case RAID6:
			switch (algo){
				case ALGORITHM_LEFT_ASYMMETRIC:
					pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
					qd_idx = pd_idx + 1;
					if (pd_idx == raid_disks-1) {
						(*dd_idx)++;	/* Q D D D P */
						qd_idx = 0;
					} else if (*dd_idx >= pd_idx)
						(*dd_idx) += 2; /* D D P Q D */
					break;
				case ALGORITHM_RIGHT_ASYMMETRIC:
					pd_idx = sector_div(stripe2, raid_disks);
					qd_idx = pd_idx + 1;
					if (pd_idx == raid_disks-1) {
						(*dd_idx)++;	/* Q D D D P */
						qd_idx = 0;
					} else if (*dd_idx >= pd_idx)
						(*dd_idx) += 2; /* D D P Q D */
					break;
				case ALGORITHM_LEFT_SYMMETRIC:
					pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
					qd_idx = (pd_idx + 1) % raid_disks;
					*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
					break;
				case ALGORITHM_RIGHT_SYMMETRIC:
					pd_idx = sector_div(stripe2, raid_disks);
					qd_idx = (pd_idx + 1) % raid_disks;
					*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
					break;

				case ALGORITHM_PARITY_0:
					pd_idx = 0;
					qd_idx = 1;
					(*dd_idx) += 2;
					break;
				case ALGORITHM_PARITY_N:
					pd_idx = data_disks;
					qd_idx = data_disks + 1;
					break;

				case ALGORITHM_ROTATING_ZERO_RESTART:
					/* Exactly the same as RIGHT_ASYMMETRIC, but the
					 * order of blocks for computing Q is different.
					 */
					pd_idx = sector_div(stripe2, raid_disks);
					qd_idx = pd_idx + 1;
					if (pd_idx == raid_disks-1) {
						(*dd_idx)++;	/* Q D D D P */
						qd_idx = 0;
					} else if (*dd_idx >= pd_idx)
						(*dd_idx) += 2; /* D D P Q D */
					break;

				case ALGORITHM_ROTATING_N_RESTART:
					/* Same as left_asymmetric, but the first stripe is
					 * D D D P Q  rather than
					 * Q D D D P
					 */
					stripe2 += 1;
					pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
					qd_idx = pd_idx + 1;
					if (pd_idx == raid_disks-1) {
						(*dd_idx)++;	/* Q D D D P */
						qd_idx = 0;
					} else if (*dd_idx >= pd_idx)
						(*dd_idx) += 2; /* D D P Q D */
					break;

				case ALGORITHM_ROTATING_N_CONTINUE:
					/* Same as left_symmetric but Q is before P */
					pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
					qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
					*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
					break;

				case ALGORITHM_LEFT_ASYMMETRIC_6:
					/* RAID5 left_asymmetric, with Q on last device */
					pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
					if (*dd_idx >= pd_idx)
						(*dd_idx)++;
					qd_idx = raid_disks - 1;
					break;

				case ALGORITHM_RIGHT_ASYMMETRIC_6:
					pd_idx = sector_div(stripe2, raid_disks-1);
					if (*dd_idx >= pd_idx)
						(*dd_idx)++;
					qd_idx = raid_disks - 1;
					break;

				case ALGORITHM_LEFT_SYMMETRIC_6:
					pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
					*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
					qd_idx = raid_disks - 1;
					break;

				case ALGORITHM_RIGHT_SYMMETRIC_6:
					pd_idx = sector_div(stripe2, raid_disks-1);
					*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
					qd_idx = raid_disks - 1;
					break;

				case ALGORITHM_PARITY_0_6:
					pd_idx = 0;
					(*dd_idx)++;
					qd_idx = raid_disks - 1;
					break;

				default:
					printf("**** Invalid algorithm*** \n");

			}
			break;

		default:
			printf("******invalid raid level******\n");
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * chunk_size + chunk_offset;
	disks->pdisk = pd_idx;
	disks->qdisk = qd_idx;
	return new_sector;
}

/* Inverse of raid5_log2phy: given the physical sector on a device and
 * the data-device index produced by the forward mapping (disks.ddisk),
 * recompute the logical sector.  main() uses this to round-trip-check
 * every layout.
 */
sector_t raid5_phy2log(sector_t r_sector, struct diskinfo disks, int algo, int raid_level)
{
	int dd_idx = disks.ddisk;
	sector_t stripe, stripe2;
	unsigned int chunk_offset;
	int pd_idx;
	sector_t new_sector;
	/* Was hard-coded to SECPERCHUNK, silently ignoring the chunk size
	 * the caller configured in disks.chunk_size.  Benign while
	 * MAX_SECPERCHUNK == SECPERCHUNK, but it defeated the chunk-size
	 * loop in main(). */
	int chunk_size = disks.chunk_size;
	int raid_disks = disks.raid_disks;
	int data_disks = disks.data_disks;

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, chunk_size);
	stripe = r_sector;
	stripe2 = stripe;

	/*
	 * Undo the per-algorithm parity shift so dd_idx becomes the raw
	 * data index within the stripe again.
	 */
	pd_idx = -1;
	switch (raid_level) {
		case RAID4:
			/* Fixed layout: data index was never shifted. */
			break;

		case RAID5:
			switch (algo) {
				case ALGORITHM_LEFT_ASYMMETRIC:
					pd_idx = data_disks - sector_div(stripe2, raid_disks);
					if (dd_idx > pd_idx)
						dd_idx--;
					break;
				case ALGORITHM_RIGHT_ASYMMETRIC:
					pd_idx = sector_div(stripe2, raid_disks);
					if (dd_idx > pd_idx)
						dd_idx--;
					break;
				case ALGORITHM_LEFT_SYMMETRIC:
					pd_idx = data_disks - sector_div(stripe2, raid_disks);
					dd_idx = (raid_disks + dd_idx - pd_idx - 1)%raid_disks;
					break;
				case ALGORITHM_RIGHT_SYMMETRIC:
					pd_idx = sector_div(stripe2, raid_disks);
					dd_idx = (raid_disks + dd_idx - pd_idx - 1)%raid_disks;
					break;
				case ALGORITHM_PARITY_0:  /* P D D D */
					dd_idx--;
					break;
				case ALGORITHM_PARITY_N: /* D D D P */
					break;

				default:
					printf("******invalid algo******\n");
			}
			break;

		case RAID6:
			switch (algo) {
				case ALGORITHM_LEFT_ASYMMETRIC:
					pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
					if (pd_idx == raid_disks-1)
						dd_idx--;	/* Q D D D P */
					else if (dd_idx > pd_idx)
						dd_idx -= 2; /* D D P Q D */
					break;

				case ALGORITHM_RIGHT_ASYMMETRIC:
					pd_idx = sector_div(stripe2, raid_disks);
					if (pd_idx == raid_disks-1)
						dd_idx--;	/* Q D D D P */
					else if (dd_idx > pd_idx)
						dd_idx -= 2; /* D D P Q D */
					break;

				case ALGORITHM_LEFT_SYMMETRIC:
					pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
					dd_idx = (raid_disks + dd_idx - pd_idx - 2)%raid_disks;
					break;

				case ALGORITHM_RIGHT_SYMMETRIC:
					pd_idx = sector_div(stripe2, raid_disks);
					dd_idx = (raid_disks + dd_idx - pd_idx - 2)%raid_disks;
					break;

				case ALGORITHM_PARITY_0:  /* P Q D D D */
					dd_idx -= 2;
					break;

				case ALGORITHM_PARITY_N: /* D D D P Q */
					break;

				case ALGORITHM_ROTATING_ZERO_RESTART:
					/* Exactly the same as RIGHT_ASYMMETRIC, but the
					 * order of blocks for computing Q is different.
					 */
					pd_idx = sector_div(stripe2, raid_disks);
					if (pd_idx == raid_disks-1)
						dd_idx--;	/* Q D D D P */
					else if (dd_idx > pd_idx)
						dd_idx -= 2; /* D D P Q D */
					break;

				case ALGORITHM_ROTATING_N_RESTART:
					/* Same as left_asymmetric, but the first stripe is
					 * D D D P Q  rather than
					 * Q D D D P
					 */
					stripe2 += 1;
					pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
					if (pd_idx == raid_disks-1)
						dd_idx--;	/* Q D D D P */
					else if (dd_idx > pd_idx)
						dd_idx -= 2; /* D D P Q D */
					break;

				case ALGORITHM_ROTATING_N_CONTINUE:
					/* Same as left_symmetric but Q is before P.
					 * Advancing stripe2 makes pd_idx come out one lower
					 * than the forward mapping's, so the "- 2" below
					 * inverts the forward "(pd + 1 + dd)" exactly. */
					stripe2 += 1;
					pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
					dd_idx = (raid_disks + dd_idx - pd_idx - 2)%raid_disks;
					break;

				case ALGORITHM_LEFT_ASYMMETRIC_6:
					/* RAID5 left_asymmetric, with Q on last device */
					pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
					if (dd_idx > pd_idx)
						dd_idx--;
					break;

				case ALGORITHM_RIGHT_ASYMMETRIC_6:
					pd_idx = sector_div(stripe2, raid_disks-1);
					if (dd_idx > pd_idx)
						dd_idx--;
					break;

				case ALGORITHM_LEFT_SYMMETRIC_6:
					pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
					dd_idx = (raid_disks -1 + dd_idx - pd_idx -1) % (raid_disks-1);
					break;

				case ALGORITHM_RIGHT_SYMMETRIC_6:
					pd_idx = sector_div(stripe2, raid_disks-1);
					dd_idx = (raid_disks -1 + dd_idx - pd_idx -1) % (raid_disks-1);
					break;

				case ALGORITHM_PARITY_0_6:
					dd_idx--;
					break;

				default:
					printf("******invalid algo******\n");
			}
			break;

		default:
			printf("******invalid raid level******\n");
	}
	/* Reassemble the logical sector from (stripe, data index, offset). */
	new_sector = stripe*data_disks*chunk_size + dd_idx*chunk_size + chunk_offset;
	return new_sector;
}
