#include<linux/module.h>
#include<linux/kernel.h>
#include<linux/init.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/crc-ccitt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#include <linux/device-mapper.h>
#include <linux/crc-ccitt.h>

#include <linux/mempool.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define NICE_BIO_VERIFIED 5

/*
 * Per-original-bio bookkeeping for an "evil" bio — one that spans more
 * than one 63-sector data zone and must be split into "nice" pieces.
 */
struct evil_payload
{
	struct bio *original_bio;	/* bio as received from the upper layer */
	int evil_count;			/* nice sub-bios still in flight */
};

/*
 * Tracks one "nice" (single-zone) bio together with its integrity
 * metadata (imd) sibling bio through submission and completion.
 */
struct nice_payload {
	int count;			/* completions outstanding (data + imd) */
	struct bio *bio_parts[2];	/* the data bio and the imd bio */
	unsigned int corrupt_mirrors;	/* bitmask of mirrors found corrupt; bit 16 = first-retry marker */
	struct evil_payload *evil_payload; 
};

/*
 * Deferred-work wrapper used by all three workqueues (final writes,
 * alternate reads and recovery); carries the payload across the queue.
 */
struct final_write_work {
	struct bio *original_bio;
	struct work_struct work1;	
	struct nice_payload *nice_payload;
	int error;	/* first error seen while gathering the parts */
};

/* Per-target context: backing device and start offset from the ctr args. */
struct my_dm_target {
	struct dm_dev *dev;
	sector_t start;
};

/*
 * On-disk integrity-metadata tuple, one per data sector of a zone.
 * csum holds the sector's CRC-CCITT (tuple 0's csum doubles as the
 * checksum of imd-sector bytes 8..511 — see final_write()/verify()).
 * Packed: this is an on-disk layout.
 */
struct imd_tuple {
	__be16 csum;
	__be16 flags;
	__be32 tag;
} __attribute__ ((packed));


static struct workqueue_struct *pending_writes;
static DECLARE_MUTEX(bio_lock);

static struct workqueue_struct *alt_read_list;
static struct workqueue_struct *recovery_list;

/*
 * One leg of the mirror set.
 * NOTE(review): appears to mirror the struct mirror layout from the
 * kernel's dm-raid1 code — must stay layout-identical; verify on every
 * kernel update.
 */
struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;	/* non-zero once I/O errors were seen on this leg */
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;	/* start sector of this leg on its device */
};

/*
 * NOTE(review): local copy of the dm-raid1 mirror_set layout, accessed
 * through the externally-defined radc_mirror_set pointer.  Field
 * offsets must match the kernel's drivers/md/dm-raid1.c exactly —
 * confirm against the target kernel version.
 */
struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];	/* variable-length array of legs */
};

/*
 * NOTE(review): duplicates the private struct dm_region_hash from the
 * kernel's drivers/md/dm-region-hash.c so that hash_lock can be taken
 * directly (see radc_recover()).  Layout must match the running kernel
 * exactly — verify on every kernel update.
 */
struct dm_region_hash {
        uint32_t region_size;
        unsigned region_shift;

        /* holds persistent region state */
        struct dm_dirty_log *log;

        /* hash table */
        rwlock_t hash_lock;
        mempool_t *region_pool;
        unsigned mask;
        unsigned nr_buckets;
        unsigned prime;
        unsigned shift;
        struct list_head *buckets;

        unsigned max_recovery; /* Max # of regions to recover in parallel */

        spinlock_t region_lock;
        atomic_t recovery_in_flight;
        struct semaphore recovery_count;
        struct list_head clean_regions;
        struct list_head quiesced_regions;
        struct list_head recovered_regions;
        struct list_head failed_recovered_regions;

        /*
         * If there was a barrier failure no regions can be marked clean.
         */
        int barrier_failure;
        void *context;
        sector_t target_begin;

        /* Callback function to schedule bios writes */
        void (*dispatch_bios)(void *context, struct bio_list *bios);

        /* Callback function to wakeup callers worker thread. */
        void (*wakeup_workers)(void *context);

        /* Callback function to wakeup callers recovery waiters. */
        void (*wakeup_all_recovery_waiters)(void *context);
};

static struct mirror M;

/*
 * NOTE(review): mirrors the private struct dm_region from the kernel's
 * dm-region-hash.c; must stay layout-identical for the __rh_find()
 * results used in radc_recover() — verify against the target kernel.
 */
struct dm_region {
	struct dm_region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

extern struct mirror_set *radc_mirror_set;

extern void read_async_bio(struct mirror *, struct bio *);
extern void do_recovery(struct mirror_set *);
extern int recover(struct mirror_set *, struct dm_region *);	
extern int region_in_sync(struct mirror_set *, region_t ,int );
extern void set_default_mirror(struct mirror *);
extern struct mirror *get_default_mirror(struct mirror_set *);
extern struct dm_region *__rh_find(struct dm_region_hash *, region_t );

static void read_end_io(struct bio *, int);

/*
 * Map an unadjusted (user-visible) sector number to its data-zone
 * index.  Each zone holds 63 data sectors; the floor of the division
 * is the zone number.
 */
static inline int get_zone_number(sector_t non_adjusted_sector)
{
	unsigned long sector = (unsigned long)non_adjusted_sector;

	return (int)(sector / 63);
}

/*
 * Number of imd (metadata) sectors that lie on disk at or before the
 * given unadjusted sector: one per completed-or-current 63-sector zone.
 */
static inline sector_t imd_sectors_passed(sector_t non_adjusted_sector)
{
	unsigned long sector = (unsigned long)non_adjusted_sector;

	return (sector / 63) + 1;
}

/*
 * On-disk sector of the imd sector for the zone containing the given
 * unadjusted sector: each zone occupies 64 on-disk sectors, with the
 * imd sector first.
 */
static inline sector_t get_imd_sector(sector_t non_adjusted_sector)
{
	int zone = get_zone_number(non_adjusted_sector);

	return zone * 64;
}

/*
 * A bio is "evil" when its first and last sectors fall in different
 * 63-sector zones (it would straddle an imd sector on disk and must be
 * split).  Works on unadjusted sector numbers.  Returns 1 if evil,
 * 0 if nice.
 */
static int is_bio_evil(struct bio *bio) 
{
	sector_t first = bio->bi_sector;
	sector_t last = (bio->bi_sector + (bio->bi_size >> 9)) - 1;

	return ((unsigned long)first / 63) != ((unsigned long)last / 63);
}

/*
 * Dump the interesting fields of a bio to the kernel log for debugging.
 * @s: name of the calling function, printed with each line.
 *
 * BUGFIX: the old format strings passed pointers to %u and a sector_t
 * to %llu without a cast — undefined behavior where the widths differ.
 * Pointers now use %p and bi_sector is cast to unsigned long long.
 */
static void disp_bio_fields(char *s, struct bio *bio)
{
	printk(KERN_CRIT "\n <<in function disp_bio_fields (for function %s) \n", s);

	if (bio == NULL) {
		printk(KERN_CRIT "\n %s disp_bio_fields: bio is null.... \n", s);
		return;
	}

	printk(KERN_CRIT "\n disp_bio_fields: bio address = %p \n", bio);
	printk(KERN_CRIT "\n disp_bio_fields: bi_sector = %llu \n",
	       (unsigned long long)bio->bi_sector);
	printk(KERN_CRIT "\n disp_bio_fields: bi_rw = %lu \n", bio->bi_rw);
	printk(KERN_CRIT "\n disp_bio_fields: bi_vcnt = %d \n", bio->bi_vcnt);
	printk(KERN_CRIT "\n disp_bio_fields: bi_idx = %d \n", bio->bi_idx);
	printk(KERN_CRIT "\n disp_bio_fields: bi_size = %u \n", bio->bi_size);
	printk(KERN_CRIT "\n disp_bio_fields: bi_io_vec = %p \n", bio->bi_io_vec);
	printk(KERN_CRIT "\n disp_bio_fields: bi_end_io = %p \n", bio->bi_end_io);
	printk(KERN_CRIT "\n disp_bio_fields: bi_private = %p \n", bio->bi_private);

	printk(KERN_CRIT "\n >>out function disp_bio_fields (for function %s) \n", s);
}

/*
 * kcopyd completion callback for a recovery copy (see radc_recover()).
 * Only logs failures; @context carries the dm_region but is unused.
 */
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	if (read_err)
		printk(KERN_CRIT "\n recovery_complete : could not read from source for kcopyd.... \n");

	if (write_err)
		printk(KERN_CRIT "\n recovery_complete : could not write to all destinations for kcopyd.... \n");
}

static void radc_recover(struct work_struct * work)
{
	struct nice_payload *nice_payload;
	struct final_write_work *recovery_work;
	struct bio *bio=NULL;

	struct dm_region *reg;
	region_t region;
	
	int i=0;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;

	unsigned long flags = 0;
	region_t key;
	sector_t region_size;
	struct mirror *m;

	int r;
	int flag = 0;

	printk(KERN_CRIT "\n <<in function radc_recover \n");	

	recovery_work = container_of(work,struct final_write_work,work1);
	BUG_ON(recovery_work == NULL);

	nice_payload = recovery_work->nice_payload;
	BUG_ON(nice_payload == NULL);

	if(nice_payload->corrupt_mirrors == 65536)
	{
		printk(KERN_CRIT "\n radc_recover: corrupt mirrors = 65536 \n");
		return;
	}

	bio = recovery_work->original_bio;
	BUG_ON(bio == NULL);

	region = dm_rh_bio_to_region(radc_mirror_set->rh,bio);
	printk(KERN_CRIT "\n radc_recover : got region....region = %u \n",region);	

	i = 0;
	m = radc_mirror_set->mirror;

	while(i < (radc_mirror_set->nr_mirrors))
     	{
		printk(KERN_CRIT "\n radc_recover : inside first while....i = %d \n",i); 		

 		M = *m;
		printk(KERN_CRIT "\n radc_recover : inside second while....m = %u \n",m); 		

		if((nice_payload->corrupt_mirrors & (1 << i)) == 0)
			 break;

		i++;
		m++;
	}//end of while

	BUG_ON(region == NULL);

	read_lock(&radc_mirror_set->rh->hash_lock);
	reg = __rh_find(radc_mirror_set->rh,region);
        read_unlock(&radc_mirror_set->rh->hash_lock);

	if(reg == NULL)
		printk(KERN_CRIT "\n radc_recover : dm_region is NULL \n");

	BUG_ON(reg == NULL);
	key = dm_rh_get_region_key(reg);
	printk(KERN_CRIT "\n radc_recover : got region key....key = %ld \n",key);	

	region_size = dm_rh_get_region_size(radc_mirror_set->rh);
	printk(KERN_CRIT "\n radc_recover : got region size....size = %ld \n",region_size);	
		
	/* fill in the source */
//---------------------------------------------
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(radc_mirror_set->rh, key);
	if (key == (radc_mirror_set->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
//---------------------------------------------

		from.count = radc_mirror_set->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;
	/* fill in the destinations */
//---------------------------------------------
	for (i = 0, dest = to; i < radc_mirror_set->nr_mirrors; i++) {
		if ((nice_payload->corrupt_mirrors & (1 << i))!=1)
			continue;

		M = *(radc_mirror_set->mirror + i);
		dest->bdev = M.dev->bdev;
		dest->sector = M.offset + dm_rh_region_to_sector(radc_mirror_set->rh, key);
		dest->count = from.count;
		dest++;
	}
	nice_payload->corrupt_mirrors = 0;
	nice_payload->count = 1;
	/* hand to kcopyd */
//---------------------------------------------
//	if (!errors_handled(radc_mirror_set))
//		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(radc_mirror_set->kcopyd_client, &from, radc_mirror_set->nr_mirrors - 1, to,flags, recovery_complete, reg);

	read_end_io(bio,0);
	printk(KERN_CRIT "\n <<out function radc_recover \n");	
}

static void queue_for_radc_recover(struct bio *bio)
{
	struct nice_payload *nice_payload = bio->bi_private; 	
	struct final_write_work *recovery_work;

	recovery_work = kmalloc(sizeof(struct final_write_work),GFP_NOIO);
	recovery_work->original_bio = bio;
	recovery_work->nice_payload = nice_payload;
	
	INIT_WORK(&(recovery_work->work1),radc_recover);
	queue_work(recovery_list,&(recovery_work->work1));
}

static void radc_alt_read(struct work_struct *work)
{
	struct nice_payload *nice_payload;
	struct final_write_work *alt_read_work;
	struct bio *bio;
	struct mirror *m;
	region_t region;
	unsigned int i = 0;

	printk(KERN_CRIT "\n<<in function radc_alt_read \n");	

	alt_read_work = container_of(work,struct final_write_work,work1);
	BUG_ON(alt_read_work == NULL);

	nice_payload = alt_read_work->nice_payload;
	BUG_ON(nice_payload == NULL);

	bio = nice_payload->bio_parts[0];
	
	printk(KERN_CRIT "\n radc_alt_read : corrupt_mirrors before modify = %d \n",nice_payload->corrupt_mirrors); 		
	if(nice_payload->corrupt_mirrors == 0)
		nice_payload->corrupt_mirrors |= (1 << 16);

	else
	{
		if((nice_payload->corrupt_mirrors)>>16 == 1)
		{
			nice_payload->corrupt_mirrors = 0;
		}
		for(i = 0;i < radc_mirror_set->nr_mirrors;i++)
		{
			if((nice_payload->corrupt_mirrors & (1 << i)) == 0)
			{
				nice_payload->corrupt_mirrors |= (1 << i);
				break;
			}
		}	
	}
	printk(KERN_CRIT "\n radc_alt_read : corrupt_mirrors after modify = %d \n",nice_payload->corrupt_mirrors); 		

	nice_payload->count = 1;
	
	if(!radc_mirror_set->in_sync)
		printk(KERN_CRIT "\n radc_alt_read : mirror set is not in sync... \n");

	region = dm_rh_bio_to_region(radc_mirror_set->rh, bio);

	if(likely(region_in_sync(radc_mirror_set, region, 1)))
		printk(KERN_CRIT "\n radc_alt_read : region is in sync.... \n");

	else	
	{
		printk(KERN_CRIT "\n radc_alt_read : region is not in sync.... \n");
	
		do_recovery(radc_mirror_set);
	}
 	m = radc_mirror_set->mirror;

	printk(KERN_CRIT "\n radc_alt_read : radc_mirror_set->mirror = %u radc_mirror_set->mirror->dev->bdev = %u \n",m,(radc_mirror_set->mirror)->dev->bdev); 		

	printk(KERN_CRIT "\n radc_alt_read : radc_mirror_set->mirror + 1 = %u (radc_mirror_set->mirror + 1)->dev->bdev = %u \n",(m + 1),(radc_mirror_set->mirror + 1)->dev->bdev); 		
	
	i = 0;	
	while(i < radc_mirror_set->nr_mirrors)
	{
		printk(KERN_CRIT "\n radc_alt_read : inside second while....i = %d \n",i); 		

		M = *m;

		printk(KERN_CRIT "\n radc_alt_read : inside second while....m = %u \n",m); 		

		if( (nice_payload->corrupt_mirrors & (1 << i)) == 0)
		{
			if(!atomic_read(&(m->error_count)))	
			{			
				break;
			}
		}			
		else if(atomic_read(&(M.error_count)))
		{	
			printk(KERN_CRIT "\n radc_alt_read : inside second while....M = %u is not in sync... calling do_recovery... \n",M); 		
			do_recovery(radc_mirror_set);
			break;
		}	
		i++;
		m++;
	}

	if(i == radc_mirror_set->nr_mirrors)
	{
		printk(KERN_CRIT "\n Requested data corrupt on all mirrors..... \n");
//		bio_endio(alt_read_work->nice_payload->evil_payload->original_bio,-EIO); 
		printk(KERN_CRIT "\n<<out function radc_alt_read \n");	
		return;
	}
	
	else
	{
		printk(KERN_CRIT "\n Alternate mirror obtained..... m = %u m->dev->bdev = %u \n",m,m->dev->bdev);

		
//		bio->bi_bdev = m->dev->bdev;
		bio->bi_private = nice_payload;
		bio->bi_end_io = read_end_io;	

		set_default_mirror(m);

		printk(KERN_CRIT "\n radc_alt_read default = %u \n",get_default_mirror(radc_mirror_set));

		submit_bio(bio->bi_rw,bio);
//		read_async_bio(m,bio);
	}
	printk(KERN_CRIT "\n<<out function radc_alt_read \n");	
}

static void queue_for_radc_alt_read(struct bio *bio)
{
	struct nice_payload *nice_payload = bio->bi_private; 	
	struct final_write_work *alt_read_work;

	alt_read_work = kmalloc(sizeof(struct final_write_work),GFP_NOIO);
	alt_read_work->original_bio = bio;
	alt_read_work->nice_payload = nice_payload;
	
	INIT_WORK(&(alt_read_work->work1),radc_alt_read);
	queue_work(alt_read_list,&(alt_read_work->work1));
}

/* Calculate the CRCs for the sectors in given bio. It assumes there is enough
 * space in crc for all the sectors (i.e. crc can hold at least
 * bio_sectors(bio) 16 bit integers).
 * One CRC-CCITT value (seed 0) is produced per 512-byte sector, in bio
 * order, starting at crc[0]. */
static void crc_sectors_from_bio(struct bio *bio,u16 crc[64])
{
	int segno;
	struct bio_vec *bvec;
	unsigned long flags;
	unsigned int sectors;	/* sectors still to checksum */
	size_t len;
	u16 current_crc;
	unsigned char *temp_data_buf,*buffer;
	u16 *temp_crc;		/* next crc[] slot to fill */
	int cnt=0;		/* sectors processed (debug counter, write-only) */

	/* bytes left in the current bvec */
	unsigned int left_in_bvec;

	temp_crc = crc;
	sectors = bio_sectors(bio);
	current_crc = 0;
	segno = 0;

	bio_for_each_segment(bvec,bio,segno) {
		
		/* IRQ-safe page mapping; paired with bvec_kunmap_irq below */
		temp_data_buf = bvec_kmap_irq(bvec,&flags);

		buffer = temp_data_buf;	
		left_in_bvec = bvec->bv_len;

		/* NOTE(review): assumes bv_len is a multiple of 512; if not,
		 * the unsigned subtraction below wraps — confirm callers. */
		while(left_in_bvec && sectors)
		{
			len = 512;
		
			current_crc = crc_ccitt(0,buffer,len);
			*temp_crc = current_crc;
			temp_crc++;
			cnt++;

			buffer += len;

			left_in_bvec -= len;
			sectors--;
		}

		bvec_kunmap_irq(temp_data_buf,&flags);
	}
}

/*
 * Verify a completed nice read.  bio_parts[] holds a data bio and an
 * imd bio; the imd bio is identified as the part whose start sector is
 * a multiple of 64.  First the imd sector's own checksum (tuple 0's
 * csum, covering bytes 8..511) is checked, then each data sector's
 * CRC-CCITT is compared against its tuple.
 *
 * Returns NICE_BIO_VERIFIED on success, the bio_parts[] index of the
 * corrupt part (0 or 1) on a checksum mismatch, or 0 when no imd bio
 * can be identified.
 *
 * BUGFIX: both early-return mismatch paths previously skipped
 * bvec_kunmap_irq(), leaving the imd page kmapped with IRQs disabled.
 *
 * NOTE(review): csum is declared __be16 but compared against a
 * host-order u16 CRC; this only matches if the writer (final_write)
 * stores host-order too — it does, but the __be16 type is misleading.
 */
static int verify(struct nice_payload *nice_payload)
{
	int i, imd_tuple_number_of_bio = 0, j, k;
	struct bio *imd_bio = NULL;
	struct imd_tuple *imd_tuple = NULL;
	unsigned char *imd_buf = NULL;
	u16 crc[64];
	u16 imd_csum = 0;
	sector_t num_of_sectors;

	unsigned long flags = 0;

	if (nice_payload->bio_parts[0] == NULL || nice_payload->bio_parts[1] == NULL) {
		printk(KERN_CRIT "\nverify: bio parts is null \n");
		BUG_ON(nice_payload->bio_parts[0] == NULL);
		BUG_ON(nice_payload->bio_parts[1] == NULL);
		return 0;
	}

	/* imd bio is the 64-aligned part; i = its index, k = the data part */
	if (((unsigned long)(nice_payload->bio_parts[0]->bi_sector) % 64) == 0) {
		imd_bio = nice_payload->bio_parts[0];
		i = 0;
		k = 1;
	} else if (((unsigned long)(nice_payload->bio_parts[1]->bi_sector) % 64) == 0) {
		imd_bio = nice_payload->bio_parts[1];
		i = 1;
		k = 0;
	} else {
		printk(KERN_CRIT "\nverify: no imd received...... \n");
		return 0;
	}

	imd_buf = bvec_kmap_irq(&(imd_bio->bi_io_vec[0]), &flags);
	BUG_ON(imd_buf == NULL);
	imd_tuple = (struct imd_tuple *)imd_buf;
	BUG_ON(imd_tuple == NULL);
	/* tuple 0's csum covers the rest of the imd sector (bytes 8..511) */
	imd_csum = crc_ccitt(0, (imd_buf + 8), 504);

	printk(KERN_CRIT "\nverify: imd_csum = %u \n", imd_csum);

	if (imd_tuple->csum != imd_csum) {
		printk(KERN_CRIT "\n imd bio is corrupt....imd_csum = %u \n", imd_csum);
		bvec_kunmap_irq(imd_buf, &flags);	/* BUGFIX: missing unmap */
		return i;
	}

	bvec_kunmap_irq(imd_buf, &flags);

	crc_sectors_from_bio(nice_payload->bio_parts[k], crc);

	num_of_sectors = ((bio_sectors(nice_payload->bio_parts[k])));

	/* index of the data bio's first tuple within its zone's imd sector */
	imd_tuple_number_of_bio = (nice_payload->bio_parts[k]->bi_sector - (((unsigned long)(nice_payload->bio_parts[k]->bi_sector / 64)) * 64));

	imd_buf = bvec_kmap_irq(&(imd_bio->bi_io_vec[0]), &flags);
	BUG_ON(imd_buf == NULL);
	imd_tuple = (struct imd_tuple *)imd_buf;
	BUG_ON(imd_tuple == NULL);

	for (j = 0; j < num_of_sectors; j++) {
		if (*(crc + j) != ((imd_tuple + j + imd_tuple_number_of_bio)->csum)) {
			printk(KERN_CRIT "\n data bio is corrupt....imd_tuple_csum = %u csum in crc array = %u   \n", ((imd_tuple + j + imd_tuple_number_of_bio)->csum), *(crc + j));
			bvec_kunmap_irq(imd_buf, &flags);	/* BUGFIX: missing unmap */
			return k;	/* checksum mismatch */
		}
	}
	bvec_kunmap_irq(imd_buf, &flags);

	return NICE_BIO_VERIFIED;	/* bio verified */
}

/*
 * Move the corrupt part (bio_parts[@result]) into slot 0 so the retry
 * path can always operate on bio_parts[0].  No-op when result == 0.
 */
static void swap_bios(int result, struct nice_payload *nice_payload)
{
	struct bio *saved;

	if (result == 0)
		return;

	saved = nice_payload->bio_parts[0];
	nice_payload->bio_parts[0] = nice_payload->bio_parts[result];
	nice_payload->bio_parts[result] = saved;
}

/*
 * bi_end_io for both parts (data and imd) of a nice read.
 *
 * Each completion stores its bio into bio_parts[] and decrements
 * nice_payload->count; only the final completion (count == 0) verifies
 * the pair and picks one of four paths:
 *  - verified, no mirror marked corrupt: this nice bio is done; when
 *    all siblings of the original bio are done, release bio_lock, end
 *    the original bio and free the bookkeeping;
 *  - verified but corrupt_mirrors != 0 (a retry succeeded): queue the
 *    region for mirror recovery;
 *  - mismatch with corrupt_mirrors != 0: queue recovery as well;
 *  - mismatch on the first pass: move the corrupt part to slot 0 and
 *    queue an alternate-mirror read.
 */
static void read_end_io(struct bio *bio, int error)
{

	struct nice_payload *nice_payload;
	int result = 0;
	nice_payload = (struct nice_payload *)bio->bi_private;

	/* parts arrive in any order; slot chosen by the remaining count */
	nice_payload->bio_parts[nice_payload->count -1] = bio;

	nice_payload->count = nice_payload->count - 1; 	
		
	if(nice_payload->count == 0)
	{
		/* 0 = no imd, 0/1 = index of corrupt part, 5 = verified */
		result = verify(nice_payload);
		
		//one nice bio has completely been processed.

		if((result == NICE_BIO_VERIFIED) && (nice_payload->corrupt_mirrors == 0))
		{
			nice_payload->evil_payload->evil_count = nice_payload->evil_payload->evil_count - 1;

			/* <= 0 also covers the unsplit ("nice") case, which
			 * starts with evil_count == 0 (see radc_map) */
			if(nice_payload->evil_payload->evil_count<=0)	
			{
				up(&bio_lock);
				bio_endio(nice_payload->evil_payload->original_bio,error);	

				kfree(nice_payload->evil_payload);

				bio_put(nice_payload->bio_parts[0]);

				bio_put(nice_payload->bio_parts[1]);

				kfree(nice_payload);

			}
		}
		
		else if((result == NICE_BIO_VERIFIED) && (nice_payload->corrupt_mirrors != 0))
		{
			printk(KERN_CRIT "\n read_end_io : Error in verify...(result == 5) && (nice_payload->corrupt_mirrors != 0) \n");			
			queue_for_radc_recover(bio);
		}

		else if((result != 0) && (nice_payload->corrupt_mirrors != 0))
		{	
			printk(KERN_CRIT "\n read_end_io : Error in verify...(result != 0) && (nice_payload->corrupt_mirrors != 0) \n");		
			queue_for_radc_recover(bio);

		}
		else
		{			
			printk(KERN_CRIT "\n read_end_io : Error in verify... \n");
			printk(KERN_CRIT "\n read_end_io : bio->bi_sector = %llu \n size = %d \n",nice_payload->bio_parts[result]->bi_sector, nice_payload->bio_parts[result]->bi_size>>9  );

			/* put the corrupt part in slot 0, then retry elsewhere */
			swap_bios(result,nice_payload);
			queue_for_radc_alt_read(nice_payload->bio_parts[0]);
		}
	}
}

/*
 * Allocate a one-page, single-vec READ bio addressed at @imd_sector on
 * the same device as @original_bio.  On any allocation failure the
 * partially-built resources are released and NULL is returned; the
 * caller owns the returned bio (and its page) otherwise.
 */
static struct bio *prepare_imd_bio(sector_t imd_sector, struct bio *original_bio)
{
	struct page *pg = NULL;
	struct bio *imd = NULL;

	BUG_ON(original_bio == NULL);

	pg = alloc_page(GFP_NOIO);
	if (!pg) {
		printk(KERN_CRIT "\n in function prepare_imd_bio ::: error....page not allocated \n");
		goto error;
	}

	imd = bio_alloc(GFP_NOIO, 1);
	if (!imd) {
		printk(KERN_CRIT "\n in function prepare_imd_bio ::: error....bio not allocated \n");
		goto error;
	}

	imd->bi_bdev = original_bio->bi_bdev;
	imd->bi_sector = imd_sector;
	imd->bi_size = 0;
	imd->bi_rw = READ;
	imd->bi_vcnt = 0;

	if (!bio_add_page(imd, pg, 512, 0)) {
		printk(KERN_CRIT "\n in function prepare_imd_bio ::: error....page not added to bio\n");
		goto error;
	}

	return imd;

error:
	if (pg)
		__free_page(pg);
	if (imd) {
		imd->bi_end_io = NULL;
		bio_put(imd);
	}
	return NULL;
}

/*
 * Issue a nice READ: the zone's imd sector is read alongside the data.
 * bio->bi_private must already hold the nice_payload (set by
 * handle_nice_bio); both sub-bios complete through read_end_io().
 * The data bio's sector is shifted from the user-visible address to
 * the on-disk address by adding the count of preceding imd sectors.
 */
static void read_nice_bio(struct bio * bio, struct evil_payload * evil_payload)
{
	struct bio *imd_bio;
	sector_t imd_sector;
	struct nice_payload *nice_payload;

	BUG_ON(bio == NULL);

  	nice_payload = bio->bi_private;
	BUG_ON(nice_payload == NULL);
	
	/* imd sector of this zone sits at zone_number * 64 on disk */
	imd_sector = get_imd_sector(bio->bi_sector);
	imd_bio = prepare_imd_bio(imd_sector,bio);	

	BUG_ON(imd_bio == NULL);

	imd_bio->bi_end_io = read_end_io;
	imd_bio->bi_private = nice_payload ;

	/* translate user-visible sector to on-disk sector */
	bio->bi_sector = (bio->bi_sector + (imd_sectors_passed(bio->bi_sector)));

	/* barriers keep the imd/data pair ordered on the device */
	bio->bi_rw |= (1 << BIO_RW_BARRIER);
	imd_bio->bi_rw |= (1 << BIO_RW_BARRIER);
	
	submit_bio(imd_bio->bi_rw, imd_bio);
	submit_bio(bio->bi_rw, bio);
}

/*
 * bi_end_io for the data and imd halves of a nice write.  The second
 * completion (count reaching 0) retires this nice bio; once all
 * siblings of the original bio are done, bio_lock is released and the
 * original bio is ended.  Each sub-bio's reference is dropped here.
 */
static void write_end_io(struct bio *bio, int error)
{
	struct nice_payload *nice_payload;

	nice_payload = (struct nice_payload *)bio->bi_private;
	BUG_ON(nice_payload==NULL);

	nice_payload->count -= 1;

	nice_payload->bio_parts[nice_payload->count] = bio;

	if(nice_payload->count == 0)
	{
		nice_payload->evil_payload->evil_count -= 1;

		BUG_ON(nice_payload->evil_payload->original_bio == NULL);

		/* <= 0 also covers the unsplit case (evil_count starts at 0) */
		if(nice_payload->evil_payload->evil_count <= 0)	
		{
			up(&bio_lock);
			bio_endio(nice_payload->evil_payload->original_bio,error);
			kfree(nice_payload->evil_payload);
		}

		kfree(nice_payload);
	}	
	bio_put(bio);
}

/*
 * Work handler ("radc-writes" queue): the zone's imd sector has been
 * read back into bio_parts[1]; recompute the CRC tuples covered by the
 * data bio (bio_parts[0]), refresh the imd sector's own checksum, then
 * submit both bios with barriers set.  Both complete in write_end_io().
 *
 * NOTE(review): tuple 0's csum field doubles as the checksum of imd
 * bytes 8..511 and is written last — when the data bio starts at the
 * first sector of a zone, the per-sector loop writes tuple 0 first and
 * that value is then overwritten; confirm this is the intended layout.
 */
static void final_write(struct work_struct *work)
{
	struct bio *data = NULL, *imd_bio = NULL;
	struct nice_payload *nice_payload = NULL;
	struct final_write_work *final_write_work = NULL;
	char *temp_imd_buf = NULL;
	sector_t num_of_sectors,imd_tuple_number_of_bio;
	struct bio_vec *bvec = NULL;
	struct imd_tuple *imd_tuple = NULL, *temp_imd_tuple = NULL;
	unsigned long flags;
	u16 crc[64],*temp_crc = NULL;

	final_write_work = container_of(work,struct final_write_work,work1);
	BUG_ON(final_write_work == NULL);
	nice_payload = final_write_work->nice_payload;
	BUG_ON(nice_payload == NULL);

	data = nice_payload->bio_parts[0];
	BUG_ON(data == NULL);
	imd_bio = nice_payload->bio_parts[1];
	BUG_ON(imd_bio == NULL);

	/* fresh CRC for every sector the data bio is about to write */
	crc_sectors_from_bio(nice_payload->bio_parts[0],crc);
	
	num_of_sectors = bio_sectors(nice_payload->bio_parts[0]);

	/* index of the data bio's first tuple within its zone's imd sector */
	imd_tuple_number_of_bio = (nice_payload->bio_parts[0]->bi_sector - (((unsigned long)(nice_payload->bio_parts[0]->bi_sector/64))*64));

	bvec = &imd_bio->bi_io_vec[0];

	temp_imd_buf = bvec_kmap_irq(bvec,&flags);
	
	imd_tuple = (struct imd_tuple *)temp_imd_buf;
	temp_imd_tuple = imd_tuple;
	temp_imd_tuple = temp_imd_tuple + imd_tuple_number_of_bio;
	temp_crc = crc;

	/* copy the per-sector CRCs into their tuples */
	while(num_of_sectors > 0)
	{
		temp_imd_tuple->csum = *temp_crc;

		temp_imd_tuple++;
		temp_crc++;

		num_of_sectors = num_of_sectors - 1;
	}				

	/* refresh the imd sector's own checksum (bytes 8..511) */
	imd_tuple->csum = crc_ccitt(0,(temp_imd_buf + 8),504);

	bvec_kunmap_irq(temp_imd_buf,&flags);

	imd_bio->bi_end_io = write_end_io;
	imd_bio->bi_private = nice_payload;
	imd_bio->bi_rw |= WRITE;
	imd_bio->bi_rw |= (1 << BIO_RW_BARRIER);
	data->bi_rw |= (1 << BIO_RW_BARRIER);
	
	submit_bio(imd_bio->bi_rw,imd_bio);

	submit_bio(data->bi_rw,data);

	kfree(final_write_work);
}

static void queue_for_final_write(struct bio *imd_bio, int error)
{
	struct final_write_work *final_write_work = imd_bio->bi_private;

	if (error)
		final_write_work->error = error;

	final_write_work->nice_payload->bio_parts[1] = imd_bio;

	INIT_WORK(&(final_write_work->work1), final_write);
	queue_work(pending_writes,&(final_write_work->work1));
}

/*
 * Start a nice WRITE.  The data cannot go down before its imd tuples
 * are updated, so the zone's imd sector is read back first; its
 * completion (queue_for_final_write) schedules final_write(), which
 * recomputes the checksums and submits both writes.
 */
static void write_nice_bio(struct bio * bio, struct evil_payload * evil_payload)
{
	struct bio *imd_bio;
	sector_t imd_sector;
	struct nice_payload *nice_payload;
	struct final_write_work *final_write_work;

	BUG_ON(bio == NULL);

	nice_payload = bio->bi_private;
	BUG_ON(nice_payload == NULL);

	nice_payload->bio_parts[0] = bio;	

	imd_sector = get_imd_sector(bio->bi_sector);
	imd_bio = prepare_imd_bio(imd_sector,bio);	

	BUG_ON(imd_bio == NULL);

  	final_write_work = kmalloc(sizeof(struct final_write_work),GFP_NOIO);
	BUG_ON(final_write_work == NULL);

	final_write_work->nice_payload = nice_payload;

	/* this imd read completes into queue_for_final_write, not write_end_io */
	imd_bio->bi_private = final_write_work;
	imd_bio->bi_end_io = queue_for_final_write;

	/* translate user-visible sector to on-disk sector now; final_write
	 * later submits the data bio unchanged */
	bio->bi_sector = (bio->bi_sector + (imd_sectors_passed(bio->bi_sector)));

	printk(KERN_CRIT "\n write_nice_bio: bi_sector: %llu size = %d \n",bio->bi_sector, bio->bi_size>>9);

	imd_bio->bi_rw |= (1 << BIO_RW_BARRIER);

	submit_bio(imd_bio->bi_rw, imd_bio);	

}

/*
 * Wrap a single-zone bio in a fresh nice_payload and dispatch it down
 * the read or write path.  The payload starts with both parts unset,
 * no corrupt mirrors and an expected completion count of 2 (data + imd).
 */
static void handle_nice_bio(struct bio *bio, struct evil_payload *evil_payload)
{
	struct nice_payload *np;

	np = kmalloc(sizeof(struct nice_payload), GFP_NOIO);
	BUG_ON(np == NULL);

	np->count = 2;
	np->evil_payload = evil_payload;
	np->corrupt_mirrors = 0;
	np->bio_parts[0] = NULL;
	np->bio_parts[1] = NULL;

	bio->bi_private = np;

	if ((bio->bi_rw & WRITE) == WRITE) {
		bio->bi_end_io = write_end_io;
		write_nice_bio(bio, evil_payload);
	} else {
		bio->bi_end_io = read_end_io;
		read_nice_bio(bio, evil_payload);
	}
}

/*
 * Shape clone @bio so it covers [start, end_of_bio] (unadjusted sector
 * numbers) of @original_bio: walk the original's segments to find the
 * starting bvec, trim that bvec's offset/length if start falls inside
 * it, and set bi_sector/bi_size accordingly.
 * NOTE(review): mutates bio->bi_io_vec in place — relies on bio_clone()
 * clones having their own bvec array (data pages remain shared).
 */
static void set_nice_fields(struct bio *original_bio,struct bio *bio, sector_t start, sector_t end_of_bio)
{
	//todo: change end_of_bio to length of bio
	struct bio_vec *bvec;
	int i = 0,current_bvec_no=0;
	sector_t old_len;	/* end-sector reached before the current bvec */
	sector_t len = original_bio->bi_sector;	/* running end-sector of scanned bvecs */

	old_len = original_bio->bi_sector;

	bio_for_each_segment(bvec,original_bio,i)
	{
		len += (bvec->bv_len >> 9);
		current_bvec_no++;
		if(len >= start)
		{
			if(len==start)
				bio->bi_idx = (current_bvec_no);
			else
			{
				//bvec correction: start falls inside this bvec,
				//so advance its offset and shrink its length
				bio->bi_idx = (current_bvec_no-1);

				bio->bi_io_vec[bio->bi_idx].bv_offset += (( start - old_len )*512);
				bio->bi_io_vec[bio->bi_idx].bv_len -= ((start - old_len) * 512);

			}
			break;
		}
		old_len = len;
	}

	bio->bi_sector = start;
	bio->bi_size = (( (end_of_bio-start) +1)*512);
}

/* Clone @bio or die; every evil-bio piece requires the clone to exist. */
static struct bio *radc_clone_or_bug(struct bio *bio)
{
	struct bio *clone = bio_clone(bio, GFP_NOIO);

	if (clone == NULL) {
		printk(KERN_CRIT "\nhandle_evil_bio : BIO IS NULL... \n");
		BUG_ON(clone == NULL);
	}
	return clone;
}

/*
 * Split an "evil" bio — one spanning several 63-sector zones — into one
 * nice clone per zone and dispatch each through handle_nice_bio().
 * evil_count is primed with the number of pieces so the completion
 * handlers know when the original bio is fully done.
 *
 * Refactor: the three identical clone+NULL-check blocks are now one
 * helper, and the while(1)/break loop is a plain condition loop; the
 * splitting arithmetic is unchanged.
 */
static void handle_evil_bio(struct bio *bio, struct evil_payload *evil_payload)
{
	sector_t end_of_temp;
	struct bio *temp;
	sector_t start, end;

	/* start and end are unadjusted (user-visible) sector numbers */
	start = bio->bi_sector;
	end = (bio->bi_sector + (bio->bi_size >> 9)) - 1;

	/* one nice sub-bio per zone touched */
	evil_payload->evil_count = ((get_zone_number(end)) - (get_zone_number(start))) + 1;

	/* first piece: from start to the last sector of its zone */
	end_of_temp = (((get_zone_number(bio->bi_sector)) * 63) + 62);

	temp = radc_clone_or_bug(bio);
	set_nice_fields(bio, temp, temp->bi_sector, end_of_temp);
	handle_nice_bio(temp, evil_payload);

	temp = radc_clone_or_bug(bio);
	temp->bi_sector = end_of_temp + 1;

	/* middle pieces: whole 63-sector zones */
	while ((temp->bi_sector + 62) < end) {
		end_of_temp = temp->bi_sector + 62;
		set_nice_fields(bio, temp, temp->bi_sector, end_of_temp);
		handle_nice_bio(temp, evil_payload);

		/* the previous clone was submitted; make a new one */
		temp = radc_clone_or_bug(bio);
		temp->bi_sector = end_of_temp + 1;
	}

	/* final piece: up to the original end */
	end_of_temp = end;
	set_nice_fields(bio, temp, temp->bi_sector, end_of_temp);
	handle_nice_bio(temp, evil_payload);
}

/*
 * Device-mapper map entry point.  Serializes all I/O through bio_lock
 * (released by the end_io paths), points the bio at the backing device
 * and dispatches it: an "evil" bio (crossing a 63-sector zone boundary)
 * is split into per-zone nice bios; a nice bio is cloned and handled
 * directly (evil_count 0 makes its first completion end the original).
 * Always returns DM_MAPIO_SUBMITTED — the original bio is completed
 * from our completion handlers, never remapped by dm core.
 */
static int radc_map(struct dm_target *ti, struct bio *bio,union map_info *map_context)
{
	struct my_dm_target *mdt = (struct my_dm_target *) ti->private;
	struct bio *data1;
	struct evil_payload *evil_payload=NULL;

	printk(KERN_CRIT "\n<<in function radc_map \n");

	/* one original bio in flight at a time; up() at final completion */
	down(&bio_lock);
	bio->bi_bdev = mdt->dev->bdev;

	evil_payload = kmalloc(sizeof(struct evil_payload ),GFP_NOIO);	
	BUG_ON(evil_payload==NULL);
	
	evil_payload->original_bio = bio;

	if(is_bio_evil(bio))
	{
		printk(KERN_CRIT "\nradc_map::bio is evil \n");
		handle_evil_bio(bio,evil_payload);
	}
	else		
	{				
		printk(KERN_CRIT "\nradc_map::bio is nice \n");
		evil_payload->evil_count = 0;

		data1 = bio_clone(bio,GFP_NOIO);	
		handle_nice_bio(data1,evil_payload);
	}
	printk(KERN_CRIT "\n>>out function radc_map \n");	
	return DM_MAPIO_SUBMITTED;
}

static int radc_ctr(struct dm_target *ti,unsigned int argc,char **argv)
{
	struct my_dm_target *mdt;
	unsigned long long start;

//----------------------------------------------------------
	struct mirror *m;

 	m = radc_mirror_set->mirror;
	
	printk(KERN_CRIT "\n radc_alt_read : radc_mirror_set->mirror = %u radc_mirror_set->mirror->dev->bdev = %u \n",m,(radc_mirror_set->mirror)->dev->bdev); 		

	printk(KERN_CRIT "\n radc_alt_read : radc_mirror_set->mirror + 1 = %u (radc_mirror_set->mirror + 1)->dev->bdev = %u \n",(m + 1),(radc_mirror_set->mirror + 1)->dev->bdev); 		
	
	m = get_default_mirror(radc_mirror_set);
	printk(KERN_CRIT "\n radc_ctr : default mirror = %u \n",m->dev->bdev);

//	printk(KERN_CRIT "\n radc_alt_read : radc_mirror_set->mirror + 2 = %u (radc_mirror_set->mirror + 2)->dev->bdev = %u \n",(m + 2),(radc_mirror_set->mirror + 2)->dev->bdev); 		

//-----------------------------------------------------------
	if (argc != 2) {
		printk(KERN_CRIT "\n Invalid no.of arguments.\n");
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	mdt = kmalloc(sizeof(struct my_dm_target), GFP_KERNEL);
	if(mdt==NULL)
	{
		printk(KERN_CRIT "\n Mdt is null\n");
		ti->error = "dm-radc: Cannot allocate linear context";
		return -ENOMEM;
	}	

	if(sscanf(argv[1], "%llu", &start)!=1)
	{
		ti->error = "dm-radc: Invalid device sector";
		goto bad;
	}

	mdt->start=(sector_t)start;

	//update length of device to have complete zones only
	ti->len = (unsigned long)((ti->len >> 6) * 63);

	printk(KERN_CRIT "\n Length available to radc user : start = %llu  len = %llu",ti->begin, ti->len);
	printk(KERN_CRIT "\n Length RADC is using : start = %llu len = %llu",ti->begin,ti->len + imd_sectors_passed(ti->len));


 	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &mdt->dev)) {
		ti->error = "dm-radc: Device lookup failed";
		goto bad;
	}

	ti->private = mdt;
	pending_writes = create_workqueue("radc-writes");
	alt_read_list = create_workqueue("radc-alt-reads");
	recovery_list = create_workqueue("radc-recovery");

	return 0;

  bad:
	kfree(mdt);
	return -EINVAL;
}

/*
 * Destructor: release the backing device, tear down the three
 * workqueues and free the per-target context.
 */
static void radc_dtr(struct dm_target *ti)
{
	struct my_dm_target *mdt = ti->private;

	dm_put_device(ti, mdt->dev);

	destroy_workqueue(pending_writes);
	destroy_workqueue(alt_read_list);
	destroy_workqueue(recovery_list);

	kfree(mdt);
}

/* device-mapper registration table for the "radc" target */
static struct target_type radc_target = {

	.name = "radc",
	.version = {1,0,0},
	.module = THIS_MODULE,
	.ctr = radc_ctr,
	.dtr = radc_dtr,
	.map = radc_map,
};
	
/*
 * Module init: register the "radc" device-mapper target.
 * BUGFIX: propagate the registration error instead of always returning
 * 0, which left the module loaded with no target registered.
 */
static int init_radc_target(void)
{
	int result;

	result = dm_register_target(&radc_target);
	if (result < 0) {
		printk(KERN_CRIT "\n Error in registering target \n");
		return result;
	}

	return 0;
}

/* Module exit: unregister the "radc" target. */
static void cleanup_radc_target(void)
{
	dm_unregister_target(&radc_target);
}

module_init(init_radc_target);
module_exit(cleanup_radc_target);
MODULE_LICENSE("GPL");
