#include<linux/module.h>
#include<linux/kernel.h>
#include<linux/init.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/crc-ccitt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#include <linux/device-mapper.h>
#include <linux/crc-ccitt.h>

#include <linux/mempool.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>


/* Per-original-bio bookkeeping for a request whose sector range crosses
 * a zone boundary ("evil" in this module's terminology — see
 * is_bio_evil()). Shared by every fragment the bio is split into. */
struct evil_payload
{
	struct bio *original_bio;	/* bio handed to radc_map(); ended when evil_count reaches 0 */
	int evil_count;			/* number of per-zone fragments still outstanding */
};

/* Bookkeeping for one zone-contained ("nice") bio and its companion
 * integrity-metadata (imd) bio. Freed in write_end_io(). */
struct nice_payload {
	int count;			/* outstanding sub-bios (data + imd); payload completes at 0 */
	struct bio *bio_parts[3];	/* [0] = data bio, [1] = imd bio (filled as they complete/arrive) */
	unsigned int corrupt_mirrors;	/* initialized to 0 in handle_nice_bio(); not otherwise used here */
	struct evil_payload *evil_payload;	/* parent bookkeeping, shared across all fragments */
};

/* Work item used to defer the CRC patch-up and final write submission
 * onto the "radc-writes" workqueue (see queue_for_final_write() and
 * final_write()). */
struct final_write_work {
	struct bio *original_bio;	/* NOTE(review): never assigned anywhere in this file — confirm it is needed */
	struct work_struct work1;	/* queued on pending_writes */
	struct nice_payload *nice_payload;	/* payload whose imd sector was just read */
	int error;			/* error from the imd read, if any; recorded but currently never acted upon */
};

/* Per-target context, stored in ti->private by radc_ctr(). */
struct my_dm_target {
	struct dm_dev *dev;	/* backing device looked up from argv[0] */
	sector_t start;		/* start sector parsed from argv[1] */
};

/* On-disk integrity-metadata entry: one 8-byte tuple per data sector,
 * packed into the zone's imd sector. Only csum is written by this
 * module (see final_write()); flags/tag are carried but untouched. */
struct imd_tuple {
	__be16 csum;	/* CRC-CCITT of the corresponding 512-byte data sector */
	__be16 flags;
	__be32 tag;
} __attribute__ ((packed));


/* Workqueue ("radc-writes") used to move CRC computation and the final
 * data+imd write submission out of bio completion context; created in
 * radc_ctr(), destroyed in radc_dtr(). */
static struct workqueue_struct *pending_writes;


/*
 * Map an unadjusted (target-relative) sector number to its zone index.
 * Each zone exposes 63 data sectors; integer division floors.
 * NOTE(review): narrowing sector_t to unsigned long truncates on 32-bit
 * kernels with 64-bit sector_t — confirm the supported device sizes.
 */
static inline int get_zone_number(sector_t non_adjusted_sector)
{
	unsigned long sector = (unsigned long)non_adjusted_sector;

	return (int)(sector / 63);
}

/*
 * Number of imd (metadata) sectors that lie at or before
 * non_adjusted_sector in the on-disk layout: one per completed
 * 63-sector zone, plus the current zone's own imd sector.
 */
static inline sector_t imd_sectors_passed(sector_t non_adjusted_sector)
{
	unsigned long zones_before = (unsigned long)non_adjusted_sector / 63;

	return zones_before + 1;
}

/*
 * Return the on-disk sector holding the integrity metadata for the
 * zone containing non_adjusted_sector. On disk each zone occupies 64
 * sectors: the imd sector followed by 63 data sectors.
 */
static inline sector_t get_imd_sector(sector_t non_adjusted_sector)
{
	sector_t zone = get_zone_number(non_adjusted_sector);

	return zone * 64;
}

/*
 * Decide whether @bio crosses a 63-sector zone boundary ("evil") or is
 * wholly contained within one zone ("nice"). Works on unadjusted
 * sector numbers, i.e. before the imd-sector interleaving is applied.
 * Returns 1 for evil, 0 for nice.
 */
static int is_bio_evil(struct bio *bio) 
{
	sector_t first_sector, last_sector;

	first_sector = bio->bi_sector;
	last_sector = bio->bi_sector + (bio->bi_size >> 9) - 1;

	/* same zone for first and last sector => no boundary crossed */
	return get_zone_number(first_sector) != get_zone_number(last_sector);
}

/*
 * Debug helper: dump the interesting fields of @bio to the kernel log,
 * tagged with the caller's name @s. Tolerates and reports a NULL bio.
 *
 * Fixed: pointers were printed as "(unsigned)ptr" with %u, which
 * truncates on 64-bit and is undefined behaviour for function
 * pointers — use %p. bi_sector is explicitly widened to match %llu on
 * configs where sector_t is 32-bit, and bi_rw uses %lu to match its
 * unsigned long type.
 */
static void disp_bio_fields(char *s,struct bio *bio)
{
	printk(KERN_CRIT "\n <<in function disp_bio_fields (for function %s) \n",s);

	if(bio==NULL)
	{
		printk(KERN_CRIT "\n %s disp_bio_fields: bio is null.... \n",s);
		return;
	}

	printk(KERN_CRIT "\n disp_bio_fields: bio address = %p \n",bio);
	printk(KERN_CRIT "\n disp_bio_fields: bi_sector = %llu \n",(unsigned long long)bio->bi_sector);
	printk(KERN_CRIT "\n disp_bio_fields: bi_rw = %lu \n",(unsigned long)bio->bi_rw);
	printk(KERN_CRIT "\n disp_bio_fields: bi_vcnt = %d \n",bio->bi_vcnt);
	printk(KERN_CRIT "\n disp_bio_fields: bi_idx = %d \n",bio->bi_idx);
	printk(KERN_CRIT "\n disp_bio_fields: bi_size = %u \n",bio->bi_size);
	printk(KERN_CRIT "\n disp_bio_fields: bi_io_vec = %p \n",bio->bi_io_vec);
	printk(KERN_CRIT "\n disp_bio_fields: bi_end_io = %p \n",bio->bi_end_io);
	printk(KERN_CRIT "\n disp_bio_fields: bi_private = %p \n",bio->bi_private);

	printk(KERN_CRIT "\n >>out function disp_bio_fields (for function %s) \n",s);
}

/*
 * Completion handler shared by the data and imd sub-bios of a "nice"
 * payload (both the read path and the write-back path funnel here).
 *
 * Decrements nice_payload->count; the bio that drives it to zero frees
 * the payload and, once the parent evil_payload's fragment count is
 * also exhausted, ends the original bio with the last error seen and
 * frees the evil_payload.
 *
 * NOTE(review): count and evil_count are plain ints decremented without
 * a lock or atomics, yet completions may run concurrently from
 * interrupt context on different CPUs — confirm safety or convert to
 * atomic_t.
 */
static void write_end_io(struct bio *bio, int error)
{
	struct nice_payload *nice_payload;

	nice_payload = (struct nice_payload *)bio->bi_private;
	BUG_ON(nice_payload==NULL);

	nice_payload->count -= 1;

	/* record which sub-bio finished at this slot ([1] first, then [0]) */
	nice_payload->bio_parts[nice_payload->count] = bio;

	if(nice_payload->count == 0)
	{
		nice_payload->evil_payload->evil_count -= 1;

		BUG_ON(nice_payload->evil_payload->original_bio == NULL);

		/* <= 0 also covers the "nice" path, which starts at 0 */
		if(nice_payload->evil_payload->evil_count <= 0)	
		{
			bio_endio(nice_payload->evil_payload->original_bio,error);
			kfree(nice_payload->evil_payload);
		}

		kfree(nice_payload);
	}	
	bio_put(bio);
}

/*
 * Allocate a one-page, single-sector READ bio aimed at @imd_sector on
 * the same device as @original_bio. The caller installs bi_end_io and
 * bi_private and submits it. Returns NULL on any allocation failure;
 * the page and bio are released on the error path.
 */
static struct bio * prepare_imd_bio( sector_t imd_sector, struct bio * original_bio)
{
	struct bio *imd_bio = NULL;
	struct page *imd_page;

	BUG_ON(original_bio == NULL);

	imd_page = alloc_page(GFP_NOIO);
	if (!imd_page)
	{
		printk(KERN_CRIT "\n in function prepare_imd_bio ::: error....page not allocated \n");
		goto fail;
	}

	imd_bio = bio_alloc(GFP_NOIO,1);
	if (!imd_bio)
	{
		printk(KERN_CRIT "\n in function prepare_imd_bio ::: error....bio not allocated \n");
		goto fail;
	}

	/* point the fresh bio at the metadata sector on the same bdev */
	imd_bio->bi_bdev = original_bio->bi_bdev;
	imd_bio->bi_sector = imd_sector;
	imd_bio->bi_size = 0;
	imd_bio->bi_rw = READ;
	imd_bio->bi_vcnt = 0;

	/* a single 512-byte sector of metadata */
	if (bio_add_page(imd_bio,imd_page,512,0) == 0)
	{
		printk(KERN_CRIT "\n in function prepare_imd_bio ::: error....page not added to bio\n");
		goto fail;
	}

	return imd_bio;

fail:
	if (imd_page)
		__free_page(imd_page);
	if (imd_bio) {
		imd_bio->bi_end_io = NULL;
		bio_put(imd_bio);
	}
	return NULL;
}

/*
 * Read path for a zone-contained bio: remap the data bio's sector past
 * the interleaved imd sectors and submit it together with a read of
 * the zone's imd sector. Both completions go to write_end_io() via the
 * nice_payload installed by handle_nice_bio().
 *
 * NOTE(review): the imd data read here is never compared against the
 * data CRCs — verification appears unimplemented; confirm intent.
 */
static void read_nice_bio(struct bio * bio, struct evil_payload * evil_payload)
{
	struct bio *imd_bio;
	sector_t imd_sector;
	struct nice_payload *nice_payload;

	BUG_ON(bio == NULL);

  	nice_payload = bio->bi_private;
	BUG_ON(nice_payload == NULL);
	
	imd_sector = get_imd_sector(bio->bi_sector);
	imd_bio = prepare_imd_bio(imd_sector,bio);	

	BUG_ON(imd_bio == NULL);

	imd_bio->bi_end_io = write_end_io;
	imd_bio->bi_private = nice_payload ;

	/* shift from the logical (imd-free) address space into the on-disk
	 * layout that interleaves one imd sector per 63 data sectors */
	bio->bi_sector = (bio->bi_sector + (imd_sectors_passed(bio->bi_sector)));

	submit_bio(bio->bi_rw, bio);	
	submit_bio(imd_bio->bi_rw, imd_bio);	
}


/* Calculate the CRC-CCITT of each 512-byte sector covered by @bio,
 * storing one 16-bit CRC per sector into @crc in order. Assumes crc[]
 * can hold at least bio_sectors(bio) entries (at most 64 here — one
 * zone's worth).
 * NOTE(review): assumes every bvec length is a multiple of 512;
 * otherwise left_in_bvec (unsigned) underflows and the loop overruns —
 * confirm all callers satisfy this. */
static void crc_sectors_from_bio(struct bio *bio,u16 crc[64])
{
	int segno;
	struct bio_vec *bvec;
	unsigned long flags;
	unsigned int sectors;
	size_t len;
	u16 current_crc;
	unsigned char *temp_data_buf,*buffer;
	u16 *temp_crc;
	
	/* bytes left in the current bvec */
	unsigned int left_in_bvec;

	temp_crc = crc;

	sectors = bio_sectors(bio);

	current_crc = (u16)0;
	segno = 0;

	bio_for_each_segment(bvec,bio,segno) {
		
		/* map the segment; kmap_irq because this may run in atomic ctx */
		temp_data_buf = bvec_kmap_irq(bvec,&flags);

		buffer = temp_data_buf;	
		left_in_bvec = bvec->bv_len;

		while(left_in_bvec && sectors)
		{

			/* one CRC per 512-byte sector, each seeded from 0 */
			len = 512;
		
			current_crc = crc_ccitt(current_crc,buffer,len);
			printk(KERN_CRIT "\n crc_sectors_from_bio : current_crc obtained... current_crc = %u \n",current_crc);

			*temp_crc = current_crc;
			temp_crc++;

			current_crc = (u16)0;

			buffer += len;

			left_in_bvec -= len;
			sectors--;
		}

		bvec_kunmap_irq(temp_data_buf,&flags);
	}
}

/*
 * Workqueue handler (runs on "radc-writes"): compute per-sector CRCs
 * of the pending data bio, patch them into the just-read copy of the
 * zone's imd sector, then submit the imd write followed by the data
 * write. Both completions are collected by write_end_io().
 *
 * NOTE(review): final_write_work->error (set when the imd read failed)
 * is ignored — a stale/garbage imd buffer would still be patched and
 * written back; confirm this is intended.
 * NOTE(review): csum is stored by casting u16 to __be16 without
 * cpu_to_be16(), so the on-disk byte order is host-dependent.
 */
static void final_write(struct work_struct *work)
{
	struct bio *data, *imd_bio;
	struct nice_payload *nice_payload;
	struct final_write_work *final_write_work;
	char *temp_imd_buf;
	sector_t num_of_sectors,imd_tuple_number_of_bio;
	struct bio_vec *bvec;
	struct imd_tuple *imd_tuple, *temp_imd_tuple;
	unsigned long flags;
	u16 crc[64];

	final_write_work = container_of(work,struct final_write_work,work1);
	nice_payload = final_write_work->nice_payload;
	BUG_ON(nice_payload == NULL);

	data = nice_payload->bio_parts[0];
	BUG_ON(data == NULL);
	imd_bio = nice_payload->bio_parts[1];
	BUG_ON(imd_bio == NULL);

	crc_sectors_from_bio(nice_payload->bio_parts[0],crc);
		
	num_of_sectors = bio_sectors(nice_payload->bio_parts[0]);

	/* offset (in tuples) of this bio's first sector within its zone:
	 * on-disk sector number modulo the 64-sector zone size */
	imd_tuple_number_of_bio = (nice_payload->bio_parts[0]->bi_sector - (((unsigned long)(nice_payload->bio_parts[0]->bi_sector/64))*64));


	bvec = &imd_bio->bi_io_vec[0];

	temp_imd_buf = bvec_kmap_irq(bvec,&flags);

	imd_tuple = (struct imd_tuple *)temp_imd_buf;
	/* patch the freshly computed CRCs into the imd buffer, last first */
	while(num_of_sectors > 0)
	{
		temp_imd_tuple = (imd_tuple + (num_of_sectors - 1) + imd_tuple_number_of_bio);

		(temp_imd_tuple)->csum = (__be16)(*(crc + (num_of_sectors - 1)));

		num_of_sectors = num_of_sectors - 1;
	}				
	
	bvec_kunmap_irq(temp_imd_buf,&flags);
	/* reuse the imd bio (buffer now updated) as the write-back bio */
	imd_bio->bi_end_io = write_end_io;
	imd_bio->bi_private = nice_payload;
	imd_bio->bi_rw = WRITE;
	
	submit_bio(imd_bio->bi_rw,imd_bio);
	submit_bio(data->bi_rw,data);

	kfree(final_write_work);
}

static void queue_for_final_write(struct bio *imd_bio, int error)
{
	struct final_write_work *final_write_work = imd_bio->bi_private;

	if (error)
		final_write_work->error = error;

	final_write_work->nice_payload->bio_parts[1] = imd_bio;

	INIT_WORK(&(final_write_work->work1), final_write);
	queue_work(pending_writes,&(final_write_work->work1));
}

/*
 * Write path for a zone-contained bio: stash the data bio in the
 * payload, remap its sector into the on-disk layout, and kick off a
 * READ of the zone's imd sector. The data write itself is submitted
 * later, from final_write(), once the CRCs have been patched into the
 * imd buffer (read-modify-write of the imd sector).
 *
 * NOTE(review): final_write_work->error and ->original_bio are left
 * uninitialized (kmalloc garbage); harmless today since neither is
 * read meaningfully, but initialize them if that changes.
 */
static void write_nice_bio(struct bio * bio, struct evil_payload * evil_payload)
{
	struct bio *imd_bio;
	sector_t imd_sector;
	struct nice_payload *nice_payload;
	struct final_write_work *final_write_work;

	BUG_ON(bio == NULL);

	nice_payload = bio->bi_private;
	BUG_ON(nice_payload == NULL);

	nice_payload->bio_parts[0] = bio;	

	imd_sector = get_imd_sector(bio->bi_sector);
	imd_bio = prepare_imd_bio(imd_sector,bio);	

	BUG_ON(imd_bio == NULL);

  	final_write_work = kmalloc(sizeof(struct final_write_work),GFP_NOIO);
	BUG_ON(final_write_work == NULL);

	final_write_work->nice_payload = nice_payload;

	imd_bio->bi_private = final_write_work;
	imd_bio->bi_end_io = queue_for_final_write;

	/* remap into the interleaved on-disk layout before final_write runs */
	bio->bi_sector = (bio->bi_sector + (imd_sectors_passed(bio->bi_sector)));

	submit_bio(imd_bio->bi_rw, imd_bio);	
}

/*
 * Entry point for a bio known to lie within a single zone: allocate
 * the nice_payload (two sub-bios outstanding: data + imd), install the
 * shared completion callback and dispatch to the read or write path.
 */
static void handle_nice_bio(struct bio * bio, struct evil_payload * evil_payload)
{
	struct nice_payload * payload;

	payload = kmalloc(sizeof(*payload),GFP_NOIO);
	BUG_ON(payload == NULL);

	payload->count = 2;	/* data bio + imd bio */
	payload->evil_payload = evil_payload;
	payload->corrupt_mirrors = 0;

	bio->bi_private = payload;
	bio->bi_end_io = write_end_io;

	if((bio->bi_rw & WRITE) == WRITE)
		write_nice_bio(bio,evil_payload);
	else
		read_nice_bio(bio,evil_payload);
}


/*
 * Prepare clone @bio to cover the unadjusted sector range
 * [@start, @end_of_bio] of @original_bio: advance bi_idx to the bvec
 * that contains @start (trimming bv_offset/bv_len when @start falls
 * mid-bvec), then set bi_sector and bi_size accordingly and mark the
 * fragment as a barrier request.
 */
static void set_nice_fields(struct bio *original_bio,struct bio *bio, sector_t start, sector_t end_of_bio)
{
	struct bio_vec *bvec;
	int i = 0,current_bvec_no=0;
	sector_t old_len;	
	/* running sector count: original start sector + sectors scanned */
	sector_t len = original_bio->bi_sector;

	old_len = original_bio->bi_sector;

	bio_for_each_segment(bvec,original_bio,i)
	{
		len += (bvec->bv_len >> 9);
		current_bvec_no++;
		if(len >= start)
		{
			if(len==start)
				/* @start begins exactly at the next bvec */
				bio->bi_idx = (current_bvec_no);
			else
			{
				/* @start falls inside this bvec: trim its head */
				bio->bi_idx = (current_bvec_no-1);

				bio->bi_io_vec[bio->bi_idx].bv_offset += (( start - old_len )*512);
				bio->bi_io_vec[bio->bi_idx].bv_len -= ((start - old_len) * 512);
			}
			break;
		}
		old_len = len;
	}

	bio->bi_sector = start;
	bio->bi_size = (( (end_of_bio-start) +1)*512);
	bio->bi_rw |= (1 << BIO_RW_BARRIER);
}

/*
 * Split a zone-crossing bio into per-zone "nice" fragments and hand
 * each to handle_nice_bio(). Fragments are clones of the original bio;
 * evil_payload->evil_count records how many must complete before the
 * original bio is ended (see write_end_io()).
 *
 * Split layout (all unadjusted sector numbers):
 *   first fragment : @start .. end of its zone
 *   middle fragments: whole 63-sector zones
 *   last fragment  : remainder up to @end
 */
static void handle_evil_bio(struct bio * bio, struct evil_payload * evil_payload)
{
	sector_t end_of_temp;
	struct bio *temp;
	sector_t start,end;

	//start and end should be non adjusted

	start = bio->bi_sector;
	end = (bio->bi_sector + (bio->bi_size >> 9)) - 1;

	/* one fragment per zone touched */
	evil_payload->evil_count = ((get_zone_number(end)) - (get_zone_number(start))) + 1;

	/* last sector of the first zone (zone*63 + 62) */
	end_of_temp = (((get_zone_number(bio->bi_sector)) * 63) + 62) ;
	
	temp = bio_clone(bio,GFP_NOIO);	
	if(temp == NULL)
	{
		printk(KERN_CRIT "\nhandle_evil_bio : BIO IS NULL... \n");
		BUG_ON(temp == NULL);
	}

	set_nice_fields(bio,temp, temp->bi_sector, end_of_temp);

	handle_nice_bio(temp,evil_payload);

	temp = bio_clone(bio,GFP_NOIO);
	if(temp == NULL)
	{
		printk(KERN_CRIT "\nhandle_evil_bio : BIO IS NULL... \n");
		BUG_ON(temp == NULL);
	}

	temp->bi_sector = end_of_temp + 1;

	/* emit full 63-sector middle fragments until only the tail is left */
	while(1)
	{
		if((temp->bi_sector + 62) < end)
		{
			end_of_temp = temp->bi_sector + 62;
			set_nice_fields(bio,temp, temp->bi_sector, end_of_temp);

			handle_nice_bio(temp,evil_payload);
		
			/* previous clone has been submitted; clone again for the
			 * next fragment */

			temp = bio_clone(bio,GFP_NOIO);
			if(temp == NULL)
			{
				printk(KERN_CRIT "\nhandle_evil_bio : BIO IS NULL... \n");
				BUG_ON(temp == NULL);
			}

			temp->bi_sector = end_of_temp + 1;
		}
		else
			break;
	}

	/* final (possibly short) fragment up to @end */
	end_of_temp = end;
	set_nice_fields(bio,temp, temp->bi_sector, end_of_temp);

	handle_nice_bio(temp,evil_payload);
}

/*
 * Device-mapper map function: redirect @bio to the backing device,
 * splitting it first when it crosses a zone boundary. All sub-bios are
 * submitted from here (or from the workqueue), so we always return
 * DM_MAPIO_SUBMITTED.
 *
 * Fixed: the bio_clone() result on the nice path was used without a
 * NULL check (every other clone site in this file BUG_ONs); a failed
 * clone would have been dereferenced in handle_nice_bio().
 */
static int radc_map(struct dm_target *ti, struct bio *bio,union map_info *map_context)
{
	struct my_dm_target *mdt = (struct my_dm_target *) ti->private;
	struct bio *data1;
	struct evil_payload *evil_payload=NULL;

	bio->bi_bdev = mdt->dev->bdev;

	evil_payload = kmalloc(sizeof(struct evil_payload ),GFP_NOIO);	
	BUG_ON(evil_payload==NULL);
	
	evil_payload->original_bio = bio;

	if(is_bio_evil(bio))
	{
		handle_evil_bio(bio,evil_payload);
	}
	else		
	{				
		/* single fragment: write_end_io ends the original when the
		 * clone completes (evil_count goes 0 -> -1, i.e. <= 0) */
		evil_payload->evil_count = 0;

		data1 = bio_clone(bio,GFP_NOIO);	
		BUG_ON(data1 == NULL);
		handle_nice_bio(data1,evil_payload);
	}
	return DM_MAPIO_SUBMITTED;
}

static int radc_ctr(struct dm_target *ti,unsigned int argc,char **argv)
{
	struct my_dm_target *mdt;
	unsigned long long start;

	if (argc != 2) {
		printk(KERN_CRIT "\n Invalid no.of arguments.\n");
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	mdt = kmalloc(sizeof(struct my_dm_target), GFP_KERNEL);
	if(mdt==NULL)
	{
		printk(KERN_CRIT "\n Mdt is null\n");
		ti->error = "dm-radc: Cannot allocate linear context";
		return -ENOMEM;
	}	

	if(sscanf(argv[1], "%llu", &start)!=1)
	{
		ti->error = "dm-radc: Invalid device sector";
		goto bad;
	}

	mdt->start=(sector_t)start;

	//update length of device to have complete zones only
	ti->len = (unsigned long)((ti->len >> 6) * 63);

	printk(KERN_CRIT "\n Length available to radc user : start = %llu  len = %llu",ti->begin, ti->len);
	printk(KERN_CRIT "\n Length RADC is using : start = %llu len = %llu",ti->begin,ti->len + imd_sectors_passed(ti->len));


 	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &mdt->dev)) {
		ti->error = "dm-radc: Device lookup failed";
		goto bad;
	}

	ti->private = mdt;
	pending_writes = create_workqueue("radc-writes");

	return 0;

  bad:
	kfree(mdt);
	return -EINVAL;
}

/*
 * Destructor: tear down in reverse order of construction.
 *
 * Fixed: the workqueue is now flushed/destroyed BEFORE the device
 * reference is dropped — queued final_write() work items submit bios
 * to mdt->dev, so putting the device first raced with pending work.
 */
static void radc_dtr(struct dm_target *ti)
{
	struct my_dm_target *mdt = (struct my_dm_target *) ti->private;

	destroy_workqueue(pending_writes);
	dm_put_device(ti, mdt->dev);
	kfree(mdt);
}

/* Target registration descriptor: exposes this module to device-mapper
 * as target type "radc" (dmsetup table line: "<start> <len> radc ..."). */
static struct target_type radc_target = {

	.name = "radc",
	.version = {1,0,0},
	.module = THIS_MODULE,
	.ctr = radc_ctr,
	.dtr = radc_dtr,
	.map = radc_map,
};
	
/*
 * Module init: register the "radc" target with device-mapper.
 *
 * Fixed: a registration failure was logged but the function still
 * returned 0, leaving the module loaded in a broken state; propagate
 * the error so insmod fails instead.
 */
static int init_radc_target(void)
{
	int result;

	result = dm_register_target(&radc_target);
	if(result < 0)
		printk(KERN_CRIT "\n Error in registering target \n");

	return result;
}

/* Module exit: unregister the "radc" target from device-mapper. */
static void cleanup_radc_target(void)
{
	dm_unregister_target(&radc_target);
}

/* Module entry/exit hooks and license declaration. */
module_init(init_radc_target);
module_exit(cleanup_radc_target);
MODULE_LICENSE("GPL");
