/**
 * ypdisk.c -- New member training, step 4
 *
 * Author: yapngpeng
 **/

#include "ypdisk.h"

/*
 * Module-wide state
 *
 * ypdisk_major: block device major number (may be reassigned at init)
 *
 * yp_cd, yp_mm, yp_wb, yp_be: the four driver levels -- lower common
 * disk, in-memory cache, writeback machinery, bio exchange -- allocated
 * in ypdisk_init() and released by cleanup()
 *
 * bit_mask: per-sector masks used for bitwise operations on the
 * page/disk bitmaps (bit i covers sector i within a 4096-byte page)
 */
static int ypdisk_major = YPDISK_MAJOR;

static struct yp_comm_disk * yp_cd = NULL;
static struct yp_writeback * yp_wb = NULL;
static struct yp_mem * yp_mm = NULL;
static struct yp_bio_exc * yp_be = NULL;

static char bit_mask[8] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80};

/*
 * print_bio - dump a bio's geometry to the kernel log (debug aid)
 *
 * Fix: the walk advanced bio->bi_idx and never restored it, leaving the
 * bio unusable for the caller; the index is now saved and put back.
 */
void print_bio(struct bio * bio) {
	int i;
	struct bio_vec * bvec;
	unsigned short saved_idx = bio->bi_idx;

	printk(KERN_ERR "bi_size = %u ( %u sectors) \n", bio->bi_size, (bio->bi_size) >> 9);
	printk(KERN_ERR "bi_vcnt = %u \n", bio->bi_vcnt);

	for (i=0; i < bio->bi_vcnt; i++) {
		printk(KERN_ERR "In bi_io_vec[%d], bv_len = %u (%u sectors) \n",
			 i, (bio->bi_io_vec[i]).bv_len, (bio->bi_io_vec[i].bv_len) >> 9);
	}

	printk(KERN_ERR "use bio_for_each_segment and bio_cur_sectors \n");
	i=0;
	bio_for_each_segment(bvec, bio, i) {
		printk(KERN_ERR "segment is %d, bio_cur_sectors = %d \n", i, bio_cur_sectors(bio));
		/* bio_cur_sectors() reads bi_idx, so step it manually */
		bio->bi_idx ++;
	}
	bio->bi_idx = saved_idx;	/* undo the walk for the caller */
}
/*
 * yp_bio_endio - callback fuction 
 * used for create_dbmp(), disk_read() and save_dbmp_last() 
 */
static int yp_bio_endio(struct bio *bio, unsigned int bytes_done, int error){
	struct bio_data *priv = bio->bi_private;

	PDEBUG("Enter yp_bio_endio\n");
	priv->flag = 1;
	priv->done = bytes_done;
	priv->err = error;
	
	bio_put(bio);

	wake_up_interruptible(priv->queue);
	return 0;
}

/*
 * create_dbmp - read (or initialise) the on-disk label and disk bitmap
 *
 * Synchronously reads sector 0 (512-byte label) plus the following page
 * (4096-byte bitmap) from the lower device.  If the label is not
 * "ypdisk", the in-memory label and bitmap are re-initialised; they are
 * persisted later by save_dbmp()/save_dbmp_last().
 *
 * Returns 0 on success, a negative errno on failure, -1 on short read.
 */
static int create_dbmp(struct yp_comm_disk * cd, struct page *label_p, struct page * dbmp_p) {
	struct bio * bio = NULL;
	struct bio_data * priv = NULL;
	char * tmp_label = NULL, * tmp_dbmp = NULL;
	int ret = 0, done = 0;

	PDEBUG("Enter create_dbmp \n");
	priv = kmalloc(sizeof(struct bio_data), GFP_KERNEL);
	if (priv == NULL) {
		return -ENOMEM;
	}

	priv->queue = &(cd->read_queue);
	priv->flag = 0;
	priv->done = 0;
	priv->err = 0;

	bio = bio_alloc(GFP_NOIO, 2);
	if (bio == NULL) {	/* was dereferenced without a check */
		kfree(priv);
		return -ENOMEM;
	}

	bio->bi_sector = 0;
	bio->bi_bdev = cd->low_bdev;

	bio->bi_io_vec[0].bv_page = label_p;
	bio->bi_io_vec[0].bv_len = 512;
	bio->bi_io_vec[0].bv_offset = 0;

	bio->bi_io_vec[1].bv_page = dbmp_p;
	bio->bi_io_vec[1].bv_len = 4096;
	bio->bi_io_vec[1].bv_offset = 0;

	bio->bi_vcnt = 2;
	bio->bi_idx = 0;
	bio->bi_size = 4608;	/* 512 + 4096 */

	bio->bi_end_io = yp_bio_endio;
	bio->bi_private = priv;
	bio_get(bio);
	submit_bio(READ, bio);
	bio_put(bio);

	while (!priv->flag) {
		if (wait_event_interruptible(cd->read_queue, priv->flag)){
			/* priv is deliberately NOT freed here: the in-flight
			 * bio's completion handler still writes into it */
			return -ERESTARTSYS;
		}
	}

	ret = priv->err;
	done = priv -> done;
	kfree(priv);

	if (ret < 0)
		return ret;
	if (done != 4608)
		return -1;

	tmp_label = page_address(label_p);
	tmp_dbmp = page_address(dbmp_p);

	if (strncmp("ypdisk", tmp_label, 6)) {
		PDEBUG(KERN_ERR "Not ypdisk \n");
		/* first use of this backing device: stamp the label and
		 * start from an empty bitmap */
		strncpy(tmp_label, "ypdisk", 6);
		memset(tmp_dbmp, 0, 4096);
	} else
		PDEBUG(KERN_ERR "It's a ypdisk \n");

	return 0;
}

/*
 * disk_read - synchronously read one 4096-byte page from disk
 *
 * Submits a single-segment READ bio into the tree node's page and
 * sleeps on cd->read_queue until yp_bio_endio() reports completion.
 *
 * Returns 0 on success, a negative errno on failure, -1 on short read.
 */
static int disk_read(struct yp_comm_disk * cd, struct tree_node * p, unsigned long sector){
	struct bio * bio = NULL;
	struct bio_data * priv = NULL;
	int ret = 0, done = 0;

	PDEBUG("Enter disk_read \n");
	priv = kmalloc(sizeof (struct bio_data), GFP_KERNEL);
	if (priv == NULL) {
		return -ENOMEM;
	}

	priv->queue = &(cd->read_queue);
	priv->flag = 0;
	priv->done = 0;
	priv->err = 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (bio == NULL) {	/* was dereferenced without a check */
		kfree(priv);
		return -ENOMEM;
	}

	bio->bi_sector = sector + DISK_OFFSET;
	bio->bi_bdev = cd->low_bdev;

	bio->bi_io_vec[0].bv_page = p->page_p;
	bio->bi_io_vec[0].bv_len = 4096;
	bio->bi_io_vec[0].bv_offset = 0;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = 4096;

	bio->bi_end_io = yp_bio_endio;
	bio->bi_private = priv;
	bio_get(bio);
	submit_bio(READ, bio);
	bio_put(bio);

	while (!priv->flag)  {
		if (wait_event_interruptible(cd->read_queue, priv->flag)) {
			/* priv is deliberately NOT freed: the in-flight bio's
			 * completion handler still writes into it */
			return -ERESTARTSYS;
		}
	}

	ret = priv->err;
	done = priv->done;
	kfree(priv);

	if (ret < 0)
		return ret;
	if (done != 4096)
		return -1;

	return 0;
}

/*
 * save_dbmp_endio - callback function, used for save_dbmp 
 */
static int save_dbmp_endio (struct bio * bio, unsigned int bytes_done, int error) {
        bio_put(bio);
        return 0;
}

/*
 * save_dbmp - asynchronously write the label and disk bitmap to disk
 *
 * Fire-and-forget: completion is handled by save_dbmp_endio(), which
 * just drops the bio.  Best-effort by design; errors are not reported.
 */
static void save_dbmp (struct yp_comm_disk * cd, struct page *label_p, struct page * dbmp_p) {
        struct bio * bio = bio_alloc(GFP_NOIO, 2);

        if (bio == NULL)	/* was dereferenced without a check */
                return;

        bio->bi_sector = 0;
        bio->bi_bdev = cd->low_bdev;

        bio->bi_io_vec[0].bv_page = label_p;
        bio->bi_io_vec[0].bv_len = 512;
        bio->bi_io_vec[0].bv_offset = 0;

        bio->bi_io_vec[1].bv_page = dbmp_p;
        bio->bi_io_vec[1].bv_len = 4096;
        bio->bi_io_vec[1].bv_offset = 0;

        bio->bi_vcnt = 2;
        bio->bi_idx = 0;
        bio->bi_size = 4608;	/* 512 + 4096 */

        bio->bi_end_io = save_dbmp_endio;
        bio->bi_private = NULL;
        bio_get(bio);
        submit_bio(WRITE, bio);
        bio_put(bio);
}

/*
 * save_dbmp_last - save the disk bitmap for the last time, synchronously
 *
 * On "rmmod ypdisk" the pages backing the label/bitmap are freed right
 * after this call, so the write must complete before we return.
 *
 * Fix: the bio used to be allocated before the priv NULL-check and was
 * leaked when the kmalloc failed; allocations are now ordered and both
 * checked.
 */
static int save_dbmp_last(struct yp_comm_disk * cd, struct page *label_p, struct page *dbmp_p) {
	struct bio * bio = NULL;
	struct bio_data * priv = NULL;
	int ret = 0, done = 0;

	priv = kmalloc(sizeof (struct bio_data), GFP_KERNEL);
	if (priv == NULL) {
		return -ENOMEM;
	}

	bio = bio_alloc(GFP_NOIO, 2);
	if (bio == NULL) {
		kfree(priv);
		return -ENOMEM;
	}

	priv->queue = &(cd->dbmp_queue);
	priv->flag = 0;
	priv->done = 0;
	priv->err = 0;

	bio->bi_sector = 0;
	bio->bi_bdev = cd->low_bdev;

	bio->bi_io_vec[0].bv_page = label_p;
	bio->bi_io_vec[0].bv_len = 512;
	bio->bi_io_vec[0].bv_offset = 0;

	bio->bi_io_vec[1].bv_page = dbmp_p;
	bio->bi_io_vec[1].bv_len = 4096;
	bio->bi_io_vec[1].bv_offset = 0;

	bio->bi_vcnt = 2;
	bio->bi_idx = 0;
	bio->bi_size = 4608;	/* 512 + 4096 */

	bio->bi_end_io = yp_bio_endio;
	bio->bi_private = priv;
	bio_get(bio);
	submit_bio(WRITE, bio);
	bio_put(bio);

	while (!priv->flag)  {
		if (wait_event_interruptible(cd->dbmp_queue, priv->flag)) {
			/* priv is deliberately NOT freed: the in-flight bio's
			 * completion handler still writes into it */
			return -ERESTARTSYS;
		}
	}

	ret = priv->err;
	done = priv->done;
	kfree(priv);

	if (ret < 0)
		return ret;
	if (done != 4608)
		return -1;

	return 0;
}

/*
 * write_endio - completion callback for normal page writeback
 *
 * disk_write() leaves the tree node locked; this unlocks it (waking any
 * waiters), then releases the private data and the bio reference --
 * both of which were previously leaked on every writeback.
 */
static int write_endio (struct bio *bio, unsigned int bytes_done, int error) {
        struct write_bio_data * priv = bio->bi_private;

        if (bio->bi_size)	/* partial completion: wait for the rest */
                return 1;

        yp_put_treenode(priv->p);

        kfree(priv);	/* allocated in disk_write(); was never freed */
        bio_put(bio);	/* drop the bio_alloc() reference; was leaked */
        return 0;
}

/*
 * write_endio_last - completion callback for the final (teardown) pass
 *
 * Instead of unlocking the node it frees the node's data page: the node
 * is left locked on purpose so yp_mem_destroy() skips freeing the page
 * a second time.  The private data and bio reference were leaked before.
 */
static int write_endio_last (struct bio *bio, unsigned int bytes_done, int error) {
        struct write_bio_data * priv = bio->bi_private;

        if (bio->bi_size)	/* partial completion: wait for the rest */
                return 1;

        if (priv->p != NULL) {
                __free_page(priv->p->page_p);
                priv->p = NULL;
        }

        kfree(priv);	/* was leaked */
        bio_put(bio);	/* was leaked */
        return 0;
}

/*
 * disk_write - asynchronously write one cached page back to disk
 *
 * The caller holds the node locked; ownership of the lock (and of priv)
 * passes to the completion callback: write_endio() unlocks the node,
 * write_endio_last() (is_last == 1, teardown) frees its page instead.
 */
static int disk_write (struct yp_mem * mm, struct yp_comm_disk * cd, 
                struct tree_node * p, unsigned long sector, int is_last){
	struct bio * bio = NULL;
	struct write_bio_data * priv = NULL;

	priv = kmalloc(sizeof (struct write_bio_data), GFP_KERNEL);
	if (priv == NULL) {
		return -ENOMEM;
	}

	priv->p = p;

	bio = bio_alloc(GFP_NOIO, 1);
	if (bio == NULL) {	/* was dereferenced without a check, leaking priv */
		kfree(priv);
		return -ENOMEM;
	}

	bio->bi_sector = sector + DISK_OFFSET;
	bio->bi_bdev = cd->low_bdev;

	bio->bi_io_vec[0].bv_page = p->page_p;
	bio->bi_io_vec[0].bv_len = 4096;
	bio->bi_io_vec[0].bv_offset = 0;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = 4096;

	if (is_last == 1) {
		bio->bi_end_io = write_endio_last;
	} else {
		bio->bi_end_io = write_endio;
	}
	bio->bi_private = priv;
	bio_get(bio);
	submit_bio(WRITE, bio);
	bio_put(bio);

	return 0;
}

/*
 * yp_comm_disk_init - initiate the yp_comm_disk level
 *
 * Opens the lower block device (LOW_PATH) exclusively and initialises
 * the wait queues used by the synchronous read/bitmap-save paths.
 */
static int yp_comm_disk_init(struct yp_comm_disk * cd) {
	PDEBUG("Enter comm_disk_init \n");
	memset(cd, 0, sizeof (struct yp_comm_disk));

	cd->low_bdev = open_bdev_excl(LOW_PATH, 0, NULL);
	if (IS_ERR(cd->low_bdev)) {
		/* the message hardcoded "/dev/sdc" while the path is LOW_PATH */
		printk(KERN_ERR "ypdisk: open %s failed \n", LOW_PATH);
		return -ENODEV;
	}

	init_waitqueue_head(&cd->read_queue);
	init_waitqueue_head(&cd->dbmp_queue);
	return 0;
}

/*
 * yp_comm_disk_destroy - destroy the yp_comm_disk level
 *
 * Releases the lower block device, but only if it was actually opened
 * (the pointer may be NULL or an ERR_PTR after a failed init).
 */
static void yp_comm_disk_destroy(struct yp_comm_disk * cd) {
	PDEBUG("Enter comm_disk_destroy \n");
	if (cd->low_bdev == NULL || IS_ERR(cd->low_bdev))
		return;
	close_bdev_excl(cd->low_bdev);
}

/* 
 * trigger_thread_wb - kick the periodic writeback thread
 *
 * Wakes the kthread created (stopped) by yp_writeback_init(); once
 * running, the thread re-executes every INTERVAL seconds on its own
 * (see thread_wb()).
 */
static inline void trigger_thread_wb(struct yp_writeback * wb) {
        wake_up_process(wb->wb_thread);
}

/* 
 * thread_wb - function associated to writeback thread
 */
static int thread_wb(void * mm) {
        unsigned long timeout = INTERVAL * HZ;
        while (1) {
                if (kthread_should_stop())
                        break;
                while (timeout > 0)
                        timeout = schedule_timeout(timeout);
                timeout = INTERVAL * HZ;

                write_dirty_pages((struct yp_mem *) mm, 1);
        }
        return 0;
}

/*
 * trigger_work_wb - queue one batch of writeback on the shared workqueue
 *
 * Schedules wb_work (bound to work_wb() in yp_writeback_init()); the
 * batch size is bounded by WORK_NUM inside write_dirty_pages().
 */
static inline void trigger_work_wb(struct yp_writeback * wb) {
        schedule_work(wb->wb_work);
}

/*
 * work_wb - workqueue handler: write back a bounded batch (type 0)
 */
static void work_wb(void * mm) {
        write_dirty_pages((struct yp_mem *) mm, 0);
}

/*
 * yp_writeback_init - initiate the writeback level
 *
 * Allocates the work struct used by the workqueue path and creates
 * (but does not start) the periodic writeback kthread.
 *
 * Fixes: log level was missing on the kthread error message, and on
 * failure the stale wb_work/wb_thread pointers let a later
 * yp_writeback_destroy() double-free the work struct and call
 * kthread_stop() on an ERR_PTR.
 */
static int yp_writeback_init(struct yp_writeback * wb) {
        int ret;

        memset(wb, 0, sizeof (struct yp_writeback));

        wb->wb_work = kmalloc(sizeof (struct work_struct), GFP_KERNEL);
        if (wb->wb_work == NULL) {
                printk(KERN_ERR "alloc work list failed \n");
                return -ENOMEM;
        }
        INIT_WORK(wb->wb_work, work_wb, yp_mm);

        wb->wb_thread = kthread_create(thread_wb, yp_mm, "Writeback thread");
        if (IS_ERR(wb->wb_thread)) {
                ret = PTR_ERR(wb->wb_thread);
                printk(KERN_ERR "Unable to create a kthread \n");
                kfree(wb->wb_work);
                /* clear stale pointers so destroy cannot misuse them */
                wb->wb_work = NULL;
                wb->wb_thread = NULL;
                return ret;
        }
        return 0;
}

/*
 * yp_writeback_destroy - destroy yp_writeback_destroy
 */
static void yp_writeback_destroy(struct yp_writeback * wb) {
        PDEBUG("Enter yp_writeback_destroy \n");
        kthread_stop(wb->wb_thread);
        flush_scheduled_work();
        kfree(wb->wb_work);
}

/*
 * Bitmap helpers
 *
 * yp_tests_dbmp - non-zero iff any sector of page `index` exists on disk
 */
static inline int yp_tests_dbmp(struct yp_mem *mm, int index){
	return mm->disk_bmp[index] != 0;
}
/* yp_gets_dbmp - fetch the on-disk sector bitmap byte for page `index` */
static inline char yp_gets_dbmp(struct yp_mem *mm, int index) {
	return mm->disk_bmp[index];
}
/* yp_sets_dbmp - merge a written-back node's in-page bitmap into the
 * on-disk bitmap byte for its page */
static inline void yp_sets_dbmp(struct yp_mem *mm, struct dirty_list * d_node) {
        int index = d_node->index;

	mm->disk_bmp[index] |= d_node->tree_p->page_bits;
}
/* yp_test_pbmp - 1 if `sector` holds valid data in the node's page bitmap */
static inline int yp_test_pbmp(struct tree_node *p, int sector) {
	return (p->page_bits & bit_mask[sector % 8]) ? 1 : 0;
}
/* yp_set_pbmp - record that `sector` now holds valid data in the page */
static inline void yp_set_pbmp(struct tree_node *p, int sector) {
	p->page_bits |= bit_mask[sector % 8];
}

/*
 * yp_get_treenode - take a node's exclusive lock, sleeping if needed
 *
 * The is_locked flag is guarded by the spinlock; the spinlock is
 * dropped before sleeping on the node's wait queue.  Returns 0 on
 * success, -ERESTARTSYS if the sleep was interrupted by a signal.
 */
static int yp_get_treenode(struct tree_node *p) {
	spin_lock(p->page_lock);
	while (p->is_locked) {
		/* must not sleep holding the spinlock */
		spin_unlock(p->page_lock);
		if (wait_event_interruptible(*(p->page_queue), !p->is_locked)){
			return -ERESTARTSYS;
		}
		/* re-check under the lock: another waiter may have won */
		spin_lock(p->page_lock);
	}
	p->is_locked = 1;
	spin_unlock(p->page_lock);

        return 0;
}

/* yp_put_treenode - release the node's lock and wake any waiters */
static inline void yp_put_treenode(struct tree_node *p) {
        spin_lock(p->page_lock);
        p->is_locked = 0;
        spin_unlock(p->page_lock);
        wake_up_interruptible(p->page_queue);
}

/* is_treenode_locked - snapshot of the node's lock flag (may be stale
 * by the time the caller uses it) */
static inline int is_treenode_locked(struct tree_node *p) {
        int locked;

        spin_lock(p->page_lock);
        locked = p->is_locked;
        spin_unlock(p->page_lock);
        return locked;
}

/*
 * test / set / clear a treenode's dirty tag
 *
 * Note: the caller must hold the treenode lock (yp_get_treenode())
 * before calling any of these.
 */
static inline int is_treenode_dirty(struct tree_node *p) {
        return p->d_flag;
}
static inline void set_treenode_dirty(struct tree_node *p) {
        p->d_flag = 1;
}
static inline void clear_treenode_dirty(struct tree_node *p) {
        p->d_flag = 0;
}

/*
 * init_treenode - allocate the data page and reset a fresh tree node
 *
 * Returns 0 on success.  On page-allocation failure the node itself is
 * kfree()d HERE, so the caller must not free p after a non-zero return.
 */
static inline int init_treenode(struct tree_node *p, struct yp_mem * mm) {

	p->page_p = alloc_page(GFP_KERNEL);
	if (p->page_p == NULL) {
		printk(KERN_ERR "ypdisk: alloc page failed \n");
		kfree(p);
		return -ENOMEM;
	}
	p->d_flag = 0;
        p->is_locked = 0;
        p->page_bits = 0x0; 
        /* lock and wait queue are shared, owned by the yp_mem level */
        p->page_lock = &mm->page_lock;
        p->page_queue = &mm->page_queue;

        return 0;
}

/*
 * write_dirty_pages - write dirty pages back to disk
 *
 * type = 0: workqueue path, write back at most WORK_NUM nodes
 * type = 1: kthread path, write back nodes dirtied more than INTERVAL
 *           seconds ago
 * type = 2: teardown path, write back every dirty node
 *
 * Afterwards the label/bitmap pages are persisted: asynchronously for
 * types 0/1, synchronously for type 2 because those pages are about to
 * be freed (see save_dbmp_last()).
 *
 * Fix: on a successful writeback the dirty_list entry left the list but
 * was never kfree()d -- leaked on every write-out.
 */
static void write_dirty_pages(struct yp_mem *mm, int type) {
        struct list_head * p = NULL;
        struct dirty_list * d_node = NULL;
        int index, wb_num = 0;
        unsigned long timestamp, expired_stamp = jiffies - HZ * INTERVAL;

        PDEBUG("type is %d \n", type);

        while (1) {
                spin_lock(&mm->list_lock);

                /* list is FIFO (add_tail), so the head is the oldest entry */
                p = (&mm->d_list)->next;

                if (p == &mm->d_list) {
                        spin_unlock(&mm->list_lock);
                        break;
                }

                d_node = list_entry(p, struct dirty_list, list);
                timestamp = d_node->timestamp;

                /* kthread pass: stop once entries are too young */
                if (type == 1 && time_after(timestamp, expired_stamp)) {
                        spin_unlock(&mm->list_lock);
                        break;
                }

                /* workqueue pass: bounded batch size */
                if (type == 0 && wb_num >= WORK_NUM) {
                        spin_unlock(&mm->list_lock);
                        break;
                }

                list_del(p);
                spin_unlock(&mm->list_lock);

                atomic_dec(&mm->d_num);

                /* NOTE(review): return value ignored; in kthread/workqueue
                 * context a signal should not normally be pending -- TODO
                 * confirm */
                yp_get_treenode(d_node->tree_p);

                index = d_node->index;
                clear_treenode_dirty(d_node->tree_p);

                PDEBUG("disk write page %d to disk \n", index);
                if (disk_write(mm, yp_cd, d_node->tree_p, index << 3, type == 2) != 0)  {
                        /* submission failed: re-dirty and requeue the node */
                        set_treenode_dirty(d_node->tree_p);
                        d_node->timestamp = jiffies;
                        spin_lock(&mm->list_lock);
                        list_add_tail(&(d_node->list), &mm->d_list);
                        spin_unlock(&mm->list_lock);

                        yp_put_treenode(d_node->tree_p);

                        atomic_inc(&mm->d_num);
                } else {
                        /* node is in flight; the completion callback will
                         * unlock it (or free its page on teardown) */
                        yp_sets_dbmp(mm, d_node);
                        kfree(d_node);	/* was leaked on every success */
                }

                wb_num ++;
        }

        if (type == 2) {
               /* must complete before the bitmap pages are freed */
               while (save_dbmp_last(yp_cd, mm->label_page, mm->dbmp_page)!=0) {
                       schedule_timeout(HZ);
               }
        }
        else
                save_dbmp(yp_cd, mm->label_page, mm->dbmp_page);
}

/*
 * mem_write - write data to yp_mem
 *
 * if there are data on the disk, read them firstly
 * then write new data to the sector of yp_mem
 *
 * when write WORK_RATE times, check if d_num > DIRTY_THRESHOLD
 */
static int mem_write(struct yp_mem *mm, unsigned long sector,
		unsigned long nsect, char * buffer, int * count) {
	struct tree_node * p;
	char * tmp_p;
	int i = 0, index, offset;
	struct dirty_list * list_p;
	int ret = 0;
        static int thread_flag = 0;
	
        if (thread_flag == 0)
                trigger_thread_wb(yp_wb);

	for (; i < nsect; i++) {
		index = (sector + i) >> 3;
		offset = (sector + i) % 8;

                PDEBUG("mem write to sector %lu , index is %d \n", sector + i, index);
		down_interruptible(mm->sem_tree);
		p = radix_tree_lookup(&mm->ypdisk_tree, index);
		if (p == NULL) {
			p = kmalloc(sizeof (struct tree_node), GFP_KERNEL);
			if (p == NULL) {
				printk(KERN_ERR "ypdisk: kmalloc tree_node failed \n");
				up(mm->sem_tree);
				return -ENOMEM;
			}

                        if ((ret = init_treenode(p, mm)) != 0) {
                                up(mm->sem_tree);
                                return ret;
                        }
			radix_tree_preload(GFP_KERNEL);
			if (radix_tree_insert(&mm->ypdisk_tree, index, p)) {
				printk(KERN_ERR "ypdisk: radix tree inser failed \n");
				__free_page(p->page_p);
				kfree(p);
				up(mm->sem_tree);
				return -ENOMEM;
			}
			radix_tree_preload_end();
			up(mm->sem_tree);

			if (yp_tests_dbmp(mm, index)) {
                                PDEBUG("for index %d , disk_bitmap is not 0 \n", index);

                                if ((ret = yp_get_treenode(p)) < 0) {
                                        return ret;
                                }
                                PDEBUG("disk read page is %d \n", index);
				ret = disk_read(yp_cd, p, index << 3);
                                if (ret == 0) 
				        p->page_bits = yp_gets_dbmp(mm, index);

                                yp_put_treenode(p);
				if (ret < 0) 
					return ret;
				
			}		

		}
		else 
			up(mm->sem_tree);

                if ((ret = yp_get_treenode(p)) < 0) {
                        return ret;
                }

		tmp_p = page_address(p->page_p);
		tmp_p += KERNEL_SECTOR_SIZE * offset;
		memcpy(tmp_p, buffer, KERNEL_SECTOR_SIZE);
                yp_set_pbmp(p, sector + i);
		buffer += KERNEL_SECTOR_SIZE;
		*count += KERNEL_SECTOR_SIZE;

		if (!is_treenode_dirty(p)){
			set_treenode_dirty(p);
			list_p = kmalloc(sizeof(struct dirty_list), GFP_KERNEL);
			INIT_LIST_HEAD(&(list_p->list));
			list_p->index = index;
			list_p->timestamp = jiffies;
			list_p->tree_p = p;

                        spin_lock(&mm->list_lock);
			list_add_tail(&(list_p->list), &mm->d_list);
                        spin_unlock(&mm->list_lock);

			atomic_inc(&mm->d_num);
			atomic_inc(&mm->w_rate);
		}

                yp_put_treenode(p);

		if (atomic_read(&mm->w_rate) >= WORK_RATE) {
			atomic_sub(WORK_RATE, &mm->w_rate);
			if (atomic_read(&mm->d_num) >= DIRTY_THRESHOLD) {
                                trigger_work_wb(yp_wb);
			}
		}
	}
	return 0;
}

/*
 * mem_read - read data from yp_mem
 *
 * if there are data on the disk, read them firstly
 */
static int mem_read(struct yp_mem *mm, unsigned long sector,
		unsigned long nsect, char * buffer, int * count) {
	struct tree_node * p;
	int i = 0, index, offset;
	char * tmp_p;
	int ret = 0;
	
	PDEBUG("Enter mem_read \n");
	for (; i < nsect; i++) {
		index = (sector + i) >> 3;
		offset = (sector + i) % 8;

		down_interruptible(mm->sem_tree);
		p=radix_tree_lookup(&mm->ypdisk_tree, index);
                PDEBUG("mem read from sector is %lu index is %d \n", sector + i, index);

		if (p == NULL && yp_tests_dbmp(mm, index)) {
                        PDEBUG("index is %d, test ret value is not 0 \n", index);
		        p = kmalloc(sizeof(struct tree_node), GFP_KERNEL);
			if (p == NULL) {
	        		printk(KERN_ERR "ypdisk: kmalloc tree_node failed \n");
	        		up(mm->sem_tree);
				return -ENOMEM;
			}
                        if ((ret = init_treenode(p, mm)) != 0) {
                                up(mm->sem_tree);
                                return ret;
                        }

			radix_tree_preload(GFP_KERNEL);
			if (radix_tree_insert(&mm->ypdisk_tree, index, p)) {
				printk(KERN_ERR "ypdisk: radix tree insert failed \n");
				__free_page(p->page_p);
				kfree(p);
				up(mm->sem_tree);
				return -ENOMEM;
			}
			radix_tree_preload_end();
			up(mm->sem_tree);

                        if ((ret = yp_get_treenode(p)) < 0) {
                                return ret;
                        }

                        PDEBUG("disk read page is %d \n", index);
			ret = disk_read(yp_cd, p, index << 3);
                        if (ret == 0)
		        	p->page_bits = yp_gets_dbmp(mm, index);

                        yp_put_treenode(p);

			if (ret < 0) {
				return ret;
			}
		} else 
			up(mm->sem_tree);
	
		if (p != NULL && yp_test_pbmp(p, sector + i)){
                        if ((ret = yp_get_treenode(p)) < 0) {
                                return ret;
                        }
                        
			tmp_p = page_address(p->page_p);
			tmp_p += KERNEL_SECTOR_SIZE * offset;
			memcpy(buffer, tmp_p, KERNEL_SECTOR_SIZE);

                        yp_put_treenode(p);
		} else {
			memset(buffer, 0, KERNEL_SECTOR_SIZE);
		}

		buffer += KERNEL_SECTOR_SIZE;
		*count += KERNEL_SECTOR_SIZE;
	}

        PDEBUG("In mem_read count is %d \n", *count);
	return 0;
}

/*
 * transfer_data - yp_mem's entry point for the bio exchange level
 *
 * Bounds-checks the request against the virtual disk size and then
 * dispatches to mem_write() or mem_read().
 */
static int transfer_data(struct yp_mem *mm, unsigned long sector,
		unsigned long nsect, char * buffer, int write, int *count) {
	unsigned long start_byte = sector * KERNEL_SECTOR_SIZE;
	unsigned long byte_count = nsect * KERNEL_SECTOR_SIZE;

	PDEBUG("Enter transfer_data \n");
	if (start_byte + byte_count > mm->size) {
		printk(KERN_ERR "ypdisk: beyond size \n");
		return -1;
	}

	return write ? mem_write(mm, sector, nsect, buffer, count)
		     : mem_read(mm, sector, nsect, buffer, count);
}

/* 
 * yp_mem_init - initiate yp_mem
 */
static int yp_mem_init(struct yp_mem * mm) {
	PDEBUG("Enter transfer_data \n");
	memset(mm, 0, sizeof(struct yp_mem));

	INIT_RADIX_TREE(&mm->ypdisk_tree, GFP_KERNEL);
	mm->sem_tree = kmalloc(sizeof(struct semaphore), GFP_KERNEL);
	if(mm->sem_tree == NULL) {
		printk(KERN_ERR "ypdisk: alloc tree_mem failed \n");
		goto out_free1;
	}
	init_MUTEX(mm->sem_tree);
	
	spin_lock_init(&mm->page_lock);
	init_waitqueue_head(&mm->page_queue);

	INIT_LIST_HEAD(&mm->d_list);
	spin_lock_init(&mm->list_lock);

	mm->label_page = alloc_page(GFP_KERNEL);
	if(!mm->label_page) {
		printk(KERN_ERR "ypdisk: alloc page for label failed \n");
		goto out_free2;
	}
	mm->label = page_address(mm->label_page);

	mm->dbmp_page = alloc_page(GFP_KERNEL);
	if(!mm->dbmp_page) {
		printk(KERN_ERR "ypdisk: alloc page for disk bitmap failed \n");
		goto out_free3;
	}
	mm->disk_bmp = page_address(mm->dbmp_page);

	create_dbmp(yp_cd, mm->label_page, mm->dbmp_page);

	atomic_set(&mm->d_num, 0);
	atomic_set(&mm->w_rate, 0);

	mm->size = KERNEL_SECTOR_SIZE * NSECTORS;

	return 0;

out_free3:
	__free_page(mm->label_page);
out_free2:
	kfree(mm->sem_tree);
out_free1:
	return -ENOMEM;
}

/*
 * yp_mem_destroy - destroy the yp_mem level
 *
 * Note: some pages may still be in flight to disk at this point.  A
 * locked node is assumed to be owned by a pending final write, whose
 * completion handler (write_endio_last) frees the page itself, so only
 * unlocked nodes have their pages freed here.
 */
static void yp_mem_destroy(struct yp_mem * mm) {
        struct tree_node * p; 
        int i = 0, n = NSECTORS / 8;	/* one tree node per 8-sector page */

	PDEBUG("Enter yp_mem_destroy \n");

        /* synchronous final write-back of every dirty node (type 2) */
        write_dirty_pages(mm, 2);

        for (; i < n; i++) {
                if ((p = radix_tree_delete(&mm->ypdisk_tree, i)) != NULL) {
                        if (!is_treenode_locked(p)) {
                                if (p->page_p != NULL) {
                                        __free_page(p->page_p);
                                }
                        }
                        kfree(p);
                }
        }

	if (mm->dbmp_page)
		__free_page(mm->dbmp_page);
	if (mm->label_page)
		__free_page(mm->label_page);

	if (mm->sem_tree)
		kfree(mm->sem_tree);
}

/*
 * ypdisk_open - block_device_operations .open handler
 *
 * Stashes the per-disk yp_bio_exc (set as gendisk private_data in
 * yp_bio_exc_init()) on the file for later use.
 */
static int ypdisk_open(struct inode * inode, struct file * filp) {
	PDEBUG("Enter yp_open \n");
	filp->private_data = inode->i_bdev->bd_disk->private_data;
	return 0;
}
/* ypdisk_release - .release handler; nothing to tear down per open */
static int ypdisk_release(struct inode * inode, struct file * filp) {
	PDEBUG("Enter yp_release \n");
	return 0;
}
/* block device operations table exported via the gendisk */
static struct block_device_operations ypdisk_ops = {
	.owner = THIS_MODULE,
	.open = ypdisk_open,
	.release = ypdisk_release,
};

/*
 * ypdisk_make_request - ypdisk's bio entry point (no request queue)
 *
 * Transfers each bio segment through transfer_data() and completes the
 * bio with the number of bytes actually moved.
 */
static int ypdisk_make_request(request_queue_t *q, struct bio * bio) {
	struct yp_mem * mm = q->queuedata;
	int ret = 0, count=0;
	int i = 0;
	struct bio_vec * bvec;
	sector_t sector = bio->bi_sector;
	char * buffer = NULL;
	
	#ifdef YPDISK_DEBUG                                      
        int tmp_count = bio->bi_size;
	#endif

	PDEBUG("Enter ypdisk_make_request \n");
	/* bio_cur_sectors() reads bi_idx, so bi_idx is advanced manually
	 * after each segment has been transferred */
	bio_for_each_segment(bvec, bio, i) {
		buffer = __bio_kmap_atomic(bio, i, KM_USER0);
		ret = transfer_data(mm, sector, bio_cur_sectors(bio),
				buffer, bio_data_dir(bio)==WRITE, &count);
		sector += bio_cur_sectors(bio);
                bio->bi_idx ++;
		__bio_kunmap_atomic(bio, KM_USER0);

		if (ret<0)
			break;
	}
        /*
        if (bio->bi_size != count) {
                print_bio(bio);
        }
        */
        PDEBUG("In transfer_data: tmp_count is %d, bio->bi_size is %d, done is %d \n", 
                tmp_count, bio->bi_size, count);
	/* complete with the byte count actually transferred */
	bio_endio(bio, count, ret);
	//bio_endio(bio, bio->bi_size, ret);
	return 0;
}

/*
 * yp_bio_exc_init - initiate the bio exchange level
 *
 * Builds a make_request-style queue (no elevator), allocates the
 * gendisk, wires up the fops/queue/private data, and registers the
 * disk with the kernel via add_disk().
 */
static int yp_bio_exc_init(struct yp_bio_exc * be) {
	PDEBUG("Enter yp_bio_exc_init \n");
	memset(be, 0, sizeof (struct yp_bio_exc));

	be->queue = blk_alloc_queue(GFP_KERNEL);
	if (be->queue == NULL) {
		printk(KERN_ERR "alloc request queue fail \n");
		return -ENOMEM;
	}
	blk_queue_make_request(be->queue, ypdisk_make_request);
	blk_queue_hardsect_size(be->queue, KERNEL_SECTOR_SIZE);
	be->queue->queuedata = yp_mm;

	be->gd = alloc_disk(YPDISK_MINORS);
	if (be->gd == NULL) {
		printk(KERN_ERR "alloc gendisk fail \n");
		blk_put_queue(be->queue);
		return -ENOMEM;
	}
	be->gd->major = ypdisk_major;
	be->gd->first_minor = 0;
	be->gd->fops = &ypdisk_ops;
	be->gd->queue = be->queue;
	be->gd->private_data = be;
	snprintf(be->gd->disk_name, 32, "ypdisk");
	/* hardsect size equals KERNEL_SECTOR_SIZE, so the ratio is 1 */
	set_capacity(be->gd, NSECTORS * (KERNEL_SECTOR_SIZE / KERNEL_SECTOR_SIZE));

	add_disk(be->gd);

	return 0;
}

void yp_bio_exc_destroy(struct yp_bio_exc * be) {
	PDEBUG("Enter yp_bio_exc_destroy \n");
	if (be->gd)
		del_gendisk(be->gd);
	if (be->queue)
		blk_put_queue(be->queue);
}
		
/*
 * Clear 
 * by the sequence of yp_bio_exc, yp_writeback, yp_mem, yp_comm_disk
 */
void cleanup(struct yp_bio_exc * be, struct yp_writeback * wb,
	struct yp_mem * mm, struct yp_comm_disk * cd){

	PDEBUG("Enter clearup \n");

	if (be) {
		yp_bio_exc_destroy(be);
		kfree(be);
	}

	if (wb) {
		yp_writeback_destroy(wb);
		kfree(wb);
	}

	if (mm) {
		yp_mem_destroy(mm);
		kfree(mm);
	}
	if (cd) {
		yp_comm_disk_destroy(cd);
		kfree(cd);
	}
}

/*
 * ypdisk_init - module init
 *
 * Builds the levels in order: yp_comm_disk, yp_mem, yp_writeback,
 * yp_bio_exc.  On any failure everything constructed so far is torn
 * down via cleanup() and the major number is released.
 *
 * Fixes: register_blkdev() returns 0 on success when a specific major
 * is requested and the allocated major only when called with 0; the old
 * code stored the return value straight into ypdisk_major, so a
 * successful fixed-major registration (return 0) was treated as
 * failure.  Also, a level whose init fails is now freed and NULLed
 * immediately, so cleanup() never destroys a half-initialised level.
 */
static int __init ypdisk_init(void) {
	int ret = 0;

	printk(KERN_INFO "\n\n#####\nEnter ypdisk_init \n");
	ret = register_blkdev(ypdisk_major, "ypdisk");
	if (ret < 0) {
		printk(KERN_ERR "ypdisk: unable to get major number. \n");
		return -EBUSY;
	}
	if (ret > 0)	/* dynamic allocation was requested */
		ypdisk_major = ret;

	yp_cd = kmalloc(sizeof (struct yp_comm_disk), GFP_KERNEL);
	if (yp_cd == NULL) {
		ret = -ENOMEM;
		goto err_out;
	}
	ret = yp_comm_disk_init(yp_cd);
	if (ret < 0){
		kfree(yp_cd);
		yp_cd = NULL;
		goto err_out;
	}

	yp_mm = kmalloc(sizeof (struct yp_mem), GFP_KERNEL);
	if (yp_mm == NULL) {
		ret = -ENOMEM;
		goto err_out;
	}
	ret = yp_mem_init(yp_mm);
	if (ret < 0){
		kfree(yp_mm);
		yp_mm = NULL;
		goto err_out;
	}

	yp_wb = kmalloc(sizeof (struct yp_writeback), GFP_KERNEL);
	if (yp_wb == NULL) {
		ret = -ENOMEM;
		goto err_out;
	}
	ret = yp_writeback_init(yp_wb);
	if (ret < 0){
		kfree(yp_wb);
		yp_wb = NULL;
		goto err_out;
	}

	yp_be = kmalloc(sizeof (struct yp_bio_exc), GFP_KERNEL);
	if (yp_be == NULL) {
		ret = -ENOMEM;
		goto err_out;
	}
	ret = yp_bio_exc_init(yp_be);
	if (ret < 0){
		kfree(yp_be);
		yp_be = NULL;
		goto err_out;
	}

	return 0;

err_out:
	printk("Init ypdisk failed \n");
	cleanup(yp_be, yp_wb, yp_mm, yp_cd);
	unregister_blkdev(ypdisk_major, "ypdisk");

	return ret;
}

static void __exit ypdisk_exit(void) {
	/* tear down levels in reverse creation order, then free the major */
	cleanup(yp_be, yp_wb, yp_mm, yp_cd);
	unregister_blkdev(ypdisk_major, "ypdisk");
}

MODULE_LICENSE("GPL");

module_init(ypdisk_init);
module_exit(ypdisk_exit);

