/*
 * migratalbe loopback device
 * this code is heavily based on Linux source code drivers/block/loop.c
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/file.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/kthread.h>

#include "loop.h"
#include "bitmap.h"

#define TRACE(fmt, args...) printk(KERN_DEBUG "sloop:%s:%d " fmt "\n", \
    __FUNCTION__, __LINE__, ##args)

#define inode_get_lo(inodep) ((inodep)->i_bdev->bd_disk->private_data)
#define inode_blocksize(inodep) (S_ISBLK((inodep)->i_mode) ? (inodep)->i_blksize : PAGE_SIZE)

MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);

static int max_loop = 8;
module_param(max_loop, int, 0);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)");

enum {
	LO_UNBOUND,
	LO_BOUND,
	LO_RUNDOWN,
};

enum {
	LO_FLAGS_RO = 1,
};

struct loop_device {
	struct mutex mutex;
	int number;
	int refcnt;
	int state;
	int flags;
	unsigned blocksize;
	char filename[LO_NAME_SIZE];
	struct file* in_fp;
	struct block_device* bdev;

	spinlock_t lock;
	struct request_queue* queue;
	struct bio* bio;
	struct bio* biotail;
	gfp_t old_gfp_mask;

	struct bitmap* wlimit;
	u32 blocks;
	struct proc_dir_entry* proc_subdir;
	struct proc_dir_entry* proc_wlimit_c;

	struct bitmap* rlimit;
	struct bitmap* rblocked;
	struct proc_dir_entry* proc_rlimit_c;
	struct proc_dir_entry* proc_rblocked_c;

	struct bio* rblocked_bio;
	struct bio* rblocked_biotail;

	struct proc_dir_entry* proc_wlimit;
	struct proc_dir_entry* proc_rlimit;
	struct proc_dir_entry* proc_rblocked;
	struct proc_dir_entry* proc_rblocked_kick;

	wait_queue_head_t wait;
	wait_queue_head_t rb_wait;
	struct task_struct* thread;
};

static struct loop_device* loop_devs;
static struct gendisk** disks;
static struct proc_dir_entry* proc_loop_dir;

static loff_t get_loop_size(struct loop_device* lo, struct file* fp)
{
	loff_t loopsize;
	loopsize = i_size_read(fp->f_mapping->host);
	// FIXME: should implement sizelimit
	// FIXME: should implement offset
	return loopsize >> 9;
}

static void lo_add_bio(struct loop_device* lo, struct bio* bio)
{
	if (lo->biotail) {
		lo->biotail->bi_next = bio;
		lo->biotail = bio;
	}
	else {
		lo->bio = lo->biotail = bio;
	}
}

static struct bio* lo_get_bio(struct loop_device* lo)
{
	struct bio* bio;

	if ((bio = lo->bio)) {
		if (bio == lo->biotail)
			lo->biotail = NULL;
		lo->bio = bio->bi_next;
		bio->bi_next = NULL;
	}
	return bio;
}

static void lo_add_rblocked_bio(struct loop_device* lo, struct bio* bio)
{
	if (lo->rblocked_biotail) {
		lo->rblocked_biotail->bi_next = bio;
		lo->rblocked_biotail = bio;
	}
	else {
		lo->rblocked_bio = lo->rblocked_biotail = bio;
	}
}

static struct bio* lo_get_rblocked_bio(struct loop_device* lo)
{
	struct bio* bio;

	if ((bio = lo->rblocked_bio)) {
		if (bio == lo->rblocked_biotail)
			lo->rblocked_biotail = NULL;
		lo->rblocked_bio = bio->bi_next;
		bio->bi_next = NULL;
	}
	return bio;
}

static struct bio* lo_next_rblocked_bio(struct loop_device* lo, struct bio* bio, sector_t bi_sector)
{
	// find bio by bi_sector
	while (bio != NULL) {
		sector_t min = bio->bi_sector;
		sector_t max = min + (bio->bi_size >> 9);
		if (bi_sector >= min && bi_sector < max)
			break;

		bio = bio->bi_next;
	}
	return bio;
}

static void lo_remove_rblocked_bio(struct loop_device* lo, struct bio* obj)
{
	struct bio* p = lo->rblocked_bio;
	struct bio* prev_bio = NULL;
	
	// find bio by bi_sector
	while (p != NULL) {
		if (p == obj)
			break;

		prev_bio = p;
		p = p->bi_next;
	}

	// if found nothing
	if (!p)
		return;
	
	if (!prev_bio) // bio is head, set new head
		lo->rblocked_bio = p->bi_next;
	else           // not head, remove bio from linked list
		prev_bio->bi_next = p->bi_next;
	p->bi_next = NULL;
	
	// if bio is tail, set new tail
	if (p == lo->rblocked_biotail)
		lo->rblocked_biotail = prev_bio;
}

static int lo_make_request(struct request_queue* q, struct bio* bio)
{
	struct loop_device* lo = q->queuedata;
	int rw = bio_rw(bio);

	if (rw == READA)
		rw = READ;
	BUG_ON(!lo || (rw != READ && rw != WRITE));

	spin_lock_irq(&lo->lock);
	if (lo->state != LO_BOUND)
		goto out;
	if (unlikely(rw == WRITE && (lo->flags & LO_FLAGS_RO)))
		goto out;
	lo_add_bio(lo, bio);
	wake_up(&lo->wait);
	spin_unlock_irq(&lo->lock);
	return 0;

out:
	spin_unlock_irq(&lo->lock);
	bio_io_error(bio, bio->bi_size);
	return 0;
}

static void lo_unplug(struct request_queue* q)
{
	struct loop_device* lo = q->queuedata;
	clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
	blk_run_address_space(lo->in_fp->f_mapping);
}

struct lo_read_cookie {
	struct page* page;
	unsigned offset;
};

static int lo_read(struct page *raw_page, unsigned raw_off,
		   struct page *loop_page, unsigned loop_off, int size)
{
	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;

	memcpy(loop_buf, raw_buf, size);

	kunmap_atomic(raw_buf, KM_USER0);
	kunmap_atomic(loop_buf, KM_USER1);
	//cond_resched();
	return 0;
}

static int lo_read_actor(read_descriptor_t* desc, struct page* page, unsigned long offset, unsigned long size)
{
	unsigned long count = desc->count;
	struct lo_read_cookie* p = desc->arg.data;

	if (size > count)
		size = count;
	
	if (lo_read(page, offset, p->page, p->offset, size)) {
		size = 0;
		printk(KERN_ERR "sloop: error transfering block %lu\n", page->index);
		desc->error = -EINVAL;
	}

	flush_dcache_page(p->page);

	desc->count = count - size;
	desc->written += size;
	p->offset += size;
	return size;
}

static int do_lo_receive(struct file* fp, struct bio_vec* bvec, loff_t pos)
{
	struct lo_read_cookie cookie;
	int err;

	cookie.page = bvec->bv_page;
	cookie.offset = bvec->bv_offset;

	err = fp->f_op->sendfile(fp, &pos, bvec->bv_len, lo_read_actor, &cookie);
	return (err < 0)? err : 0;
}

static int lo_receive(struct file* fp, struct bio* bio, loff_t pos)
{
	struct bio_vec *bvec;
	int i, err = 0;

	bio_for_each_segment(bvec, bio, i) {
		err = do_lo_receive(fp, bvec, pos);
		if (err < 0)
			break;
		pos += bvec->bv_len;
	}
	return err;
}

static int lo_fop_write(struct file* fp, struct page* page, unsigned int poff, unsigned int plen, loff_t pos)
{
	ssize_t err;
	mm_segment_t old_fs = get_fs();
	u8 __user* buf = (u8 __user*) kmap(page) + poff;

	set_fs(get_ds());
	err = fp->f_op->write(fp, buf, plen, &pos);
	set_fs(old_fs);

	if (likely(err == plen))
		err = 0;
	else
		goto fail;

ret:
	kunmap(page);
	//cond_resched();
	return err;

fail:
       	printk(KERN_ERR "sloop: error writing at %llu, length %i.\n", pos, plen);
       	if (err >= 0)
	       	err = -EIO;
	goto ret;
}

static int lo_send(struct file* fp, struct bio* bio, loff_t pos)
{
	struct bio_vec *bvec;
	int i, err = 0;

	bio_for_each_segment(bvec, bio, i) {
		err = lo_fop_write(fp, bvec->bv_page, bvec->bv_offset, bvec->bv_len, pos);
		if (err < 0)
			break;
		pos += bvec->bv_len;
	}

	return err;
}

enum {
	REQ_SYNC_BLOCK,
	REQ_CHANGE_FD,
};

struct magic_request {
	int type;
	u32 bindex;
	struct page* page;
	struct file* fp;
	struct completion wait;
};

static void do_lo_sync_block(struct loop_device* lo, u32 bindex, struct page* page);

static void do_lo_switch(struct loop_device* lo, struct file* fp);

static inline void lo_handle_request(struct loop_device* lo, struct magic_request* req)
{
	if (req->type == REQ_SYNC_BLOCK) {
		do_lo_sync_block(lo, req->bindex, req->page);
	}
	else if (req->type == REQ_CHANGE_FD) {
		do_lo_switch(lo, req->fp);
	}
	complete(&req->wait);
}

static inline void lo_handle_bio(struct file* fp, struct bio* bio)
{
	int ret;
	loff_t pos;
	
	// FIXME: should support offset
	pos = ((loff_t) bio->bi_sector << 9);

	if (bio_rw(bio) == WRITE)
		ret = lo_send(fp, bio, pos);
	else
		ret = lo_receive(fp, bio, pos);
	bio_endio(bio, bio->bi_size, ret);
	cond_resched();
}

static int has_wlimit(struct loop_device* lo, struct bio* bio)
{
	// FIXME: assume blocksize = 4096
	if (bio_rw(bio) == WRITE) {
		u32 bindex = bio->bi_sector >> 3;
		u32 bindex_max = bindex + (bio->bi_size >> 12);


		spin_lock_irq(&lo->lock);

		for (; bindex < bindex_max; ++bindex)
			bm_setb(lo->wlimit, bindex, 1);

		spin_unlock_irq(&lo->lock);
	}
	return 0;
}

struct rblocked_cookie {
	unsigned short is_new;
	unsigned short blocked_vcnt;
	void* old_private;
};

static int has_rlimit(struct loop_device* lo, struct bio* bio)
{
	// FIXME: assume blocksize = 4096
	u32 bindex = bio->bi_sector >> 3;
	u32 bindex_max = bindex + (bio->bi_size >> 12);
	int num = 0;
	int rw = bio_rw(bio);


	spin_lock_irq(&lo->lock);
	for (; bindex < bindex_max; ++bindex) {
		if (0 == bm_getb(lo->rlimit, bindex))
			continue;

		if (rw == WRITE) {
			bm_setb(lo->rlimit, bindex, 0);
		}
		else { // READ/READA 
			bm_setb(lo->rblocked, bindex, 1);
			++num;
		}
	}
	if (num)
		lo_add_rblocked_bio(lo, bio);
	spin_unlock_irq(&lo->lock);

	if (num) {
		struct rblocked_cookie* cookie;
		cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
		memset(cookie, 0, sizeof(*cookie));
		cookie->is_new = 1;
		cookie->blocked_vcnt = num;
		cookie->old_private = bio->bi_private;
		bio->bi_private = cookie;

		wake_up_interruptible(&lo->rb_wait);
	}
	return num;
}

static inline int has_rw_limit(struct loop_device* lo, struct bio* bio)
{
	int ret = 0;

	if (unlikely(!bio->bi_bdev)) {
		lo_handle_request(lo, bio->bi_private);
		bio_put(bio);
		return 1;
	}

	if (lo->wlimit)
		ret = has_wlimit(lo, bio);

	if (lo->rlimit)
		ret = has_rlimit(lo, bio);

	return ret;
}

static int lo_thread(void* data)
{
	struct loop_device* lo = data;
	struct bio* bio;

	set_user_nice(current, -20);

	while(!kthread_should_stop() || lo->bio) {

		wait_event_interruptible(lo->wait, kthread_should_stop() || lo->bio);
		if (!lo->bio)
			continue;

		spin_lock_irq(&lo->lock);
		bio = lo_get_bio(lo);
		spin_unlock_irq(&lo->lock);

		BUG_ON(!bio);

		// implement r/w limit here
		if (!has_rw_limit(lo, bio))
			lo_handle_bio(lo->in_fp, bio);
	}
	return 0;
}

static int setup_lo_thread(struct loop_device* lo)
{
	lo->bio = lo->biotail = NULL;
	blk_queue_make_request(lo->queue, lo_make_request);
	lo->queue->queuedata = lo;
	lo->queue->unplug_fn = lo_unplug;

	lo->rblocked_bio = lo->rblocked_biotail = NULL;

	lo->thread = kthread_create(lo_thread, lo, "sloop%d", lo->number);
	if (IS_ERR(lo->thread))
		return PTR_ERR(lo->thread);

	lo->state = LO_BOUND;
	wake_up_process(lo->thread);
	return 0;
}

static int lo_set_fd(struct loop_device* lo, struct file* lo_fp, struct block_device* bdev, unsigned int in_fd)
{
	struct file* in_fp;
	struct address_space* in_mapping;
	struct inode* in_inode;
	int lo_flags = 0;
	unsigned lo_blocksize;
	loff_t lo_size;
	int err;

	__module_get(THIS_MODULE);

	err = -EBADF;
	in_fp = fget(in_fd);
	if (!in_fp)
		goto out0;
	
	err = -EBUSY;
	if (lo->state != LO_UNBOUND)
		goto out1;
	
	// FIXME: should avoid recursion here
	
	// set RO
	in_mapping = in_fp->f_mapping;
	in_inode = in_mapping->host;

	if (!(lo_fp->f_mode & FMODE_WRITE))
		lo_flags |= LO_FLAGS_RO;

	if (!(in_fp->f_mode & FMODE_WRITE))
		lo_flags |= LO_FLAGS_RO;

	err = -EINVAL;
	if (!S_ISREG(in_inode->i_mode) && !S_ISBLK(in_inode->i_mode))
		goto out1;
	
	if (!in_fp->f_op->sendfile) // we must be able to read
		goto out1;
	if (!in_fp->f_op->write)
		lo_flags |= LO_FLAGS_RO;

	set_device_ro(bdev, (lo_flags & LO_FLAGS_RO) != 0);

	// set size
	lo_blocksize = inode_blocksize(in_inode);
	printk(KERN_INFO "sloop: blocksize = %d\n", lo_blocksize);
	lo_size = get_loop_size(lo, in_fp);

	err = -EFBIG;
	if ((loff_t)(sector_t)lo_size != lo_size)
		goto out1;
	lo->blocksize = lo_blocksize;
	lo->bdev = bdev;
	lo->flags = lo_flags;
	lo->in_fp = in_fp;
	lo->old_gfp_mask = mapping_gfp_mask(in_mapping);
	mapping_set_gfp_mask(in_mapping, lo->old_gfp_mask & ~ (__GFP_IO|__GFP_FS));
	
	set_capacity(disks[lo->number], lo_size);
	bd_set_size(bdev, lo_size << 9);
	set_blocksize(bdev, lo_blocksize);

	lo->blocks = lo_size >> 3; // FIXME: currently assumes BLOCKSIZE = 4096

	// FIXME: should implement sizelimit
	err = setup_lo_thread(lo);
	if (err < 0)
		goto out2;

	return 0;
	
out2:
	lo->state = LO_UNBOUND;
	lo->thread = NULL;
	lo->in_fp = NULL;
	lo->flags = 0;
	set_capacity(disks[lo->number], 0);
	invalidate_bdev(bdev, 0);
	bd_set_size(bdev, 0);
	mapping_set_gfp_mask(in_mapping, lo->old_gfp_mask);
out1:
	fput(in_fp);
out0:
	module_put(THIS_MODULE);	
	return err;
}

static int lo_set_status64(struct loop_device* lo, struct loop_info64 __user* arg)
{
	struct loop_info64 info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (lo->state != LO_BOUND)
		return -ENXIO;
	if (info.lo_offset || info.lo_sizelimit || info.lo_encrypt_key_size)
		return -EINVAL;

	// FIXME: should support offset & sizelimit

	memcpy(lo->filename, info.lo_file_name, LO_NAME_SIZE);
	lo->filename[LO_NAME_SIZE-1] = 0;
	return 0;
}

static int lo_get_status64(struct loop_device* lo, struct loop_info64 __user* arg)
{
	struct loop_info64 info;
	struct kstat stat;
	int err;

	if (lo->state != LO_BOUND)
		return -ENXIO;

	err = vfs_getattr(lo->in_fp->f_vfsmnt, lo->in_fp->f_dentry, &stat);
	if (err)
		return err;

	memset(&info, 0, sizeof(info));

	info.lo_number  = lo->number;
	info.lo_flags   = lo->flags;
	info.lo_device  = huge_encode_dev(stat.dev);
	info.lo_inode   = stat.ino;
	info.lo_rdevice = huge_encode_dev(lo->bdev ? stat.rdev : stat.dev);
	memcpy(info.lo_file_name, lo->filename, LO_NAME_SIZE);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

static int lo_wlimit_start(struct loop_device* lo)
{
	if (lo->state != LO_BOUND)
		return -ENXIO;
	if (lo->wlimit)
		return -EBUSY;

	lo->wlimit = bm_alloc(lo->blocks);
	if (!lo->wlimit)
		return -ENOMEM;
	
	return 0;
}

static int lo_wlimit_stop(struct loop_device* lo, struct bitmap_save __user* arg)
{
	u32* buf32;
	int enc_size;
	struct bitmap_save save;

	if (lo->state != LO_BOUND)
		return -ENXIO;
	if (!lo->wlimit)
		return -ENXIO;

	enc_size = bm_enc_size(lo->wlimit);

	if (copy_from_user(&save, arg, sizeof(save)))
		return -EFAULT;

	if (save.buf_size == 0 && save.bitmap_size == 0) {
		save.bitmap_size = lo->wlimit->size;
		save.buf_size = enc_size * 4;
		if (copy_to_user(arg, &save, sizeof(save)))
			return -EFAULT;
		return -EAGAIN;
	}

	if (!enc_size)
		goto out;

	buf32 = kmalloc(4 * enc_size, GFP_KERNEL);
	bm_enc_buf(lo->wlimit, buf32, enc_size);
	if (copy_to_user(save.buf, buf32, save.buf_size)) {
		kfree(buf32);
		return -EFAULT;
	}

	kfree(buf32);
out:
	bm_free(lo->wlimit);
	lo->wlimit = 0;

	return 0;
}

static int lo_rlimit_start(struct loop_device* lo, struct bitmap_save __user* arg)
{
	struct bitmap_save save;
	u32* buf32 = 0;
	int enc_size;
	int err;

	if (lo->state != LO_BOUND)
		return -ENXIO;
	if (lo->rlimit)
		return -EBUSY;
	if (copy_from_user(&save, arg, sizeof(save)))
		return -EFAULT;
	if (lo->blocks != save.bitmap_size)
		return -EINVAL;

	buf32 = kmalloc(save.buf_size, GFP_KERNEL);

	err = -EFAULT;
	if (copy_from_user(buf32, save.buf, save.buf_size))
		goto out;

	lo->rlimit = bm_alloc(lo->blocks);
	lo->rblocked = bm_alloc(lo->blocks);

	err = -ENOMEM;
	if (!lo->rlimit || !lo->rblocked)
		goto out;

	enc_size = save.buf_size / 4;
	bm_dec_buf(lo->rlimit, buf32, enc_size);

	kfree(buf32);
	return 0;

out:
	if (buf32)
		kfree(buf32);
	if (lo->rlimit)
		bm_free(lo->rlimit);
	if (lo->rblocked)
		bm_free(lo->rblocked);
	lo->rlimit = 0;
	lo->rblocked = 0;

	return err;
}

static int lo_rlimit_stop(struct loop_device* lo)
{
	if (lo->state != LO_BOUND)
		return -ENXIO;
	if (!lo->rlimit || !lo->rblocked)
		return -ENXIO;

	bm_free(lo->rlimit);
	bm_free(lo->rblocked);
	lo->rlimit = 0;
	lo->rblocked = 0;
	wake_up_interruptible(&lo->rb_wait);
	return 0;
}

static int lo_clr_fd(struct loop_device* lo, struct block_device* bdev)
{
	struct file* fp = lo->in_fp;
	gfp_t gfp_mask = lo->old_gfp_mask;

	if (lo->state != LO_BOUND)
		return -ENXIO;
	if (lo->refcnt > 1)
		return -EBUSY;
	if (fp == NULL)
		return -EINVAL;

	spin_lock_irq(&lo->lock);
	lo->state = LO_RUNDOWN;
	spin_unlock_irq(&lo->lock);

	kthread_stop(lo->thread);

	lo->flags = 0;
	lo->in_fp = NULL;
	lo->bdev = NULL;
	memset(lo->filename, 0, LO_NAME_SIZE);

	invalidate_bdev(bdev, 0);
	set_capacity(disks[lo->number], 0);
	bd_set_size(bdev, 0);
	mapping_set_gfp_mask(fp->f_mapping, gfp_mask);
	lo->state = LO_UNBOUND;

	if (lo->wlimit)
		bm_free(lo->wlimit);
	lo->wlimit = 0;

	if (lo->rlimit)
		bm_free(lo->rlimit);
	lo->rlimit = 0;

	if (lo->rblocked)
		bm_free(lo->rblocked);
	lo->rblocked = 0;
	wake_up_interruptible(&lo->rb_wait);

	fput(fp);
	module_put(THIS_MODULE);

	return 0;
}

static void lo_fill_reader(struct loop_device* lo, struct bio* bio, u32 bindex, struct page* page)
{
	struct bio_vec *bvec;
        int i, err = 0;
	struct rblocked_cookie* cookie = bio->bi_private;
	loff_t pos;

	// FIXME: only support blocksize 4096

	pos = bio->bi_sector << 9;
	// read unblocked bvec
	if (cookie->is_new && bio->bi_vcnt > cookie->blocked_vcnt) {
		cookie->is_new = 0;
		err = lo_receive(lo->in_fp, bio, pos);
		if (err < 0)
			goto endio;
	}

	pos = bio->bi_sector << 9;
	// fill this page into the bvec->bv_page
	bio_for_each_segment(bvec, bio, i) {
		if (pos >> 12 != bindex) {
			pos += bvec->bv_len;
			continue;
		}

		err = lo_read(page, 0, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
		if (err < 0)
			goto endio;

		flush_dcache_page(bvec->bv_page);
	}
	--cookie->blocked_vcnt;

	// if not all blocked bvec are filled
	if (cookie->blocked_vcnt != 0)
		return;

endio:
	bio->bi_private = cookie->old_private;
	kfree(cookie);
	lo_remove_rblocked_bio(lo, bio);
	if (err < 0)
		bio_io_error(bio, bio->bi_size);
	else
		bio_endio(bio, bio->bi_size, 0);
	cond_resched();
}

static void lo_fill_readers(struct loop_device* lo, u32 bindex, struct page* page)
{
	struct bio* bio;
	struct bio* next_bio;
	sector_t sect = bindex << 3;

	spin_lock_irq(&lo->lock);
	bm_setb(lo->rblocked, bindex, 0);
	spin_unlock_irq(&lo->lock);

	bio = lo->rblocked_bio;
	for (;;) {
		spin_lock_irq(&lo->lock);
		bio = lo_next_rblocked_bio(lo, bio, sect);
		spin_unlock_irq(&lo->lock);
		
		if (!bio)
			break;

		next_bio = bio->bi_next;
		lo_fill_reader(lo, bio, bindex, page);
		bio = next_bio;
	}
}

static int lo_request(struct loop_device *lo, struct magic_request* w)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio)
		return -ENOMEM;
	init_completion(&w->wait);
	bio->bi_private = w;
	bio->bi_bdev = NULL;
	lo_make_request(lo->queue, bio);
	wait_for_completion(&w->wait);
	return 0;
}

static int lo_request_sync(struct loop_device *lo, u32 bindex, struct page* page)
{
	struct magic_request w = {
		.type   = REQ_SYNC_BLOCK,
		.bindex = bindex,
		.page   = page,
	};
	return lo_request(lo, &w);
}

static int lo_request_switch(struct loop_device *lo, struct file* fp)
{
	struct magic_request w = {
		.type   = REQ_CHANGE_FD,
		.fp     = fp,
	};
	return lo_request(lo, &w);
}

static void do_lo_sync_block(struct loop_device* lo, u32 bindex, struct page* page)
{
	if (bm_getb(lo->rblocked, bindex)) {
		// copy block to reader directly
		lo_fill_readers(lo, bindex, page);
	}

	// sync to disk
	if (bm_getb(lo->rlimit, bindex) == 1) {
		loff_t pos = bindex << 12;

		int err = lo_fop_write(lo->in_fp, page, 0, 4096, pos);
		if (err)
			printk(KERN_INFO "sloop: block sync failed at pos %llu, size 4096", pos);

		spin_lock_irq(&lo->lock);
		bm_setb(lo->rlimit, bindex, 0);
		spin_unlock_irq(&lo->lock);
	}
}

static int lo_sync_block(struct loop_device* lo, struct loop_block __user* arg)
{
	int err = 0;
	struct loop_block lb;
	struct page* page;

	if (lo->state != LO_BOUND)
		return -ENXIO;
	if (!lo->rlimit)
		return -ENXIO;
	if (copy_from_user(&lb, arg, sizeof(lb)))
		return -EFAULT;

	page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
	if (unlikely(!page))
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(kmap(page), lb.buf, 4096))
		goto free;
	
	err = lo_request_sync(lo, lb.index, page);

free:
	kunmap(page);
	__free_page(page);
	return err;
}

static void do_lo_switch(struct loop_device* lo, struct file* fp)
{
	struct file *old_fp = lo->in_fp;
	struct address_space *mapping = fp->f_mapping;

	mapping_set_gfp_mask(old_fp->f_mapping, lo->old_gfp_mask);
	lo->in_fp = fp;
	lo->blocksize = inode_blocksize(mapping->host);
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
}

static int lo_change_fd(struct loop_device* lo, struct file* lo_file,
		       struct block_device* bdev, unsigned int arg)
{
	struct file	*fp, *old_fp;
	struct inode	*inode;
	int		err;

	err = -ENXIO;
	if (lo->state != LO_BOUND)
		goto out;

	err = -EBADF;
	fp = fget(arg);
	if (!fp)
		goto out;

	inode = fp->f_mapping->host;
	old_fp = lo->in_fp;

	err = -EINVAL;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	/* new backing store needs to support loop (eg sendfile) */
	if (!inode->i_fop->sendfile)
		goto out_putf;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, fp) != get_loop_size(lo, old_fp))
		goto out_putf;

	err = -EBUSY;
	if (!(lo->flags & LO_FLAGS_RO)) {
		if (!lo->wlimit || lo->rlimit)
			goto out_putf;

		err = -ENOMEM;
		lo->rblocked = bm_alloc(lo->blocks);
		if (!lo->rblocked)
			goto out_putf;

		lo->rlimit = lo->wlimit;
		lo->wlimit = 0;
	}

	/* and ... switch */
	err = lo_request_switch(lo, fp);
	if (err)
		goto out_putf;

	fput(old_fp);
	return 0;

out_putf:
	fput(fp);
out:
	return err;
}

static int lo_ioctl(struct inode *inodep, struct file* filep, unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = inode_get_lo(inodep);
	int err;

	mutex_lock(&lo->mutex);
	switch (cmd) {
	  case LOOP_SET_FD:
	  	err = lo_set_fd(lo, filep, inodep->i_bdev, arg);
		break;

	  case LOOP_CHANGE_FD:
	  	err = lo_change_fd(lo, filep, inodep->i_bdev, arg);
		break;

	  case LOOP_SET_STATUS64:
	  	err = lo_set_status64(lo, (struct loop_info64 __user*)arg);
		break;

	  case LOOP_GET_STATUS64:
	  	err = lo_get_status64(lo, (struct loop_info64 __user*)arg);
		break;

	  case LOOP_GET_STATUS: // FIXME: workaround for losetup -f
	  	if (lo->state == LO_UNBOUND)
	  		err = -ENXIO;
		else
			err = 0;
		break;

	  case LOOP_CLR_FD:
	  	err = lo_clr_fd(lo, inodep->i_bdev);
		break;

	  case LOOP_WLIMIT_START:
	  	err = lo_wlimit_start(lo);
		break;

	  case LOOP_WLIMIT_STOP:
	  	err = lo_wlimit_stop(lo, (struct bitmap_save __user*)arg);
		break;


	  case LOOP_RLIMIT_START:
	  	err = lo_rlimit_start(lo, (struct bitmap_save __user*)arg);
		break;

	  case LOOP_RLIMIT_STOP:
	  	err = lo_rlimit_stop(lo);
		break;

	  case LOOP_SYNC_BLOCK:
	  	err = lo_sync_block(lo, (struct loop_block __user*)arg);
		break;

	  default:
	  	err = -EINVAL;
	}
	mutex_unlock(&lo->mutex);

	return err;
}

static int lo_open(struct inode *inodep, struct file* filep)
{
	struct loop_device* lo = inode_get_lo(inodep);

	mutex_lock(&lo->mutex);
	++lo->refcnt;
	mutex_unlock(&lo->mutex);

	return 0;
}

static int lo_release(struct inode *inodep, struct file* filep)
{
	struct loop_device* lo = inode_get_lo(inodep);

	mutex_lock(&lo->mutex);
	--lo->refcnt;
	mutex_unlock(&lo->mutex);

	return 0;
}

static struct block_device_operations lo_fops = {
	.owner    = THIS_MODULE,
	.open     = lo_open,
	.release  = lo_release,
	.ioctl    = lo_ioctl,
};

static int proc_count_bitmap(char *page, char **start,
                             off_t off, int count,
                             int *eof, void *data)
{
	struct bitmap* bm = * (struct bitmap**)data;

	*eof = 1;

	if (bm)
		return sprintf(page, "%u\n", bm_count(bm));

	return sprintf(page, "\n");
}

static int proc_kick_rblocked(char *page, char **start,
                             off_t off, int count,
                             int *eof, void *data)
{
	struct loop_device* lo = data;
	char *p = page;
	struct rblocked_cookie* cookie;
	struct bio* bio = lo_get_rblocked_bio(lo);

	if (!lo->rblocked) {
		*eof = 1;
		return 0;
	}
	
	mutex_lock(&lo->mutex);
	while (bio && (p - page) < 4082) {
		p += sprintf(p, "%llu\n", bio->bi_sector >> 3);

		cookie = bio->bi_private;
		bio->bi_private = cookie->old_private;
		kfree(cookie);

		bio_io_error(bio, bio->bi_size);

		bio = lo_get_rblocked_bio(lo);
	}
	lo->rblocked_bio = bio;
	mutex_unlock(&lo->mutex);

	if ((p - page) == 0)
		*eof = 1;

	return p - page;
}

static int proc_list_rblocked(char *page, char **start,
                             off_t off, int count,
                             int *eof, void *data)
{
	struct loop_device* lo = data;
	char *p = page;
	u32 pos;

	int ret = wait_event_interruptible(lo->rb_wait, (!lo->rblocked || bm_count(lo->rblocked) != 0));
	if (ret)
		return -EINTR;
	if (!lo->rblocked)
		return -EIO;

	pos = 0;
	mutex_lock(&lo->mutex);
	while (bm_nextb(lo->rblocked, &pos) && (p - page) < 4082) {
		p += sprintf(p, "%u\n", pos);
		++pos;
	}
	mutex_unlock(&lo->mutex);

	if ((p - page) == 0)
		*eof = 1;

	return p - page;
}

static int proc_list_bitmap(char *page, char **start,
                             off_t off, int count,
                             int *eof, void *data)
{
	struct bitmap* bm = * (struct bitmap**)data;
	char *p = page;
	u32 pos;

	if (!bm) {
		*eof = 1;
		return 0;
	}

	pos = 0;
	while (bm_nextb(bm, &pos) && (p - page) < 4082) {
		p += sprintf(p, "%u\n", pos);
		++pos;
	}

	if ((p - page) == 0)
		*eof = 1;

	return p - page;
}

static int proc_sync_block(struct file* fp, const char __user* buf,
		unsigned long count, void* data)
{
	struct loop_device* lo = data;
	struct loop_block __user* lb = (struct loop_block __user*) buf;
	return lo_sync_block(lo, lb);
	return count;
}

static int __init loop_init(void)
{
	int i;

	if (max_loop < 1 || max_loop > 256) {
		printk(KERN_WARNING "sloop: invalid max_loop (%d), use 8 instead\n", max_loop);
		max_loop = 8;
	}
	printk(KERN_INFO "sloop: max_loop: %d\n", max_loop);

	if (register_blkdev(LOOP_MAJOR, "loop"))
		return -EIO;
	
	loop_devs = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL);
	if (!loop_devs)
		goto out_mem0;
	memset(loop_devs, 0, max_loop * sizeof(struct loop_device));
	
	disks = kmalloc(max_loop * sizeof(struct gendisk *), GFP_KERNEL);
	if (!disks)
		goto out_mem1;
	
	for (i = 0; i < max_loop; ++i) {
		disks[i] = alloc_disk(1);
		if (!disks[i])
			goto out_mem2;
	}

	for (i = 0; i < max_loop; ++i) {
		struct loop_device* lo = &loop_devs[i];
		struct gendisk* disk = disks[i];
		
		disk->major = LOOP_MAJOR;
		disk->first_minor = i;
		disk->fops = &lo_fops;
		sprintf(disk->disk_name, "loop%d", i);

		disk->private_data = lo;
		lo->number = i;
		mutex_init(&lo->mutex);

		lo->queue = blk_alloc_queue(GFP_KERNEL);
		if (!lo->queue)
			goto out_mem3;
		disk->queue = lo->queue;
		spin_lock_init(&lo->lock);

		init_waitqueue_head(&lo->wait);
		init_waitqueue_head(&lo->rb_wait);
	}

	proc_loop_dir = proc_mkdir("sloop", NULL);
	if (!proc_loop_dir) {
		printk(KERN_ERR "sloop: proc_mkdir sloop failed\n");
		goto out_mem3;
	}
	for (i = 0; i < max_loop; ++i)
		add_disk(disks[i]);

	proc_loop_dir->owner = THIS_MODULE;

	for (i = 0; i < max_loop; ++i) {
		struct loop_device* lo = &loop_devs[i];

		lo->proc_subdir = proc_mkdir(disks[i]->disk_name, proc_loop_dir);
		lo->proc_wlimit_c = create_proc_read_entry("wlimit_count", 0444, lo->proc_subdir, proc_count_bitmap, &lo->wlimit);
		lo->proc_rlimit_c = create_proc_read_entry("rlimit_count", 0444, lo->proc_subdir, proc_count_bitmap, &lo->rlimit);
		lo->proc_rblocked_c = create_proc_read_entry("rblocked_count", 0444, lo->proc_subdir, proc_count_bitmap, &lo->rblocked);
		lo->proc_wlimit = create_proc_read_entry("wlimit", 0444, lo->proc_subdir, proc_list_bitmap, &lo->wlimit);
		lo->proc_rlimit = create_proc_read_entry("rlimit", 0444, lo->proc_subdir, proc_list_bitmap, &lo->rlimit);

		// special entries
		lo->proc_rblocked_kick = create_proc_read_entry("rblocked_kick", 0444, lo->proc_subdir, proc_kick_rblocked, lo);
		lo->proc_rblocked = create_proc_read_entry("rblocked", 0544, lo->proc_subdir, proc_list_rblocked, lo);
		if (lo->proc_rblocked)
			lo->proc_rblocked->write_proc = proc_sync_block;
	}

	return 0;

out_mem3:
	printk(KERN_ERR "sloop: free %d blk_queue\n", i);
	while (--i > 0)
		blk_cleanup_queue(loop_devs[i].queue);
	i = max_loop;
out_mem2:
	printk(KERN_ERR "sloop: free %d disks\n", i);
	while (--i > 0)
		put_disk(disks[i]);
	kfree(disks);
out_mem1:
	printk(KERN_ERR "sloop: free %d loop_devs\n", max_loop);
	kfree(loop_devs);
out_mem0:
	printk(KERN_ERR "sloop: unregister blkdev\n");
	unregister_blkdev(LOOP_MAJOR, "loop");
	printk(KERN_ERR "sloop: out of memory\n");
	return -ENOMEM;
}

static void loop_exit(void)
{
	int i;

	for (i = 0; i < max_loop; ++i) {
		remove_proc_entry("wlimit", loop_devs[i].proc_subdir);
		remove_proc_entry("rlimit", loop_devs[i].proc_subdir);
		remove_proc_entry("rblocked", loop_devs[i].proc_subdir);
		remove_proc_entry("rsynced", loop_devs[i].proc_subdir);
		remove_proc_entry("wlimit_count", loop_devs[i].proc_subdir);
		remove_proc_entry("rlimit_count", loop_devs[i].proc_subdir);
		remove_proc_entry("rblocked_count", loop_devs[i].proc_subdir);
		remove_proc_entry("rsynced_count", loop_devs[i].proc_subdir);
		remove_proc_entry("rblocked_kick", loop_devs[i].proc_subdir);
		remove_proc_entry(disks[i]->disk_name, proc_loop_dir);
		del_gendisk(disks[i]);
		blk_cleanup_queue(loop_devs[i].queue);
		put_disk(disks[i]);
	}

	remove_proc_entry("sloop", NULL);

	if (unregister_blkdev(LOOP_MAJOR, "loop"))
		printk(KERN_WARNING "sloop: cannot unregister blkdev\n");
}

module_init(loop_init);
module_exit(loop_exit);

MODULE_LICENSE("GPL");
