#include <linux/fs2.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/highmem.h>

/* Install the I/O-completion callback and its opaque context on @bh. */
inline void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_private = private;
	bh->b_end_io = handler;
}

/*
 * Prepare the byte range [from, to) of @page for a write: walk the
 * page's buffers, map unmapped buffers that overlap the range via
 * get_block(), and zero the parts of freshly allocated ("new")
 * buffers that lie outside the range so stale data never reaches
 * disk.
 *
 * Returns 0 on success, a negative errno from get_block(), or -EIO
 * when a partially covered buffer is not uptodate.
 *
 * NOTE(review): the read issue/wait (ll_rw_block / wait_on_buffer)
 * is stubbed out in this port, so collected buffers are only checked
 * for the uptodate flag, not actually read.
 */
static int __block_prepare_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to, get_block_t *get_block)
{
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	/* Only the two range-boundary buffers can be partially covered,
	 * so two wait slots suffice. */
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	// BUG_ON(!PageLocked(page));
	// BUG_ON(from > PAGE_CACHE_SIZE);
	// BUG_ON(to > PAGE_CACHE_SIZE);
	// BUG_ON(from > to);

	blocksize = 1 << inode->i_blkbits;
	/* this port unconditionally (re)creates the buffer ring; upstream
	 * only does so when the page has none */
	// if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	/* first filesystem block covered by this page */
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* buffer entirely outside the write range */
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				/* freshly allocated block: zero whatever part
				 * of it this write will not overwrite */
				if (block_end > to || block_start < from)
					zero_user_segments(page,
						to, block_end,
						block_start, from);
				continue;
			}
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		     (block_start < from || block_end > to)) {
			/* partially covered and not uptodate: this buffer
			 * would need a read; collect it for the loop below */
			// ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		--wait_bh;	/* fix: the decrement used to live only inside
				 * the commented-out wait_on_buffer(*--wait_bh),
				 * so this loop spun forever and dereferenced
				 * one past the filled slots */
		// wait_on_buffer(*wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	// if (unlikely(err))
		// page_zero_new_buffers(page, from, to);
	return err;
}

/*
 * Commit the byte range [from, to) of @page after data was copied in:
 * mark every buffer inside the range uptodate and track whether any
 * buffer outside it is stale ("partial" page).  Always returns 0.
 *
 * NOTE(review): mark_buffer_dirty() and SetPageUptodate() are stubbed
 * out in this port, so nothing is actually scheduled for writeback.
 */
static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;	/* set when some buffer outside the range is stale */
	unsigned blocksize;
	struct buffer_head *bh, *head;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	blocksize = 1 << inode->i_blkbits;

	/* walk the circular buffer ring once; the "|| !block_start" term
	 * lets the first iteration run even though bh == head */
	for(bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
	
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			printf("this is %s(): %d\r\n", __func__, __LINE__);
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			printf("this is %s(): %d\r\n", __func__, __LINE__);
			set_buffer_uptodate(bh);
			// mark_buffer_dirty(bh);
		}
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		clear_buffer_new(bh);
	}

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	// if (!partial)
	// 	SetPageUptodate(page);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return 0;
}

/*
 * Begin a buffered write of @len bytes at @pos: locate (or grab and
 * lock) the page cache page covering @pos and return it via @pagep.
 *
 * Returns 0 on success or -ENOMEM when a page could not be grabbed.
 *
 * NOTE(review): the __block_prepare_write() call and the failure
 * cleanup (unlock/release/truncate) are stubbed out in this port, so
 * @get_block is currently unused and the page's buffers are not
 * prepared here.
 */
int block_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	int status = 0;
	struct page *page;
	pgoff_t index;
	unsigned start, end;
	int ownpage = 0;	/* set when we grabbed the page ourselves */

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* split @pos into page index and offset within that page */
	index = pos >> PAGE_CACHE_SHIFT;
	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + len;

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	page = *pagep;
	if (page == NULL) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		ownpage = 1;
		page = grab_cache_page_write_begin(mapping, index, flags);
		if (!page) {
			printf("this is %s(): %d\r\n", __func__, __LINE__);
			status = -ENOMEM;
			goto out;
		}
		*pagep = page;
		printf("this is %s(): %d\r\n", __func__, __LINE__);
	}
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	// } else
	// 	BUG_ON(!PageLocked(page));

	// status = __block_prepare_write(inode, page, start, end, get_block);
	// if ((status)) {
	// 	ClearPageUptodate(page);

	// 	if (ownpage) {
	// 		unlock_page(page);
	// 		page_cache_release(page);
	// 		*pagep = NULL;

	// 		/*
	// 		 * prepare_write() may have instantiated a few blocks
	// 		 * outside i_size.  Trim these off again. Don't need
	// 		 * i_size_read because we hold i_mutex.
	// 		 */
	// 		if (pos + len > inode->i_size)
	// 			vmtruncate(inode, inode->i_size);
	// 	}
	// }

out:
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return status;
}

/*
 * Finish a buffered write: flush the dcache for @page and return the
 * number of bytes actually committed.
 *
 * NOTE(review): both the short-write handling (page_zero_new_buffers)
 * and __block_commit_write() are stubbed out in this port, so @copied
 * is returned unchanged and the buffers are never marked uptodate or
 * dirty here.
 */
int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start;	/* offset of the write within the page */

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	start = pos & (PAGE_CACHE_SIZE - 1);

	// if (unlikely(copied < len)) {
	// 	/*
	// 	 * The buffers that were written will now be uptodate, so we
	// 	 * don't have to worry about a readpage reading them and
	// 	 * overwriting a partial write. However if we have encountered
	// 	 * a short write and only partially written into a buffer, it
	// 	 * will not be marked uptodate, so a readpage might come in and
	// 	 * destroy our partial write.
	// 	 *
	// 	 * Do the simplest thing, and just treat any short write to a
	// 	 * non uptodate page as a zero-length write, and force the
	// 	 * caller to redo the whole thing.
	// 	 */
	// 	if (!PageUptodate(page))
	// 		copied = 0;

	// 	page_zero_new_buffers(page, start+copied, start+len);
	// }
	flush_dcache_page(page);

	/* This could be a short (even 0-length) commit */
	// __block_commit_write(inode, page, start, start+copied);

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return copied;
}

/*
 * Generic write_end: commit the copied bytes via block_write_end()
 * and extend i_size when the write went past the current end of file.
 * Returns the number of bytes committed.
 *
 * No i_size_read() is needed: i_size cannot change under us because
 * i_mutex is held, but i_size must still be updated while the page is
 * effectively ours so writeout cannot zero beyond it.
 */
int generic_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	loff_t write_end;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	write_end = pos + copied;
	if (write_end > inode->i_size)
		i_size_write(inode, write_end);

	/*
	 * Upstream unlocks/releases the page here and marks the inode
	 * dirty after dropping the page lock; both are stubbed out in
	 * this port.
	 */
	// unlock_page(page);
	// page_cache_release(page);
	// mark_inode_dirty(inode);

	return copied;
}

#if 0
/* Disabled: small LRU cache of recently used buffer_heads. */
#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

// static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
/* single global LRU instead of the upstream per-CPU instance */
static struct bh_lru bh_lrus = {{ NULL }};

#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()

/* Debug guard: the LRU must not be touched with IRQs disabled. */
static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Disabled: look up (bdev, block, size) in the buffer_head LRU.
 * On a hit the entry is moved to the front, its refcount is taken
 * via get_bh(), and it is returned; otherwise returns NULL.
 */
static struct buffer_head *lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	// lru = &__get_cpu_var(bh_lrus);
	lru = &(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				/* shift entries 0..i-1 down one slot, then
				 * promote the hit to the front */
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Disabled: slow-path lookup of @block in the block device's page
 * cache.  Finds the page that would hold the block, then scans its
 * buffer ring for a buffer mapped to @block.  Returns the buffer
 * with a reference held, or NULL.
 */
static struct buffer_head *__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	/* page index that covers this block */
	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/*
 * Disabled: fast/slow lookup of a cached buffer_head.  Tries the LRU
 * first, falls back to the page-cache scan, and installs slow-path
 * hits into the LRU.
 *
 * NOTE(review): bh_lru_install() is not defined anywhere in this
 * file; this #if 0 region would not link if re-enabled as-is.
 */
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
#endif

/*
 * Get a buffer_head for @block of @size bytes.  In this port there is
 * no buffer cache, so this simply delegates to __bread() and therefore
 * always performs an SD-card read, even when the caller only wanted an
 * empty buffer to overwrite.
 */
struct buffer_head *__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
#if 0
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
#else
	return __bread(bdev, block, size);
#endif
}

/*
 * Read one @size-byte block @block from the backing SD card into a
 * freshly allocated buffer_head and return it; returns NULL on
 * allocation failure.
 *
 * Fixes over the previous version:
 *  - kmalloc() results are checked (a NULL used to be dereferenced);
 *  - the data buffer is sized by @size instead of BLOCK_SIZE, so
 *    reading size/512 sectors can no longer overflow it, and b_size
 *    now matches what was actually read;
 *  - b_state/b_this_page/b_bdev/b_blocknr are initialised instead of
 *    holding kmalloc() garbage.
 */
struct buffer_head *__bread(struct block_device *bdev, sector_t block, unsigned size)
{
#if 0
	struct buffer_head *bh = __getblk(bdev, block, size);

	if ((bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
#else
/* first sector of the ext2 partition on the SD card */
#define EXT2_START_SECTOR_ON_SDCARD		411648
	int count = size / 512;		/* 512-byte sectors per block */
	struct buffer_head *bh = (struct buffer_head *)kmalloc(sizeof(struct buffer_head));

	if (!bh)
		return NULL;
	bh->b_data = (char *)kmalloc(size);
	if (!bh->b_data) {
		kfree(bh);
		return NULL;
	}
	bh->b_size = size;
	bh->b_state = 0;
	bh->b_this_page = NULL;
	bh->b_bdev = bdev;
	bh->b_blocknr = block;

	/* block -> absolute sector: partition start + block * sectors-per-block */
	sd_read_sector((unsigned int *)bh->b_data, EXT2_START_SECTOR_ON_SDCARD + (block * count), count);
#endif
	return bh;
}
























/*
 * Allocate a buffer_head (plain kmalloc in this port; upstream uses a
 * slab cache) and initialise its association list.  Returns NULL on
 * allocation failure.  @gfp_flags is accepted for interface
 * compatibility but unused here.
 */
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *bh = kmalloc(sizeof(struct buffer_head));

	if (!bh)
		return NULL;

	INIT_LIST_HEAD(&bh->b_assoc_buffers);
	return bh;
}

/*
 * Attach @bh to @page, with its data window starting @offset bytes
 * into the page.  The upstream highmem special case is omitted in
 * this port, so the page is assumed to be directly addressable.
 */
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_data = page_address(page) + offset;
	bh->b_page = page;
}

/*
 * Allocate one buffer_head per @size-byte block of @page
 * (PAGE_SIZE/size of them), link each to its offset in the page, and
 * return the head of the NULL-terminated chain.
 *
 * On allocation failure the partial chain is released; if @retry is
 * zero NULL is returned, otherwise the whole allocation is retried.
 *
 * Fix: the old no_grow path freed nothing and unconditionally jumped
 * back to try_again, leaking the partial chain on every pass and
 * looping forever regardless of @retry.
 *
 * NOTE(review): with no memory-reclaim hook in this port, a
 * retry != 0 caller can still spin if memory never frees up.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	/* build the chain back-to-front so 'head' ends at offset 0 */
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	while (head) {
		bh = head;
		head = head->b_this_page;
		kfree(bh);	/* heads come straight from kmalloc() */
	}

	/*
	 * Non-async callers (retry == 0) may fail.  Async callers must
	 * not, so they retry until buffer heads become available; no
	 * task ever sleeps holding a partially complete chain because
	 * everything was released above.
	 */
	if (!retry)
		return NULL;
	goto try_again;
}

void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	// spin_lock(&page->mapping->private_lock);
	// if (PageUptodate(page) || PageDirty(page)) {
	// 	bh = head;
	// 	do {
	// 		if (PageDirty(page))
	// 			set_buffer_dirty(bh);
	// 		if (PageUptodate(page))
	// 			set_buffer_uptodate(bh);
	// 		bh = bh->b_this_page;
	// 	} while (bh != head);
	// }
	attach_page_buffers(page, head);
	// spin_unlock(&page->mapping->private_lock);
}

/*
 * Submit I/O for @bh.  The upstream bio-based path is compiled out;
 * the active path reads synchronously from the SD card into the
 * buffer's page and always returns 0.
 *
 * NOTE(review): @rw is ignored, so WRITE requests silently perform a
 * read.  The transfer length is hard-coded to 4096/512 sectors and
 * the destination is the start of the page, not bh_offset(bh) —
 * this assumes one 4 KiB block per page; confirm against the
 * filesystem's block size.
 */
int submit_bh(int rw, struct buffer_head * bh)
{
#if 0
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);
	BUG_ON(buffer_delay(bh));
	BUG_ON(buffer_unwritten(bh));

	/*
	 * Mask in barrier bit for a write (could be either a WRITE or a
	 * WRITE_SYNC
	 */
	if (buffer_ordered(bh) && (rw & WRITE))
		rw |= WRITE_BARRIER;

	/*
	 * Only clear out a write error when rewriting
	 */
	if (test_set_buffer_req(bh) && (rw & WRITE))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
#else
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	int ret = 0;
	/* relative sector of this buffer: block number * sectors-per-block */
	int sector = bh->b_blocknr * (bh->b_size >> 9);
	void *addr = page_address(bh->b_page);

	printf("this is %s(): %d >>> sector = %d\r\n", __func__, __LINE__, sector);
	printf("this is %s(): %d >>> addr = %x\r\n", __func__, __LINE__, addr);
	printf("this is %s(): %d >>> bh->b_size = %d\r\n", __func__, __LINE__, bh->b_size);
	printf("this is %s(): %d >>> bh_offset(bh) = %d\r\n", __func__, __LINE__, bh_offset(bh));

	/* EXT2_START_SECTOR_ON_SDCARD is defined in __bread() above */
	sd_read_sector((unsigned int *)addr, EXT2_START_SECTOR_ON_SDCARD + (sector), 4096 / 512);
#endif

	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return ret;
}

/*
 * Generic "read this whole page" implementation: create the page's
 * buffer ring, map every block via @get_block, and read the data in.
 * Always returns 0.
 *
 * NOTE(review): several upstream steps are stubbed out in this port —
 * uptodate/mapped short-cuts, buffer locking, and error propagation.
 * In particular, only arr[0] is submitted at the end, which assumes a
 * single buffer (block) per page; confirm against the filesystem
 * block size vs PAGE_CACHE_SIZE.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize;
	int nr, i;
	int fully_mapped = 1;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// BUG_ON(!PageLocked(page));
	blocksize = 1 << inode->i_blkbits;
	printf("this is %s(): %d >>> inode->i_blkbits = %d\r\n", __func__, __LINE__, inode->i_blkbits);
	printf("this is %s(): %d >>> blocksize = %d\r\n", __func__, __LINE__, blocksize);

	/* this port unconditionally (re)creates the buffer ring */
	// if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	head = page_buffers(page);
	printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* iblock: first block of this page; lblock: first block past EOF */
	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
	printf("this is %s(): %d >>> iblock = %d\r\n", __func__, __LINE__, iblock);
	printf("this is %s(): %d >>> lblock = %d\r\n", __func__, __LINE__, lblock);
	bh = head;
	nr = 0;
	i = 0;

	do {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		// if (buffer_uptodate(bh))
		// 	continue;

		/* upstream skips already-mapped buffers; this port maps
		 * every buffer unconditionally */
		// if (!buffer_mapped(bh)) {
		if (1) {
			printf("this is %s(): %d\r\n", __func__, __LINE__);
			int err = 0;

			fully_mapped = 0;
			if (iblock < lblock) {
				printf("this is %s(): %d\r\n", __func__, __LINE__);
				// WARN_ON(bh->b_size != blocksize);
				err = get_block(inode, iblock, bh, 0);
				// if (err)
					// SetPageError(page);
			}
			// if (!buffer_mapped(bh)) {
			// if (1) {
			// 	printf("this is %s(): %d\r\n", __func__, __LINE__);
			// 	zero_user(page, i * blocksize, blocksize);
			// 	// if (!err)
			// 	// 	set_buffer_uptodate(bh);
			// 	continue;
			// }
			/*
			 * get_block() might have updated the buffer
			 * synchronously
			 */
			// if (buffer_uptodate(bh))
			// 	continue;
			printf("this is %s(): %d\r\n", __func__, __LINE__);
		}
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		arr[nr++] = bh;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);
	printf("this is %s(): %d >>> nr = %d\r\n", __func__, __LINE__, nr);

	// if (fully_mapped)
	// 	SetPageMappedToDisk(page);

	// if (!nr) {
	// 	/*
	// 	 * All buffers are uptodate - we can set the page uptodate
	// 	 * as well. But not if get_block() returned an error.
	// 	 */
	// 	if (!PageError(page))
	// 		SetPageUptodate(page);
	// 	unlock_page(page);
	// 	return 0;
	// }

	/* Stage two: lock the buffers */
	// for (i = 0; i < nr; i++) {
	// 	bh = arr[i];
	// 	lock_buffer(bh);
	// 	mark_buffer_async_read(bh);
	// }

	/*
	 * Stage 3: start the IO.  Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */
#if 0
	for (i = 0; i < nr; i++) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		bh = arr[i];
		// if (buffer_uptodate(bh))
		// 	end_buffer_async_read(bh, 1);
		// else
			submit_bh(READ, bh);
	}
#else
	/* single-buffer page assumed: read only the first collected buffer */
	bh = arr[0];
	submit_bh(READ, bh);
#endif
	printf("this is %s(): %d\r\n", __func__, __LINE__);
	return 0;
}

/*
 * Submit a read for @bh and report success.
 *
 * Fix: the old body had 'return 0;' indented under a commented-out
 * 'if (buffer_uptodate(bh))', which made the trailing 'return -EIO;'
 * unreachable and the control flow misleading.  submit_bh() in this
 * port reads the data synchronously from the SD card and always
 * succeeds, so 0 is returned unconditionally; the upstream
 * lock/refcount/wait scaffolding remains stubbed out.
 */
int bh_submit_read(struct buffer_head *bh)
{
	// BUG_ON(!buffer_locked(bh));
	// get_bh(bh);
	// bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	// wait_on_buffer(bh);
	return 0;
}
