#include "block.h"
#include "kernel/time.h"

#ifdef TEST_DBG_WIN32
/* win32 unit-test shims: map the kernel allocator API onto libc malloc.
 * NOTE(review): kmem_cache_alloc ignores its argument and always sizes
 * the allocation for dev_blk_req_node_t -- adequate only while this file
 * uses a single cache. */
#define kmalloc	malloc
#undef kfree
#define kfree free
#define kmem_cache_alloc(a)	malloc_zero(sizeof(dev_blk_req_node_t))
unsigned int volatile jiffies = 1000;	/* fake tick counter for timestamp tests */
/* malloc() followed by memset(0); returns NULL when malloc fails */
void* malloc_zero(unsigned int sz) {
	void* ret = malloc(sz);
	if (ret && sz) memset(ret, 0, sz);
	return ret;
}
#endif

/* slab cache backing dev_blk_req_node_t allocations; created by dev_init() */
kmem_cache_t* dev_blk_req_cache = NULL;

/*
 * Allocate and initialise a block request list.
 *
 * maxblks    - budget of blocks this list may accept (stored in 'remaining')
 * type       - DEV_BLK_REQLST_TYPE_FIFO or DEV_BLK_REQLST_TYPE_ELEVATOR
 * block_size - size of one block in bytes; must be non-zero
 *
 * Returns the new list, or NULL on a bad parameter / out of memory.
 * NOTE(review): this function selects the type with equality tests while
 * dev_reqlist_release tests 'flags' with '&' -- confirm the type
 * constants make those two forms equivalent.
 */
dev_blk_reqlist_t* dev_reqlist_alloc(uint maxblks, uint type, uint block_size)
{
	dev_blk_reqlist_t* list;

	if (0 == block_size)
		return NULL;

	list = kmalloc(sizeof(dev_blk_reqlist_t));
	if (!list)
		return NULL;

	list->remaining = maxblks;

	switch (type)
	{
	case DEV_BLK_REQLST_TYPE_FIFO:
		listnode_init(list->h.fifo);
		break;
	case DEV_BLK_REQLST_TYPE_ELEVATOR:
		list->h.elevator = NULL;
		break;
	default:
		/* unknown queueing discipline */
		kfree(list);
		return NULL;
	}

	list->ts_fstreq = 0;
	list->ts_lstreq = 0;
	list->block_size = block_size;
	list->flags = type;
	return list;
}

/*
 * Free a request list if it holds no pending requests.
 *
 * Returns true when the list was released (or was already NULL),
 * false when requests remain queued or the type flag is unrecognised.
 */
bool dev_reqlist_release(dev_blk_reqlist_t* reqlist)
{
	bool busy;

	if (!reqlist)
		return true;

	if (reqlist->flags & DEV_BLK_REQLST_TYPE_FIFO)
		busy = !listnode_isempty(reqlist->h.fifo);
	else if (reqlist->flags & DEV_BLK_REQLST_TYPE_ELEVATOR)
		busy = (reqlist->h.elevator != NULL);
	else
		return false;	/* corrupt/unknown type: refuse to free */

	if (busy)
		return false;

	kfree(reqlist);
	return true;
}

/*
 * AVL ordering for request nodes, keyed by block range.
 * Returns -1 when 'fst' lies wholly before 'snd', 1 when wholly after,
 * and 0 when the two ranges overlap (treated as equal keys).
 */
static int dev_blknode_avl_compare(avl_node_t *fst, avl_node_t *snd)
{
	dev_blk_req_node_t *a = AVLNODE_ENTRY(dev_blk_req_node_t, u.avlnode, fst);
	dev_blk_req_node_t *b = AVLNODE_ENTRY(dev_blk_req_node_t, u.avlnode, snd);

	if (a->data.blkid + a->data.blkcnt <= b->data.blkid)
		return -1;
	if (b->data.blkid + b->data.blkcnt <= a->data.blkid)
		return 1;
	return 0;
}

/*
 * Touch every page of a read request's destination buffer.
 *
 * Skips requests backed by a physical page list and skips writes; for a
 * virtually-contiguous READ buffer it stores a zero into the first byte
 * of each covered page.  NOTE(review): presumably this faults-in or
 * validates the mapping before the device fills the buffer -- confirm
 * intent, since it clobbers one byte per page.
 * NOTE(review): 'blkcnt * block_size' is uint arithmetic and could
 * overflow for very large requests -- verify upstream size limits.
 */
static void verify_buffer(dev_blk_reqlist_t* rlist, dev_blk_req_node_t* req)
{
	uint i, pages;
	if (req->data.use_page_list)
		return;
	if (!(req->flags & DEV_BLK_NODE_FLAG_ACCESS_READ))
		return;

	/* number of pages spanned by the request, rounded up */
	pages = (req->data.blkcnt * rlist->block_size
		+ PAGE_SZ - 1) >> PAGE_SHIFT;
	for (i = 0; i < pages; ++i)
		req->data.buf.ptr[i * PAGE_SZ] = 0;
}

/* Record request activity time: latch the first-request stamp once,
 * always refresh the last-request stamp. */
static void dev_block_update_reqlist_timestamp(dev_blk_reqlist_t* rlist)
{
	unsigned int now = jiffies;

	if (0 == rlist->ts_fstreq)
		rlist->ts_fstreq = now;
	rlist->ts_lstreq = now;
}

/*
 * Queue a request node onto a request list.
 *
 * FIFO lists: the node is appended unconditionally.
 * Elevator lists: the node may be deferred (DEV_BLK_REQ_ERR_ELEVATOR_NEXT_ROUND)
 * when the list's block budget is exhausted, when the list is draining,
 * when a running sweep has already passed the request's position, or when
 * its block range overlaps an already-queued request (avl_insert failure).
 *
 * Returns 0 on success, DEV_BLK_REQ_ERR_ELEVATOR_NEXT_ROUND on deferral.
 * NOTE(review): a list whose flags carry neither type bit falls through
 * both branches, updates the timestamps and returns 0 without queueing
 * anything -- confirm that is intentional.
 */
int dev_block_submit_request(dev_blk_reqlist_t* rlist, dev_blk_req_node_t* req)
{
	if (rlist->flags & DEV_BLK_REQLST_TYPE_FIFO)
		listnode_add(rlist->h.fifo, req->u.ownerlist);

	// elevator
	else if (rlist->flags & DEV_BLK_REQLST_TYPE_ELEVATOR)
	{
		// budget exceeded: only allowed when the tree already holds work;
		// an empty tree must take the oversized request next round
		if (rlist->remaining < req->data.blkcnt && !rlist->h.elevator)
			return DEV_BLK_REQ_ERR_ELEVATOR_NEXT_ROUND;
		if (rlist->flags & DEV_BLK_REQLST_DRAIN)
			return DEV_BLK_REQ_ERR_ELEVATOR_NEXT_ROUND;

		if (rlist->flags & DEV_BLK_REQLST_RUNNING)
		{
			// get the first node
			dev_blk_req_node_t* fst_req;
			avl_node_t *first = avl_first(rlist->h.elevator);
			if (NULL == first) return DEV_BLK_REQ_ERR_ELEVATOR_NEXT_ROUND;

			// see if the current request ahead of the first one
			// if so, we cannot handle it. leave to next round
			fst_req = AVLNODE_ENTRY(dev_blk_req_node_t, u.avlnode, first);
			if (req->data.blkid < fst_req->data.blkid + fst_req->data.blkcnt)
				return DEV_BLK_REQ_ERR_ELEVATOR_NEXT_ROUND;
		}

		// submit the request
		verify_buffer(rlist, req);
		// insert fails (non-zero) when the block range collides with an
		// existing node -- defer rather than merge
		if (avl_insert(&(rlist->h.elevator), &(req->u.avlnode), dev_blknode_avl_compare))
			return DEV_BLK_REQ_ERR_ELEVATOR_NEXT_ROUND;
		// charge the budget, saturating at zero
		if (rlist->remaining > req->data.blkcnt)
			rlist->remaining -= req->data.blkcnt;
		else rlist->remaining = 0;
	}
	dev_block_update_reqlist_timestamp(rlist);
	return 0;
}

/*
 * Build a new block request node.
 *
 * flags  - access type (DEV_BLK_ACCESS_READ / DEV_BLK_ACCESS_WRITE) plus
 *          optional DEV_BLK_BUF_PHY_PAGE_LIST when 'buffer' is a page list
 * blksz  - device block size in bytes; must be in (0, PAGE_SZ]
 * buffer - page-aligned destination/source buffer (or page list)
 * blkid  - starting block number
 * blks   - block count; the request must cover whole pages
 * req    - out: the newly allocated node (caller owns it)
 *
 * Returns 0 on success or a DEV_BLK_REQ_ERR_* code.
 */
int dev_block_new_request(uint flags, uint blksz,
		void* buffer, unsigned long long blkid, uint blks, dev_blk_req_node_t** req)
{
	dev_blk_req_node_t* node;

	if (!buffer || !blks || !req)
		return DEV_BLK_REQ_ERR_BAD_PARAMETER;

	// a zero block size would divide by zero in the alignment check below
	if (!blksz)
		return DEV_BLK_REQ_ERR_BAD_PARAMETER;

	if (blksz > PAGE_SZ)
		return DEV_BLK_REQ_BLOCK_TOO_LARGE;

	if (!(flags & (DEV_BLK_ACCESS_READ | DEV_BLK_ACCESS_WRITE)))
		return DEV_BLK_REQ_ERR_UNKNOWN_ACCESS_TYPE;

	// buffer must be page aligned and the request must cover whole pages.
	// cast through size_t so 64-bit pointers are not truncated (a plain
	// (uint) cast dropped the high bits and could pass misaligned buffers)
	if ((((size_t)buffer) & (PAGE_SZ - 1)) || (blks % (PAGE_SZ / blksz)))
		return DEV_BLK_REQ_ERR_BAD_ALIGNMENT;

	// create the request node (cache ctor zeroes it, so unset fields are 0)
	node = kmem_cache_alloc(dev_blk_req_cache);
	if (NULL == node) return DEV_BLK_REQ_ERR_NO_MEMORY;

	node->data.blkid = blkid;
	node->data.blkcnt = blks;
	node->data.buf.ptr = buffer;

	if (flags & DEV_BLK_BUF_PHY_PAGE_LIST)
		node->data.use_page_list = 1;

	// set access type (READ wins when both bits are set)
	if (flags & DEV_BLK_ACCESS_READ)
		node->flags |= DEV_BLK_NODE_FLAG_ACCESS_READ;
	else node->flags |= DEV_BLK_NODE_FLAG_ACCESS_WRITE;

	*req = node;
	return 0;
}

/* Slab constructor: hand out zero-initialised request nodes. */
static void dev_blk_data_cache_ctor(kmem_cache_t* mcache, void* p, size_t sz)
{
	(void)mcache;	/* cache handle unused */
	memset(p, 0, sz);
}

/* Slab destructor: nothing to tear down yet (nodes own no resources). */
static void dev_blk_data_cache_dtor(kmem_cache_t* mcache, void* p, size_t sz)
{
	(void)mcache;
	(void)p;
	(void)sz;
	// todo:
}

/*
 * One-time module initialisation: create the slab cache that backs
 * request node allocations.
 *
 * Returns 0 on success, 1 when the cache could not be created.
 */
int dev_init(void)
{
	dev_blk_req_cache = easy_kmem_cache_create("blk_req_cache",
		sizeof(dev_blk_req_node_t),
		dev_blk_data_cache_ctor,
		dev_blk_data_cache_dtor);

	return (dev_blk_req_cache != NULL) ? 0 : 1;
}

// test for win32
#ifdef TEST_DBG_WIN32
void block_test(void)
{
	// sizeof yields size_t; passing it to %u is undefined behavior on
	// LP64 targets -- cast the argument to match the format specifier
	printf("sizeof dev_blk_data_t = %u\n", (unsigned int)sizeof(dev_blk_data_t));
}
#endif

/* EOF */
