#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

#include <linux/kernel.h>   /* nkfm_debug() */
#include <linux/slab.h>   /* kmalloc() */
#include <linux/fs.h>       /* everything... */
#include <linux/errno.h>    /* error codes */
#include <linux/types.h>    /* size_t */
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <asm/page.h>
#include <linux/cdev.h>

#include <linux/device.h>
#include <linux/highmem.h>

#include   <asm/processor.h>  
#include   <asm/uaccess.h>  
#include   <linux/skbuff.h>   /*sk_buff*/

#define NKFM_KERNEL
#include <nkfm.h>

MODULE_LICENSE("Dual BSD/GPL");

static struct nkfm_cfg cfg;
static struct nkfm_skb_pool *skb_pool;
unsigned char *nk_mem;
int nkfm_major;
struct nk_statistics_t nk_statistics;
int skb_nr = 100000;
module_param(skb_nr, int, 0);

static int nkfm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	size_t const size = vma->vm_end - vma->vm_start;
	unsigned long pfn;

	if (size > (PAGE_SIZE << 15)) {
		nkfm_debug("Mmap size %lu is out of range \n", size);
		return -1;
	}

	pfn = virt_to_phys((void *)nk_mem) >> PAGE_SHIFT;

	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
		return -EAGAIN;
	}

	return 0;
}

/*
 * NK_CMD_GET_Q ioctl handler: intended to claim a free rx queue for the
 * calling process and copy the claimed queue index (or -1 if none is
 * free) back to the userspace pointer passed in @args.
 *
 * The entire implementation is currently compiled out (#if 0), so this
 * is a no-op that always returns 0.
 */
static int nk_get_q(unsigned long args)
{
#if 0
	int i = -1;
	int ret = 0;
	int *q_index = (int *)args;

	if (q_index == NULL) {
		return -1;
	}

	/*lock*/
	//spin_lock(&nk_mem->lock);

	for (i = 0; i < NK_MAX_RX_Q_NR; i++) {
		if (nk_mem->rx_q[i].state == NK_RX_Q_STATE_OFF) {
			nk_mem->rx_q[i].pid = (current)->pid;
			nk_mem->rx_q[i].state = NK_RX_Q_STATE_ON;

			break;
		}
	}

	if (i >= NK_MAX_RX_Q_NR) {
		i = -1;
	}

	ret = copy_to_user(q_index, &i, sizeof(*q_index));
	if (ret != 0) {
		ret = -EFAULT;
	}

	/*unlock*/
	//spin_unlock(&nk_mem->lock);
#endif

	return 0;
}

/*
 * NK_CMD_PUT_Q ioctl handler: intended to release the rx queue owned by
 * the calling process (matched by pid).  @args is unused.
 *
 * The entire implementation is currently compiled out (#if 0), so this
 * is a no-op that always returns 0.
 */
static int nk_put_q(unsigned long args)
{
#if 0
	int i;

	/*lock*/
	spin_lock(&nk_mem->lock);

	for (i = 0; i < NK_MAX_RX_Q_NR; i++) {
		if (nk_mem->rx_q[i].pid == (current)->pid) {
			nk_mem->rx_q[i].pid = 0;
			nk_mem->rx_q[i].state = NK_RX_Q_STATE_OFF;
		
			break;
		}
	}

	/*unlock*/
	spin_unlock(&nk_mem->lock);
#endif

	return 0;
}

/*
 * NK_CMD_CLEAR ioctl handler: reset every traffic counter to zero.
 * @args is unused; always succeeds.
 */
static int nk_clear(unsigned long args)
{
	memset(&nk_statistics, 0, sizeof nk_statistics);
	return 0;
}

static int nk_show(unsigned long args)
{
	int ret = 0;

	if (args == 0) {
		return -1;
	}

	ret = copy_to_user((struct nk_statistics_t *)args, &nk_statistics, sizeof(nk_statistics));
	if (ret != 0) {
		nkfm_error("Copt to user failed, [%d]\n", ret);
		ret = -EFAULT;
	}

	return 0;
}

long nkfm_ioctl(struct file *filp, unsigned int cmd, unsigned long args)
{
	nkfm_info("Cmd is %d\n", cmd);

	switch (cmd) {
		case NK_CMD_GET_Q:
			nk_get_q(args);
			break;

		case NK_CMD_PUT_Q:
			nk_put_q(args);
			break;

		case NK_CMD_CLEAR:
			nk_clear(args);
			break;

		case NK_CMD_SHOW:
			nk_show(args);
			break;

		default:
			break;
	}

	return 0;
}

/*
 * Character-device operations: userspace maps the shared skb memory
 * via mmap and drives the module through the NK_CMD_* ioctls.  The
 * same handler serves both native and 32-bit-compat ioctl paths.
 */
struct file_operations nkfm_fops = {
	.owner = THIS_MODULE,
	.mmap = nkfm_mmap,
	.unlocked_ioctl = nkfm_ioctl,
	.compat_ioctl = nkfm_ioctl,
};

/*
 * Disabled free-list/rx-queue initialisers from an earlier nk_mem
 * based design; kept for reference, compiled out via #if 0.
 */
#if 0
static void nk_pool_init(struct nk_skb_pool_t *pool)
{
	int i;

	pool->free= 0;
	pool->rcv = -1;
	pool->last = -1;

	for (i = 0; i < NK_MAX_SKB_NODE_NR; i++) {
		pool->n[i].next = i + 1;
		pool->n[i].index = i;
	}
	pool->n[NK_MAX_SKB_NODE_NR - 1].next = -1;

	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->rcv_lock);
}
#endif

#if 0
static void nk_rx_q_init(struct nk_rx_q_t *rx_q)
{
	memset(rx_q->node, -1, sizeof(rx_q->node));
	spin_lock_init(&rx_q->lock);
}
#endif

static void init_pool_skbs(struct nkfm_skb_pool *pool)
{
	int i;
	int index = 0;
	struct nkfm_skb *nskb;

	memset(pool->skbs, 0, pool->len);

	for (i = 0; i < pool->skb_nr; i++) {
		nskb = pool->skbs + i;
		nskb->pindex = pool->index;
		nskb->index = index++;
	}
}

/*
 * Populate the module-wide configuration from the module parameters.
 * Only the requested skb count is configurable at present.
 */
static void nkfm_cfg_init(void)
{
	memset(&cfg, 0, sizeof cfg);
	cfg.skb_nr = skb_nr;
}

static void show_skb_pool(void)
{
	struct nkfm_skb_pool *pool;

	for (pool = skb_pool; pool; pool = pool->next) {
		nkfm_debug("pindex %u, order %u, skb_nr %u, len %u\n", 
				pool->index, pool->order, pool->skb_nr, pool->len);
	}
}

static int nkfm_mem_init(void)
{
	int i;
	struct page *page;
	struct page *pages;
	int order = 0;
	int nr = 0;
	int index = 0;
	struct nkfm_skb_pool *pool = NULL;
	struct nkfm_skb_pool *last = NULL;
	
	while (cfg.skb_nr * NKFM_SKB_SIZE > PAGE_SIZE * (1 << order)) {
		order++;
	}

	order = (order > MAX_ORDER - 1) ? MAX_ORDER - 1 : order;
	nkfm_debug("Order is %u\n", order);

	while (nr < cfg.skb_nr) {
		pages = alloc_pages(GFP_KERNEL, order);
		if (!pages) {
			if (--order) {
				continue;
			} else {
				nkfm_error("Get pages failed, total %u, alloc %u\n", 
						cfg.skb_nr, nr);
				return -1;
			}
		}
		
		page = pages;
		for (i = 0; i < (1 << order); i++) {
			SetPageReserved(page++);
		}

		pool = (struct nkfm_skb_pool *)kmalloc(GFP_KERNEL, sizeof(*pool));
		if (!pool) {
			nkfm_error("Malloc skb pool failed\n");
			__free_pages(pages, order);
			return -1;
		}

		memset(pool, 0, sizeof(*pool));

		pool->index = index++;
		pool->order = order;
		pool->len = PAGE_SIZE * (1 << pool->order);
		pool->skb_nr = pool->len / NKFM_SKB_SIZE;
		pool->skbs = (struct nkfm_skb *)page_address(pages);
		
		init_pool_skbs(pool);

		if (last) {
			last->next = pool;
		} else {
			skb_pool = pool;
		}
		last = pool;

		nr += pool->skb_nr;
	}

	show_skb_pool();

	return 0;
}


/*
 * Module init: build the configuration, allocate the skb pools and
 * register the character device (dynamic major).
 *
 * Returns 0 on success or a negative errno.
 */
static int nkfm_init(void)
{
	int ret;

	nkfm_cfg_init();

	/* Bug fix: the allocation result was previously ignored. */
	ret = nkfm_mem_init();
	if (ret < 0) {
		return ret;
	}

#define DEVICE_NAME		"nkfm"
	nkfm_major = register_chrdev(0, DEVICE_NAME, &nkfm_fops); 
	if (nkfm_major < 0) { 
		return nkfm_major;	/* propagate the errno, not -1 */
	}

	return 0;
}

/*
 * Module exit: unregister the character device.
 *
 * NOTE(review): the pages and pool structs allocated by
 * nkfm_mem_init() are never freed here — the memory is leaked (or
 * relies on the module never unloading); verify intent.
 */
static void nkfm_cleanup(void)
{
	unregister_chrdev(nkfm_major, DEVICE_NAME); 
}

module_init(nkfm_init);
module_exit(nkfm_cleanup);


/*============================================================================*/
/* Disabled rx-queue round-robin selector from the old design (#if 0). */
#if 0
static struct nk_rx_q_t* nk_get_rx_q(struct nk_sk_buff_t *nk_skb)
{
#if 0
	struct nk_rx_q_t *rx_q = NULL;
	static int rx_q_index = 0;
	int i;

	for (i = 0; i < NK_MAX_RX_Q_NR; i++) {
		rx_q = &nk_mem->rx_q[rx_q_index];

		if (++rx_q_index >= NK_MAX_RX_Q_NR) {
			rx_q_index = 0;
		}

		if (rx_q->state == NK_RX_Q_STATE_ON) {
			nkfm_debug("get_rx_q of task [%d]\n", rx_q->pid);
			return rx_q; 
		}
	}
#endif

	return NULL;
}
#endif

/* Disabled transmit stub from the old design (#if 0). */
#if 0
static void nk_xmit_skb(struct nk_sk_buff_t *nk_skb)
{
	nkfm_debug("Xmit skb, index %d\n", nk_skb->index);
	nk_statistics.snd++;
	nk_free_skb(nk_skb);
}
#endif

/* Disabled batched-send path from the old design (#if 0). */
#if 0
static int nk_check_and_send_ex(struct nk_skb_pool_t *pool)
{
	int i = 0;
	int rcv = pool->rcv;
	int next = -1;
	int pend = -1;

	if (rcv == -1 || pool->n[rcv].state != NK_SKB_STATE_DONE) {
		return 0;
	}

	/*lock*/
	spin_lock(&pool->rcv_lock);

	/*Check again*/
	rcv = pool->rcv;
	if (rcv == -1 || pool->n[rcv].state != NK_SKB_STATE_DONE) {
		/*unlock*/
		spin_unlock(&pool->rcv_lock);

		return 0;
	}

	for (i = 0; i < NK_MAX_SEND_NR; i++) {
		next = pool->n[rcv].next;

		if (next == -1 || pool->n[next].state != NK_SKB_STATE_DONE) {
			i++;
			break;
		}

		rcv = next;
	}

	pend = pool->rcv;
	pool->rcv = pool->n[rcv].next;
	pool->n[rcv].next = -1;

	if (pool->rcv == -1) {
		pool->last = -1;
	}

	/*unlock*/
	spin_unlock(&pool->rcv_lock);

	nkfm_debug("Will xmit skb %d, num [%d]\n", pend, i);

	for (rcv = pend; rcv != -1; rcv = next) {
		next = pool->n[rcv].next;
		nk_xmit_skb(&pool->n[rcv]);
	}

	return 0;
}
#endif

/* Disabled all-pools send sweep from the old design (#if 0). */
#if 0
static void nk_check_and_send(void)
{
#if 0
	int i;
	struct nk_skb_pool_t *pool = NULL;

	for (i = 0; i < NK_SKB_POOL_NR; i++) {
		pool = &nk_mem->pool[i];
		nk_check_and_send_ex(pool);
	}
#endif
}
#endif

/* Disabled rx-queue dispatch from the old design (#if 0). */
#if 0
static int nk_dispatch(struct nk_rx_q_t *rx_q, struct nk_sk_buff_t *nk_skb)
{
	int rcv;

	/*Shall we check send queue first ?*/
	nk_check_and_send();

	/*lock*/
	spin_lock(&rx_q->lock);

	rcv = rx_q->rcv;
	nkfm_debug("in dispatch, rcv %d\n", rcv);

	if (rx_q->node[rcv] == -1) {
		if (++(rx_q->rcv) >= NK_MAX_RX_NODE_NR) {
			rx_q->rcv = 0;
		}
	} else {
		rcv = -1;
	}

	/*unlock*/
	spin_unlock(&rx_q->lock);

	if (rcv == -1) {
		printk("Dispatch fail, index %d\n", nk_skb->index);
		nk_free_skb(nk_skb);
		return -1;
	}

	rx_q->node[rcv] = nk_skb->index;

	return 0;
}
#endif

/*
 * Exported allocator for shared skbs of up to @bufsz data bytes.
 *
 * The free-list implementation is compiled out (#if 0), so the
 * function currently always returns NULL, which callers must treat as
 * allocation failure.  @netdev is unused in the visible code.
 */
struct nk_sk_buff_t* nk_alloc_skb(struct net_device *netdev, unsigned bufsz)
{
#if 0
	int free;
	struct nk_sk_buff_t *nk_skb = NULL;
	struct nk_skb_pool_t *pool = &nk_mem->pool[NK_SKB_POOL_KERNEL];
	static unsigned long long num = 0;

	if (bufsz > NK_SKB_DATA_LEN) {
		nkfm_debug("bufsz is too big to alloc, %u\n", bufsz);
		return NULL;
	}

	/*Lock*/
	spin_lock(&pool->lock);
	free = pool->free;
	if (free == -1) {
		/*unlock*/
		spin_unlock(&pool->lock);
		printk("%s", "No more nk_skb to alloc\n");
		return NULL;
	}
	pool->free = pool->n[free].next;

	/*unlock*/
	spin_unlock(&pool->lock);

	nk_skb = &pool->n[free];

	++num;
	nkfm_debug("Alloc %llu nk_skb %p\n", num, nk_skb);

	return nk_skb;
#endif
	return NULL;
}
EXPORT_SYMBOL(nk_alloc_skb);

/*
 * Exported release function for a shared skb.
 *
 * The free-list implementation is compiled out (#if 0), so this is
 * currently a no-op (safe to call with NULL).
 */
void nk_free_skb(struct nk_sk_buff_t *nk_skb)
{
#if 0
	struct nk_skb_pool_t *pool = &nk_mem->pool[NK_SKB_POOL_KERNEL];

	if (nk_skb == NULL) {
		return;
	}

	nkfm_debug("Free nk_skb %p\n", nk_skb);

	/*lock*/
	spin_lock(&pool->lock);

	nk_skb->next = pool->free;
	pool->free = nk_skb->index;

	/*unlock*/
	spin_unlock(&pool->lock);
#endif
}
EXPORT_SYMBOL(nk_free_skb);

/*
 * Exported receive hook: intended to queue a received shared skb onto
 * the pool's pending list and dispatch it to an active rx queue.
 *
 * The implementation is compiled out (#if 0), so this is currently a
 * no-op that always reports success.
 */
int nk_receive_skb(struct nk_sk_buff_t *nk_skb)
{
#if 0
	struct nk_skb_pool_t *pool = &nk_mem->pool[NK_SKB_POOL_KERNEL];
	struct nk_rx_q_t *rx_q = NULL;

	nkfm_debug("Recv nk_skb [%u] :\n", nk_skb->index);
	nk_statistics.rcv++;

	nk_skb->state = NK_SKB_STATE_PENDING;
	nk_skb->next = -1;

	/*lock*/
	spin_lock(&pool->rcv_lock);

	if (pool->last == -1) {
		pool->rcv = nk_skb->index;
	} else {
		pool->n[pool->last].next = nk_skb->index;
	}
	pool->last = nk_skb->index;

	/*unlock*/
	spin_unlock(&pool->rcv_lock);

	rx_q = nk_get_rx_q(nk_skb);
	if (rx_q == NULL) {
		nkfm_debug("%s", "Get rx_q failed\n");
		return -1;
	}

	nk_dispatch(rx_q, nk_skb);

#endif
	return 0;
}
EXPORT_SYMBOL(nk_receive_skb);

