#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#include "fh_dma_miscdev.h"

#ifdef CONFIG_CHANNEL_ALLOC_MEM_CLASSICS
#define NR_DESCS_PER_CHANNEL CONFIG_CHANNEL_ALLOC_DESC_NUM
#else
#define NR_DESCS_PER_CHANNEL	64
#endif
#define NR_DESCS_PER_CHANNEL_CROP      1024

#define AUTO_FIND_CHANNEL	0xff
/* 4095 xfer * 32-bit * n desc */
#define MEMCPY_UNIT     (4095 * 4 * NR_DESCS_PER_CHANNEL)
#define MEMCPY_FRAM_UNIT    (1024 * 4 * NR_DESCS_PER_CHANNEL_CROP)

// #define FH_DMA_DEBUG


#ifdef FH_DMA_DEBUG
#define PRINT_DMA_DBG(fmt, args...)     \
do                              \
{                               \
	printk("FH_DMA_DEBUG: ");   \
	printk(fmt, ## args);       \
}                               \
while(0)
#else
#define PRINT_DMA_DBG(fmt, args...)  do { } while (0)
#endif


/* Completion callback run by the dmaengine core when a transfer finishes. */
static void fh_dma_callback(void *data)
{
	struct completion *done = data;

	PRINT_DMA_DBG("dma transfer done, end=%lu\n", jiffies);
	complete(done);
}

/*
 * Run one blocking memcpy on @channel: prepare a descriptor, submit it,
 * start the engine and wait (up to DMA_MEMCPY_TIMEOUT ms) for completion.
 *
 * @channel:    dmaengine channel obtained via dma_request_channel()
 * @src_offset: source bus/physical address
 * @dst_offset: destination bus/physical address
 * @size:       transfer length in bytes
 *
 * Returns 0 on success, -ENODEV on prepare/submit failure, on timeout,
 * or when the final cookie status is not complete.
 *
 * The sequence below is order-sensitive: the completion is initialised
 * and attached to the descriptor *before* tx_submit(), so the IRQ
 * callback can never run against an uninitialised completion.
 */
static int kick_off_dma(struct dma_chan *channel,
unsigned int src_offset, unsigned int dst_offset, unsigned int size)
{
	int ret;
	struct completion cmp;
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct dma_device *dma_dev = channel->device;
	dma_cookie_t cookie;
	unsigned long timeout;
	unsigned long flag;

	flag = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	PRINT_DMA_DBG("try to copy 0x%x bytes: 0x%x --> 0x%x\n",
					size, src_offset, dst_offset);

	/* Build the hardware descriptor chain for this copy. */
	dma_tx_desc = dma_dev->device_prep_dma_memcpy(channel,
					dst_offset, src_offset, size, flag);

	PRINT_DMA_DBG("device_prep_dma_memcpy end\n");

	if (!dma_tx_desc) {
		pr_err("ERROR: %s, device_prep_dma_memcpy fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	/* cmp is signalled from fh_dma_callback() in interrupt context. */
	init_completion(&cmp);
	dma_tx_desc->callback = fh_dma_callback;
	dma_tx_desc->callback_param = &cmp;
	PRINT_DMA_DBG("tx_submit start\n");
	cookie = dma_tx_desc->tx_submit(dma_tx_desc);
	PRINT_DMA_DBG("tx_submit end\n");
	if (dma_submit_error(cookie)) {
		pr_err("ERROR: %s, tx_submit fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}
	PRINT_DMA_DBG("dma_async_issue_pending start\n");
	/* Actually start the queued transfer. */
	dma_async_issue_pending(channel);
	PRINT_DMA_DBG("dma_async_issue_pending end, %d\n",
					DMA_MEMCPY_TIMEOUT);

	timeout = wait_for_completion_timeout(&cmp,
					msecs_to_jiffies(DMA_MEMCPY_TIMEOUT));

	PRINT_DMA_DBG("wait_for_completion_timeout end, timeout: %lu\n", timeout);

	if (!timeout) {
		pr_err("ERROR: %s, dma transfer fail, timeout\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	/* Cross-check the engine's view of the cookie's final state. */
	ret = dma_async_is_tx_complete(channel, cookie, NULL, NULL);

	if (ret) {
		pr_err("ERROR: %s, dma transfer fail, incorrect status: %d\n", __func__, ret);
		ret = -ENODEV;
		return ret;
	}

	return 0;
}

/*static int kick_off_dma_crop(struct dma_chan *channel,
unsigned int src_offset, unsigned int dst_offset,
struct dma_ipa_config *dma_ipa_config, unsigned int size)
{
	int ret;
	struct completion cmp;
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct dma_device *dma_dev = channel->device;
	dma_cookie_t cookie;
	unsigned long timeout;
	unsigned long flag;

	flag = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	PRINT_DMA_DBG("try to copy 0x%x bytes: 0x%x --> 0x%x\n",
		size, src_offset, dst_offset);

	//dma_tx_desc = dma_dev->device_prep_dma_memcpy_crop(
	//	channel, dst_offset, src_offset, size, dma_crop_config, flag);
	if (!dma_tx_desc) {
		pr_err("ERROR: %s, device_prep_dma_memcpy fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	init_completion(&cmp);
	dma_tx_desc->callback = fh_dma_callback;
	dma_tx_desc->callback_param = &cmp;

	cookie = dma_tx_desc->tx_submit(dma_tx_desc);
	if (dma_submit_error(cookie)) {
		pr_err("ERROR: %s, tx_submit fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	dma_async_issue_pending(channel);

	timeout = wait_for_completion_timeout(&cmp,
		msecs_to_jiffies(DMA_MEMCPY_TIMEOUT));
	if (!timeout) {
		pr_err("ERROR: %s, dma transfer fail, timeout\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	ret = dma_async_is_tx_complete(channel, cookie, NULL, NULL);
	if (ret) {
		pr_err("ERROR: %s, dma transfer fail, incorrect status: %d\n",
			__func__, ret);
		ret = -ENODEV;
		return ret;
	}

	return 0;
}*/


/*
 * Validate the parameters of a scatter-to-gather copy request.
 * Returns 0 when valid, or a negative code identifying the failed check
 * (-2 zero field, -3 bad width, -4 step not a multiple of width,
 *  -5 size not a multiple of the derived stride).
 */
int s2g_para_check(unsigned int src_offset, unsigned int dst_offset,
struct dma_sca_gat *dma_s2g, unsigned int size)
{
	unsigned int width = dma_s2g->width;
	unsigned int step = dma_s2g->src_step;
	unsigned int stride;

	if (size == 0 || width == 0 || step == 0) {
		pr_err("[size %x] or [dma_s2g->width %x] or [dma_s2g->src_step %x] should not 0..\n",
		size, width, step);
		return -2;
	}

	/* transfer width is restricted to 1, 2 or 4 bytes */
	if (width == 3 || width > 4) {
		pr_err("[dma_s2g->width %x] only for [1.2.4]\n",
		width);
		return -3;
	}

	/* step must be a whole number of width-sized units */
	if (step % width) {
		pr_err("[dma_s2g->src_step %x] should multi [dma_s2g->width %x]\n",
		step, width);
		return -4;
	}

	stride = (step / width) + 1;
	if (size % stride) {
		pr_err("[size %x] should multi %08x\n",
		size, stride);
		return -5;
	}

	return 0;
}

/*
 * Run one blocking scatter-to-gather transfer on @channel.
 *
 * Validates the request with s2g_para_check(), then follows the same
 * prepare / submit / issue / wait sequence as kick_off_dma().
 *
 * Returns 0 on success, -1 when the controller lacks sca-to-gat support
 * or the parameters are invalid, -ENODEV on any dmaengine failure.
 */
static int kick_off_dma_s2g(struct dma_chan *channel,
unsigned int src_offset, unsigned int dst_offset,
struct dma_sca_gat *dma_s2g, unsigned int size)
{
	int ret;
	struct completion cmp;
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct dma_device *dma_dev = channel->device;
	dma_cookie_t cookie;
	unsigned long timeout;
	unsigned long flag;

	flag = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	PRINT_DMA_DBG("try to copy 0x%x bytes: 0x%x --> 0x%x\n",
		size, src_offset, dst_offset);


	/* vendor-specific prep hook; absent on plain dw_dma controllers */
	if (!dma_dev->device_prep_dma_sca_to_gat) {
		pr_err("%s not support sca to gat\n", __func__);
		return -1;
	}

	ret = s2g_para_check(src_offset, dst_offset,
	dma_s2g, size);
	if (ret) {
		pr_err("%s para check failed\n", __func__);
		return -1;
	}

	dma_tx_desc = dma_dev->device_prep_dma_sca_to_gat(
	channel, dst_offset, src_offset, size, dma_s2g, flag);

	if (!dma_tx_desc) {
		pr_err("ERROR: %s, device_prep_dma_memcpy fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	/* completion must be wired up before tx_submit() */
	init_completion(&cmp);
	dma_tx_desc->callback = fh_dma_callback;
	dma_tx_desc->callback_param = &cmp;

	cookie = dma_tx_desc->tx_submit(dma_tx_desc);
	if (dma_submit_error(cookie)) {
		pr_err("ERROR: %s, tx_submit fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	dma_async_issue_pending(channel);

	timeout = wait_for_completion_timeout(&cmp,
		msecs_to_jiffies(DMA_MEMCPY_TIMEOUT));
	if (!timeout) {
		pr_err("ERROR: %s, dma transfer fail, timeout\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	ret = dma_async_is_tx_complete(channel, cookie, NULL, NULL);
	if (ret) {
		pr_err("ERROR: %s, dma transfer fail, incorrect status: %d\n",
			__func__, ret);
		ret = -ENODEV;
		return ret;
	}

	return 0;
}



/*
 * Run one blocking "line format to linear" copy on @channel, using the
 * vendor prep hook device_prep_dma_cpy_from_line_format().
 *
 * @p_lg_cpy describes the line/gap layout of the source.
 * Returns 0 on success, -1 when the hook is missing, -ENODEV otherwise.
 */
static int kick_off_dma_cpy_from_line(struct dma_chan *channel,
unsigned int src_offset, unsigned int dst_offset,
struct dma_line_gap_cpy *p_lg_cpy, unsigned int size)
{

	int ret;
	struct completion cmp;
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct dma_device *dma_dev = channel->device;
	dma_cookie_t cookie;
	unsigned long timeout;
	unsigned long flag;

	flag = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	PRINT_DMA_DBG("try to copy 0x%x bytes: 0x%x --> 0x%x\n",
		size, src_offset, dst_offset);


	if (!dma_dev->device_prep_dma_cpy_from_line_format) {
		pr_err("%s not support line cpy with gap\n", __func__);
		return -1;
	}

	dma_tx_desc = dma_dev->device_prep_dma_cpy_from_line_format(
	channel, dst_offset, src_offset, size, p_lg_cpy, flag);

	if (!dma_tx_desc) {
		pr_err("ERROR: %s, device_prep_dma_memcpy fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	/* completion must be wired up before tx_submit() */
	init_completion(&cmp);
	dma_tx_desc->callback = fh_dma_callback;
	dma_tx_desc->callback_param = &cmp;

	cookie = dma_tx_desc->tx_submit(dma_tx_desc);
	if (dma_submit_error(cookie)) {
		pr_err("ERROR: %s, tx_submit fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	dma_async_issue_pending(channel);

	timeout = wait_for_completion_timeout(&cmp,
		msecs_to_jiffies(DMA_MEMCPY_TIMEOUT));
	if (!timeout) {
		pr_err("ERROR: %s, dma transfer fail, timeout\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	ret = dma_async_is_tx_complete(channel, cookie, NULL, NULL);
	if (ret) {
		pr_err("ERROR: %s, dma transfer fail, incorrect status: %d\n",
			__func__, ret);
		ret = -ENODEV;
		return ret;
	}

	return 0;
}





/*
 * Run one blocking "linear to line format" copy on @channel, using the
 * vendor prep hook device_prep_dma_cpy_to_line_format().
 *
 * @p_lg_cpy describes the line/gap layout of the destination.
 * Returns 0 on success, -1 when the hook is missing, -ENODEV otherwise.
 */
static int kick_off_dma_cpy_to_line(struct dma_chan *channel,
unsigned int src_offset, unsigned int dst_offset,
struct dma_line_gap_cpy *p_lg_cpy, unsigned int size)
{

	int ret;
	struct completion cmp;
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct dma_device *dma_dev = channel->device;
	dma_cookie_t cookie;
	unsigned long timeout;
	unsigned long flag;

	flag = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	PRINT_DMA_DBG("try to copy 0x%x bytes: 0x%x --> 0x%x\n",
		size, src_offset, dst_offset);


	if (!dma_dev->device_prep_dma_cpy_to_line_format) {
		pr_err("%s not support line cpy with gap\n", __func__);
		return -1;
	}

	dma_tx_desc = dma_dev->device_prep_dma_cpy_to_line_format(
	channel, dst_offset, src_offset, size, p_lg_cpy, flag);

	if (!dma_tx_desc) {
		pr_err("ERROR: %s, device_prep_dma_memcpy fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	/* completion must be wired up before tx_submit() */
	init_completion(&cmp);
	dma_tx_desc->callback = fh_dma_callback;
	dma_tx_desc->callback_param = &cmp;

	cookie = dma_tx_desc->tx_submit(dma_tx_desc);
	if (dma_submit_error(cookie)) {
		pr_err("ERROR: %s, tx_submit fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	dma_async_issue_pending(channel);

	timeout = wait_for_completion_timeout(&cmp,
		msecs_to_jiffies(DMA_MEMCPY_TIMEOUT));
	if (!timeout) {
		pr_err("ERROR: %s, dma transfer fail, timeout\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	ret = dma_async_is_tx_complete(channel, cookie, NULL, NULL);
	if (ret) {
		pr_err("ERROR: %s, dma transfer fail, incorrect status: %d\n",
			__func__, ret);
		ret = -ENODEV;
		return ret;
	}

	return 0;
}



/*
 * Run one blocking transfer built from a caller-supplied descriptor array
 * (@p_usr_desc, @desc_len entries), using the vendor prep hook
 * device_prep_dma_cpy_with_usr_desc().
 *
 * Returns 0 on success, -1 when the hook is missing, -ENODEV otherwise.
 */
static int kick_off_dma_cpy_usr_def(struct dma_chan *channel,
struct dma_cpy_usr_def_desc *p_usr_desc,
size_t desc_len)
{

	int ret;
	struct completion cmp;
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct dma_device *dma_dev = channel->device;
	dma_cookie_t cookie;
	unsigned long timeout;
	unsigned long flag;

	flag = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	if (!dma_dev->device_prep_dma_cpy_with_usr_desc) {
		pr_err("%s not support usr desc cpy \n", __func__);
		return -1;
	}

	dma_tx_desc = dma_dev->device_prep_dma_cpy_with_usr_desc(
	channel, p_usr_desc, desc_len, flag);

	if (!dma_tx_desc) {
		pr_err("ERROR: %s, device_prep_dma_memcpy fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	/* completion must be wired up before tx_submit() */
	init_completion(&cmp);
	dma_tx_desc->callback = fh_dma_callback;
	dma_tx_desc->callback_param = &cmp;

	cookie = dma_tx_desc->tx_submit(dma_tx_desc);
	if (dma_submit_error(cookie)) {
		pr_err("ERROR: %s, tx_submit fail\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	dma_async_issue_pending(channel);

	timeout = wait_for_completion_timeout(&cmp,
		msecs_to_jiffies(DMA_MEMCPY_TIMEOUT));
	if (!timeout) {
		pr_err("ERROR: %s, dma transfer fail, timeout\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	ret = dma_async_is_tx_complete(channel, cookie, NULL, NULL);
	if (ret) {
		pr_err("ERROR: %s, dma transfer fail, incorrect status: %d\n",
			__func__, ret);
		ret = -ENODEV;
		return ret;
	}

	return 0;
}


/*
 * Split a plain memcpy into MEMCPY_UNIT-sized chunks and run them
 * sequentially, finishing with the (possibly empty) remainder.
 *
 * Returns 0 on success or the first failing chunk's error code.
 *
 * Fix: the original unconditionally issued a final transfer of
 * size % MEMCPY_UNIT, which is a zero-length DMA request whenever the
 * size is an exact multiple of MEMCPY_UNIT (or zero); skip it instead.
 */
static int fh_dma_start_transfer(struct dma_chan *channel,
struct dma_memcpy *memcpy)
{
	int ret;
	unsigned int i;
	unsigned int remainder = memcpy->size % MEMCPY_UNIT;

	for (i = 0; i < memcpy->size / MEMCPY_UNIT; i++) {
		ret = kick_off_dma(channel,
		memcpy->src_addr_phy + MEMCPY_UNIT * i,
		memcpy->dst_addr_phy + MEMCPY_UNIT * i,
		MEMCPY_UNIT);
		if (ret)
			return ret;
	}

	if (!remainder)
		return 0;

	return kick_off_dma(channel,
	memcpy->src_addr_phy + MEMCPY_UNIT * i,
	memcpy->dst_addr_phy + MEMCPY_UNIT * i,
	remainder);
}

static int fh_dma_start_transfer_crop(struct dma_chan *channel,
struct dma_memcpy *memcpy)
{
	int ret;

	if (memcpy->flag != DMA_MEMCPY_CROP_FLAG) {
		pr_err("ERROR: %s, dma crop flag not set\n", __func__);
		return ret;
	}

/*	if (memcpy->size < MEMCPY_FRAM_UNIT) {
		ret = kick_off_dma_crop(channel, memcpy->src_addr_phy,
			memcpy->dst_addr_phy,
			memcpy->dma_crop_config,
			MEMCPY_FRAM_UNIT);
	}

	for (i = 0; i < memcpy->size / MEMCPY_FRAM_UNIT; i++) {
		ret = kick_off_dma_crop(channel, memcpy->src_addr_phy,
			memcpy->dst_addr_phy,
			memcpy->dma_crop_config,
			MEMCPY_FRAM_UNIT);
		if (ret)
			return ret;
	}*/
	return ret;
}


static int fh_dma_start_transfer_s2g(struct dma_chan *channel,
struct dma_memcpy *memcpy)
{
	int ret;

	if (memcpy->flag != DMA_MEMCPY_S2G_FLAG) {
		pr_err("ERROR: %s, dma s2g flag not set\n", __func__);
		return ret;
	}
	ret = kick_off_dma_s2g(channel, memcpy->src_addr_phy,
	memcpy->dst_addr_phy, memcpy->dma_s2g_info, memcpy->size);

	return ret;
}



/*
 * Copy line-formatted source data (lines separated by offsets/gaps) into
 * a linear destination, batching at most NR_DESCS_PER_CHANNEL lines per
 * hardware submission.
 *
 * Fix: `ret` was uninitialised; with line_no == 0 neither the loop nor
 * the remainder branch runs and the function returned garbage.
 *
 * NOTE(review): this mutates p_lg_cpy->line_no in place; the caller's
 * struct is a kernel copy freed right after use, so this is tolerated.
 */
static int fh_dma_start_transfer_from_line(struct dma_chan *channel,
struct dma_memcpy *memcpy)
{
	int ret = 0;
	unsigned int i;
	unsigned int temp_src_offset = 0;
	unsigned int temp_dst_offset = 0;
	unsigned int temp_raw_line_no = 0;

	struct dma_line_gap_cpy *p_temp_lg_cpy;

	p_temp_lg_cpy = memcpy->p_lg_cpy;
	temp_raw_line_no = p_temp_lg_cpy->line_no;

	/* full batches of NR_DESCS_PER_CHANNEL lines */
	for (i = 0; i < temp_raw_line_no / NR_DESCS_PER_CHANNEL; i++) {
		p_temp_lg_cpy->line_no = NR_DESCS_PER_CHANNEL;
		ret = kick_off_dma_cpy_from_line(channel,
		memcpy->src_addr_phy + temp_src_offset,
		memcpy->dst_addr_phy + temp_dst_offset,
		memcpy->p_lg_cpy,
		(p_temp_lg_cpy->line_st_size) * NR_DESCS_PER_CHANNEL);
		if (ret)
			return ret;
		/* source advances by full line pitch, destination linearly */
		temp_src_offset += (p_temp_lg_cpy->line_st_offset +
		p_temp_lg_cpy->line_st_size +
		p_temp_lg_cpy->line_gap) * NR_DESCS_PER_CHANNEL;
		temp_dst_offset += (p_temp_lg_cpy->line_st_size) * NR_DESCS_PER_CHANNEL;
	}

	/* leftover lines, if any */
	if (temp_raw_line_no % NR_DESCS_PER_CHANNEL) {
		p_temp_lg_cpy->line_no = temp_raw_line_no % NR_DESCS_PER_CHANNEL;
		ret = kick_off_dma_cpy_from_line(channel,
		memcpy->src_addr_phy + temp_src_offset,
		memcpy->dst_addr_phy + temp_dst_offset,
		memcpy->p_lg_cpy,
		memcpy->size - temp_dst_offset);
	}

	return ret;
}




/*
 * Copy a linear source into line-formatted destination memory (mirror of
 * fh_dma_start_transfer_from_line: offsets swap roles), batching at most
 * NR_DESCS_PER_CHANNEL lines per hardware submission.
 *
 * Fix: `ret` was uninitialised; with line_no == 0 neither the loop nor
 * the remainder branch runs and the function returned garbage.
 */
static int fh_dma_start_transfer_to_line(struct dma_chan *channel,
struct dma_memcpy *memcpy)
{
	int ret = 0;
	unsigned int i;
	unsigned int temp_src_offset = 0;
	unsigned int temp_dst_offset = 0;
	unsigned int temp_raw_line_no = 0;

	struct dma_line_gap_cpy *p_temp_lg_cpy;

	p_temp_lg_cpy = memcpy->p_lg_cpy;
	temp_raw_line_no = p_temp_lg_cpy->line_no;

	/* full batches of NR_DESCS_PER_CHANNEL lines */
	for (i = 0; i < temp_raw_line_no / NR_DESCS_PER_CHANNEL; i++) {
		p_temp_lg_cpy->line_no = NR_DESCS_PER_CHANNEL;
		ret = kick_off_dma_cpy_to_line(channel,
		memcpy->src_addr_phy + temp_src_offset,
		memcpy->dst_addr_phy + temp_dst_offset,
		memcpy->p_lg_cpy,
		(p_temp_lg_cpy->line_st_size) * NR_DESCS_PER_CHANNEL);

		if (ret)
			return ret;

		/* destination advances by full line pitch, source linearly */
		temp_dst_offset += (p_temp_lg_cpy->line_st_offset +
		p_temp_lg_cpy->line_st_size +
		p_temp_lg_cpy->line_gap) * NR_DESCS_PER_CHANNEL;
		temp_src_offset += (p_temp_lg_cpy->line_st_size) * NR_DESCS_PER_CHANNEL;
	}

	/* leftover lines, if any */
	if (temp_raw_line_no % NR_DESCS_PER_CHANNEL) {
		p_temp_lg_cpy->line_no = temp_raw_line_no % NR_DESCS_PER_CHANNEL;
		ret = kick_off_dma_cpy_to_line(channel,
		memcpy->src_addr_phy + temp_src_offset,
		memcpy->dst_addr_phy + temp_dst_offset, memcpy->p_lg_cpy,
		memcpy->size - temp_src_offset);
	}

	return ret;
}

/*
 * Submit a user-defined descriptor array in batches of at most
 * NR_DESCS_PER_CHANNEL entries, then the remainder (if any).
 * Returns 0 on success or the first failing batch's error code.
 */
static int fh_dma_start_transfer_usr_def(struct dma_chan *channel,
struct dma_memcpy *memcpy)
{
	int ret = 0;
	struct dma_cpy_usr_def_desc *cursor = memcpy->p_usr_desc;
	unsigned int remaining = memcpy->usr_desc_size;

	/* full batches first */
	while (remaining >= NR_DESCS_PER_CHANNEL) {
		ret = kick_off_dma_cpy_usr_def(channel, cursor,
		NR_DESCS_PER_CHANNEL);
		if (ret)
			return ret;
		cursor += NR_DESCS_PER_CHANNEL;
		remaining -= NR_DESCS_PER_CHANNEL;
	}

	/* then whatever is left over */
	if (remaining)
		ret = kick_off_dma_cpy_usr_def(channel, cursor, remaining);

	return ret;
}



/*
 * dma_request_channel() filter: accept only the channel whose id matches
 * the one requested in the dma_memcpy argument (negative id never matches).
 */
static bool chan_filter(struct dma_chan *chan, void *param)
{
	struct dma_memcpy *req = param;

	PRINT_DMA_DBG("chan_filter, channel id: %d\n", req->chan_id);

	return req->chan_id >= 0 && req->chan_id == chan->chan_id;
}

static int fh_dma_memcpy(struct dma_memcpy *memcpy)
{
	//fixme: ioctl should be atomic, otherwise channel will be changed.
	struct dma_chan *dma_channel;
	dma_cap_mask_t mask;
	int ret;

	PRINT_DMA_DBG("fh_dma_memcpy start\n");
	PRINT_DMA_DBG("ioctl, memcpy->size: 0x%x\n", memcpy->size);


	PRINT_DMA_DBG("fh_dma_request_channel start\n");
	dma_cap_zero(mask);
	PRINT_DMA_DBG("dma_cap_zero end\n");
	dma_cap_set(DMA_MEMCPY, mask);
	PRINT_DMA_DBG("dma_cap_set end\n");

	if (memcpy->chan_id == AUTO_FIND_CHANNEL) {
		dma_channel = dma_request_channel(mask, 0, 0);
		if (!dma_channel) {
			pr_err("ERROR: %s, AUTO No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	} else {
		dma_channel = dma_request_channel(mask, chan_filter, memcpy);
		if (!dma_channel) {
			pr_err("ERROR: %s, No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	}
	if (!dma_channel)
		return -1;
	memcpy->chan_id = dma_channel->chan_id;
	PRINT_DMA_DBG("dma channel name: %s\n", dma_chan_name(dma_channel));

	ret = fh_dma_start_transfer(dma_channel, memcpy);

	if (ret)
		pr_err("ERROR: %s, DMA Xfer Failed\n", __func__);

	dma_release_channel(dma_channel);

	return ret;
}

static int fh_dma_memcpy_crop(struct dma_memcpy *memcpy)
{
	struct dma_chan *dma_channel;
	dma_cap_mask_t mask;
	int ret;

	PRINT_DMA_DBG("ioctl, memcpy->size: 0x%x\n", memcpy->size);

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	if (memcpy->chan_id == AUTO_FIND_CHANNEL) {
		dma_channel = dma_request_channel(mask, 0, 0);
		if (!dma_channel) {
			pr_err("ERROR: %s, AUTO No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	} else {
		dma_channel = dma_request_channel(mask, chan_filter, memcpy);
		if (!dma_channel) {
			pr_err("ERROR: %s, No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	}
	if (!dma_channel)
		return -1;
	memcpy->chan_id = dma_channel->chan_id;
	ret = fh_dma_start_transfer_crop(dma_channel, memcpy);

	if (ret)
		pr_err("ERROR: %s, DMA Xfer Failed\n", __func__);

	dma_release_channel(dma_channel);

	return ret;
}


static int fh_dma_memcpy_s2g(struct dma_memcpy *memcpy)
{
	struct dma_chan *dma_channel;
	dma_cap_mask_t mask;
	int ret;

	PRINT_DMA_DBG("ioctl, memcpy->size: 0x%x\n", memcpy->size);

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	if (memcpy->chan_id == AUTO_FIND_CHANNEL) {
		dma_channel = dma_request_channel(mask, 0, 0);
		if (!dma_channel) {
			pr_err("ERROR: %s, AUTO No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	} else {
		dma_channel = dma_request_channel(mask, chan_filter, memcpy);
		if (!dma_channel) {
			pr_err("ERROR: %s, No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	}

	if (!dma_channel)
		return -1;
	memcpy->chan_id = dma_channel->chan_id;
	ret = fh_dma_start_transfer_s2g(dma_channel, memcpy);
	if (ret)
		pr_err("ERROR: %s, DMA Xfer Failed\n", __func__);

	dma_release_channel(dma_channel);

	return ret;
}


static int fh_dma_memcpy_from_line(struct dma_memcpy *memcpy)
{
	struct dma_chan *dma_channel;
	dma_cap_mask_t mask;
	int ret;

	PRINT_DMA_DBG("ioctl, memcpy->size: 0x%x\n", memcpy->size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	if (memcpy->chan_id == AUTO_FIND_CHANNEL) {
		dma_channel = dma_request_channel(mask, 0, 0);
		if (!dma_channel) {
			pr_err("ERROR: %s, AUTO No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	} else {
		dma_channel = dma_request_channel(mask, chan_filter, memcpy);
		if (!dma_channel) {
			pr_err("ERROR: %s, No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	}
	if (!dma_channel)
		return -1;
	memcpy->chan_id = dma_channel->chan_id;
	ret = fh_dma_start_transfer_from_line(dma_channel, memcpy);
	if (ret)
		pr_err("ERROR: %s, DMA Xfer Failed\n", __func__);

	dma_release_channel(dma_channel);

	return ret;
}



static int fh_dma_memcpy_to_line(struct dma_memcpy *memcpy)
{
	struct dma_chan *dma_channel;
	dma_cap_mask_t mask;
	int ret;

	PRINT_DMA_DBG("ioctl, memcpy->size: 0x%x\n", memcpy->size);

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	if (memcpy->chan_id == AUTO_FIND_CHANNEL) {
		dma_channel = dma_request_channel(mask, 0, 0);
		if (!dma_channel) {
			pr_err("ERROR: %s, AUTO No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	} else {
		dma_channel = dma_request_channel(mask, chan_filter, memcpy);
		if (!dma_channel) {
			pr_err("ERROR: %s, No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	}
	if (!dma_channel)
		return -1;
	memcpy->chan_id = dma_channel->chan_id;
	ret = fh_dma_start_transfer_to_line(dma_channel, memcpy);
	if (ret)
		pr_err("ERROR: %s, DMA Xfer Failed\n", __func__);

	dma_release_channel(dma_channel);

	return ret;
}


static int fh_dma_memcpy_usr_def(struct dma_memcpy *memcpy)
{

	struct dma_chan *dma_channel;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	if (memcpy->chan_id == AUTO_FIND_CHANNEL) {
		dma_channel = dma_request_channel(mask, 0, 0);
		if (!dma_channel) {
			pr_err("ERROR: %s, AUTO No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	} else {
		dma_channel = dma_request_channel(mask, chan_filter, memcpy);
		if (!dma_channel) {
			pr_err("ERROR: %s, No Channel Available, channel: %d\n",
				__func__, memcpy->chan_id);
		}
	}
	if (!dma_channel)
		return -1;
	memcpy->chan_id = dma_channel->chan_id;
	ret = fh_dma_start_transfer_usr_def(dma_channel, memcpy);
	if (ret)
		pr_err("ERROR: %s, DMA Xfer Failed\n", __func__);

	dma_release_channel(dma_channel);
	return ret;
}



/* Debug helper: print every entry of a user-defined descriptor array. */
void dump_usr_setting_desc(struct dma_cpy_usr_def_desc *p, int desc_size)
{
	int idx = 0;

	while (idx < desc_size) {
		pr_err("[desc:%d] = src_add [%08x] : dst_add [%08x] : size [%08x] : width [%08x]\n",
		idx, p[idx].src, p[idx].dst, p[idx].size, p[idx].width);
		idx++;
	}
}


static long fh_dma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct dma_memcpy *memcpy;
	struct dma_crop *dma_crop_config;
	struct dma_sca_gat *dma_s2g_info;
	struct dma_line_gap_cpy *p_lg_cpy;
	struct dma_cpy_usr_def_desc *p_usr_desc;

	if (arg == 0)
		return -EINVAL;

	if (unlikely(_IOC_TYPE(cmd) != DMA_IOCTL_MAGIC)) {
		pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
			   __func__, _IOC_TYPE(cmd), -ENOTTY);
		return -ENOTTY;
	}

	if (unlikely(_IOC_NR(cmd) > DMA_IOCTL_MAXNR)) {
		pr_err("%s: ERROR: incorrect cmd num %d (error: %d)\n",
			   __func__, _IOC_NR(cmd), -ENOTTY);
		return -ENOTTY;
	}

	if (_IOC_DIR(cmd) & _IOC_READ) {
		ret = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
	} else if (_IOC_DIR(cmd) & _IOC_WRITE) {
		ret = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
	}

	if (ret) {
		pr_err("%s: ERROR: user space access is not permitted %d (error: %d)\n",
			   __func__, _IOC_NR(cmd), -EACCES);
		return -EACCES;
	}

	switch (cmd) {
	case DMA_MEMCOPY:
	{
		memcpy = kzalloc(sizeof(struct dma_memcpy), GFP_KERNEL);
		if (memcpy == NULL) {
			ret = -ENOMEM;
			break;
		}
		if (copy_from_user(memcpy, (void __user *)arg,
		sizeof(struct dma_memcpy))) {
			ret = EINVAL;
			kfree(memcpy);
			break;
		}
		ret = fh_dma_memcpy(memcpy);
		kfree(memcpy);
		break;
	}
	case DMA_MEMCOPY_CROP:
	{
		memcpy = kzalloc(sizeof(struct dma_memcpy), GFP_KERNEL);
		if (memcpy == NULL) {
			ret = -ENOMEM;
			break;
		}
		if (copy_from_user(memcpy, (void __user *)arg,
			sizeof(struct dma_memcpy))) {
			ret = EINVAL;
			kfree(memcpy);
			break;
		}
		dma_crop_config = memdup_user(memcpy->dma_crop_config,
			sizeof(struct dma_crop));
		memcpy->dma_crop_config = dma_crop_config;
		ret = fh_dma_memcpy_crop(memcpy);
		kfree(dma_crop_config);
		kfree(memcpy);
		break;
	}
	case DMA_MEMCOPY_SCA_TO_GAT:
	{
		memcpy = kzalloc(sizeof(struct dma_memcpy), GFP_KERNEL);
		if (memcpy == NULL) {
			ret = -ENOMEM;
			break;
		}
		if (copy_from_user(memcpy, (void __user *)arg,
			sizeof(struct dma_memcpy))) {
			ret = EINVAL;
			kfree(memcpy);
			break;
		}
		dma_s2g_info = memdup_user(memcpy->dma_s2g_info,
			sizeof(struct dma_sca_gat));
		memcpy->dma_s2g_info = dma_s2g_info;
		ret = fh_dma_memcpy_s2g(memcpy);
		kfree(dma_s2g_info);
		kfree(memcpy);
		break;
	}
	case DMA_MEMCOPY_FROM_LINE:
	{
		memcpy = kzalloc(sizeof(struct dma_memcpy), GFP_KERNEL);
		if (memcpy == NULL) {
			ret = -ENOMEM;
			break;
		}
		if (copy_from_user(memcpy, (void __user *)arg,
			sizeof(struct dma_memcpy))) {
			ret = EINVAL;
			kfree(memcpy);
			break;
		}
		p_lg_cpy = memdup_user(memcpy->p_lg_cpy,
			sizeof(struct dma_line_gap_cpy));
		memcpy->p_lg_cpy = p_lg_cpy;
		ret = fh_dma_memcpy_from_line(memcpy);
		kfree(p_lg_cpy);
		kfree(memcpy);
		break;
	}


	case DMA_MEMCOPY_TO_LINE:
	{
		memcpy = kzalloc(sizeof(struct dma_memcpy), GFP_KERNEL);
		if (memcpy == NULL) {
			ret = -ENOMEM;
			break;
		}
		if (copy_from_user(memcpy, (void __user *)arg,
			sizeof(struct dma_memcpy))) {
			ret = EINVAL;
			kfree(memcpy);
			break;
		}
		p_lg_cpy = memdup_user(memcpy->p_lg_cpy,
			sizeof(struct dma_line_gap_cpy));
		memcpy->p_lg_cpy = p_lg_cpy;
		ret = fh_dma_memcpy_to_line(memcpy);
		kfree(p_lg_cpy);
		kfree(memcpy);
		break;
	}

	case DMA_MEMCOPY_USR_DEF:
	{
		//pr_err("DMA_MEMCOPY_USR_DEF get in....\n");
		memcpy = kzalloc(sizeof(struct dma_memcpy), GFP_KERNEL);
		if (memcpy == NULL) {
			ret = -ENOMEM;
			break;
		}
		if (copy_from_user(memcpy, (void __user *)arg,
			sizeof(struct dma_memcpy))) {
			ret = EINVAL;
			kfree(memcpy);
			break;
		}
		p_usr_desc = memdup_user(memcpy->p_usr_desc,
		sizeof(struct dma_cpy_usr_def_desc) * memcpy->usr_desc_size);
		if (!p_usr_desc)
			BUG_ON(1);
		//dump_usr_setting_desc(p_usr_desc, memcpy->usr_desc_size);
		memcpy->p_usr_desc = p_usr_desc;
		ret = fh_dma_memcpy_usr_def(memcpy);
		kfree(p_usr_desc);
		kfree(memcpy);
		break;
	}

	}

	return ret;
}

/* open(): no per-file state to set up; always succeeds. */
static int fh_dma_open(struct inode *inode, struct file *file)
{
	PRINT_DMA_DBG("fh_dma_open\n");

	return 0;
}

/* release(): nothing to tear down; always succeeds. */
static int fh_dma_release(struct inode *inode, struct file *filp)
{
	PRINT_DMA_DBG("fh_dma_release\n");

	return 0;
}


/* Character-device operations: all real work happens through ioctl(). */
static const struct file_operations fh_dma_fops = {
	.owner                  = THIS_MODULE,
	.open                   = fh_dma_open,
	.release                = fh_dma_release,
	.unlocked_ioctl         = fh_dma_ioctl,
};

/* Misc device node (/dev/DEVICE_NAME) with a dynamically assigned minor. */
static struct miscdevice fh_dma_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DEVICE_NAME,
	.fops = &fh_dma_fops,
};

/*
 * Module init: register the misc device.
 *
 * Fixes: propagate the real misc_register() errno instead of a
 * hard-coded -ENXIO, and terminate the error message with a newline.
 */
static int __init fh_dma_init(void)
{
	int ret;

	ret = misc_register(&fh_dma_device);
	if (ret < 0) {
		pr_err("%s: ERROR: %s registration failed\n",
			__func__, DEVICE_NAME);
		return ret;
	}

	return 0;
}

/*
 * Module exit: unregister the misc device.
 *
 * Fix: the original also called remove_proc_entry(PROC_FILE, NULL), but
 * fh_dma_init() never creates a proc entry, so that call only produced a
 * "removing non-existent entry" warning at unload; it has been dropped.
 */
static void __exit fh_dma_exit(void)
{
	misc_deregister(&fh_dma_device);
}
module_init(fh_dma_init);
module_exit(fh_dma_exit);

MODULE_AUTHOR("QIN");
MODULE_DESCRIPTION("Misc Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform: FH");
