#include <linux/io.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/fh_dmac.h>
#include <linux/pm.h>

/*****************************************************************************
 *  Include Section
 *  add all #include here
 *****************************************************************************/
#include   <mach/pmu.h>
#include	<linux/sched.h>
#include <linux/clk.h>
#include <mach/fh_predefined.h>

#include "mol_hash.h"









/*
 * Register map of the hash engine, byte offsets from the ioremapped base.
 * Bit-level meanings are partly inferred from usage in this file — confirm
 * against the Fullhan hash IP datasheet.
 */
#define HASH_CFG	 0
#define RX_FIFO_CFG0	 0x4
#define RX_FIFO_CFG1	 0x8
#define RX_FIFO_CFG2	 0xc
#define REQ_CFG1		 0x10
#define MES_INFO_CFG0	 0x14
#define MES_INFO_CFG1	 0x18
#define MES_INFO_CFG2	 0x1C
#define HMAC_KEY_INFO_CFG0	 0x20
#define HMAC_KEY_INFO_CFG1	 0x24
#define HMAC_KEY_INFO_CFG2	 0x28
#define DEBUG_BUS	 0x2C
/* Digest output registers: HASH_H0..HASH_Hn, one 32-bit word each. */
#define HASH_H0	 			0x30
#define HASH(x) 			(HASH_H0 + ((x) << 2))
#define HASH_MSG_LEN_CFG0 			0x70
#define HASH_MSG_LEN_CFG1 			0x74
#define HASH_MSG_LEN_CFG2 			0x78
#define HASH_MSG_LEN_CFG3 			0x7c
#define HKEY_LEN_CFG 				0x80
#define HKEY_CFG0 					0x84
#define HKEY_CFG(x) 			   (HKEY_CFG0 + ((x) << 2))
/* Message data is streamed into this FIFO by slave DMA. */
#define HASH_FIFO					0x100
#define HASH_INT_CFG				0xc4

/* HASH_CFG field layout: bits [2:0] select the algorithm. */
#define HASH_MODE_OFS					0x0
#define HASH_MODE_MASK					0x7
#define HASH_SHA1						0x0
#define HASH_SHA256						0x1
#define HASH_SHA384						0x2
#define HASH_SHA512						0x3

#define HASH_START						BIT(3)
#define HMAC_START						BIT(4)
#define DBG_BUS_SEL_OFS					5
#define DBG_BUS_SEL_MASK					0x1F70


/* Maximum bytes moved per DMA descriptor (bounce-buffer size). */
#define MAX_HASH_TRANS_ONETIME (0x4000)

/* Forward declarations for the platform driver callbacks below. */
static int fh_mc_hash_probe(struct platform_device *pdev);
static int __exit fh_mc_hash_remove(struct platform_device *pdev);
/* Singleton device state, allocated in probe(); driver supports one instance. */
struct fh_hash_obj_t* hash_obj  = NULL;
/* Misc char device handle ("/dev/fh_hash"), allocated in probe(). */
struct miscdevice* fh_mc_hash_device;
/* Signalled by the dmaengine callback when one FIFO DMA chunk completes. */
struct completion	hash_dma_complete;


/* dmaengine completion callback: signal that one FIFO transfer finished. */
void hash_dma_cb(void *p)
{
	complete(&hash_dma_complete);
}


/*
 * Stream @len bytes from the userspace buffer @src into the hash engine's
 * RX FIFO via slave DMA, in chunks of at most MAX_HASH_TRANS_ONETIME bytes.
 * Each chunk is copied into a kernel bounce buffer, mapped for DMA and
 * submitted as a single-entry scatterlist; we block on hash_dma_complete
 * until the dmaengine callback fires.
 *
 * Fixes vs. original: the DMA channel is now released on every error path
 * (it leaked on allocation/copy failures), the scatterlist is on the stack
 * (the old 'static' sgl was shared between concurrent callers), and the
 * prep_slave_sg() result is checked before dereference.
 */
void hash_dma_write_data(struct fh_hash_obj_t* obj, u8 *src, int len)
{
	int wl = len;
	dma_cap_mask_t mask;
	struct dma_chan *dma_channel;
	struct dma_async_tx_descriptor *p_desc;
	unsigned char *local_buff;

	local_buff = kzalloc(MAX_HASH_TRANS_ONETIME, GFP_KERNEL);
	if (local_buff == NULL) {
		pr_err("hash_dma_write_data no enough mem\n");
		return;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dma_channel = dma_request_channel(mask, NULL, NULL);
	if (dma_channel == NULL) {
		kfree(local_buff);
		pr_err("hash no free DMA CHAN\n");
		return;
	}

	while (wl > 0) {
		int curtranslen = wl >= MAX_HASH_TRANS_ONETIME ?
				MAX_HASH_TRANS_ONETIME : wl;
		struct scatterlist sgl;
		struct dma_slave_config *rx_config;
		struct fh_dma_extra *ext_para;

		rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL);
		ext_para = kzalloc(sizeof(*ext_para), GFP_KERNEL);
		if (rx_config == NULL || ext_para == NULL) {
			kfree(rx_config);	/* kfree(NULL) is a no-op */
			kfree(ext_para);
			goto out;
		}

		memset(local_buff, 0, MAX_HASH_TRANS_ONETIME);
		if (copy_from_user(local_buff, (void __user *)src, curtranslen)) {
			kfree(ext_para);
			kfree(rx_config);
			goto out;
		}

		memset(&sgl, 0, sizeof(struct scatterlist));
		sgl.dma_address = dma_map_single(fh_mc_hash_device->parent,
				local_buff, curtranslen, DMA_TO_DEVICE);
		/* Engine needs at least 0x80 bytes per burst, word-aligned. */
		sgl.length = curtranslen < 0x80 ? 0x80 : curtranslen;
		if (sgl.length % 4 != 0)
			sgl.length = (sgl.length / 4 + 1) * 4;

		rx_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		rx_config->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		rx_config->dst_addr = obj->phys + HASH_FIFO;
		rx_config->slave_id = 36;	/* hash RX handshake id — per SoC wiring */
		rx_config->src_maxburst = 8;
		rx_config->dst_maxburst = 8;
		rx_config->device_fc = false;
		rx_config->direction = DMA_MEM_TO_DEV;

		dmaengine_slave_config(dma_channel, rx_config);

		ext_para->dinc = FH_DMA_SLAVE_FIX;	/* destination is a FIFO */
		ext_para->sinc = FH_DMA_SLAVE_INC;
		ext_para->data_switch = SWT_ABCD_DCBA;	/* byte-swap words for the engine */
		ext_para->ot_len_flag = USR_DEFINE_ONE_TIME_LEN;
		ext_para->ot_len_len = 0x80;
		p_desc = dma_channel->device->device_prep_slave_sg(dma_channel,
				&sgl, 1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
				ext_para);
		if (p_desc == NULL) {
			pr_err("hash prep slave sg failed\n");
			dma_unmap_single(fh_mc_hash_device->parent,
					sgl.dma_address, curtranslen,
					DMA_TO_DEVICE);
			kfree(ext_para);
			kfree(rx_config);
			goto out;
		}

		reinit_completion(&hash_dma_complete);
		p_desc->callback = hash_dma_cb;
		p_desc->tx_submit(p_desc);

		if (len > 0x100)
			SET_REG(obj->regs + REQ_CFG1, 0);

		wait_for_completion_interruptible(&hash_dma_complete);
		kfree(ext_para);
		kfree(rx_config);

		dma_unmap_single(fh_mc_hash_device->parent,
			sgl.dma_address,
			curtranslen, DMA_TO_DEVICE);

		wl -= curtranslen;
		src += curtranslen;
	}
out:
	dma_release_channel(dma_channel);
	kfree(local_buff);
}

/*
 * Hash-done interrupt handler. Acknowledges the interrupt by writing the
 * status register back with bit 2 set, then wakes every waiter blocked on
 * the device's run_complete completion.
 */
irqreturn_t fh_mc_hash_isr(int irq, void *dev_id)
{
	struct fh_hash_obj_t *obj = dev_id;
	u32 status;

	status = GET_REG(obj->regs + HASH_INT_CFG);
	SET_REG(obj->regs + HASH_INT_CFG, status | 0x4);

	complete_all(&obj->run_complete);

	return IRQ_HANDLED;
}


/*
 * Program message-length and request parameters before starting a transfer.
 * @buff is unused here; the parameter is kept for signature symmetry with
 * hash_xfer_data().
 *
 * Fix vs. original: HASH_MSG_LEN_CFG0 was written twice with the same
 * value; a single write suffices.
 */
static void hash_xfer_data_prep(struct fh_hash_obj_t* obj, void* buff, unsigned long buflen)
{
	unsigned int dmaonce;

	/* Total message length in bytes. */
	SET_REG(obj->regs + HASH_MSG_LEN_CFG0, buflen);

	/* Messages >0x100 bytes switch REQ_CFG1 to 2 — presumably
	 * multi-request mode; TODO confirm against the IP spec. */
	if (buflen > 0x100)
		SET_REG(obj->regs + REQ_CFG1, 2);

	/* Per-burst payload is capped at 0x80 bytes. */
	dmaonce = buflen > 0x80 ? 0x80 : buflen;
	SET_REG(obj->regs + MES_INFO_CFG2, dmaonce);
}

/* Push the message into the engine; thin wrapper over the DMA path. */
static void hash_xfer_data(struct fh_hash_obj_t *obj, void *buff, unsigned long buflen)
{
	hash_dma_write_data(obj, buff, buflen);
}






static int sha1(struct fh_hash_obj_t* obj, void* buff,unsigned long buflen)
{
	hash_xfer_data_prep(obj, buff, buflen);
	SET_REG(obj->regs+HASH_CFG,0x2008);
	hash_xfer_data(obj, buff, buflen);	
	return 0;
}

static int sha256(struct fh_hash_obj_t* obj, void* buff, unsigned long buflen)
{
	hash_xfer_data_prep(obj, buff, buflen);
	
	SET_REG(obj->regs+HASH_CFG,0x2009);
	
	hash_xfer_data(obj, buff, buflen);	

	return 0;
}

static int sha384(struct fh_hash_obj_t* obj, void* buff, unsigned long buflen)
{
	hash_xfer_data_prep(obj, buff, buflen);
	
	SET_REG(obj->regs+HASH_CFG,0x200a);
	
	hash_xfer_data(obj, buff, buflen);	
	return 0;
}

static int sha512(struct fh_hash_obj_t* obj, void* buff, unsigned long buflen)
{

	hash_xfer_data_prep(obj, buff, buflen);
	
	SET_REG(obj->regs+HASH_CFG,0x200b);
	
	
	hash_xfer_data(obj, buff, buflen);	

	return 0;
}



/*
 * Dispatch one hardware hash computation.
 * @mode selects the digest width in bits (160/256/384/512); the digest
 * (@mode/8 bytes) is copied out of the HASH_H0.. registers into the
 * userspace buffer @res. Returns 0 on success, -1 on failure.
 *
 * Fix vs. original: copy_to_user() was reading __iomem registers
 * directly; bounce the digest through a kernel buffer with
 * memcpy_fromio() instead.
 *
 * NOTE(review): the ioctl caller waits on run_complete only AFTER this
 * function returns, yet the digest registers are read here — verify the
 * engine has latched the final digest by the time the DMA completes.
 */
int hw_sha_mol(struct fh_hash_obj_t* obj, int mode, void* buff, unsigned long buflen, unsigned char* res )
{
	int ret = -1;
	unsigned char digest[64];	/* SHA-512 is the largest: 64 bytes */

	if (buflen == 0)
		return -1;

	switch (mode) {
	case 160:
		ret = sha1(obj, buff, buflen);
		break;
	case 256:
		ret = sha256(obj, buff, buflen);
		break;
	case 384:
		ret = sha384(obj, buff, buflen);
		break;
	case 512:
		ret = sha512(obj, buff, buflen);
		break;
	default:
		ret = -1;
		break;
	}
	if (ret == 0) {
		memcpy_fromio(digest, (void *)obj->regs + HASH_H0, mode / 8);
		if (copy_to_user((void __user *)res, digest, mode / 8))
			ret = -1;
		SET_REG(obj->regs + HASH_CFG, 0);
	}
	return ret;
}




static long fh_mc_hash_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	struct fh_hash_param_t param;
	
	if (arg == 0)
		return -EINVAL;

	if (unlikely(_IOC_TYPE(cmd) != FH_HASH_IOCTL_MAGIC))
	{
		pr_err("%s: ERROR: incorrect magic num %d (error: %d)\n",
			   __func__, _IOC_TYPE(cmd), -ENOTTY);
		return -ENOTTY;
	}
	switch (cmd)
	{
	case FH_HASH_CALC:
	{
		if (copy_from_user(&param, (void __user *)arg, sizeof(struct fh_hash_param_t)))
		{
			ret = EINVAL;
			break;
		}

		mutex_lock(&hash_obj->lock);
		reinit_completion(&hash_obj->run_complete);
		ret = hw_sha_mol(hash_obj, param.shalen,param.buffer, param.len, param.res);
		if (ret == 0) {
			wait_for_completion_interruptible(&hash_obj->run_complete);
		} else {
			ret = -EINVAL;
		}
		mutex_unlock(&hash_obj->lock);
		break;
	}
	default:
		break;
	}
	return ret;
}

/* open(): no per-file state is needed; always succeeds. */
static int fh_mc_hash_open(struct inode *inode, struct file *file)
{
	return 0;
}

/* release(): nothing to tear down; always succeeds. */
static int fh_mc_hash_release(struct inode *inode, struct file *filp)
{
	
	return 0;
}


/* File operations exposed through the misc device. */
static const struct file_operations fh_mc_hash_fops = {
	.owner		= THIS_MODULE,
	.open		= fh_mc_hash_open,
	.release	= fh_mc_hash_release,
	.unlocked_ioctl	= fh_mc_hash_ioctl,
};

#ifdef CONFIG_PM
/* Runtime-PM stub: no hardware state is saved on suspend. */
static int hash_mol_runtime_suspend(struct device *dev)
{
	return 0;
}

/* Runtime-PM stub: no hardware state is restored on resume. */
static int hash_mol_runtime_resume(struct device *dev)
{
	return 0;
}

/* Same (no-op) callbacks for both system-sleep and runtime PM. */
UNIVERSAL_DEV_PM_OPS(hash_mol_pm_ops, hash_mol_runtime_suspend,
		   hash_mol_runtime_resume, NULL);

#define HASH_MOL_PM_OPS (&hash_mol_pm_ops)
#else
#define HASH_MOL_PM_OPS NULL
#endif /* CONFIG_PM */



#ifdef CONFIG_USE_OF
/* Device-tree match table for "fh,fh-hash" nodes. */
static struct of_device_id const fh_mc_hash_of_match[] = {
	{ .compatible = "fh,fh-hash" },
	{}
};
/* Fix: original referenced the nonexistent identifier fh_sm_of_match,
 * which breaks the module build under CONFIG_USE_OF. */
MODULE_DEVICE_TABLE(of, fh_mc_hash_of_match);
#endif
/* Platform driver glue. .remove is wrapped in __exit_p(), so it is
 * discarded when the driver is built into the kernel (non-module). */
static struct platform_driver fh_mc_hash_driver = {
	.driver	= {
		.name	= FH_MC_HASH_PLAT_DEVICE_NAME,
		.owner	= THIS_MODULE,
		.pm		= HASH_MOL_PM_OPS,
#ifdef CONFIG_USE_OF
		.of_match_table = fh_mc_hash_of_match,
#endif
	},
	.probe	= fh_mc_hash_probe,
	.remove	= __exit_p(fh_mc_hash_remove),
};

static int fh_mc_hash_probe(struct platform_device *pdev)
{
	int err;
	
#ifdef CONFIG_USE_OF
	struct device_node *np = pdev->dev.of_node;
#else
	struct resource *res;
#endif

	hash_obj = kzalloc(sizeof(struct fh_hash_obj_t),GFP_KERNEL);
	if (hash_obj == NULL) {
		err = -ENXIO;
		return err;
	}
#ifdef CONFIG_USE_OF

	hash_obj->irq_no = irq_of_parse_and_map(np, 0);
	if (hash_obj->irq_no < 0) {
		dev_warn(&pdev->dev, "hash interrupt is not available.\n");
		return hash_obj->irq_no;
	}

	hash_obj->regs = of_iomap(np, 0);
	if (hash_obj->regs == NULL) {
		err = -ENXIO;
		goto fail_no_ioremap;
	}
#else
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "sm get platform source error..\n");
		return -ENODEV;
	}

	hash_obj->irq_no = platform_get_irq(pdev, 0);
	hash_obj->irq_no = irq_create_mapping(NULL,
		hash_obj->irq_no);
	if (hash_obj->irq_no < 0) {
		dev_warn(&pdev->dev, "sm interrupt is not available.\n");
		return hash_obj->irq_no;
	}

	res = request_mem_region(res->start, resource_size(res),
	pdev->name);
	if (res == NULL) {
		dev_err(&pdev->dev, "sm region already claimed\n");
		return -EBUSY;
	}
	hash_obj->regs = ioremap(res->start, resource_size(res));
	hash_obj->phys = res->start;
	if (hash_obj->regs == NULL) {
		err = -ENXIO;
		goto fail_no_ioremap;
	}

#endif


	init_completion(&hash_obj->run_complete);
	init_completion(&hash_dma_complete);
	
	
	mutex_init(&hash_obj->lock);
	err = request_irq(hash_obj->irq_no, fh_mc_hash_isr, IRQF_SHARED,
		"fh_hash", hash_obj);
	if (err) {
		dev_err(&pdev->dev, "request_irq:%d failed, %d\n",
			hash_obj->irq_no, err);
		err = -ENXIO;
		goto fail_no_ioremap;
	}



	fh_mc_hash_device = kzalloc(sizeof(struct miscdevice),GFP_KERNEL);
	fh_mc_hash_device->minor = MISC_DYNAMIC_MINOR;
	fh_mc_hash_device->fops = &fh_mc_hash_fops;
	fh_mc_hash_device->name = "fh_hash";
	err = misc_register(fh_mc_hash_device);

	if (err < 0) {
		pr_err("%s: ERROR: %s registration failed", __func__,
				FH_MC_HASH_DEVICE_NAME);
		err = -ENXIO;
		goto misc_error;
	}

	return 0;

misc_error:
	free_irq(hash_obj->irq_no, &hash_obj);

fail_no_ioremap:
	iounmap(hash_obj->regs);

	return err;
}

/*
 * Remove: unwind probe() in reverse order.
 *
 * Fix vs. original: free_irq() was passed &hash_obj while request_irq()
 * registered with hash_obj — with IRQF_SHARED the dev_id must match or
 * the handler is never removed.
 */
static int __exit fh_mc_hash_remove(struct platform_device *pdev)
{
	misc_deregister(fh_mc_hash_device);
	kfree(fh_mc_hash_device);
	free_irq(hash_obj->irq_no, hash_obj);
	iounmap(hash_obj->regs);
	kfree(hash_obj);
	return 0;
}






/* Module entry point: register the platform driver. */
static int __init fh_mc_hash_init(void)
{
	return platform_driver_register(&fh_mc_hash_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit fh_mc_hash_exit(void)
{
	platform_driver_unregister(&fh_mc_hash_driver);
}
module_init(fh_mc_hash_init);
module_exit(fh_mc_hash_exit);

MODULE_AUTHOR("Fullhan");
MODULE_DESCRIPTION("Misc Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform: FH MC HASH");
