/***************************************************
 * ccp903_sec.c
 *
 * Created on: Mar 20, 2017
 * Author: zjjin@ccore.com
 ***************************************************/

#include "../INCLUDE/compate.h"
#include "../INCLUDE/pci_csec.h"
#include "../INCLUDE/jr.h"
#include "../INCLUDE/sec.h"

#include "../INCLUDE/ccp903_dbg.h"

extern int sec_init(struct csec_priv_t *csec_priv);
extern void sec_dequeue(void *data);
/*0: Success; -N: Error.*/
static int probe_status = 0;

static struct pci_device_id pci_csec_pci_tbl[] __initdata = {
	CCORE_CC90X_CRYPTO_DEVICE(PCI_DEVICE_ID_CCORE_CC903T),
	CCORE_CC90X_CRYPTO_DEVICE(PCI_DEVICE_ID_CCORE_CC903TH),
	CCORE_CC90X_CRYPTO_DEVICE(PCI_DEVICE_ID_CCORE_CC903TF),
	CCORE_CC90X_CRYPTO_DEVICE(PCI_DEVICE_ID_CCORE_CC903TU_0),
	/* required last entry */
	{0,}
};
struct pci_device_id *ccore_device_id;

MODULE_DEVICE_TABLE(pci, pci_csec_pci_tbl);

#define DBI_BAR0	0x10
#define DBI_BAR1	0x14
#define DBI_BAR2	0x18
#define DBI_BAR3	0x1C
#define DBI_BAR4	0x20
#define DBI_BAR5	0x24

#define CH_EN(n)						(1<<(n-1))
#define CH_EN_WE(n)						(1<<(n+7))

#define TT_FC_MASK						0xfffffff8
#define TT_FC_MEM2MEM					0
#define DST_MULTBLK_TYPE_MASK			0xfffffff3
#define DST_MULTBLK_TYPE_CONTIGUOUS		0
#define SRC_MULTBLK_TYPE_MASK			0xfffffffc
#define SRC_MULTBLK_TYPE_CONTIGUOUS		0

#define DMAC_RST						(1<<0)
#define SHADOWREG_LLI_VALID				(1<<31)
#define SHADOWREG_LLI_LAST				(1<<30)
#define IOC_BLKTFR						(1<<26)
#define SRC_MSIZE_MASK					0xfffc3000
#define SRC_MSIZE_1						0
#define SRC_MSIZE_4						(1<<14)
#define SRC_MSIZE_8						(2<<14)
#define SRC_MSIZE_16					(3<<14)
#define SRC_MSIZE_32					(4<<14)
#define SRC_MSIZE_64					(5<<14)
#define SRC_MSIZE_128					(6<<14)
#define SRC_MSIZE_256					(7<<14)
#define SRC_MSIZE_512					(8<<14)
#define SRC_MSIZE_1024					(9<<14)
#define DST_MSIZE_MASK					0xffc30000
#define DST_MSIZE_1						0
#define DST_MSIZE_4						(1<<18)
#define DST_MSIZE_8						(2<<18)
#define DST_MSIZE_16					(3<<18)
#define DST_MSIZE_32					(4<<18)
#define DST_MSIZE_64					(5<<18)
#define DST_MSIZE_128					(6<<18)
#define DST_MSIZE_256					(7<<18)
#define DST_MSIZE_512					(8<<18)
#define DST_MSIZE_1024					(9<<18)
#define SRC_TR_WIDTH_MASK				0xfffff8ff
#define SRC_TR_WIDTH_8					0
#define SRC_TR_WIDTH_16					(1<<8)
#define SRC_TR_WIDTH_32					(2<<8)
#define SRC_TR_WIDTH_64					(3<<8)
#define SRC_TR_WIDTH_128				(4<<8)
#define SRC_TR_WIDTH_256				(5<<8)
#define SRC_TR_WIDTH_512				(6<<8)
#define DST_TR_WIDTH_MASK				0xffffc7ff
#define DST_TR_WIDTH_8					0
#define DST_TR_WIDTH_16					(1<<11)
#define DST_TR_WIDTH_32					(2<<11)
#define DST_TR_WIDTH_64					(3<<11)
#define DST_TR_WIDTH_128				(4<<11)
#define DST_TR_WIDTH_256				(5<<11)
#define DST_TR_WIDTH_512				(6<<11)
#define DINC							(1<<6)
#define SINC							(1<<4)
#define DMS_2							(1<<2)
#define SMS_2							(1<<0)

/*
 * get_ccore_devid_table - expose the driver's PCI device ID table.
 *
 * Caches the table pointer in the global ccore_device_id and returns it.
 */
struct pci_device_id *get_ccore_devid_table(void)
{
	ccore_device_id = pci_csec_pci_tbl;
	return ccore_device_id;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
static struct workqueue_struct *g_dq_work_queue[RING_NUM] = {NULL};
#endif

/*
 * get_jobring_index - map a dequeue work item back to its job ring slot.
 * @work: the work_struct embedded in a struct jobring (dequeue_task).
 *
 * Returns the index of the matching ring in csec_priv->jr_g[], or -1 if
 * the work item does not belong to any known ring (a valid index must be
 * smaller than hw.ring_num).
 */
int32_t get_jobring_index(struct work_struct *work)
{
	struct jobring *ring = container_of(work, struct jobring, dequeue_task);
	struct csec_priv_t *priv = ring->csec_priv;
	int32_t count = priv->hw.ring_num;
	int32_t idx;

	for (idx = 0; idx < count; idx++) {
		if (&priv->jr_g[idx] == ring)
			return idx;
	}

	return -1;
}

/*
 * schedule_work_unbound - queue a job ring dequeue work item.
 * @w: the dequeue work_struct of a job ring.
 *
 * On kernels >= 2.6.36 the work goes to system_unbound_wq so the worker
 * is not pinned to a specific CPU core. On older kernels it is queued on
 * the per-ring singlethread workqueue created at probe time.
 */
void schedule_work_unbound(struct work_struct *w)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	/* Declared inside the conditional block so it does not trigger an
	 * unused-variable warning on newer kernels. */
	int32_t jr_idx;

	jr_idx = get_jobring_index(w);
	if (jr_idx < 0)
		return;
	if (!g_dq_work_queue[jr_idx])
		return;
	queue_work(g_dq_work_queue[jr_idx], w);
#else
	/* system_unbound_wq: do not bind the scheduled worker to a CPU. */
	queue_work(system_unbound_wq, w);
#endif
}

//axidma
//TODO: key code path -- needs detailed review/analysis

/*
 * axidma_intsignal_flag_set - enable/disable the interrupt signal for AXI
 * DMA channel @n.
 * @csec_priv: device private data (BAR_DMA must be mapped).
 * @n:         channel number (1-based); channel registers are at
 *             0xD000 + n*0x100.
 * @flag:      non-zero sets bit 1 of the channel INTSIGNAL register at
 *             offset 0x90, zero clears the register.
 *
 * Fix: removed the unconditional KERN_INFO printk that logged the
 * __iomem base address on every call -- debug leftover that also leaked
 * a kernel address into the log.
 */
void axidma_intsignal_flag_set(struct csec_priv_t *csec_priv, unsigned char n, unsigned int flag)
{
	void __iomem *regaddr;

	regaddr = (void __iomem *)(csec_priv->ba[BAR_DMA].base_virt);

	if (flag)
		sec_out32(regaddr + 0xD000 + n * 0x100 + 0x90, cpu_2_le32(1 << 1));
	else
		sec_out32(regaddr + 0xD000 + n * 0x100 + 0x90, cpu_2_le32(0));
}

/*
 * axidma_clear_int_flag - wait for the DMA-done interrupt flag of AXI DMA
 * channel @n, then acknowledge it.
 * @csec_priv: device private data (BAR_DMA must be mapped).
 * @n:         channel number (1-based).
 *
 * Polls the channel interrupt status register (offset 0x88) until bit 1
 * is set or 10 seconds elapse, then writes the status back to the clear
 * register (offset 0x98) to acknowledge.
 *
 * Fix: the status register is now read before the timeout check, so the
 * timeout diagnostic can no longer print an uninitialized value on the
 * first iteration.
 */
void axidma_clear_int_flag(struct csec_priv_t *csec_priv, unsigned char n)
{
	void __iomem *regaddr;
	volatile unsigned int value = 0;
	unsigned long timeout = jiffies + HZ * 10;

	regaddr = (void __iomem *)(csec_priv->ba[BAR_DMA].base_virt);

	do {
		value = sec_in32(regaddr + 0xD000 + n * 0x100 + 0x88);
		if (value & (1 << 1))
			break;

		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "read axidma intflag timeout\n");
			printk(KERN_ERR "0x198:%x\n", value);
			break;
		}
	} while (1);

	/* Acknowledge whatever status bits we observed. */
	sec_out32(regaddr + 0xD000 + n * 0x100 + 0x98, value);
}

/*
 * axidma_transfer_one_block - program and start a single memory-to-memory
 * block transfer on AXI DMA channel @n.
 * @csec_priv: device private data (BAR_DMA must be mapped).
 * @n:         channel number (1-based); channel registers live at
 *             0xD000 + n*0x100.
 * @sar:       64-bit source address.
 * @dar:       64-bit destination address.
 * @ts:        transfer size register value (number of beats - 1).
 * @ctrl:      CHx_CTL low word (transfer widths, master select, ...).
 *
 * Register programming order matters: source/destination addresses,
 * transfer size and control are written first, the channel CFG pair is
 * read-modified-written for contiguous mem-to-mem mode, and finally the
 * channel-enable register at 0xD018 kicks off the transfer.
 *
 * NOTE(review): the CFG read-modify-write and the channel-enable write do
 * not go through cpu_2_le32() like the other register writes -- harmless
 * on a little-endian host, but confirm behavior on big-endian platforms.
 */
void axidma_transfer_one_block(struct csec_priv_t *csec_priv, unsigned char n, unsigned long long sar, unsigned long long dar, unsigned int ts, unsigned int ctrl)
{
	unsigned int cfg_l, cfg_h;
	void __iomem *regaddr = NULL;

	regaddr = (void __iomem *)(csec_priv->ba[BAR_DMA].base_virt);

	/* Source and destination addresses, low word then high word. */
	sec_out32(regaddr+0xD000+n*0x100+0x00, cpu_2_le32(sar));
	sec_out32(regaddr+0xD000+n*0x100+0x04, cpu_2_le32(sar>>32));
	sec_out32(regaddr+0xD000+n*0x100+0x08, cpu_2_le32(dar));
	sec_out32(regaddr+0xD000+n*0x100+0x0C, cpu_2_le32(dar>>32));

	/* Block transfer size (beats - 1). */
	sec_out32(regaddr+0xD000+n*0x100+0x10, cpu_2_le32(ts));

	/* Control register pair. */
	sec_out32(regaddr+0xD000+n*0x100+0x18, cpu_2_le32(ctrl));
	sec_out32(regaddr+0xD000+n*0x100+0x1C, cpu_2_le32(0));

	/* Select mem-to-mem flow control and contiguous multi-block mode. */
	cfg_l = sec_in32(regaddr+0xD000+n*0x100+0x20);
	cfg_h = sec_in32(regaddr+0xD000+n*0x100+0x24);
	sec_out32(regaddr+0xD000+n*0x100+0x24, (cfg_h & TT_FC_MASK) | TT_FC_MEM2MEM);
	sec_out32(regaddr+0xD000+n*0x100+0x20, (cfg_l & DST_MULTBLK_TYPE_MASK & SRC_MULTBLK_TYPE_MASK) | DST_MULTBLK_TYPE_CONTIGUOUS | SRC_MULTBLK_TYPE_CONTIGUOUS );
	/* Enable the channel (write-enable bit plus channel bit). */
	sec_out32(regaddr+0xD000+0x18, CH_EN(n) | CH_EN_WE(n));
#if 0
	{
		volatile unsigned int val;

		val = sec_in32(regaddr+0xD000+n*0x100+0x00);
		printk(KERN_ERR "0x100:%x\n", val);
		val = sec_in32(regaddr+0xD000+n*0x100+0x04);
		printk(KERN_ERR "0x104:%x\n", val);
		val = sec_in32(regaddr+0xD000+n*0x100+0x08);
		printk(KERN_ERR "0x108:%x\n", val);
		val = sec_in32(regaddr+0xD000+n*0x100+0x0C);
		printk(KERN_ERR "0x10C:%x\n", val);
		val = sec_in32(regaddr+0xD000+n*0x100+0x10);
		printk(KERN_ERR "0x110:%x\n", val);
		val = sec_in32(regaddr+0xD000+n*0x100+0x18);
		printk(KERN_ERR "0x118:%x\n", val);
		val = sec_in32(regaddr+0xD000+n*0x100+0x1C);
		printk(KERN_ERR "0x11C:%x\n", val);
		val = sec_in32(regaddr+0xD000+n*0x100+0x20);
		printk(KERN_ERR "0x120:%x\n", val);
		val = sec_in32(regaddr+0xD000+n*0x100+0x24);
		printk(KERN_ERR "0x124:%x\n", val);
		val = sec_in32(regaddr+0xD000+0x18);
		printk(KERN_ERR "0x18:%x\n", val);
	}
#endif
}

/* Added for the custom (vendor-specific) API. */
/*
 * ep_dma_wr_init - program the card's EP DMA write engine: transfer @len
 * bytes from the card's result buffer (RES_BUF_ADDR) into host memory at
 * cmd_phys_addr + MAX_CMD_LEN.
 * @pci_dev: the crypto card.
 * @len:     number of bytes the card should write to host memory.
 * @ctrl:    DMA channel control word (e.g. local-interrupt enable bits).
 *
 * The engine is programmed either through a BAR-mapped register window at
 * offset 0x4000 (BAR_CONFIG_IN_COS) or through PCI config space; the
 * register offsets (0x97c engine enable, 0x9c4 interrupt mask,
 * 0xa6c..0xa88 channel context, 0x980 doorbell) are the same either way.
 *
 * NOTE(review): the BAR-mapped branch writes offset 0xa74 but the
 * config-space branch does not -- confirm whether that omission is
 * intentional.
 */
void ep_dma_wr_init(struct pci_dev *pci_dev,  unsigned int len, unsigned int ctrl)
{
	struct csec_priv_t *csec_priv;
	struct ccp90x_hw *hw;

#ifdef BAR_CONFIG_IN_COS
	void __iomem *regaddr = NULL;
#endif

	csec_priv = pci_get_drvdata(pci_dev);
	hw = &csec_priv->hw;

#ifdef BAR_CONFIG_IN_COS
	regaddr = (void __iomem *)(csec_priv->ba[hw->commu_bar].base_virt);

	sec_out32(regaddr+0x4000+0x97c, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+0x97c, cpu_2_le32(0x00000001));
	sec_out32(regaddr+0x4000+0x9c4, cpu_2_le32(0x00010001));
	sec_out32(regaddr+0x4000+0xa6c, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+0xa70, cpu_2_le32(ctrl));
	sec_out32(regaddr+0x4000+0xa74, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+0xa78, cpu_2_le32(len));
	sec_out32(regaddr+0x4000+0xa7c, cpu_2_le32(RES_BUF_ADDR));
	sec_out32(regaddr+0x4000+0xa80, cpu_2_le32(0x80000000));
	sec_out32(regaddr+0x4000+0xa84, cpu_2_le32((unsigned int)(csec_priv->cmd_phys_addr + MAX_CMD_LEN)));
	sec_out32(regaddr+0x4000+0xa88, cpu_2_le32((unsigned int)((csec_priv->cmd_phys_addr + MAX_CMD_LEN) >>32)));
	sec_out32(regaddr+0x4000+0x980, cpu_2_le32(0x00000000));
#else

	pci_write_config_dword(pci_dev, 0x97c, 0x00000000);
	pci_write_config_dword(pci_dev, 0x97c, 0x00000001);
	pci_write_config_dword(pci_dev, 0x9c4, 0x00010001);
	//write32_cfg(hDev, 0x9c4, 0x00000000);
	pci_write_config_dword(pci_dev, 0xa6c, 0x00000000);
	pci_write_config_dword(pci_dev, 0xa70, ctrl);
	pci_write_config_dword(pci_dev, 0xa78, len);
	pci_write_config_dword(pci_dev, 0xa7c, RES_BUF_ADDR);
	pci_write_config_dword(pci_dev, 0xa80, 0x80000000);
	pci_write_config_dword(pci_dev, 0xa84, (unsigned int)(csec_priv->cmd_phys_addr + MAX_CMD_LEN));
	pci_write_config_dword(pci_dev, 0xa88, (unsigned int)((csec_priv->cmd_phys_addr + MAX_CMD_LEN) >>32));
	pci_write_config_dword(pci_dev, 0x980, 0x00000000);
#endif
}

/*
 * ep_dma_rd_init - program the card's EP DMA read engine: transfer @len
 * bytes from host memory at cmd_phys_addr into the card's command buffer
 * (CMD_BUF_ADDR).
 * @pci_dev: the crypto card.
 * @len:     number of bytes the card should fetch from host memory.
 * @ctrl:    DMA channel control word (e.g. local-interrupt enable bits).
 *
 * Mirrors ep_dma_wr_init() for the opposite direction: 0x99c is the read
 * engine enable, 0xa18 the interrupt mask, 0xa6c..0xa88 the channel
 * context and 0x9a0 the doorbell. Programming goes through the
 * BAR-mapped window (BAR_CONFIG_IN_COS) or PCI config space.
 */
void ep_dma_rd_init(struct pci_dev *pci_dev, unsigned int len, unsigned int ctrl)
{
	struct csec_priv_t *csec_priv;
	struct ccp90x_hw *hw;

#ifdef BAR_CONFIG_IN_COS
	void __iomem *regaddr = NULL;
#endif

	csec_priv = pci_get_drvdata(pci_dev);
	hw = &csec_priv->hw;

#ifdef BAR_CONFIG_IN_COS
	regaddr = (void __iomem *)(csec_priv->ba[hw->commu_bar].base_virt);

	sec_out32(regaddr+0x4000+0x99c, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+0x99c, cpu_2_le32(0x00000001));
	sec_out32(regaddr+0x4000+0xa18, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+0xa6c, cpu_2_le32(0x80000000));
	sec_out32(regaddr+0x4000+0xa70, cpu_2_le32(ctrl));
	sec_out32(regaddr+0x4000+0xa74, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+0xa78, cpu_2_le32(len));
	sec_out32(regaddr+0x4000+0xa7c, cpu_2_le32((unsigned int)csec_priv->cmd_phys_addr));
	sec_out32(regaddr+0x4000+0xa80, cpu_2_le32((unsigned int)(csec_priv->cmd_phys_addr >>32)));
	sec_out32(regaddr+0x4000+0xa84, cpu_2_le32(CMD_BUF_ADDR));
	sec_out32(regaddr+0x4000+0xa88, cpu_2_le32(0x80000000));
	sec_out32(regaddr+0x4000+0x9a0, cpu_2_le32(0x00000000));
#else

	pci_write_config_dword(pci_dev, 0x99c, 0x00000000);
	pci_write_config_dword(pci_dev, 0x99c, 0x00000001);
	//pci_write_config_dword(pci_dev, 0xa18, 0x00010001);
	pci_write_config_dword(pci_dev, 0xa18, 0x00000000);
	pci_write_config_dword(pci_dev, 0xa6c, 0x80000000);
	pci_write_config_dword(pci_dev, 0xa70, ctrl);
	pci_write_config_dword(pci_dev, 0xa78, len);
	pci_write_config_dword(pci_dev, 0xa7c, (unsigned int)csec_priv->cmd_phys_addr);
	pci_write_config_dword(pci_dev, 0xa80, (unsigned int)(csec_priv->cmd_phys_addr >>32));
	pci_write_config_dword(pci_dev, 0xa84, CMD_BUF_ADDR);
	pci_write_config_dword(pci_dev, 0xa88, 0x80000000);
	pci_write_config_dword(pci_dev, 0x9a0, 0x00000000);
#endif
}

/*
 * pci_epdma_config - allocate the shared host command buffer and program
 * the card's inbound iATU window (index 1) so card address
 * 0x1_8000_0000..0x1_800F_FFFF targets that buffer.
 * @pci_dev: the crypto card; its drvdata must already be set.
 *
 * Fix: on dma_alloc_coherent() failure the old code only logged and then
 * dereferenced the NULL buffer with writel() and programmed the iATU
 * with a meaningless target address. Now it bails out early.
 */
void pci_epdma_config(struct pci_dev *pci_dev)
{
	struct csec_priv_t *csec_priv;
	struct ccp90x_hw *hw;

#ifdef BAR_CONFIG_IN_COS
	void __iomem *regaddr = NULL;
#endif

	csec_priv = pci_get_drvdata(pci_dev);
	hw = &csec_priv->hw;

	init_completion(&csec_priv->trans_cmpl);
	csec_priv->pci_dev = pci_dev;
	csec_priv->cmd_buf_size = 0x100000;
	csec_priv->cmd_buf = dma_alloc_coherent(csec_priv->dev, csec_priv->cmd_buf_size, &csec_priv->cmd_phys_addr, GFP_KERNEL|GFP_DMA);
	if(!csec_priv->cmd_buf){
		csec_debug("pci_epdma_config:alloc cmd buf fail\n");
		/* Without the command buffer neither the flag clear below nor
		 * the iATU target address makes sense; do not dereference a
		 * NULL buffer. */
		return;
	}
	/* Clear the command-completion flag word. */
	writel(0, csec_priv->cmd_buf + CMD_FLAG_ADDR_OFFSET);

#ifdef BAR_CONFIG_IN_COS
	regaddr = (void __iomem *)(csec_priv->ba[hw->commu_bar].base_virt);

	sec_out32(regaddr+0x4000+IATU_INDEX, cpu_2_le32(0x00000001));
	sec_out32(regaddr+0x4000+IATU_LBA, cpu_2_le32(0x80000000));
	sec_out32(regaddr+0x4000+IATU_UBA, cpu_2_le32(0x00000001));
	sec_out32(regaddr+0x4000+IATU_LAR, cpu_2_le32(0x800fffff));
	sec_out32(regaddr+0x4000+IATU_LTAR, cpu_2_le32(csec_priv->cmd_phys_addr));
	sec_out32(regaddr+0x4000+IATU_UTAR, cpu_2_le32((csec_priv->cmd_phys_addr)>>32));
	sec_out32(regaddr+0x4000+IATU_CTRL1, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+IATU_CTRL3, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+IATU_CTRL2, cpu_2_le32(0x80000000));
#else

	pci_write_config_dword(pci_dev, IATU_INDEX, 0x00000001);	//card inbound & index 1
	pci_write_config_dword(pci_dev, IATU_LBA, (unsigned)(0x80000000));
	pci_write_config_dword(pci_dev, IATU_UBA, (unsigned)(0x00000001));
	pci_write_config_dword(pci_dev, IATU_LAR, (unsigned)(0x800fffff));
	pci_write_config_dword(pci_dev, IATU_LTAR, (unsigned)(csec_priv->cmd_phys_addr));
	pci_write_config_dword(pci_dev, IATU_UTAR, (unsigned)((csec_priv->cmd_phys_addr)>>32));
	pci_write_config_dword(pci_dev, IATU_CTRL1,0x00000000);
	pci_write_config_dword(pci_dev, IATU_CTRL3,0x00000000);
	pci_write_config_dword(pci_dev, IATU_CTRL2,0x80000000);			//enable
#endif
}

/*
 * host_from_ep_dma_read: Host reads data from the crypto card through DMA.
 * pci_dev:		PCI device pointer.
 * len:			length of data to be read from crypto card RAM.
 * ctrl:		DMA control word (enables the local DMA interrupt).
 * ep_send_buf_addr:	The memory address of the crypto card which holds
 *			the data ready to send.
 * offset:		The offset between 'cmd_phys_addr' and the host memory
 *			address mapping to ep_send_buf_addr.
 *
 * Programs the card's EP DMA write engine (same register layout as
 * ep_dma_wr_init) with a caller-supplied card source address and host
 * destination offset, via the BAR window (BAR_CONFIG_IN_COS) or PCI
 * config space.
 */
void host_from_ep_dma_read(struct pci_dev *pci_dev,  unsigned int len, unsigned int ctrl,
				unsigned int ep_send_buf_addr, unsigned int offset)
{
	struct csec_priv_t *csec_priv;
	struct ccp90x_hw *hw;

#ifdef BAR_CONFIG_IN_COS
	void __iomem *regaddr = NULL;
#endif

	csec_priv = pci_get_drvdata(pci_dev);
	hw = &csec_priv->hw;

#ifdef BAR_CONFIG_IN_COS
	regaddr = (void __iomem *)(csec_priv->ba[hw->commu_bar].base_virt);

	sec_out32(regaddr+0x4000+0x97c, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+0x97c, cpu_2_le32(0x00000001));
	sec_out32(regaddr+0x4000+0x9c4, cpu_2_le32(0x00010001));
	sec_out32(regaddr+0x4000+0xa6c, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+0xa70, cpu_2_le32(ctrl));
	sec_out32(regaddr+0x4000+0xa78, cpu_2_le32(len));
	sec_out32(regaddr+0x4000+0xa7c, cpu_2_le32(ep_send_buf_addr));
	sec_out32(regaddr+0x4000+0xa80, cpu_2_le32(0x80000000));
	sec_out32(regaddr+0x4000+0xa84, cpu_2_le32((unsigned int)(csec_priv->cmd_phys_addr + offset)));
	sec_out32(regaddr+0x4000+0xa88, cpu_2_le32((unsigned int)((csec_priv->cmd_phys_addr + offset) >>32)));
	sec_out32(regaddr+0x4000+0x980, cpu_2_le32(0x00000000));
#else

	pci_write_config_dword(pci_dev, 0x97c, 0x00000000);
	pci_write_config_dword(pci_dev, 0x97c, 0x00000001);
	pci_write_config_dword(pci_dev, 0x9c4, 0x00010001);
	//write32_cfg(hDev, 0x9c4, 0x00000000);
	pci_write_config_dword(pci_dev, 0xa6c, 0x00000000);
	pci_write_config_dword(pci_dev, 0xa70, ctrl);
	pci_write_config_dword(pci_dev, 0xa78, len);
	pci_write_config_dword(pci_dev, 0xa7c, ep_send_buf_addr);
	pci_write_config_dword(pci_dev, 0xa80, 0x80000000);
	pci_write_config_dword(pci_dev, 0xa84, (unsigned int)(csec_priv->cmd_phys_addr + offset));
	pci_write_config_dword(pci_dev, 0xa88, (unsigned int)((csec_priv->cmd_phys_addr + offset) >>32));
	pci_write_config_dword(pci_dev, 0x980, 0x00000000);
#endif
}

/* host_to_ep_dma_write: Host writes data to the crypto card through DMA.
 * pci_dev:		PCI device pointer.
 * len:			length of data to be written to crypto card RAM.
 * ctrl:		DMA control word (enables the local DMA interrupt).
 * ep_recv_buf_addr:	The receive buffer memory address of crypto card.
 * offset:		The offset between 'cmd_phys_addr' and the host memory
 *			address mapping to ep_recv_buf_addr.
 *
 * Fix: the BAR_CONFIG_IN_COS branch previously ignored @offset and always
 * sourced from cmd_phys_addr, unlike the config-space branch and the
 * documented contract; both branches now use cmd_phys_addr + offset.
 * (Header comment also said "read" -- copy/paste error.)
 */
void host_to_ep_dma_write(struct pci_dev *pci_dev, unsigned int len, unsigned int ctrl,
				unsigned int ep_recv_buf_addr, unsigned int offset)
{
	struct csec_priv_t *csec_priv;
	struct ccp90x_hw *hw;

#ifdef BAR_CONFIG_IN_COS
	void __iomem *regaddr = NULL;
#endif

	csec_priv = pci_get_drvdata(pci_dev);
	hw = &csec_priv->hw;

#ifdef BAR_CONFIG_IN_COS
	regaddr = (void __iomem *)(csec_priv->ba[hw->commu_bar].base_virt);

	sec_out32(regaddr+0x4000+0x99c, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+0x99c, cpu_2_le32(0x00000001));
	sec_out32(regaddr+0x4000+0xa18, cpu_2_le32(0x00000000));
	sec_out32(regaddr+0x4000+0xa6c, cpu_2_le32(0x80000000));
	sec_out32(regaddr+0x4000+0xa70, cpu_2_le32(ctrl));
	sec_out32(regaddr+0x4000+0xa78, cpu_2_le32(len));
	/* Source: host buffer at cmd_phys_addr + offset. */
	sec_out32(regaddr+0x4000+0xa7c, cpu_2_le32((unsigned int)(csec_priv->cmd_phys_addr + offset)));
	sec_out32(regaddr+0x4000+0xa80, cpu_2_le32((unsigned int)((csec_priv->cmd_phys_addr + offset) >>32)));
	sec_out32(regaddr+0x4000+0xa84, cpu_2_le32(ep_recv_buf_addr));
	sec_out32(regaddr+0x4000+0xa88, cpu_2_le32(0x80000000));
	sec_out32(regaddr+0x4000+0x9a0, cpu_2_le32(0x00000000));
#else

	pci_write_config_dword(pci_dev, 0x99c, 0x00000000);
	pci_write_config_dword(pci_dev, 0x99c, 0x00000001);
	//pci_write_config_dword(pci_dev, 0xa18, 0x00010001);
	pci_write_config_dword(pci_dev, 0xa18, 0x00000000);
	pci_write_config_dword(pci_dev, 0xa6c, 0x80000000);
	pci_write_config_dword(pci_dev, 0xa70, ctrl);
	pci_write_config_dword(pci_dev, 0xa78, len);
	pci_write_config_dword(pci_dev, 0xa7c, (unsigned int)(csec_priv->cmd_phys_addr + offset));
	pci_write_config_dword(pci_dev, 0xa80, (unsigned int)((csec_priv->cmd_phys_addr + offset) >>32));
	pci_write_config_dword(pci_dev, 0xa84, ep_recv_buf_addr);
	pci_write_config_dword(pci_dev, 0xa88, 0x80000000);
	pci_write_config_dword(pci_dev, 0x9a0, 0x00000000);
#endif
}

/*
 * ccore_pci_read_from_bar - copy @size bytes from a device BAR window
 * into @pbuf, one 32-bit word at a time.
 * @csec_priv: device private data (NULL -> -ENODEV).
 * @bar_index: BAR number, must be below 6.
 * @offset:    byte offset inside the BAR window.
 * @size:      number of bytes to read (advanced in 4-byte steps).
 * @pbuf:      destination buffer, at least @size bytes.
 *
 * Returns 0 on success or a negative errno.
 */
int ccore_pci_read_from_bar(struct csec_priv_t *csec_priv, unsigned int bar_index, unsigned int offset, unsigned int size, unsigned int *pbuf)
{
	void __iomem *regaddr;
	unsigned int *dst = pbuf;
	volatile unsigned int pos;

	if (csec_priv == NULL)
		return -ENODEV;

	if (bar_index >= 6)
		return -EINVAL;

	regaddr = (void __iomem *)(csec_priv->ba[bar_index].base_virt);
	if (regaddr == NULL) {
		printk(KERN_ERR "bar[%d] unremap\n", bar_index);
		return -EINVAL;
	}

	for (pos = 0; pos < size; pos += 4)
		*dst++ = sec_in32(regaddr + offset + pos);

	return 0;
}

/*
 * ccore_pci_csec_init - placeholder init hook for the csec device.
 *
 * Returns -ENODEV when @csec_priv is NULL, 0 otherwise.
 */
int ccore_pci_csec_init(struct csec_priv_t *csec_priv)
{
	return (csec_priv == NULL) ? -ENODEV : 0;
}

/*
 * ccore_pci_csec_release - release resources owned by @csec_priv.
 *
 * Frees the DMA-coherent command buffer if one was allocated and clears
 * the pointer to prevent double free.
 * NOTE(review): the buffer is allocated against csec_priv->dev in
 * pci_epdma_config() but freed against &csec_priv->pci_dev->dev here --
 * confirm both refer to the same struct device.
 */
int ccore_pci_csec_release(struct csec_priv_t *csec_priv)
{
	csec_debug("ccore_pci_csec_release\n");

	if (csec_priv == NULL)
		return -ENODEV;

	if (csec_priv->cmd_buf) {
		dma_free_coherent(&csec_priv->pci_dev->dev, csec_priv->cmd_buf_size,
				  csec_priv->cmd_buf, csec_priv->cmd_phys_addr);
		csec_priv->cmd_buf = NULL;
	}

	return 0;
}

/*
 * ccore_pci_transmint_init - clear the command-completion flag word in
 * the shared command buffer before starting a new DMA transaction.
 * (Name note: "transmint" looks like a typo of "transmit"; kept as-is
 * because external callers depend on it.)
 */
void ccore_pci_transmint_init(struct csec_priv_t *csec_priv)
{

	writel(0, csec_priv->cmd_buf + CMD_FLAG_ADDR_OFFSET);

}

/*
 * ccore_pci_transmit_wait_timeout - poll the command-completion flag.
 * @csec_priv: device private data; cmd_buf must be allocated.
 * @seconds:   overall timeout in seconds.
 *
 * Busy-polls for the first ~5 seconds, then backs off with msleep(1)
 * between reads. When the device sets CMD_FLAG_VALID, the flag is
 * consumed (cleared) and 0 is returned; otherwise -ETIMEDOUT after
 * @seconds.
 *
 * Fix: removed the unreachable `return -EINVAL` that followed the
 * infinite do/while loop.
 */
int ccore_pci_transmit_wait_timeout(struct csec_priv_t *csec_priv, int seconds)
{
	volatile u32 stat;
	unsigned long timeout = jiffies + HZ * seconds;
	unsigned long busy_poll_end = jiffies + HZ * 5;

	for (;;) {
		stat = readl(csec_priv->cmd_buf + CMD_FLAG_ADDR_OFFSET);
		if (stat == CMD_FLAG_VALID) {
			/* Consume the flag so the next transaction starts clean. */
			writel(0, csec_priv->cmd_buf + CMD_FLAG_ADDR_OFFSET);
			return 0;
		}

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		if (time_after(jiffies, busy_poll_end))
			msleep(1); /* back off after ~5s of busy polling */

		cpu_relax();
	}
}

/*
 * ccore_pci_transmit_epdma - one command/response round trip over the
 * PCIe EP DMA engine (CCP903T / CCP903T-H / CCP903T-F).
 * @csec_priv: device private data.
 * @pbInData:  command data pointer. NOTE(review): not referenced here --
 *             the command is presumably already staged in
 *             csec_priv->cmd_buf; confirm against callers.
 * @InLen:     command length, must be in (0, MAX_CMD_LEN].
 * @pbOutData: response data pointer (likewise unreferenced here).
 * @pOutLen:   expected response length, must be in (0, MAX_CMD_LEN].
 *
 * Flow: clear the completion flag, start an EP DMA read (card pulls the
 * command), wait for the card to signal completion, start an EP DMA
 * write (card pushes the response), then poll register 0x9bc for the
 * write-done status and acknowledge it via 0x9c8.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): values passed to pci_write_config_dword() / compared
 * against pci_read_config_dword() results are wrapped in cpu_2_le32();
 * the kernel's config accessors already handle endianness, so this would
 * double-swap on big-endian hosts. Harmless on little-endian -- confirm.
 */
int ccore_pci_transmit_epdma(struct csec_priv_t *csec_priv, unsigned char* pbInData, int InLen, unsigned char* pbOutData, int *pOutLen)
{
	struct ccp90x_hw *hw;
	int ret = 0;
	volatile unsigned int value;
	volatile unsigned int timeout;
#ifdef BAR_CONFIG_IN_COS
	void __iomem *regaddr = NULL;
#endif

	csec_error(KERN_INFO "====ccore_pci_transmit_epdma ===!\n");
	if(csec_priv == NULL)
	{
		return -ENODEV;
	}

	/* Reject zero/oversized lengths before touching the hardware. */
	if((InLen <= 0) || (InLen > MAX_CMD_LEN) || (*pOutLen <= 0) || (*pOutLen > MAX_CMD_LEN))
	{
		printk(KERN_ERR "ccore_pci_transmit len ivalid\n");
		return -EINVAL;
	}

	hw = &csec_priv->hw;
#ifdef BAR_CONFIG_IN_COS
	regaddr = (void __iomem *)(csec_priv->ba[hw->commu_bar].base_virt);
#endif
	ccore_pci_transmint_init(csec_priv);

	/* Card fetches the command from host memory. */
	ep_dma_rd_init(csec_priv->pci_dev, InLen, 8);

	ret = ccore_pci_transmit_wait_timeout(csec_priv, 60);
	if(ret)
	{
		printk(KERN_ERR "ccore_pci_transmit_wait_timeout\n");
		return ret;
	}

//	printk(KERN_INFO "%s()-recv flag\n", __func__);
	/* Card writes the response back to host memory. */
	ep_dma_wr_init(csec_priv->pci_dev, *pOutLen, 8);

	timeout = 100000;
	ret = -ETIMEDOUT;

	/* Poll the write-engine done flag (0x9bc == 1), then ack via 0x9c8. */
	while(timeout){
#ifdef BAR_CONFIG_IN_COS
		value = sec_in32(regaddr+0x4000+0x9bc);
#else
		pci_read_config_dword(csec_priv->pci_dev, 0x9bc, (u32 *)&value);
#endif
		if(value != cpu_2_le32(0x1)){
			timeout--;
			cpu_relax();
			continue;
		}else{
#ifdef BAR_CONFIG_IN_COS
			sec_out32(regaddr+0x4000+0x9c8, cpu_2_le32(0x00000001));
#else
			pci_write_config_dword(csec_priv->pci_dev, 0x9c8, cpu_2_le32(0x1));
#endif
			ret = 0;
			break;
		}

	}

	return ret;
}

/*
 * ccore_pci_transmit_axidma - one command/response round trip using the
 * on-card AXI DMA engine (CCP903T-U).
 * @csec_priv: device private data; cmd_buf must be allocated.
 * @pbInData:  command data pointer. NOTE(review): not referenced here --
 *             the command is presumably already staged in
 *             csec_priv->cmd_buf; confirm against callers.
 * @InLen:     command length, must be in (0, MAX_CMD_LEN].
 * @pbOutData: response data pointer (likewise unreferenced here).
 * @pOutLen:   expected response length, must be in (0, MAX_CMD_LEN].
 *
 * Channel 1 pushes the command to the card (CMD_BUF_ADDR); after the card
 * signals completion via the shared flag, channel 2 pulls the response
 * into cmd_buf + MAX_CMD_LEN (RES_BUF_ADDR) and the DMA-done flag is
 * polled and acknowledged.
 *
 * Fixes: misleading error message referenced pci_epdma_config; unused
 * local `reg` removed; `hw` confined to the branch that uses it so it no
 * longer warns when PCI_DEVICE_AXI_DMA is defined.
 *
 * NOTE(review): transfers use a 128-bit width with size InLen/16 - 1, so
 * InLen is assumed to be a multiple of 16 (and >= 16; smaller values
 * would underflow the size field). Confirm the caller contract.
 */
int ccore_pci_transmit_axidma(struct csec_priv_t *csec_priv, unsigned char* pbInData, int InLen, unsigned char* pbOutData, int *pOutLen)
{
	int ret = 0;
	unsigned int chan;

	printk(KERN_ERR "ccore_pci_transmit_axidma start===================\n");
	if(csec_priv == NULL)
	{
		return -ENODEV;
	}

	if((InLen <= 0) || (InLen > MAX_CMD_LEN) || (*pOutLen <= 0) || (*pOutLen > MAX_CMD_LEN))
	{
		printk(KERN_ERR "ccore_pci_transmit_axidma: len invalid\n");
		return -EINVAL;
	}

#ifndef PCI_DEVICE_AXI_DMA
	{
		struct ccp90x_hw *hw = &csec_priv->hw;

		if(hw->device_id != PCI_DEVICE_ID_CCORE_CC903TU_0)
		{
			printk(KERN_ERR "ccore_pci_transmit_axidma: device does not support AXI DMA\n");
			return -EINVAL;
		}
	}
#endif

	/* Channel 1: arm the DMA-done interrupt signal, then push the
	 * command buffer to the card (128-bit beats; size field takes
	 * beats - 1). */
	chan = 1;
	axidma_intsignal_flag_set(csec_priv, chan, 1);
	axidma_transfer_one_block(csec_priv, chan, csec_priv->cmd_phys_addr, 0x8000000000000000ULL+CMD_BUF_ADDR, InLen/16-1, DMS_2 | DST_TR_WIDTH_128| SRC_TR_WIDTH_128);

	ret = ccore_pci_transmit_wait_timeout(csec_priv, 60);
	if(ret)
	{
		printk(KERN_ERR "ccore_pci_transmit_wait_timeout\n");
		return ret;
	}

	/* Channel 2: mask the interrupt signal, pull the response from the
	 * card into cmd_buf + MAX_CMD_LEN, then poll/ack the DMA-done
	 * status flag. */
	chan = 2;
	axidma_intsignal_flag_set(csec_priv, chan, 0);
	axidma_transfer_one_block(csec_priv, chan, 0x8000000000000000ULL+RES_BUF_ADDR, csec_priv->cmd_phys_addr+MAX_CMD_LEN, InLen/16-1, DMS_2 | DST_TR_WIDTH_128| SRC_TR_WIDTH_128);

	axidma_clear_int_flag(csec_priv, chan);

	return ret;
}

/*
 * ccore_pci_transmit - dispatch one command/response transaction using
 * whichever DMA engine the build targets: the AXI DMA engine on
 * CCP903T-U (PCI_DEVICE_AXI_DMA), the PCIe EP DMA engine otherwise
 * (CCP903T, CCP903T-H, CCP903T-F).
 *
 * Fixes: removed a stray copy-pasted "len ivalid" error log that was
 * printed unconditionally, removed the KERN_ERR debug banners, and the
 * result of the underlying transfer is now propagated instead of being
 * silently discarded (previously this always returned 0).
 *
 * Returns 0 on success or a negative errno from the transfer routine.
 */
int ccore_pci_transmit(struct csec_priv_t *csec_priv, unsigned char* pbInData, int InLen, unsigned char* pbOutData, int *pOutLen)
{
#ifdef PCI_DEVICE_AXI_DMA
	/* CCP903T-U uses the AXI DMA engine. */
	return ccore_pci_transmit_axidma(csec_priv, pbInData, InLen, pbOutData, pOutLen);
#else
	/* CCP903T, CCP903T-H and CCP903T-F use the PCIe EP DMA engine. */
	return ccore_pci_transmit_epdma(csec_priv, pbInData, InLen, pbOutData, pOutLen);
#endif
}

/*
 * ccore_pci_dma_read - pull @len bytes from the card's command buffer
 * (CMD_BUF_ADDR) into host memory via EP DMA, then poll the DMA-done
 * status register (0x9bc) and acknowledge it (0x9c8).
 *
 * Returns 0 on success, -ENODEV for a NULL device, -ETIMEDOUT if the
 * done flag never appears.
 *
 * Fix: the NULL check now runs before `&csec_priv->hw` is taken; it
 * previously came after, making the check ineffective.
 */
int ccore_pci_dma_read(struct csec_priv_t *csec_priv, int len)
{
	struct ccp90x_hw *hw;
	int ret = 0;
	volatile unsigned int value;
	volatile unsigned int timeout;
#ifdef BAR_CONFIG_IN_COS
	void __iomem *regaddr = NULL;
#endif

	if(csec_priv == NULL)
	{
		return -ENODEV;
	}
	hw = &csec_priv->hw;

#ifdef BAR_CONFIG_IN_COS
	regaddr = (void __iomem *)(csec_priv->ba[hw->commu_bar].base_virt);
#endif
	/* Clear the completion flag before kicking off the transfer. */
	ccore_pci_transmint_init(csec_priv);

	host_from_ep_dma_read(csec_priv->pci_dev, len, 8, CMD_BUF_ADDR, 0);
	timeout = 100000;
	ret = -ETIMEDOUT;
	while(timeout){
#ifdef BAR_CONFIG_IN_COS
		value = sec_in32(regaddr+0x4000+0x9bc);
#else
		pci_read_config_dword(csec_priv->pci_dev, 0x9bc, (u32 *)&value);
#endif
		if(value != cpu_2_le32(0x1)){
			timeout--;
			cpu_relax();
			continue;
		}else{
#ifdef BAR_CONFIG_IN_COS
			sec_out32(regaddr+0x4000+0x9c8, cpu_2_le32(0x00000001));
#else
			pci_write_config_dword(csec_priv->pci_dev, 0x9c8, cpu_2_le32(0x1));
#endif
			ret = 0;
			break;
		}

	}

	return ret;
}
/*
 * ccore_pci_dma_write - push @len bytes from the host command buffer to
 * the card's receive buffer (CMD_BUF_ADDR) via EP DMA and wait up to 60
 * seconds for the card to acknowledge through the completion flag.
 *
 * Returns 0 on success, -ENODEV for a NULL device, or the error from
 * ccore_pci_transmit_wait_timeout().
 */
int ccore_pci_dma_write(struct csec_priv_t *csec_priv, int len)
{
	int ret = 0;
	struct ccp90x_hw *hw;

#ifdef BAR_CONFIG_IN_COS
	void __iomem *regaddr = NULL;
#endif

	if(csec_priv == NULL)
	{
		return -ENODEV;
	}

	hw = &csec_priv->hw;
#ifdef BAR_CONFIG_IN_COS
	/* Mapped but not otherwise used on this path. */
	regaddr = (void __iomem *)(csec_priv->ba[hw->commu_bar].base_virt);
#endif
	/* Clear the completion flag before kicking off the transfer. */
	ccore_pci_transmint_init(csec_priv);

	host_to_ep_dma_write(csec_priv->pci_dev, len, 8, CMD_BUF_ADDR, 0);

	ret = ccore_pci_transmit_wait_timeout(csec_priv, 60);
	if(ret)
	{
		printk(KERN_ERR "ccore_pci_transmit_wait_timeout\n");
		return ret;
	}

	return ret;
}


/*
 * pci_csec_config - program the card's inbound iATU windows: index 0 maps
 * BAR0/BAR1 accesses to the SEC register block (sec_base); unless
 * MAIN_DDR is set, index 2 maps BAR3 accesses to internal RAM
 * (inram_base).
 *
 * (pci_get_drvdata() retrieves the per-device private data attached at
 * probe time, letting the driver's entry points share device state.)
 *
 * NOTE(review): the first iATU block writes raw values while the second
 * wraps them in pci_2_le32() -- the kernel config accessors already
 * handle endianness, so the two blocks cannot both be right on a
 * big-endian host. Confirm which convention the hardware expects.
 */
void pci_csec_config(struct pci_dev *pci_dev)
{
	struct csec_priv_t *csec_priv;
	u32 dbi_val0,dbi_val1;

	csec_priv = pci_get_drvdata(pci_dev);

	/* Window 0: SEC registers behind BAR0/BAR1 (1 MiB). */
	pci_write_config_dword(pci_dev, IATU_INDEX, 	0x80000000);	//card inbound & index 0
	pci_read_config_dword(pci_dev,DBI_BAR0,&dbi_val0);
	pci_read_config_dword(pci_dev,DBI_BAR1,&dbi_val1);
	dbi_val0&=0xffffff80;	/* strip BAR flag bits to get the base address */
	pci_write_config_dword(pci_dev, IATU_LBA, 	dbi_val0);
	pci_write_config_dword(pci_dev, IATU_UBA,	dbi_val1);
	pci_write_config_dword(pci_dev, IATU_LAR, 	dbi_val0 +0xfffff );
	pci_write_config_dword(pci_dev, IATU_LTAR, 	(unsigned)(csec_priv->sec_base) );
	pci_write_config_dword(pci_dev, IATU_UTAR,	(unsigned)((csec_priv->sec_base)>>32) );
	pci_write_config_dword(pci_dev, IATU_CTRL1,	0x00000000);
	pci_write_config_dword(pci_dev, IATU_CTRL2,	0x80000000);			//enable

#ifndef MAIN_DDR
	/* Window 2: internal RAM behind BAR3 (64 KiB). */
	pci_write_config_dword(pci_dev, IATU_INDEX, 	0x80000002);	//card inbound & index 2
	pci_read_config_dword(pci_dev,DBI_BAR3,&dbi_val0);
	dbi_val0&=0xffffff80;
	pci_write_config_dword(pci_dev, IATU_LBA, 	dbi_val0);
	pci_write_config_dword(pci_dev, IATU_UBA, 	0);
	pci_write_config_dword(pci_dev, IATU_LAR, 	dbi_val0+0xffff);
	pci_write_config_dword(pci_dev, IATU_LTAR, 	pci_2_le32((unsigned)(csec_priv->inram_base)));
	pci_write_config_dword(pci_dev, IATU_UTAR, 	pci_2_le32((unsigned)((csec_priv->inram_base)>>32)));
	pci_write_config_dword(pci_dev, IATU_CTRL1,	pci_2_le32(0x00000000));
	pci_write_config_dword(pci_dev, IATU_CTRL2,	pci_2_le32(0x80000000));			//enable
#endif


	csec_error("pci_csec_config: base_addr is %llx,taget_addr is %llx\n",csec_priv->ba[0].base_phy,csec_priv->sec_base);
	csec_error("pci_csec_config: base_addr32 li is %x,taget_addr32 hi is %x\n",(unsigned int)(csec_priv->ba[0].base_phy),(unsigned int)((csec_priv->sec_base)>>32));
	csec_error("pci_csec_config: base_virt  is %llx\n",(u64)(csec_priv->ba[0].base_virt));
	csec_error(KERN_ERR "CCP903T: Crypto card PCIe IATU and BAR initialized.\n");
}

/*
 * pci_csec_isr - job ring interrupt handler.
 * @irq:  interrupt number (unused).
 * @data: the struct jobring this vector was registered with.
 *
 * If no dequeue work is pending for this ring, write jrcfg1 to quiesce
 * the ring interrupt source, mark the ring pending and hand the dequeue
 * off to a workqueue. The per-ring timer is re-armed for the next jiffy
 * so completions are still picked up if an interrupt is missed.
 *
 * Always returns IRQ_HANDLED (even when jr->regs is NULL) since the
 * vector is dedicated to this device.
 */
irqreturn_t  pci_csec_isr(int irq,void *data)
{
	struct jobring *jr;
	struct jr_regs __iomem *regs=NULL;

	/* NOTE(review): "%x" with a pointer argument -- should be %p. */
	csec_debug2(KERN_INFO "ei,data %x\n",data);
	jr = (struct jobring *)data;
	if(atomic_read(&jr->state) == DQ_NOPEND)
	{
		regs = jr->regs;
		if(regs == NULL)
		{
			csec_error(KERN_ERR "pci csec isr regs null.\n");
			return IRQ_HANDLED;
		}
		smp_mb();
		//if(sec_in32(&regs->jrint)==1)
		{
			//sec_out32(&regs->jrint,cpu_2_le32(1));
			sec_out32(&regs->jrcfg1,cpu_2_le32(1));
			/* Mark pending before queuing so a racing timer/IRQ
			 * does not schedule the work twice. */
			atomic_set(&(jr->state), DQ_PEND);
			schedule_work_unbound(&jr->dequeue_task);
		}
	}
	mod_timer(&jr->timer,jiffies+1);
	return IRQ_HANDLED;
}

/*
 * csec_timer - periodic polling fallback for job ring completions.
 *
 * Mirrors pci_csec_isr(): if no dequeue work is pending, write jrcfg1,
 * mark the ring pending and schedule the dequeue work, then re-arm the
 * timer for POLL_INVL. Local interrupts are disabled around the state
 * check/transition so it cannot race with the ISR on this CPU.
 *
 * The prototype differs across kernel versions: timer callbacks take a
 * struct timer_list * since 4.15, and an unsigned long cookie before.
 */
#if LINUX_VERSION_CODE>=KERNEL_VERSION(4,15,0)
void csec_timer(struct timer_list *mytimer)
#else
void csec_timer(unsigned long data)
#endif
{
	struct jobring *jr;
	struct jr_regs __iomem *regs=NULL;
	unsigned long flags;
#if LINUX_VERSION_CODE>=KERNEL_VERSION(4,15,0)
	jr = from_timer(jr,mytimer,timer);
#else
	jr = (struct jobring *)data;
#endif
	csec_debug2(KERN_INFO "t\n");
	CSEC_DBG2("t\n");

	local_irq_save(flags);
	if(atomic_read(&(jr->state)) == DQ_NOPEND)
	{
		regs = jr->regs;
			smp_mb();
			//sec_out32(&regs->jrint,cpu_2_le32(1));
			sec_out32(&regs->jrcfg1,cpu_2_le32(1));
			/* Mark pending before re-enabling IRQs and queuing. */
			atomic_set(&(jr->state), DQ_PEND);
			local_irq_restore(flags);
			schedule_work_unbound(&jr->dequeue_task);
	} else {
		local_irq_restore(flags);
	}
	mod_timer(&jr->timer,jiffies + POLL_INVL);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
/*
 * dq_work_info - printf-style formatter into a caller-supplied buffer.
 * Only built on pre-2.6.36 kernels (used to name per-ring workqueues).
 *
 * Returns the number of characters written.
 * NOTE(review): vsprintf() performs no bounds checking; callers must
 * guarantee @buf is large enough (vsnprintf would be safer).
 */
static int dq_work_info(char *buf, char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsprintf(buf, fmt, args);
	va_end(args);

	return (len);
}
#endif
/*
 * pci_csec_msix - enable MSI-X with one vector per ring partition.
 * @csec_priv: device private data; msix_entry[] is filled in here.
 * @parts_num: exact number of vectors requested.
 *
 * Returns the PCI core's result: pci_enable_msix_range() on kernels
 * >= 4.15, pci_enable_msix() on older kernels.
 */
int pci_csec_msix(struct csec_priv_t *csec_priv,int parts_num)
{
	int idx;

	for (idx = 0; idx < parts_num; idx++)
		csec_priv->msix_entry[idx].entry = idx;

#if LINUX_VERSION_CODE>=KERNEL_VERSION(4,15,0)
	return pci_enable_msix_range(csec_priv->pci_dev,
				     csec_priv->msix_entry,
				     parts_num, parts_num);
#else
	return pci_enable_msix(csec_priv->pci_dev,
			       csec_priv->msix_entry,
			       parts_num);
#endif
}
/**
 * ccp90x_init_hw_struct - initialize members of hw struct
 */
static int ccp90x_init_hw_struct(struct pci_dev *pci_dev, struct csec_priv_t *csec_priv)
{
	struct ccp90x_hw *hw;
	hw = &csec_priv->hw;

	hw->vendor_id = (uint32_t)pci_dev->vendor;
	hw->device_id = (uint32_t)pci_dev->device;
	hw->vf_max =  0;
	hw->vf_en =   0;
	hw->msix_enable = MSIX_OFF;
	hw->sec_bar  = 0;
	hw->commu_bar= 5;
	hw->ring_num = RING_NUM;

        switch (hw->device_id) {
        case PCI_DEVICE_ID_CCORE_CC903T:
		hw->sec_bar  = BAR_0;
		hw->ring_num = RING_NUM;
		hw->commu_bar= BAR_5;
		hw->dma_hi_addr = 0x0000000000000000ULL;
                break;
        case PCI_DEVICE_ID_CCORE_CC903TH:
		hw->sec_bar  = BAR_0;
		hw->ring_num = RING_NUM;
		hw->commu_bar= BAR_5;
		hw->dma_hi_addr = 0x0000000000000000ULL;
                break;
        case PCI_DEVICE_ID_CCORE_CC903TF:
		hw->sec_bar  = BAR_4;//BAR_0;
		hw->ring_num = RING_NUM;
		hw->commu_bar= BAR_3;
		hw->dma_hi_addr = 0x0000000000000000ULL;
                break;
        case PCI_DEVICE_ID_CCORE_CC903TU_0:
		hw->msix_enable = MSIX_OFF;
		hw->sec_bar  = BAR_4;//BAR_0;
		hw->ring_num = RING_NUM;
		hw->commu_bar= BAR_3;
		hw->dma_hi_addr = 0x0000000000000000ULL;
                break;
        case PCI_DEVICE_ID_CCORE_CC903TU_1:
		hw->msix_enable = MSIX_OFF;
		hw->sec_bar  = BAR_5;//BAR_0;
		hw->ring_num = RING_NUM;
		hw->commu_bar= BAR_3;
		hw->dma_hi_addr = 0x0000000000000000ULL;//0x0000010000000000ULL;
                break;
        default:
                return -EIO;
        }

	return 0;
}

static int pci_mode_check(struct csec_priv_t *csec_priv)
{
	struct jr_regs __iomem *regs=NULL;
	struct jr_regs __iomem *check_regs=NULL;
	struct ccp90x_hw *hw;
	
	hw = &csec_priv->hw;
	if(hw->sec_bar == BAR_0)
	{
		return 0;
	}

	regs = (struct jr_regs __iomem *)(csec_priv->ba[hw->sec_bar].base_virt + 0x1000);

	csec_priv->ba[BAR_0].base_virt = ioremap(csec_priv->ba[BAR_0].base_phy, csec_priv->ba[BAR_0].len);
	if(csec_priv->ba[BAR_0].base_virt == NULL)
	{
		csec_error( KERN_ERR "BAR_0 ioremap fail\n");
		return -1;
	}
	check_regs = (struct jr_regs __iomem *)(csec_priv->ba[BAR_0].base_virt + 0x1000);

	if(memcmp(regs, check_regs, sizeof(struct jr_regs)))
	{
		return -1;//2nd boot mode
	}
	else
	{
		return 0;//sdf cos mode
	}
}
/*
 * Probe sequence:
 *  1. Device check: reject virtual functions (VFs).
 *  2. Memory allocation: allocate the csec_priv_t private data structure.
 *  3. Hardware init: fill the hw struct and attach the private data to the PCI device.
 *  4. Enable & regions: enable the PCI device and request its resource regions.
 *  5. SR-IOV / interrupts: enable SR-IOV and MSI-X according to the hw configuration.
 *  6. Resource mapping: map the PCI BARs into virtual address space for DMA setup.
 *  7. DMA configuration: set the DMA masks.
 *  8. Custom configuration: run the board-specific config hooks.
 *  9. Buffer setup: allocate and map the communication and ring buffers.
 * 10. IRQ registration: install the interrupt handlers.
 * 11. Device init: call sec_init().
 * 12. Workqueues: create single-threaded workqueues (old kernels only).
 */
static int pci_csec_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
{
	int ret=-1, i;
	int err;
	struct csec_priv_t *csec_priv;
	struct ccp90x_hw *hw;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	char dqname[10];
#endif
	csec_error(KERN_INFO "pci_csec_probe: called!\n");
	csec_error(KERN_INFO "vendor = 0x%x, device = 0x%x \n", pci_dev->vendor, pci_dev->device);
	probe_status = 0;
	if (pci_dev->is_virtfn)
	{
		csec_error( KERN_ERR "%s (%hx:%hx) should not be a VF!\n",	pci_name(pci_dev), pci_dev->vendor, pci_dev->device);
		return -EINVAL;
	}


	csec_priv =(struct csec_priv_t *)kzalloc(sizeof(struct csec_priv_t),GFP_KERNEL);

	csec_error(KERN_INFO "csec_priv is 0x%llx\n",(u64)csec_priv );

	if(!csec_priv)
	{
		csec_error(KERN_INFO "pci_csec_probe: failed to enable device pci_csec\n");
		goto err_no_mem;
	}
	csec_error(KERN_INFO "csec_priv is 0x%llx\n",(u64)csec_priv );

	ccp90x_init_hw_struct(pci_dev, csec_priv);
	hw = &csec_priv->hw;

	pci_set_drvdata(pci_dev, csec_priv);

	if (!!(ret = pci_enable_device(pci_dev))) {
		csec_error(KERN_INFO "pci_csec_probe: failed to enable device pci_csec\n");
		goto err_en_dev;
	}

	if(!!(ret = pci_request_regions(pci_dev, "csec's pcie device"))) {
		csec_error(KERN_INFO "pci_csec_probe: failed to request regions\n");
		goto err_req_regions;
	}

	if(hw->vf_en)
	{
			if((hw->vf_max) - (hw->ring_num))
			{
				pci_sriov_set_totalvfs(pci_dev, (hw->vf_max) - (hw->ring_num));
				pci_enable_sriov(csec_priv->pci_dev, (hw->vf_max) - (hw->ring_num));
			}
	}

	if(hw->msix_enable)
	{
			err = pci_csec_msix(csec_priv, hw->ring_num);
			if(err>=0)
			{
#ifdef CONFIG_PCI_IOV
				hw->msix_en = 1;
			}
			else
			{
			/* disable SR-IOV for non MSI-X configurations */
				if(hw->vf_en)
				{
					pci_disable_sriov(csec_priv->pci_dev);
					msleep(500);
					csec_error( KERN_ERR "IOV Disabled\n");
				}
				goto err_msix_sriov;
#endif
			}
	}
	else
	{
		if (!pci_dev_msi_enabled(pci_dev)) {
			if (!!(ret = pci_enable_msi(pci_dev))) {
				csec_error(KERN_ERR  "pci_csec_probe: failed to enable msi,back to intc line\n");
			//	goto err_en_msi;
			}
		}
	}

	pci_set_master(pci_dev);
	//todo
	for (i = 0; i < sizeof(csec_priv->ba) / sizeof(struct ba_t); i++)
	{
		csec_priv->ba[i].base_phy = pci_resource_start(pci_dev, i);
		csec_priv->ba[i].len = pci_resource_len(pci_dev, i);
		csec_priv->ba[i].flags = pci_resource_flags(pci_dev, i);
		printk(KERN_INFO "pci_csec_probe: bar%d: base = 0x%llx, length = 0x%llx, flag = 0x%x\n", i,(u64)((csec_priv->ba)[i].base_phy), (u64)((csec_priv->ba)[i].len), (u32)((csec_priv->ba)[i].flags));
	}

	csec_priv->pci_dev = pci_dev;
	csec_priv->jr_g =csec_priv->jr_t.jr;
	csec_priv->sec_base = SEC_ADDR(0);
	csec_priv->inram_base = INRAM_ADDR;
	csec_priv->ba[hw->sec_bar].base_virt = ioremap(csec_priv->ba[hw->sec_bar].base_phy, csec_priv->ba[hw->sec_bar].len);		//bar0 is iatu to card sec

	  // 打印 base_virt 的地址
        printk(KERN_INFO "hw->sec_bar;%u,The address of (csec_priv->ba[hw->sec_bar].base_virt): %p\n",hw->sec_bar, (void *)csec_priv->ba[hw->sec_bar].base_virt);

#ifdef BAR_CONFIG_IN_COS
	csec_priv->ba[hw->commu_bar].base_virt = ioremap(csec_priv->ba[hw->commu_bar].base_phy, csec_priv->ba[hw->commu_bar].len);		//bar5 is to card reg
#endif
	csec_priv->dev = &pci_dev->dev;
	csec_priv->ring_total = hw->ring_num;
	for(i=0;i<hw->ring_num;i++)
	{
		atomic_set(&(csec_priv->jr_t.jr[i].state),DQ_NOPEND);
	}
	
	err = dma_set_coherent_mask(&pci_dev->dev, DMA_BIT_MASK(32));
	if (err) {
		err = dma_set_coherent_mask(&pci_dev->dev, DMA_BIT_MASK(32));
		if(err)
		{		
				csec_error(KERN_ERR "No support 64bit coherent dma! back to 32 bit dma\n");
				goto err_dma;
		}
	}						    
	err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
	if (err) {
		err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
		if(err)
		{	
			csec_error(KERN_ERR "No support 64bit dma! back to 32 bit dma\n");
			goto err_dma;
		}	
	}

#ifdef BAR_CONFIG_IN_COS
	pci_epdma_config(pci_dev);//add for custom api
#else
	pci_csec_config(pci_dev);
	pci_epdma_config(pci_dev);//add for custom api
#endif

#ifdef MAIN_DDR
	csec_priv->commu_virt =dma_alloc_coherent(csec_priv->dev, array_size*hw->ring_num+COMMU_SIZE, &(csec_priv->commu_phy),GFP_KERNEL|SYS_DMA);
	if(!csec_priv->commu_virt)
	{
		csec_error(KERN_ERR "pci_csec_probe:  csec_priv->commu_virt no enough mem");
		goto err_dma_mem;
	}
	csec_priv->ring_virt = csec_priv->commu_virt + COMMU_SIZE;
	csec_priv->ring_phy = (change_addr_for_sec(csec_priv->commu_phy+COMMU_SIZE) | hw->dma_hi_addr);
	csec_error(KERN_INFO "pci_csec_probe===========DDR=================\n");

#else
	csec_priv->ring_phy = INRAM_ADDR+COMMU_SIZE;
	csec_priv->commu_virt = ioremap(csec_priv->ba[3].base_phy, array_size*hw->ring_num+COMMU_SIZE);
	csec_priv->ring_virt = csec_priv->commu_virt + COMMU_SIZE;
	csec_error(KERN_INFO "pci_csec_probe===========NON DDR=================\n");
#endif

	csec_error(KERN_INFO "csec_priv->ring_virt is %llx,csec_priv->ring_phy %llx\n",(u64)(csec_priv->ring_virt),csec_priv->ring_phy);
	if(!csec_priv->ring_virt)
	{
		csec_error(KERN_ERR "pci_csec_probe:  csec_priv->ring_virt no enough mem");
		goto err_dma_mem;
	}

	csec_error(KERN_INFO "pci_csec_probe: vendor = 0x%x, device = 0x%x\n", (unsigned int) pci_dev->vendor, (unsigned int) pci_dev->device);

	if(hw->msix_en == 1)
	{
		for(i=0; i<hw->ring_num; i++)
		{
			err = request_irq(csec_priv->msix_entry[i].vector,pci_csec_isr,0,"ccp908_card_pf_irq", &(csec_priv->jr_g[i]));
			if (err)
				csec_error(KERN_ERR "CCP903T: Binding MSIX IRQ handler failed, IRQ=%d\n", csec_priv->msix_entry[i].vector);
			else
				csec_error(KERN_ERR "CCP903T: Registered irq handler to binding irq %d for processing sec jobs.\n",	csec_priv->msix_entry[i].vector);
		}
	}
	else
	{
		for(i=0; i<hw->ring_num; i++)
		{
			err = request_irq(csec_priv->pci_dev->irq, pci_csec_isr, IRQF_SHARED,	  "ccp908_card_share_irq", &(csec_priv->jr_g[i]));
			if (err)
				csec_error(KERN_ERR "CCP903T: Binding MSI IRQ handler failed, IRQ=%d\n",csec_priv->pci_dev->irq);
			else
				csec_error(KERN_ERR "CCP903T: Registered irq handler to binding irq %d for processing sec jobs.\n",	csec_priv->pci_dev->irq);
		}
	}

/*
	ret = pci_mode_check(csec_priv);
	if(ret)
	{
		csec_error(KERN_ERR "CCP903T: Not in COS Mode\n");
		goto err_sec_init;
	}
*/
	ret = sec_init(csec_priv);
	if(ret)
		goto err_sec_init;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	for(i=0; i<hw->ring_num; i++)
	{
		memset(dqname,0,sizeof(dqname));
		dq_work_info(dqname, "dqwork""%d", i);
		g_dq_work_queue[i] = create_singlethread_workqueue(dqname);
		if(!g_dq_work_queue[i]){
			ret = -ENOMEM;
			goto err_sec_init;
		}
	}
#endif

	csec_error(KERN_ERR "CCP903T: PCI driver probe crypto card initialized.\n");

	return 0;

err_mode_check:
	for(i=0;i<hw->ring_num;i++)
	{
		cancel_work_sync(&(csec_priv->jr_g[i].dequeue_task) );
		del_timer(&csec_priv->jr_g[i].timer);
	}

err_sec_init:
	for(i=0; i<hw->ring_num; i++)
	{
		if(hw->msix_en == 1)
			free_irq(csec_priv->msix_entry[i].vector,&(csec_priv->jr_g[i]));
		else
			free_irq(csec_priv->pci_dev->irq,&(csec_priv->jr_g[i]));
	}

err_dma_mem:
	if(hw->msix_en==1)
	{
		if((hw->vf_en)&&((hw->vf_max) - (hw->ring_num)))
		{
			pci_disable_sriov(csec_priv->pci_dev);
		}
		pci_disable_msix(pci_dev);
	}
	else
	{
		if (pci_dev_msi_enabled(pci_dev)) 
		{
			pci_disable_msi(pci_dev);
		}
	}

err_dma:
#ifdef CONFIG_PCI_IOV
err_msix_sriov:
#endif
	pci_release_regions(pci_dev);

err_req_regions:
	pci_disable_device(pci_dev);

err_en_dev:
	kfree(csec_priv);

err_no_mem:
#ifdef MAIN_DDR
	//csec_priv->ring_phy = change_addr_for_cpu(csec_priv->ring_phy);
	dma_free_coherent(csec_priv->dev, array_size*(hw->ring_num)+COMMU_SIZE, csec_priv->commu_virt,csec_priv->commu_phy);
#else
	iounmap(csec_priv->ring_virt);
#endif

	csec_error("CCP903T: PCI driver probe crypto card failed!\n");
	probe_status = ret;
	return ret;
}

/*
 * pci_csec_remove - teardown counterpart of pci_csec_probe.
 *
 * Fixes vs. original:
 *  - The non-DDR path now unmaps commu_virt — the address ioremap()
 *    actually returned — instead of the offset ring_virt pointer, which
 *    is not a valid iounmap() cookie.
 *  - The commu/ring buffer is released only after the IRQ handlers,
 *    workqueues, works and timers that may touch it are stopped; the
 *    original freed the buffer first, leaving a use-after-free window.
 */
static void  pci_csec_remove(struct pci_dev *pdev)
{
	struct csec_priv_t *csec_priv;
	struct ccp90x_hw *hw;
	int i;

	csec_debug (KERN_INFO "pci_csec_remove is called\n");

	csec_priv = pci_get_drvdata(pdev);
	hw = &csec_priv->hw;

	/* Stop interrupt delivery first. */
	if(hw->msix_en)
	{
#ifdef CONFIG_PCI_IOV
		for(i=0; i<hw->ring_num; i++)
			 free_irq(csec_priv->msix_entry[i].vector,&(csec_priv->jr_g[i]));

		pci_disable_msix(csec_priv->pci_dev);

		if((hw->vf_en)&&((hw->vf_max) - (hw->ring_num)))
		    pci_disable_sriov(csec_priv->pci_dev);

		msleep(500);
#endif
	}
	else
	{
		for(i=0; i<hw->ring_num; i++)
			 free_irq(csec_priv->pci_dev->irq,&(csec_priv->jr_g[i]));

		if (pci_dev_msi_enabled(csec_priv->pci_dev))
			pci_disable_msi(csec_priv->pci_dev);
	}

	/* Drain deferred work before freeing the memory it operates on. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	for(i=0;i<hw->ring_num;i++)
	{
		if(g_dq_work_queue[i]){
			destroy_workqueue(g_dq_work_queue[i]);
			g_dq_work_queue[i] = NULL;
		}
	}
#endif
	for(i=0;i<hw->ring_num;i++)
	{
		cancel_work_sync(&(csec_priv->jr_g[i].dequeue_task) );
		del_timer(&csec_priv->jr_g[i].timer);
	}

#ifdef MAIN_DDR
	dma_free_coherent(csec_priv->dev, array_size*hw->ring_num+COMMU_SIZE, csec_priv->commu_virt,csec_priv->commu_phy);
#else
	/* iounmap() takes the cookie ioremap() returned. */
	iounmap(csec_priv->commu_virt);
#endif

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	kfree(csec_priv);
	csec_debug (KERN_INFO "pci_csec_remove is over!\n");
	return;
}

/*
 * pci_csec_suspend - PM suspend hook.
 *
 * Stub: logs the call and reports success; no device state is saved.
 */
static int pci_csec_suspend(struct pci_dev *pdev, pm_message_t state)
{
	csec_debug(KERN_INFO "pci_csec_suspend is called\n");
	return 0;
}

/*
 * pci_csec_resume - PM resume hook.
 *
 * Stub: logs the call and reports success; no device state is restored.
 */
static int pci_csec_resume(struct pci_dev *pdev)
{
	csec_debug(KERN_INFO "pci_csec_resume is called\n");
	return 0;
}


/*
 * PCI driver descriptor.  Uses C99 designated initializers instead of
 * the deprecated GNU "label:" extension the original relied on.
 * NOTE(review): pci_csec_pci_tbl is declared __initdata but is
 * referenced here after init memory is freed — confirm it should not be
 * __initdata.
 */
static struct pci_driver pci_csec_pci_driver = {
	.name     = "csec's pcie device",
	.id_table = pci_csec_pci_tbl,
	.probe    = pci_csec_probe,
	.suspend  = pci_csec_suspend,
	.resume   = pci_csec_resume,
	.remove   = pci_csec_remove,
};

/*
 * pci_csec_init - module entry: scan for supported cards, then register
 * the PCI driver.
 *
 * Walks the device-id table and counts matching devices; if none exist
 * the driver is not registered at all.  After registration, a probe
 * failure recorded in probe_status causes immediate unregistration so
 * the module load fails with the probe's error code.
 *
 * Fix vs. original: removed the unused local `device_id`, which only
 * produced a set-but-unused compiler warning.
 *
 * Refcounting: pci_get_device() releases the reference on the `from`
 * argument, so feeding the previous hit back in balances each iteration;
 * the loop exits with pci_dev == NULL, leaving no dangling reference
 * (which is why no explicit pci_dev_put() is needed here).
 */
int  pci_csec_init(void)
{
	struct pci_dev *pci_dev = NULL;
	int err;
	int i, cnt=0;

	csec_debug(KERN_INFO "pci_csec_init is called\n");

	/* Advance i only when the current id has no more devices. */
	for (i=0; pci_csec_pci_tbl[i].vendor == PCI_VENDOR_ID_CCORE;) {
		pci_dev = pci_get_device(PCI_VENDOR_ID_CCORE, pci_csec_pci_tbl[i].device, pci_dev);
		csec_debug(KERN_INFO "ccp903_cards_init: s2!\n");
		if (pci_dev == NULL) {
			i++;
			continue;
		}
		printk(KERN_ERR "CCP903T %04x:%04x (vendor:device) crypto card found.",
				pci_dev->vendor, pci_dev->device);
		cnt++;
	}

	if(!cnt) {
		csec_error(KERN_ERR "CCP903T: Warning! Crypto card not found!\n");
		return -EINVAL;
	}

	err = pci_register_driver(&pci_csec_pci_driver);
	if (err || probe_status) {
		if(probe_status) {
			pci_unregister_driver(&pci_csec_pci_driver);
			err = probe_status;
		}
		csec_error(KERN_ERR "CCP903T: PCI driver register failed, error %d.\n", err);
	}

	return err;
}

/*
 * pci_csec_exit - module exit: unregister the PCI driver.
 *
 * Unregistration triggers pci_csec_remove() for every bound device.
 */
void  pci_csec_exit(void)
{
	csec_debug(KERN_INFO "pci_csec_exit is called\n");
	pci_unregister_driver(&pci_csec_pci_driver);
}