/**
 @file dal_kernel.c

 @date 2012-10-18

 @version v2.0


*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/io.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <asm/irq.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bug.h>
#if defined(SOC_ACTIVE) || defined(ASW_ACTIVE)
#include <linux/platform_device.h>
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
#include <linux/irqdomain.h>
#endif
#include "dal_common.h"
#include "dal_kernel.h"
#include "dal_mpool.h"
#include "dal.h"
#include "sal.h"
#if defined(ASW_ACTIVE)
#include <linux/of_mdio.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#endif
#include "ctc_mix.h"

MODULE_AUTHOR("Suzhou Centec Communications Co., Ltd.");
MODULE_DESCRIPTION("DAL kernel module");
MODULE_LICENSE("GPL");
/* DMA memory pool size */
static char* dma_pool_size;                       /* module param string, e.g. "4M"; parsed elsewhere */
static dal_access_type_t g_dal_access = DAL_PCIE_MM;   /* register access method; default memory-mapped PCIe */
static uint8  g_dal_dev_lchip[CTC_MAX_LOCAL_CHIP_NUM] = {0};  /* first lchip id owned by each ldev */

module_param(dma_pool_size, charp, 0);
MODULE_PARM_DESC(dma_pool_size,
                 "Specify DMA memory pool size (default 4MB)");
/* I/O mode selection string, e.g. "mdio"; consumed elsewhere in this module */
static char* dal_io_user_mode;
module_param(dal_io_user_mode, charp, 0);
MODULE_PARM_DESC(dal_io_user_mode,
                 "Specify io mode(default mdio)");

static int dal_debug = 0;   /* debug verbosity; 0 = off */
module_param(dal_debug, int, 0);
MODULE_PARM_DESC(dal_debug, "Set debug level (default 0)");


/*
 * Default kernel-mode DAL operations table.
 * Uses C99 designated initializers instead of the obsolete GCC-only
 * "field: value" syntax the file previously relied on.  Fields not listed
 * (handle_netif, dal_ioctl, ...) remain NULL, exactly as before; user
 * overrides are merged in by dal_op_init().
 */
dal_op_t g_dal_op =
{
    .pci_read               = dal_pci_read,
    .pci_write              = dal_pci_write,
    .pci_conf_read          = dal_pci_conf_read,
    .pci_conf_write         = dal_pci_conf_write,
    .i2c_read               = NULL,               /* no default I2C backend */
    .i2c_write              = NULL,
    .interrupt_register     = dal_interrupt_register,
    .interrupt_unregister   = dal_interrupt_unregister,
    .interrupt_set_en       = dal_interrupt_set_en,
    .interrupt_get_msi_info = dal_interrupt_get_msi_info,
    .interrupt_set_msi_cap  = dal_kernel_set_msi_enable,
    .interrupt_set_msix_cap = dal_kernel_set_msix_enable,
    .logic_to_phy           = dal_logic_to_phy,
    .phy_to_logic           = dal_phy_to_logic,
    .dma_alloc              = dal_dma_alloc,
    .dma_free               = dal_dma_free,
    .dma_cache_inval        = dal_dma_cache_inval,
    .dma_cache_flush        = dal_dma_cache_flush,
    .interrupt_get_irq      = dal_interrupt_get_irq,
    .pci_read_ext           = dal_pci_read_ext,
    .pci_write_ext          = dal_pci_write_ext,
    .pci_read_ext2          = dal_pci_read_ext2,
    .pci_write_ext2         = dal_pci_write_ext2,
    .smi_read               = dal_smi_read,
    .smi_write              = dal_smi_write,
};

uint8 g_dal_ldev_init[CTC_MAX_LOCAL_CHIP_NUM] = {0};  /*device_indx init*/
/* NOTE(review): {0xFF} initializes only element 0; elements 1..N-1 are
 * zero-initialized per C semantics.  If 0xFF is meant to mark every ldev
 * as "unmapped", an explicit runtime fill is needed — confirm intent. */
uint8 g_dal_ldev_2_slot[CTC_MAX_LOCAL_CHIP_NUM] = {0xFF};  /*ldev to pci_dev*/
uint8 g_dal_dev_pp_num[CTC_MAX_LOCAL_CHIP_NUM] = {0};      /*ldev pp num, index is ldev*/

/*****************************************************************************
 * defines
 *****************************************************************************/
#define MEM_MAP_RESERVE SetPageReserved
#define MEM_MAP_UNRESERVE ClearPageReserved

#define CTC_POLL_INTERRUPT_STR "poll_intr"
#define VIRT_TO_PAGE(p)     virt_to_page((p))
/* Tag values stored in the low 2 address bits of each DMA block pointer;
 * see _dal_find_largest_segment() for how they are used. */
#define DAL_UNTAG_BLOCK         0
#define DAL_DISCARD_BLOCK      1
#define DAL_MATCHED_BLOCK     2
#define DAL_CUR_MATCH_BLOCk 3

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
#include <linux/slab.h>
/* virt_to_bus/bus_to_virt were removed for most arches; map to phys helpers */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#endif

/* Stamps out a top-half IRQ handler bound to dal_isr[idx]:
 * if the slot already has a pending trigger the IRQ is simply acknowledged;
 * otherwise the line is masked (disable_irq_nosync), the trigger flag is
 * set, and the bottom-half thread is woken via its semaphore. */
#define DAL_INTR_HANDLER_TEMPLATE(idx) static irqreturn_t \
intr##idx##_handler(int irq, void* dev_id) \
{ \
    if(dal_glb.dal_isr[idx].intr_trigger) \
    { \
        return IRQ_HANDLED; \
    } \
    disable_irq_nosync(irq); \
    dal_glb.dal_isr[idx].intr_trigger = 1; \
    sal_sem_give(dal_glb.dal_isr[idx].p_inr_sem);  \
    return IRQ_HANDLED; \
}

/* Compile-time debug print switch; CTC_PRINTK compiles away when _DEBUG_ is 0 */
#define _DEBUG_ 0
#if _DEBUG_
#define CTC_PRINTK(...) printk(__VA_ARGS__);
#else
#define CTC_PRINTK(...)
#endif
/*****************************************************************************
 * typedef
 *****************************************************************************/
/* Top-half handler signature (matches the Linux irq_handler_t shape) */
typedef irqreturn_t (*p_func) (int irq, void* dev_id);
/* Per-IRQ bookkeeping: top-half handler, bottom-half thread, and the
 * semaphore that connects them. */
typedef struct dal_isr_s
{
    int irq;                    /* Linux IRQ number; 0 means slot is free */
    void (* isr)(void*);        /* user callback run in thread context */
    void* isr_data;             /* opaque argument stored for the callback */
    int trigger;
    wait_queue_head_t wqh;
    sal_sem_t*  p_inr_sem;      /* wakes dal_intr_thread from the top half */
    sal_task_t*  p_intr_thread; /* bottom-half service thread */
    int ref_cnt;                /* shared-IRQ registration count */
    int intr_idx;               /* index of this slot in dal_glb.dal_isr[] */
    int prio;                   /* thread priority passed to sal_task_create */
    int active;                 /* 0 tells the thread to exit on next wakeup */
    int intr_trigger;           /* set by top half, cleared by thread */
    p_func intr_handler_fun;    /* top-half handler passed to request_irq */
} dal_isr_t;
#if defined (ASW_ACTIVE)
/* ASW platform device context: register access ops plus the bus handle
 * (MDIO, SPI or I2C) used to reach the switch chip. */
typedef struct dal_kernel_asw_dev_s
{
	struct platform_device* p_dev;
	unsigned char	mode;           /* which bus backend is in use */
	struct dal_reg_ops	*reg_ops;
	struct mii_bus		*ext_mbus;
	dev_t devno;
	int	irq;
	struct spi_device *spi_device;
	struct i2c_adapter *i2c_adapt;
	unsigned char i2c_addr;
} dal_kernel_asw_dev_t;
/* Bus-agnostic register accessors; cfg is the backend-specific handle */
struct dal_reg_ops {
	int (*read_reg)(void *cfg, unsigned int addr, unsigned int *data);
	int (*write_reg)(void *cfg, unsigned int addr, unsigned int data);
};
#endif
/* Parameters handed to interrupt setup paths */
struct dal_intr_para_s
{
    uint32 irq;
    int8 intr_idx;
    int16 prio;
};
typedef struct dal_intr_para_s dal_intr_para_t;

/* One probed device: bus handle plus its mapped register window */
typedef struct dal_kernel_dev_s
{
    struct list_head list;
    /*pcie device pointer struct pci_dev*, local pointer struct platform_device* */
    void* pci_dev;

    /* PCI I/O mapped base address */
    void __iomem * logic_address;

    /* Physical address */
    uintptr phys_address;
} dal_kernel_dev_t;
/* Descriptor for assembling a physically-contiguous DMA segment out of
 * individually allocated page blocks (see _dal_dma_segment_alloc). */
typedef struct _dma_segment
{
    struct list_head list;
    unsigned long req_size;     /* Requested DMA segment size */
    unsigned long blk_size;     /* DMA block size */
    unsigned long blk_order;    /* DMA block size in alternate format */
    unsigned long seg_size;     /* Current DMA segment size */
    unsigned long seg_begin;    /* Logical address of segment */
    unsigned long seg_end;      /* Logical end address of segment */
    unsigned long* blk_ptr;     /* Array of logical DMA block addresses */
    int blk_cnt_max;            /* Maximum number of block to allocate */
    int blk_cnt;                /* Current number of blocks allocated */
} dma_segment_t;

/* Module-wide state shared by all devices */
struct dal_kernel_glb_s
{
    unsigned char dal_chip_num;
    unsigned char dal_intr_num;     /* number of registered IRQ slots */
    unsigned char use_high_memory;
	unsigned char dal_io_mode;
    unsigned int dma_mem_size;      /* 0 = no reserved contiguous DMA pool */
    dal_isr_t dal_isr[DAL_MAX_INTR_NUM];
};
typedef struct dal_kernel_glb_s dal_kernel_glb_t;
/* Per-slot (probed device) state: DMA pool, MSI/MSI-X vectors, PCI cmd cache */
struct dal_kernel_master_s
{
    dal_kernel_dev_t dal_dev;
    dal_mpool_mem_t* dma_pool;
    unsigned int* dma_virt_base;        /* kernel virtual base of the DMA pool */
    unsigned long long dma_phy_base;    /* physical base of the DMA pool */
    unsigned char msi_irq_base[DAL_MAX_INTR_NUM];  /* NOTE(review): u8 truncates irq numbers > 255 — confirm */
    unsigned char msi_irq_num;
    unsigned char msi_used;   /*0:none, 1: msi 2:msi-x*/
    unsigned char active_type;          /* DAL_CPU_MODE_TYPE_PCIE / _LOCAL */
    pci_cmd_status_u_t  pci_cmd_status;
    uint16  pci_cmd_offset;
    uint16  pci_cmd_timeout;
};
typedef struct dal_kernel_master_s dal_kernel_master_t;



/***************************************************************************
 *declared
 ***************************************************************************/
int32_t dal_check_ldev_lchip(uint8 ldev);

/*****************************************************************************
 * global variables
 *****************************************************************************/
static dal_kernel_glb_t dal_glb;                                   /* module-wide state */
static dal_kernel_master_t* dal_master[CTC_MAX_LOCAL_CHIP_NUM] = {NULL};  /* per-slot state; NULL until probed */
#ifdef ASW_ACTIVE
static unsigned int active_type[CTC_MAX_LOCAL_CHIP_NUM] = {0};
static void* dal_dev[CTC_MAX_LOCAL_CHIP_NUM]={0};
#endif
unsigned char dal_lchip[CTC_MAX_LOCAL_CHIP_NUM];
static LIST_HEAD(_dma_seg);    /* list of dma_segment_t allocations */

/* PCI IDs of all supported Centec switch families */
static struct pci_device_id dal_id_table[] =
{
    {PCI_DEVICE(DAL_VENDOR_VID, DAL_GREATBELT_DEVICE_ID)},
    {PCI_DEVICE(DAL_PCIE_VENDOR_ID, DAL_GOLDENGATE_DEVICE_ID)},
    {PCI_DEVICE(DAL_PCIE_VENDOR_ID1, DAL_GOLDENGATE_DEVICE_ID1)},
    {PCI_DEVICE(DAL_PCIE_VENDOR_ID, DAL_DUET2_DEVICE_ID)},
    {PCI_DEVICE(DAL_PCIE_VENDOR_ID, DAL_TSINGMA_DEVICE_ID)},
    {PCI_DEVICE(DAL_PCIE_VENDOR_ID, DAL_TSINGMA_MX_DEVICE_ID)},
    {PCI_DEVICE(DAL_PCIE_VENDOR_ID, DAL_ARCTIC_DEVICE_ID)},
    {0, },
};
#if defined(SOC_ACTIVE)
/* Device-tree match for the on-SoC local-bus attachment */
static const struct of_device_id linux_dal_of_match[] = {
    { .compatible = "centec,dal-localbus",},
    {},
};
MODULE_DEVICE_TABLE(of, linux_dal_of_match);
#endif
#if defined(ASW_ACTIVE)
static const struct of_device_id linux_dal_asw_of_match[] = {
       { .compatible = "centec,ctc2118" },
       {},
};
MODULE_DEVICE_TABLE(of, linux_dal_asw_of_match);
#endif

/* Instantiate one top-half handler per interrupt slot (intr0..intr7);
 * see DAL_INTR_HANDLER_TEMPLATE above. */
DAL_INTR_HANDLER_TEMPLATE(0)
DAL_INTR_HANDLER_TEMPLATE(1)
DAL_INTR_HANDLER_TEMPLATE(2)
DAL_INTR_HANDLER_TEMPLATE(3)
DAL_INTR_HANDLER_TEMPLATE(4)
DAL_INTR_HANDLER_TEMPLATE(5)
DAL_INTR_HANDLER_TEMPLATE(6)
DAL_INTR_HANDLER_TEMPLATE(7)

/*****************************************************************************
 * macros
 *****************************************************************************/
/* True when n is a valid local-chip index; argument parenthesized so an
 * expression like VERIFY_CHIP_INDEX(a + b) binds correctly. */
#define VERIFY_CHIP_INDEX(n)  ((n) < CTC_MAX_LOCAL_CHIP_NUM)

#define _KERNEL_INTERUPT_PROCESS

/**
 * Initialize the DAL operation layer for one logical device (ldev).
 *
 * @param p_dal_op  optional user-supplied operation overrides; for ldev 0 a
 *                  non-NULL function pointer replaces the kernel default.
 *                  NULL means "use all defaults for ldev 0".
 * @return DAL_E_NONE on success (or if the ldev is already initialized),
 *         -1 on any failure.
 *
 * Maps the ldev to a probed device slot (by PCI BDF when one is given,
 * otherwise slot == ldev), creates the per-slot DMA memory pool, and
 * records the lchip base and pipeline count for the ldev.
 */
int32_t
dal_op_init(dal_op_t* p_dal_op)
{
    /*kernel mode */
    unsigned char ldev = 0;
    unsigned char slot_idx = 0;
    unsigned char index = 0;
    dal_pci_dev_t pci_dev;

    if (p_dal_op)
    {
        /*p_dal_op->lchip means ldev*/
        if (1 == g_dal_ldev_init[p_dal_op->ldev])
        {
            /* already initialized: nothing to do */
            return DAL_E_NONE;
        }
    }

    sal_memset(&pci_dev, 0, sizeof(dal_pci_dev_t));
    /* if dal op is provided by user , using user defined */
    if (p_dal_op)
    {
        ldev = p_dal_op->ldev;
        if (0 == ldev)
        {
            /* merge non-NULL user callbacks over the kernel defaults */
            g_dal_op.pci_read               = p_dal_op->pci_read?p_dal_op->pci_read:g_dal_op.pci_read;
            g_dal_op.pci_write              = p_dal_op->pci_write?p_dal_op->pci_write:g_dal_op.pci_write;
            g_dal_op.pci_conf_read          = p_dal_op->pci_conf_read?p_dal_op->pci_conf_read:g_dal_op.pci_conf_read;
            g_dal_op.pci_conf_write         = p_dal_op->pci_conf_write?p_dal_op->pci_conf_write:g_dal_op.pci_conf_write;
            g_dal_op.i2c_read               = p_dal_op->i2c_read?p_dal_op->i2c_read:g_dal_op.i2c_read;
            g_dal_op.i2c_write              = p_dal_op->i2c_write?p_dal_op->i2c_write:g_dal_op.i2c_write;
            g_dal_op.interrupt_register     = p_dal_op->interrupt_register?p_dal_op->interrupt_register:g_dal_op.interrupt_register;
            g_dal_op.interrupt_unregister   = p_dal_op->interrupt_unregister?p_dal_op->interrupt_unregister:g_dal_op.interrupt_unregister;
            g_dal_op.interrupt_set_en       = p_dal_op->interrupt_set_en?p_dal_op->interrupt_set_en:g_dal_op.interrupt_set_en;
            g_dal_op.interrupt_get_msi_info = p_dal_op->interrupt_get_msi_info?p_dal_op->interrupt_get_msi_info:g_dal_op.interrupt_get_msi_info;
            g_dal_op.interrupt_set_msi_cap  = p_dal_op->interrupt_set_msi_cap?p_dal_op->interrupt_set_msi_cap:g_dal_op.interrupt_set_msi_cap;
            g_dal_op.interrupt_set_msix_cap = p_dal_op->interrupt_set_msix_cap?p_dal_op->interrupt_set_msix_cap:g_dal_op.interrupt_set_msix_cap;
            g_dal_op.logic_to_phy           = p_dal_op->logic_to_phy?p_dal_op->logic_to_phy:g_dal_op.logic_to_phy;
            g_dal_op.phy_to_logic           = p_dal_op->phy_to_logic?p_dal_op->phy_to_logic:g_dal_op.phy_to_logic;
            g_dal_op.dma_alloc              = p_dal_op->dma_alloc?p_dal_op->dma_alloc:g_dal_op.dma_alloc;
            g_dal_op.dma_free               = p_dal_op->dma_free?p_dal_op->dma_free:g_dal_op.dma_free;
            g_dal_op.dma_cache_inval        = p_dal_op->dma_cache_inval?p_dal_op->dma_cache_inval:g_dal_op.dma_cache_inval;
            g_dal_op.dma_cache_flush        = p_dal_op->dma_cache_flush?p_dal_op->dma_cache_flush:g_dal_op.dma_cache_flush;
            g_dal_op.interrupt_get_irq      = p_dal_op->interrupt_get_irq?p_dal_op->interrupt_get_irq:g_dal_op.interrupt_get_irq;
            g_dal_op.handle_netif           = p_dal_op->handle_netif?p_dal_op->handle_netif:g_dal_op.handle_netif;
            g_dal_op.pci_read_ext           = p_dal_op->pci_read_ext?p_dal_op->pci_read_ext:g_dal_op.pci_read_ext;
            g_dal_op.pci_write_ext          = p_dal_op->pci_write_ext?p_dal_op->pci_write_ext:g_dal_op.pci_write_ext;
            g_dal_op.pci_read_ext2          = p_dal_op->pci_read_ext2?p_dal_op->pci_read_ext2:g_dal_op.pci_read_ext2;
            g_dal_op.pci_write_ext2         = p_dal_op->pci_write_ext2?p_dal_op->pci_write_ext2:g_dal_op.pci_write_ext2;
            g_dal_op.smi_read               = p_dal_op->smi_read?p_dal_op->smi_read:g_dal_op.smi_read;
            g_dal_op.smi_write              = p_dal_op->smi_write?p_dal_op->smi_write:g_dal_op.smi_write;
            g_dal_op.dal_ioctl              = p_dal_op->dal_ioctl?p_dal_op->dal_ioctl:g_dal_op.dal_ioctl;
        }
        sal_memcpy(&pci_dev, &(p_dal_op->pci_dev), sizeof(dal_pci_dev_t));
        if (!ldev && p_dal_op->lchip)
        {
            /* lchip 0 must assigned to ldev 0*/
            return -1;
        }
        g_dal_dev_lchip[ldev] = p_dal_op->lchip;
    }

    if (g_dal_dev_lchip[ldev] == 0 && ldev)
    {
        /* SDK manage lchip allocation for ldev: next lchip after the
         * previous ldev's range */
        g_dal_dev_lchip[ldev] = (g_dal_dev_lchip[ldev-1]+g_dal_dev_pp_num[ldev-1]);
    }

    if ((0 != pci_dev.busNo) || (0 != pci_dev.devNo) || (0 != pci_dev.funNo))
    {
        /* a BDF was supplied: search the probed slots for the matching device */
        for(index=0; index < CTC_MAX_LOCAL_CHIP_NUM; index++)
        {
            if (NULL == dal_master[index])
            {
                continue;
            }
            if (DAL_CPU_MODE_TYPE_PCIE == dal_master[index]->active_type)
            {
                struct pci_dev* p_pci_dev = NULL;
                p_pci_dev = dal_master[index]->dal_dev.pci_dev;
                if ((pci_dev.domainNo == pci_domain_nr(p_pci_dev->bus)) && (pci_dev.busNo == p_pci_dev->bus->number) && (pci_dev.devNo == p_pci_dev->device) && (pci_dev.funNo == p_pci_dev->devfn))
                {
                    g_dal_ldev_2_slot[ldev] = index;
                    slot_idx = index;
                    break;
                }
            }
#if defined(SOC_ACTIVE)
            if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[index]->active_type)
            {
                if ((pci_dev.busNo == 0) && (pci_dev.devNo == DAL_TSINGMA_DEVICE_ID) && (pci_dev.funNo == 0))
                {
                    g_dal_ldev_2_slot[ldev] = index;
                    slot_idx = index;
                    break;
                }
            }
#endif
        }
        if (index >= CTC_MAX_LOCAL_CHIP_NUM)
        {
            printk("not find the ldev %d device bus:0x%x dev:0x%x fun:0x%x\n", ldev, pci_dev.busNo, pci_dev.devNo, pci_dev.funNo);
            return -1;
        }
    }
    else
    {
        /* no BDF given: identity-map ldev to slot */
        g_dal_ldev_2_slot[ldev] = ldev;
        slot_idx = ldev;
    }

    /* fix: the identity-map path above does not guarantee the slot was ever
     * probed; dereferencing a NULL dal_master[slot_idx] would oops. */
    if (NULL == dal_master[slot_idx])
    {
        printk("ldev %d maps to slot %d, but no device was probed there\n", ldev, slot_idx);
        return -1;
    }

    dal_mpool_init(ldev);
    dal_master[slot_idx]->dma_pool = dal_mpool_create(ldev, dal_master[slot_idx]->dma_virt_base, dal_glb.dma_mem_size);
    if (!dal_master[slot_idx]->dma_pool)
    {
        printk("Create mpool fail, dma_pool:%p \n", dal_master[slot_idx]->dma_pool);
        return -1;
    }

    if (dal_check_ldev_lchip(ldev))
    {
        /* invalid ldev/lchip combination: roll back bookkeeping */
        g_dal_dev_pp_num[ldev] = 0;
        g_dal_dev_lchip[ldev] = 0;
        return -1;
    }

    g_dal_ldev_init[ldev] = 1;

    /*get pp num must use ldev*/
    g_dal_dev_pp_num[ldev] = dal_device_get_pp_num(ldev);
    if (p_dal_op && (p_dal_op->pp_num == 3 || p_dal_op->pp_num == 6) && p_dal_op->pp_num < g_dal_dev_pp_num[ldev])
    {
        g_dal_dev_pp_num[ldev] = p_dal_op->pp_num;
    }

    return DAL_E_NONE;
}

/**
 * Tear down the DAL operation layer for one logical device.
 *
 * @param ldev  logical device previously initialized via dal_op_init()
 * @return 0 on success, -1 if the ldev was never initialized.
 *
 * Destroys the slot's DMA pool and clears the ldev bookkeeping, including
 * the init flag (the original code left g_dal_ldev_init set, which made a
 * subsequent dal_op_init() return early without recreating the pool).
 */
int32
dal_op_deinit(uint8 ldev)
{
    uint8 slot_idx = 0;
    if (0 == g_dal_ldev_init[ldev])
    {
        return -1;
    }

    slot_idx = g_dal_ldev_2_slot[ldev];
    /*mpool destroy*/
    dal_mpool_destroy(ldev, dal_master[slot_idx]->dma_pool);
    g_dal_dev_lchip[ldev] = 0;
    g_dal_dev_pp_num[ldev] = 0;
    /* fix: allow the ldev to be re-initialized later */
    g_dal_ldev_init[ldev] = 0;

    return 0;
}

/*
 * Bottom-half interrupt service thread.
 *
 * Blocks on the slot's semaphore until the top-half handler (or the
 * unregister path) signals it.  If the slot has been deactivated the
 * thread exits; otherwise it clears the pending-trigger flag and invokes
 * the registered user callback with a pointer to the IRQ number.
 */
static void
dal_intr_thread(void* param)
{
    dal_isr_t* p_slot = (dal_isr_t*)param;
    int32 slot_idx = p_slot->intr_idx;
    int32 irq_no = p_slot->irq;

    /*for many chips in one board, there interrupt line should merge together */
    for (;;)
    {
        if (0 != sal_sem_take(p_slot->p_inr_sem, SAL_SEM_FOREVER))
        {
            continue;
        }

        /* unregister path clears 'active' then gives the semaphore */
        if (0 == p_slot->active)
        {
            return;
        }

        if (dal_glb.dal_isr[slot_idx].intr_trigger)
        {
            dal_glb.dal_isr[slot_idx].intr_trigger = 0;
        }

        p_slot->isr((void*)(&irq_no));
    }
}

int
dal_interrupt_register(unsigned int irq, int prio, void (* isr)(void*), void* data)
{
    int ret;
    unsigned char* int_name = NULL;
    int intr_idx = DAL_MAX_INTR_NUM;
    int intr_idx_tmp = 0;
    unsigned long irq_flags = 0;

    if (dal_glb.dal_intr_num >= DAL_MAX_INTR_NUM)
    {
        printk("Interrupt numbers exceeds max.\n");
        return -1;
    }

    for (intr_idx_tmp=0;intr_idx_tmp < DAL_MAX_INTR_NUM; intr_idx_tmp++)
    {
        if (dal_glb.dal_isr[intr_idx_tmp].active && irq == dal_glb.dal_isr[intr_idx_tmp].irq)
        {
           dal_glb.dal_isr[intr_idx_tmp].ref_cnt++;
           return 0;
        }
        if ((0 == dal_glb.dal_isr[intr_idx_tmp].irq) && (DAL_MAX_INTR_NUM == intr_idx))
        {
            intr_idx = intr_idx_tmp;
            dal_glb.dal_isr[intr_idx].ref_cnt = 0;
        }
    }

    int_name = "dal_intr";

    dal_glb.dal_isr[intr_idx].intr_idx = intr_idx;
    dal_glb.dal_isr[intr_idx].prio = prio;

    dal_glb.dal_isr[intr_idx].irq = irq;
    dal_glb.dal_isr[intr_idx].isr = isr;
    dal_glb.dal_isr[intr_idx].isr_data = data;
    dal_glb.dal_isr[intr_idx].active = 1;
    ret = sal_sem_create(&dal_glb.dal_isr[intr_idx].p_inr_sem, 0);
    if (ret < 0)
    {
        return ret;
    }

    sal_task_create(&dal_glb.dal_isr[intr_idx].p_intr_thread,
                    int_name,
                    SAL_DEF_TASK_STACK_SIZE,
                    prio,
                    dal_intr_thread,
                    (void*)&dal_glb.dal_isr[intr_idx]);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
    irq_flags = 0;
#else
    irq_flags = IRQF_DISABLED;
#endif
    if ((ret = request_irq(irq,
                           dal_glb.dal_isr[intr_idx].intr_handler_fun,
                           irq_flags,
                           int_name,
                           &dal_glb.dal_isr[intr_idx])) < 0)
    {
        printk("Cannot request irq %d, ret %d.\n", irq, ret);
    }

    if (0 == ret)
    {
        dal_glb.dal_intr_num++;
    }

    return ret;
}

/**
 * Unregister an interrupt previously registered with
 * dal_interrupt_register().  Decrements the refcount for shared irqs and
 * only tears the slot down when the last user unregisters.
 *
 * @param irq  Linux IRQ number
 * @return 0 on success, -1 if the irq is not registered.
 */
int
dal_interrupt_unregister(unsigned int irq)
{
    int intr_idx = 0;
    int find_flag = 0;

    /* get intr device index; fix: require 'active' so empty slots (whose
     * irq field is 0) cannot spuriously match a lookup for irq 0 */
    for (intr_idx = 0; intr_idx < DAL_MAX_INTR_NUM; intr_idx++)
    {
        if (dal_glb.dal_isr[intr_idx].active && dal_glb.dal_isr[intr_idx].irq == irq)
        {
            find_flag = 1;
            break;
        }
    }

    if (find_flag == 0)
    {
        printk ("<0>irq%d is not registered! unregister failed \n", irq);
        return -1;
    }

    if(dal_glb.dal_isr[intr_idx].ref_cnt > 1)
    {
        /* shared irq: another user still holds it */
        dal_glb.dal_isr[intr_idx].ref_cnt --;
        return 0;
    }

    free_irq(irq, &dal_glb.dal_isr[intr_idx]);
    /* wake the bottom-half thread so it observes active == 0 and exits */
    dal_glb.dal_isr[intr_idx].active = 0;
    sal_sem_give(dal_glb.dal_isr[intr_idx].p_inr_sem);
    sal_task_destroy(dal_glb.dal_isr[intr_idx].p_intr_thread);
    sal_sem_destroy(dal_glb.dal_isr[intr_idx].p_inr_sem);
    sal_memset(&(dal_glb.dal_isr[intr_idx]), 0, sizeof(dal_isr_t));
    dal_glb.dal_intr_num--;

    return 0;
}

/* Enable or mask an IRQ line.  enable != 0 unmasks via enable_irq();
 * enable == 0 masks with disable_irq_nosync() (non-blocking, safe from
 * interrupt context).  Always returns 0. */
int
dal_interrupt_set_en(unsigned int irq, unsigned int enable)
{
    if (enable)
    {
        enable_irq(irq);
    }
    else
    {
        disable_irq_nosync(irq);
    }

    return 0;
}

/*
 * Enable MSI or MSI-X for a PCIe-attached slot and record the allocated
 * vector numbers in dal_master[lchip]->msi_irq_base[].
 *
 * lchip    - device slot index into dal_master[]
 * msi_num  - number of vectors requested
 * msi_type - DAL_MSI_TYPE_MSI or DAL_MSI_TYPE_MSIX
 * Returns 0 on success (also when the type is already enabled or the slot
 * is not PCIe), -1 on enable failure.
 */
static int
_dal_set_msi_enabe(int lchip, unsigned int msi_num, unsigned int msi_type)
{
    int ret = 0;

    if (DAL_CPU_MODE_TYPE_PCIE == dal_master[lchip]->active_type)
    {
        unsigned int index = 0;
        struct pci_dev* pci_dev = (struct pci_dev*)dal_master[lchip]->dal_dev.pci_dev;

        /* skip if MSI is already the active mode (msi_used == 1) */
        if (DAL_MSI_TYPE_MSI == msi_type && (dal_master[lchip]->msi_used != 1))
        {
            if (msi_num == 1)
            {
                ret = pci_enable_msi(pci_dev);
                if (ret)
                {
                    printk ("msi enable failed!!! lchip = %d, msi_num = %d\n", lchip, msi_num);
                    pci_disable_msi(pci_dev);
                    return -1;
                }

                /* NOTE(review): msi_irq_base is unsigned char, so irq
                 * numbers above 255 are truncated here — confirm range. */
                dal_master[lchip]->msi_irq_base[0] = pci_dev->irq;
                dal_master[lchip]->msi_irq_num = 1;
            }
            else
            {
                /* multi-vector MSI: API differs across kernel versions */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
                ret = pci_alloc_irq_vectors(pci_dev, 1, msi_num, PCI_IRQ_ALL_TYPES);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 79))
                ret = pci_enable_msi_exact(pci_dev, msi_num);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 26, 32))
                ret = pci_enable_msi_block(pci_dev, msi_num);
#else
                ret = -1;
#endif
                if (ret)
                {
                    printk ("msi enable failed!!! lchip = %d, msi_num = %d\n", lchip, msi_num);
                    pci_disable_msi(pci_dev);
                    return -1;
                }

                /* multi-vector MSI irqs are consecutive from pci_dev->irq */
                dal_master[lchip]->msi_irq_num = msi_num;
                for (index = 0; index < msi_num; index++)
                {
                    dal_master[lchip]->msi_irq_base[index] = pci_dev->irq + index;
                }

            }
            dal_master[lchip]->msi_used = 1;
        }
        else if (DAL_MSI_TYPE_MSIX == msi_type && dal_master[lchip]->msi_used != 2)
        {
            struct msix_entry entries[DAL_MAX_INTR_NUM];
            unsigned int index = 0;
            sal_memset(entries, 0, sizeof(struct msix_entry)*DAL_MAX_INTR_NUM);
            for (index = 0; index < DAL_MAX_INTR_NUM; index++)
            {
                entries[index].entry = index;
            }
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
            ret = pci_enable_msix_range(pci_dev, entries, msi_num, msi_num);
            if (ret == msi_num)
            {
                dal_master[lchip]->msi_irq_num = msi_num;
                for (index=0; index<msi_num; index++)
                {
                    /* NOTE(review): entries[index].vector is already the
                     * per-entry irq; adding index again looks suspicious —
                     * confirm against working hardware before changing. */
                    dal_master[lchip]->msi_irq_base[index]= entries[index].vector+index;
                    printk ("msix enable success!!! irq index %u, irq val %u\n", index, dal_master[lchip]->msi_irq_base[index]);
                }
            }
            else
            {
                printk ("msix enable failed!!! lchip = %d", lchip);
                return -1;
            }
#else
            ret = pci_enable_msix(pci_dev, entries, msi_num);
            if (ret > 0)
            {
                /* positive return = max vectors available; retry with that */
                printk ("msix retrying interrupts = %d\n", ret);
                ret = pci_enable_msix(pci_dev, entries, ret);
                if (ret != 0)
                {
                    printk ("msix enable failed!!! lchip = %d, msi_num = %d\n", lchip, msi_num);
                    return -1;
                }
            }
            else if (ret < 0)
            {
                printk ("msix enable failed!!! lchip = %d, msi_num = %d\n", lchip, msi_num);
                return -1;
            }
            else
            {
                dal_master[lchip]->msi_irq_num = msi_num;
                for (index = 0; index < msi_num; index++)
                {
                    /* NOTE(review): same vector+index concern as above */
                    dal_master[lchip]->msi_irq_base[index] = entries[index].vector + index;
                    printk ("msix enable success!!! irq index %u, irq val %u\n", index, dal_master[lchip]->msi_irq_base[index]);
                }
            }

#endif
            dal_master[lchip]->msi_used = 2;
        }
    }

    return 0;
}

/*
 * Disable MSI or MSI-X on a PCIe-attached slot and clear the recorded
 * vector bookkeeping.  No-op for non-PCIe slots.  Always returns 0.
 */
static int
_dal_set_msi_disable(unsigned int lchip, unsigned int msi_type)
{

    if (DAL_CPU_MODE_TYPE_PCIE == dal_master[lchip]->active_type)
    {
        if (DAL_MSI_TYPE_MSI == msi_type)
        {
            pci_disable_msi(dal_master[lchip]->dal_dev.pci_dev);
        }
        else
        {
            pci_disable_msix(dal_master[lchip]->dal_dev.pci_dev);
        }

        /* fix: msi_irq_base is unsigned char[DAL_MAX_INTR_NUM]; the old
         * sizeof(unsigned int)*DAL_MAX_INTR_NUM length wrote 4x past the
         * end of the array, corrupting the fields that follow it. */
        memset(dal_master[lchip]->msi_irq_base, 0, sizeof(dal_master[lchip]->msi_irq_base));
        dal_master[lchip]->msi_irq_num = 0;
        dal_master[lchip]->msi_used = 0;
    }

    return 0;
}

/*enable parameter is used in kernel mode, for user mode useless */
/* Enable (enable != 0 with msi_num > 0) or disable MSI for the slot that
 * serves the given logical device; returns the helper's result. */
int
dal_kernel_set_msi_enable(unsigned char ldev, unsigned int enable, unsigned int msi_num)
{
    uint8 slot = g_dal_ldev_2_slot[ldev];

    if (enable && (msi_num > 0))
    {
        return _dal_set_msi_enabe(slot, msi_num, DAL_MSI_TYPE_MSI);
    }

    return _dal_set_msi_disable(slot, DAL_MSI_TYPE_MSI);
}

/*enable parameter is used in kernel mode, for user mode useless */
/* Enable (enable != 0 with msi_num > 0) or disable MSI-X for the slot that
 * serves the given logical device; returns the helper's result. */
int
dal_kernel_set_msix_enable(unsigned char ldev, unsigned int enable, unsigned int msi_num)
{
    uint8 slot = g_dal_ldev_2_slot[ldev];

    if (enable && (msi_num > 0))
    {
        return _dal_set_msi_enabe(slot, msi_num, DAL_MSI_TYPE_MSIX);
    }

    return _dal_set_msi_disable(slot, DAL_MSI_TYPE_MSIX);
}

/* Report the first MSI vector number recorded for the slot serving the
 * given logical device.  Always returns 0. */
int dal_interrupt_get_msi_info(unsigned char ldev, unsigned char* irq_base)
{
    uint8 slot = g_dal_ldev_2_slot[ldev];

    *irq_base = dal_master[slot]->msi_irq_base[0];

    return 0;
}

/*
 * Collect the irq numbers in use for a logical device.
 * type 1/3: copy the recorded MSI/MSI-X vectors into irq_array.
 * type 2:   read the legacy INTx line from PCI config space (offset 0x3c,
 *           Interrupt Line register).
 * Other types on PCIe return -1; on SOC_ACTIVE local-bus devices the
 * recorded vectors are always returned.  *num receives the count.
 */
int dal_interrupt_get_irq(unsigned char ldev, unsigned char type , unsigned short* irq_array, unsigned char* num)
{
    uint8 slot_idx = 0;
    slot_idx = g_dal_ldev_2_slot[ldev];


    if (DAL_CPU_MODE_TYPE_PCIE == dal_master[slot_idx]->active_type)
    {
        if((1 == type) || (3 == type))
        {
            unsigned char loop;

            /* hand back every recorded MSI/MSI-X vector */
            for(loop=0;loop < dal_master[slot_idx]->msi_irq_num; loop++)
            {
                *(irq_array+loop) = dal_master[slot_idx]->msi_irq_base[loop];
            }
            *num = dal_master[slot_idx]->msi_irq_num;
        }
        else if(2 == type)
        {
            /* legacy INTx: Interrupt Line is the low byte at config 0x3c */
            unsigned int conf_value = 0;
            dal_pci_conf_read(ldev, 0x3c, &conf_value);
            *irq_array = conf_value&0xFF;
            *num = 1;
        }
        else
        {
            return -1;
        }
    }
#if defined(SOC_ACTIVE)
    if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[slot_idx]->active_type)
    {
        unsigned char loop;
        for(loop=0;loop < dal_master[slot_idx]->msi_irq_num; loop++)
        {
            *(irq_array+loop) = dal_master[slot_idx]->msi_irq_base[loop];
        }
        *num = dal_master[slot_idx]->msi_irq_num;
    }
#endif

    return 0;
}
#define _KERNEL_DMA_PROCESS

uint64
dal_logic_to_phy(unsigned char ldev, void* laddr)
{
    uint8 slot_idx = 0;
    slot_idx = g_dal_ldev_2_slot[ldev];

    if (dal_glb.dma_mem_size)
    {
        /* dma memory is a contiguous block */
        if (laddr)
        {
            return dal_master[slot_idx]->dma_phy_base + ((uintptr)(laddr) - (uintptr)(dal_master[slot_idx]->dma_virt_base));
        }

        return 0;
    }

    return virt_to_bus(laddr);
}

/*
 * Translate a physical DMA address back to its kernel-virtual address.
 * With a reserved contiguous pool, add the offset from the pool's physical
 * base to the virtual base; physical 0 maps to NULL.  Without a pool, fall
 * back to bus_to_virt().
 */
void*
dal_phy_to_logic(unsigned char ldev, uint64 paddr)
{
    uint8 slot = g_dal_ldev_2_slot[ldev];

    if (0 == dal_glb.dma_mem_size)
    {
        return bus_to_virt(paddr);
    }

    /* dma memory is a contiguous block */
    if (0 == paddr)
    {
        return NULL;
    }

    return (void*)((char*)dal_master[slot]->dma_virt_base
                   + (paddr - dal_master[slot]->dma_phy_base));
}

unsigned int*
dal_dma_alloc(unsigned char ldev, int size, int type)
{
    uint8 slot_idx = 0;
    slot_idx = g_dal_ldev_2_slot[ldev];

    return dal_mpool_alloc(ldev, dal_master[slot_idx]->dma_pool, size, type);
}

/* Return a buffer obtained from dal_dma_alloc() to its slot's DMA pool. */
void
dal_dma_free(unsigned char ldev, void* ptr)
{
    uint8 slot = g_dal_ldev_2_slot[ldev];

    dal_mpool_free(ldev, dal_master[slot]->dma_pool, ptr);
}

#ifndef DMA_MEM_MODE_PLATFORM
/*
 * Function: _dal_dma_segment_free
 */

/*
 * Function: _find_largest_segment
 *
 * Purpose:
 *    Find largest contiguous segment from a pool of DMA blocks.
 * Parameters:
 *    dseg - DMA segment descriptor
 * Returns:
 *    0 on success, < 0 on error.
 * Notes:
 *    Assembly stops if a segment of the requested segment size
 *    has been obtained.
 *
 *    Lower address bits of the DMA blocks are used as follows:
 *       0: Untagged
 *       1: Discarded block
 *       2: Part of largest contiguous segment
 *       3: Part of current contiguous segment
 */
/*
 * Scan the pool of allocated DMA blocks for the largest physically
 * contiguous run, recording it in dseg->seg_begin/seg_end/seg_size.
 * The low two bits of each blk_ptr entry are (ab)used as a tag (blocks are
 * at least page aligned, so those bits are otherwise zero):
 *   0 untagged, 1 discarded, 2 part of best segment so far,
 *   3 part of the segment currently being grown.
 * Scanning stops early once a run of at least req_size bytes is found.
 * Always returns 0.
 */
static int
_dal_find_largest_segment(dma_segment_t* dseg)
{
    int i, j, blks, found;
    unsigned long seg_begin;
    unsigned long seg_end;
    unsigned long seg_tmp;

    blks = dseg->blk_cnt;

    /* Clear all block tags */
    for (i = 0; i < blks; i++)
    {
        dseg->blk_ptr[i] &= ~3;
    }

    for (i = 0; i < blks && dseg->seg_size < dseg->req_size; i++)
    {
        /* First block must be an untagged block */
        if ((dseg->blk_ptr[i] & 3) == DAL_UNTAG_BLOCK)
        {
            /* Initial segment size is the block size */
            seg_begin = dseg->blk_ptr[i];
            seg_end = seg_begin + dseg->blk_size;
            dseg->blk_ptr[i] |= DAL_CUR_MATCH_BLOCk;

            /* Loop looking for adjacent blocks; repeat until a full pass
             * finds none, since a late block can bridge earlier ones */
            do
            {
                found = 0;

                for (j = i + 1; j < blks && (seg_end - seg_begin) < dseg->req_size; j++)
                {
                    seg_tmp = dseg->blk_ptr[j];
                    /* Check untagged blocks only */
                    if ((seg_tmp & 3) == DAL_UNTAG_BLOCK)
                    {
                        if (seg_tmp == (seg_begin - dseg->blk_size))
                        {
                            /* Found adjacent block below current segment */
                            dseg->blk_ptr[j] |= DAL_CUR_MATCH_BLOCk;
                            seg_begin = seg_tmp;
                            found = 1;
                        }
                        else if (seg_tmp == seg_end)
                        {
                            /* Found adjacent block above current segment */
                            dseg->blk_ptr[j] |= DAL_CUR_MATCH_BLOCk;
                            seg_end += dseg->blk_size;
                            found = 1;
                        }
                    }
                }
            }
            while (found);

            if ((seg_end - seg_begin) > dseg->seg_size)
            {
                /* The current block is largest so far */
                dseg->seg_begin = seg_begin;
                dseg->seg_end = seg_end;
                dseg->seg_size = seg_end - seg_begin;

                /* Re-tag current and previous largest segment */
                for (j = 0; j < blks; j++)
                {
                    if ((dseg->blk_ptr[j] & 3) == DAL_CUR_MATCH_BLOCk)
                    {
                        /* Tag current segment as the largest (3 -> 2) */
                        dseg->blk_ptr[j] &= ~1;
                    }
                    else if ((dseg->blk_ptr[j] & 3) == DAL_MATCHED_BLOCK)
                    {
                        /* Discard previous largest segment (2 -> 1) */
                        dseg->blk_ptr[j] ^= 3;
                    }
                }
            }
            else
            {
                /* Discard all blocks in current segment (3 -> 1) */
                for (j = 0; j < blks; j++)
                {
                    if ((dseg->blk_ptr[j] & 3) == DAL_CUR_MATCH_BLOCk)
                    {
                        dseg->blk_ptr[j] &= ~2;
                    }
                }
            }
        }
    }

    return 0;
}

/*
 * Function: _dal_alloc_dma_blocks
 *
 * Grow the segment's block pool by 'blks' blocks of (PAGE_SIZE <<
 * blk_order) bytes each, recording each block's kernel-virtual address in
 * dseg->blk_ptr[].  Returns 0 on success, -1 when the pool is full or a
 * page allocation fails.
 */
static int
_dal_alloc_dma_blocks(dma_segment_t* dseg, int blks)
{
    int i, start;
    unsigned long addr;

    if (dseg->blk_cnt + blks > dseg->blk_cnt_max)
    {
        printk("No more DMA blocks\n");
        return -1;
    }

    start = dseg->blk_cnt;
    dseg->blk_cnt += blks;

    for (i = start; i < dseg->blk_cnt; i++)
    {
        /* GFP_ATOMIC: callers may not be able to sleep here */
        addr = __get_free_pages(GFP_ATOMIC, dseg->blk_order);
        if (addr)
        {
            dseg->blk_ptr[i] = addr;
        }
        else
        {
            printk("DMA allocation failed\n");
            /* fix: roll blk_cnt back to the blocks actually allocated;
             * otherwise the zero entries left in blk_ptr[] would be
             * treated as valid block addresses by the segment scanner. */
            dseg->blk_cnt = i;
            return -1;
        }
    }

    return 0;
}

/*
 * Function: _dal_dma_segment_alloc
 *
 * Purpose: Build a physically contiguous DMA segment of at least <size>
 * bytes out of page-allocator blocks of <blk_size> bytes (rounded up to
 * whole pages).  Blocks are allocated until the largest run of adjacent
 * blocks (found by _dal_find_largest_segment) covers the request; all
 * other blocks are returned to the page allocator.
 *
 * Returns the new segment descriptor, or NULL on bad parameters or
 * allocation failure.  The caller owns the descriptor and must release
 * it with _dal_dma_segment_free().
 */
static dma_segment_t*
_dal_dma_segment_alloc(unsigned int size, unsigned int blk_size)
{
    dma_segment_t* dseg;
    int i, blk_ptr_size;
    uintptr page_addr;
    struct sysinfo si;

    /* Sanity check */
    if (size == 0 || blk_size == 0)
    {
        return NULL;
    }

    /* Allocate and initialize the DMA segment descriptor */
    if ((dseg = kmalloc(sizeof(dma_segment_t), GFP_ATOMIC)) == NULL)
    {
        return NULL;
    }

    memset(dseg, 0, sizeof(dma_segment_t));
    dseg->req_size = size;
    dseg->blk_size = PAGE_ALIGN(blk_size);

    /* Smallest page order whose size covers one block */
    while ((PAGE_SIZE << dseg->blk_order) < dseg->blk_size)
    {
        dseg->blk_order++;
    }

    /* Upper bound on the block count: total RAM / block size */
    si_meminfo(&si);
    dseg->blk_cnt_max = (si.totalram << PAGE_SHIFT) / dseg->blk_size;
    blk_ptr_size = dseg->blk_cnt_max * sizeof(unsigned long);
    /* Allocate and initialize the DMA block pointer table */
    dseg->blk_ptr = kmalloc(blk_ptr_size, GFP_KERNEL);
    if (dseg->blk_ptr == NULL)
    {
        kfree(dseg);
        return NULL;
    }

    memset(dseg->blk_ptr, 0, blk_ptr_size);
    /* Allocate minimum number of blocks */
    _dal_alloc_dma_blocks(dseg, dseg->req_size / dseg->blk_size);

    /* Allocate more blocks (8 at a time) until the largest physically
     * contiguous run is big enough, or no more blocks can be had */
    do
    {
        _dal_find_largest_segment(dseg);
        if (dseg->seg_size >= dseg->req_size)
        {
            break;
        }
    }
    while (_dal_alloc_dma_blocks(dseg, 8) == 0);

    /* The low two bits of each blk_ptr[] entry are tag bits written by
     * _dal_find_largest_segment() (block addresses are page aligned so
     * those bits are otherwise zero).  Tag value 2 marks blocks that
     * belong to the chosen largest segment: their pages get reserved.
     * Every other non-NULL block is surplus and is freed again. */
    for (i = 0; i < dseg->blk_cnt; i++)
    {
        if ((dseg->blk_ptr[i] & 3) == 2)
        {
            dseg->blk_ptr[i] &= ~3;

            for (page_addr = dseg->blk_ptr[i];
                 page_addr < dseg->blk_ptr[i] + dseg->blk_size;
                 page_addr += PAGE_SIZE)
            {
                MEM_MAP_RESERVE(VIRT_TO_PAGE((void*)page_addr));
            }
        }
        else if (dseg->blk_ptr[i])
        {
            dseg->blk_ptr[i] &= ~3;
            free_pages(dseg->blk_ptr[i], dseg->blk_order);
            dseg->blk_ptr[i] = 0;
        }
    }

    return dseg;
}

/*
 * Function: _dal_dma_segment_free
 *
 * Purpose: Release every page block held by <dseg> (un-reserving each
 * page first), then the block table and the descriptor itself.
 */
static void
_dal_dma_segment_free(dma_segment_t* dseg)
{
    int i;
    uintptr page_addr;

    if (dseg->blk_ptr)
    {
        for (i = 0; i < dseg->blk_cnt; i++)
        {
            if (dseg->blk_ptr[i])
            {
                for (page_addr = dseg->blk_ptr[i];
                     page_addr < dseg->blk_ptr[i] + dseg->blk_size;
                     page_addr += PAGE_SIZE)
                {
                    MEM_MAP_UNRESERVE(VIRT_TO_PAGE((void*)page_addr));
                }

                free_pages(dseg->blk_ptr[i], dseg->blk_order);
            }
        }

        kfree(dseg->blk_ptr);
    }

    /* Fix: free the descriptor unconditionally — it used to leak when
     * blk_ptr was NULL because kfree(dseg) sat inside the if-block */
    kfree(dseg);
}

/*
 * Function: _dal_pgalloc
 *
 * Purpose: Allocate <size> bytes of physically contiguous DMA memory
 * through the block-segment allocator and track it on _dma_seg.
 *
 * Returns the segment's virtual base address, or NULL when not even a
 * descriptor could be built.  If only part of the requested size was
 * obtained, the partial segment is still returned (a warning is logged).
 */
static void*
_dal_pgalloc(unsigned int size)
{
    dma_segment_t* dseg;
    unsigned int blk_size;

    blk_size = (size < DMA_BLOCK_SIZE) ? size : DMA_BLOCK_SIZE;
    if ((dseg = _dal_dma_segment_alloc(size, blk_size)) == NULL)
    {
        return NULL;
    }

    if (dseg->seg_size < size)
    {
        /* Deliberately keep the partial segment instead of failing hard;
         * the original hard-failure path is preserved below for reference.
         * (Fix: corrected "requset" typo in the log message.) */
        printk("Notice: Can not get enough memory for request!!\n");
        printk("actual size:0x%lx, request size:0x%x\n", dseg->seg_size, size);
         /*-_dal_dma_segment_free(dseg);*/
         /*-return NULL;*/
    }

    list_add(&dseg->list, &_dma_seg);
    return (void*)dseg->seg_begin;
}

/*
 * Function: _dal_pgfree
 */
static int
_dal_pgfree(void* ptr)
{
    struct list_head* pos;

    list_for_each(pos, &_dma_seg)
    {
        dma_segment_t* dseg = list_entry(pos, dma_segment_t, list);

        if (ptr == (void*)dseg->seg_begin)
        {
            list_del(&dseg->list);
            _dal_dma_segment_free(dseg);
            return 0;
        }
    }
    return -1;
}
#endif

/* lchip in this function means the dal chip id, which maps to an sdk lchip but is not equal to it */
/*
 * Allocate the per-chip DMA memory pool and record its physical and
 * virtual base addresses in dal_master[dal_chip].
 *
 * Three strategies, chosen by configuration:
 *   - use_high_memory: map the region at `high_memory` directly;
 *   - DMA_MEM_MODE_PLATFORM / SOC_ACTIVE: dma_alloc_coherent() against
 *     the PCI or platform device;
 *   - otherwise: carve the pool out of kernel pages via _dal_pgalloc().
 */
static void
dal_alloc_dma_pool(unsigned char dal_chip, int size)
{
#if defined(DMA_MEM_MODE_PLATFORM) || defined(SOC_ACTIVE)
    struct device * dev = NULL;
#endif

    if (dal_glb.use_high_memory)
    {
        /* Pool lives just above the kernel's lowmem mapping */
        dal_master[dal_chip]->dma_phy_base = virt_to_bus(high_memory);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
        /* ioremap_nocache() is gone on newer kernels; plain ioremap()
         * is used instead */
        dal_master[dal_chip]->dma_virt_base = ioremap(dal_master[dal_chip]->dma_phy_base, size);
#else
        dal_master[dal_chip]->dma_virt_base = ioremap_nocache(dal_master[dal_chip]->dma_phy_base, size);
#endif
    }
    else
    {
#if defined(DMA_MEM_MODE_PLATFORM) || defined(SOC_ACTIVE)
    /* Only PCIe- and SoC-attached devices carry a struct device here */
    if ((DAL_CPU_MODE_TYPE_PCIE != dal_master[dal_chip]->active_type) && (DAL_CPU_MODE_TYPE_LOCAL != dal_master[dal_chip]->active_type))
    {
        printk("active type %d error, not cpu and soc!\n", dal_master[dal_chip]->active_type);
        return;
    }
    if (DAL_CPU_MODE_TYPE_PCIE == dal_master[dal_chip]->active_type)
    {
        dev = &((struct pci_dev*)(dal_master[dal_chip]->dal_dev.pci_dev))->dev;
    }
#if defined(SOC_ACTIVE)
    if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[dal_chip]->active_type)
    {
        /* the same pci_dev storage holds a platform_device in SoC mode */
        dev = &((struct platform_device*)(dal_master[dal_chip]->dal_dev.pci_dev))->dev;
    }
#endif
    dal_master[dal_chip]->dma_virt_base = dma_alloc_coherent(dev, dal_glb.dma_mem_size,
                                       &dal_master[dal_chip]->dma_phy_base, GFP_KERNEL);
    printk(KERN_WARNING "########Using DMA_MEM_MODE_PLATFORM \n");
    printk(KERN_WARNING "########Dma alloc physical address:0x%"PRIx64" dma_mem_size:%d, logic_addr:%p\n", dal_master[dal_chip]->dma_phy_base, dal_glb.dma_mem_size, dal_master[dal_chip]->dma_virt_base);
#else
    /* Get DMA memory from kernel */
    dal_master[dal_chip]->dma_virt_base = _dal_pgalloc(size);
    printk("<0>_dal_pgalloc return %p\r\n", dal_master[dal_chip]->dma_virt_base);
    dal_master[dal_chip]->dma_phy_base = virt_to_bus(dal_master[dal_chip]->dma_virt_base);
    printk("<0>Dma physical address 0x%"PRIx64"\n", dal_master[dal_chip]->dma_phy_base);
    printk("<0>Using SDK malloc Dma memory pool!!\n");
    printk(KERN_WARNING "########Dma alloc physical address:0x%"PRIx64" dma_mem_size:%d, logic_addr:%p\n", dal_master[dal_chip]->dma_phy_base, dal_glb.dma_mem_size, dal_master[dal_chip]->dma_virt_base);
#endif
    }
#if defined(SOC_ACTIVE)
    if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[dal_chip]->active_type)
    {
        /* NOTE(review): converts the CPU physical address into the chip's
         * DMA view — presumably DDR sits at a -0x80000000 offset from the
         * device's perspective; confirm against the SoC address map */
        dal_master[dal_chip]->dma_phy_base -= 0x80000000;
    }
#endif
}

/*
 * Release the per-chip DMA pool allocated by dal_alloc_dma_pool(),
 * using the matching release call for whichever strategy allocated it.
 */
static void
dal_free_dma_pool(unsigned char dal_chip)
{
    int ret = 0;
#if defined(DMA_MEM_MODE_PLATFORM) || defined(SOC_ACTIVE)
    struct device * dev = NULL;
#endif

    /* self-assignment keeps `ret` "used" when the _dal_pgfree() branch
     * below is compiled out */
    ret = ret;

#if defined(SOC_ACTIVE)
    if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[dal_chip]->active_type)
    {
        /* undo the device-address offset applied in dal_alloc_dma_pool() */
        dal_master[dal_chip]->dma_phy_base += 0x80000000;
    }
#endif
    if (dal_glb.use_high_memory)
    {
        iounmap(dal_master[dal_chip]->dma_virt_base);
    }
    else
    {
#if defined(DMA_MEM_MODE_PLATFORM) || defined(SOC_ACTIVE)
    if ((DAL_CPU_MODE_TYPE_PCIE != dal_master[dal_chip]->active_type) && (DAL_CPU_MODE_TYPE_LOCAL != dal_master[dal_chip]->active_type))
    {
        return;
    }
    if (DAL_CPU_MODE_TYPE_PCIE == dal_master[dal_chip]->active_type)
    {
        dev = &((struct pci_dev*)(dal_master[dal_chip]->dal_dev.pci_dev))->dev;
    }
#if defined(SOC_ACTIVE)
    if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[dal_chip]->active_type)
    {
        /* the same pci_dev storage holds a platform_device in SoC mode */
        dev = &((struct platform_device*)(dal_master[dal_chip]->dal_dev.pci_dev))->dev;
    }
#endif
    dma_free_coherent(dev, dal_glb.dma_mem_size,
                                                  dal_master[dal_chip]->dma_virt_base, dal_master[dal_chip]->dma_phy_base);
#else
    ret = _dal_pgfree(dal_master[dal_chip]->dma_virt_base);
    if(ret<0)
    {
        printk("Dma free memory fail !!!!!! \n");
    }
#endif
    }
}
#ifdef ASW_ACTIVE
/* Program the SMI slave's timeout threshold (split into low/high 16-bit
 * halves) and read both halves back for debug logging. */
void
dal_smi_set_timeout_thrd(dal_kernel_asw_dev_t *cfg, int cnt)
{
    struct mii_bus *bus = cfg->ext_mbus;
    int slave = DAL_SMI_SLAVE_ID;
    unsigned short int lo = (unsigned short int)cnt;
    unsigned short int hi = (unsigned short int)(cnt >> 16);

    bus->write(bus, slave, DAL_SMI_TIMEOUT_THRD_L, lo);
    bus->write(bus, slave, DAL_SMI_TIMEOUT_THRD_H, hi);

    CTC_PRINTK("DAL_SMI_TIMEOUT_THRD_L(0x%x), DAL_SMI_TIMEOUT_THRD_H(0x%x)\n",
                bus->read(bus, slave, DAL_SMI_TIMEOUT_THRD_L),
                bus->read(bus, slave, DAL_SMI_TIMEOUT_THRD_H));
}

/* Dump the SMI slave's debug status register: low byte counts accesses,
 * high byte counts acks; their difference is the timeout count. */
void
dal_smi_debug_status(dal_kernel_asw_dev_t *cfg)
{
    struct mii_bus *bus = cfg->ext_mbus;
    unsigned short int status;
    unsigned char accesses, acks, timeouts;

    status = bus->read(bus, DAL_SMI_SLAVE_ID, DAL_SMI_DEBUG_STATUS);

    accesses = status & 0xFF;
    acks = status >> 8;
    timeouts = accesses - acks;

    CTC_PRINTK("%s: debug_status is 0x%x, access cnt is %d, ack cnt is %d, timeout cnt is %d\n", __func__,
            status, accesses, acks, timeouts);
}

/*
 * Read one 32-bit chip register over the SMI/MDIO side-band interface.
 *
 * Latches the 32-bit address in two 16-bit halves, triggers the read,
 * then polls the status register until the slave reports END (success),
 * ERROR, or TIMEOUT — also bounded by a jiffies deadline on the host.
 *
 * Returns 0 on success (*data filled), -1 on error or timeout.
 * Serialized against other bus users via mbus->mdio_lock.
 */
int
dal_smi_read_reg(void *p_cfg, unsigned int addr, unsigned int *data)
{
    dal_kernel_asw_dev_t *cfg = p_cfg;
    struct mii_bus *mbus = cfg->ext_mbus;
    int id = DAL_SMI_SLAVE_ID;
    unsigned short int status = 0;
    unsigned short int data_l, data_h;
    unsigned long start_time;

    mutex_lock(&mbus->mdio_lock);

    /* latch the register address, then kick off the read */
    mbus->write(mbus, id, DAL_SMI_ADDR_L, (unsigned short int)addr);
    CTC_PRINTK("DAL_SMI_ADDR_L(0x%x) w 0x%x\n", DAL_SMI_ADDR_L, (unsigned short int)addr);
    mbus->write(mbus, id, DAL_SMI_ADDR_H, (unsigned short int)(addr >> 16));
    CTC_PRINTK("DAL_SMI_ADDR_H(0x%x) w 0x%x\n", DAL_SMI_ADDR_H, (unsigned short int)(addr >> 16));
    mbus->write(mbus, id, DAL_SMI_CMD, DAL_SMI_ACT);
    CTC_PRINTK("DAL_SMI_CMD(0x%x) w 0x%x\n", DAL_SMI_CMD, DAL_SMI_ACT);

    start_time = jiffies;
    while (1)
    {
        status = mbus->read(mbus, id, DAL_SMI_STATUS);

        if (status & SMI_STATUS_ERROR)
        {
            CTC_PRINTK("%s: mdio read error\n", __func__);
            CTC_PRINTK("DAL_SMI_STATUS(0x%x) r 0x%x\n", DAL_SMI_STATUS, status);
            dal_smi_debug_status(cfg);
            mutex_unlock(&mbus->mdio_lock);
            return -1;
        }
        /* Fix: logical || instead of bitwise | between boolean conditions */
        else if ((status & SMI_STATUS_TIMEOUT) || time_after(jiffies, start_time + DAL_SMI_TIMEOUT))
        {
            CTC_PRINTK("%s: mdio read timeout\n", __func__);
            CTC_PRINTK("DAL_SMI_STATUS(0x%x) r 0x%x\n", DAL_SMI_STATUS, status);
            dal_smi_debug_status(cfg);
            mutex_unlock(&mbus->mdio_lock);
            return -1;
        }
        else if (status & SMI_STATUS_END)
        {
            CTC_PRINTK("DAL_SMI_STATUS(0x%x) r 0x%x\n", DAL_SMI_STATUS, status);
            break;
        }
    }

    /* assemble the 32-bit result from the two 16-bit data registers */
    data_l = mbus->read(mbus, id, DAL_SMI_RD_DATA_L);
    CTC_PRINTK("DAL_SMI_RD_DATA_L(0x%x) r 0x%x\n", DAL_SMI_RD_DATA_L, data_l);
    data_h = mbus->read(mbus, id, DAL_SMI_RD_DATA_H);
    CTC_PRINTK("DAL_SMI_RD_DATA_H(0x%x) r 0x%x\n", DAL_SMI_RD_DATA_H, data_h);
    *data = data_h << 16 | data_l;
    CTC_PRINTK("ctc2118_mdio_read_reg, addr is 0x%x, data is 0x%x\n", addr, *data);

    mutex_unlock(&mbus->mdio_lock);
    return 0;
}

/*
 * Write one 32-bit chip register over the SMI/MDIO side-band interface.
 *
 * Latches address and data (each as two 16-bit halves), issues the write
 * command, then polls the status register for END/ERROR/TIMEOUT, also
 * bounded by a jiffies deadline on the host.
 *
 * Returns 0 on success, -1 on error or timeout.
 * Serialized against other bus users via mbus->mdio_lock.
 */
int
dal_smi_write_reg(void *p_cfg, unsigned int addr, unsigned int data)
{
    dal_kernel_asw_dev_t *cfg = p_cfg;
    struct mii_bus *mbus = cfg->ext_mbus;
    int id = DAL_SMI_SLAVE_ID;
    unsigned short int status = 0;
    unsigned long start_time;

    mutex_lock(&mbus->mdio_lock);

    mbus->write(mbus, id, DAL_SMI_ADDR_L, (unsigned short int)addr);
    CTC_PRINTK("DAL_SMI_ADDR_L(0x%x) w 0x%x\n", DAL_SMI_ADDR_L, (unsigned short int)addr);

    mbus->write(mbus, id, DAL_SMI_ADDR_H, (unsigned short int)(addr >> 16));
    CTC_PRINTK("DAL_SMI_ADDR_H(0x%x) w 0x%x\n", DAL_SMI_ADDR_H, (unsigned short int)(addr >> 16));

    mbus->write(mbus, id, DAL_SMI_WR_DATA_L, (unsigned short int)data);
    CTC_PRINTK("DAL_SMI_WR_DATA_L(0x%x) w 0x%x\n", DAL_SMI_WR_DATA_L, (unsigned short int)data);

    mbus->write(mbus, id, DAL_SMI_WR_DATA_H, (unsigned short int)(data >> 16));
    CTC_PRINTK("DAL_SMI_WR_DATA_H(0x%x) w 0x%x\n", DAL_SMI_WR_DATA_H, (unsigned short int)(data >> 16));

    mbus->write(mbus, id, DAL_SMI_CMD, DAL_SMI_ACT | DAL_SMI_WRITE);
    CTC_PRINTK("DAL_SMI_CMD(0x%x) w 0x%x\n", DAL_SMI_CMD, DAL_SMI_ACT | DAL_SMI_WRITE);

    start_time = jiffies;
    while (1)
    {
        status = mbus->read(mbus, id, DAL_SMI_STATUS);
        if (status & SMI_STATUS_ERROR)
        {
            dal_smi_debug_status(cfg);
            mutex_unlock(&mbus->mdio_lock);
            return -1;
        }
        /* Fix: logical || instead of bitwise | between boolean conditions */
        else if ((status & SMI_STATUS_TIMEOUT) || time_after(jiffies, start_time + DAL_SMI_TIMEOUT))
        {
            dal_smi_debug_status(cfg);
            mutex_unlock(&mbus->mdio_lock);
            return -1;
        }
        else if (status & SMI_STATUS_END)
        {
            CTC_PRINTK("DAL_SMI_STATUS(0x%x) r 0x%x\n", DAL_SMI_STATUS, status);
            break;
        }
    }

    CTC_PRINTK("ctc2118_mdio_write_reg, addr is 0x%x, data is 0x%x\n", addr, data);
    mutex_unlock(&mbus->mdio_lock);
    return 0;
}

/* Register accessor vtable for the SMI/MDIO transport. */
static struct dal_reg_ops smi_reg_ops = {
    .read_reg = dal_smi_read_reg,
    .write_reg = dal_smi_write_reg,
};

/*
 * Bind the SMI/MDIO register backend from the device tree: resolve the
 * "mii-bus" phandle to a struct mii_bus and install the SMI reg_ops.
 *
 * Returns <cfg> on success, NULL when the phandle or bus is missing
 * (the bus may simply not have probed yet).
 */
dal_kernel_asw_dev_t *
dal_smi_probe_of(struct platform_device *pdev, dal_kernel_asw_dev_t *cfg)
{
    struct device_node *np = pdev->dev.of_node;
    struct device_node *mdio_node;

    mdio_node = of_parse_phandle(np, "mii-bus", 0);
    if (!mdio_node)
    {
        dev_err(&pdev->dev, "cannot find mdio node phandle");
        return NULL;
    }

    cfg->ext_mbus = of_mdio_find_bus(mdio_node);
    /* Fix: of_parse_phandle() takes a reference on the node; drop it now
     * that the bus lookup is done (it was leaked on every path before) */
    of_node_put(mdio_node);
    if (!cfg->ext_mbus)
    {
        dev_info(&pdev->dev, "cannot find mdio bus from bus handle (yet)");
        return NULL;
    }

    cfg->reg_ops = &smi_reg_ops;
    dal_smi_set_timeout_thrd(cfg, DAL_SMI_TIMEOUT_CNT);

    return cfg;
}

/* Write one byte to register <reg> of the configured I2C slave.
 * Returns the i2c_transfer() result truncated to unsigned char.
 * NOTE(review): i2c_transfer() returns a negative errno on failure,
 * which this narrow return type cannot represent — confirm callers do
 * not rely on the return value for error detection. */
unsigned char
i2c_write_byte(dal_kernel_asw_dev_t *cfg, unsigned char reg, unsigned char val)
{
    unsigned char payload[2] = { reg, val };
    struct i2c_msg msg = {
        .addr = cfg->i2c_addr,
        .flags = 0,
        .len = 2,
        .buf = payload,
    };

    CTC_PRINTK("i2c write 0x%x 0x%x 0x%x\n", cfg->i2c_addr, reg, val);
    return i2c_transfer(cfg->i2c_adapt, &msg, 1);
}

/* Read one byte from register <reg> of the configured I2C slave using a
 * write-then-read combined transfer.  Returns the byte read (0 if the
 * transfer did not fill the buffer). */
unsigned char
i2c_read_byte(dal_kernel_asw_dev_t *cfg, unsigned char reg)
{
    unsigned char reg_buf[1];
    unsigned char val_buf[1];
    struct i2c_msg xfer[2];

    reg_buf[0] = reg;
    val_buf[0] = 0x00;

    /* message 0: select the register to read */
    xfer[0].addr = cfg->i2c_addr;
    xfer[0].flags = 0;
    xfer[0].len = 1;
    xfer[0].buf = reg_buf;

    /* message 1: read the register value back */
    xfer[1].addr = cfg->i2c_addr;
    xfer[1].flags = I2C_M_RD;
    xfer[1].len = 1;
    xfer[1].buf = val_buf;

    i2c_transfer(cfg->i2c_adapt, xfer, 2);

    CTC_PRINTK("i2c read 0x%x 0x%x, val is 0x%x\n", cfg->i2c_addr, reg, val_buf[0]);
    return val_buf[0];
}

/* Read and log the chip's device id through the I2C slave window.
 * Always returns 0; the id is only printed, not returned. */
int
dal_i2c_device_id(dal_kernel_asw_dev_t *cfg)
{
    unsigned char id[4];
    unsigned int device_id;

    cfg->i2c_addr = 0x3c;
    i2c_write_byte(cfg, DAL_I2C_ADDR_CMD, CHIPACCESS_CMDREAD | CHIPACCESS_DEVICEID);

    /* big-endian: DATA3 is the most significant byte */
    id[0] = i2c_read_byte(cfg, DAL_I2C_RD_DATA3);
    id[1] = i2c_read_byte(cfg, DAL_I2C_RD_DATA2);
    id[2] = i2c_read_byte(cfg, DAL_I2C_RD_DATA1);
    id[3] = i2c_read_byte(cfg, DAL_I2C_RD_DATA0);

    device_id = id[0] << 24 | id[1] << 16 | id[2] << 8 | id[3];
    CTC_PRINTK("%s: i2cslave device id is 0x%x\n", __func__, device_id);
    return 0;
}
/*
 * Poll the I2C slave's chip-access status register until the access
 * completes (DONE) or fails (OVERLAP / ERROR / TIMEOUT flag, or a host
 * jiffies deadline of I2C_TIMEOUT).
 *
 * Returns 0 on completion, -1 on any failure.
 */
int
dal_i2c_status(dal_kernel_asw_dev_t *cfg)
{
    unsigned char buf[4] = {0x00, 0x00, 0x00, 0x00};
    unsigned long start_time;

    start_time = jiffies;
    while (1)
    {
        i2c_write_byte(cfg, DAL_I2C_ADDR_CMD, CHIPACCESS_CMDREAD | CHIPACCESS_STATUS);
        /* big-endian: DATA3 carries the flag byte */
        buf[0] = i2c_read_byte(cfg, DAL_I2C_RD_DATA3);
        buf[1] = i2c_read_byte(cfg, DAL_I2C_RD_DATA2);
        buf[2] = i2c_read_byte(cfg, DAL_I2C_RD_DATA1);
        buf[3] = i2c_read_byte(cfg, DAL_I2C_RD_DATA0);

        CTC_PRINTK("CHIPACCESS_Status is %x\n", buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]);

        if (buf[0] & I2C_STATUS_OVERLAP)
        {
            CTC_PRINTK("%s: i2cslave read overlap\n", __func__);
            return -1;
        }
        else if (buf[0] & I2C_STATUS_ERROR)
        {
            CTC_PRINTK("%s: i2cslave read error\n", __func__);
            return -1;
        }
        /* Fix: logical || instead of bitwise | between boolean conditions */
        else if ((buf[0] & I2C_STATUS_TIMEOUT) || time_after(jiffies, start_time + I2C_TIMEOUT))
        {
            CTC_PRINTK("%s: i2cslave read timeout\n", __func__);
            return -1;
        }
        else if (buf[0] & I2C_STATUS_DONE)
        {
            break;
        }
    }
    return 0;
}

/*
 * Read one 32-bit chip register through the I2C slave interface.
 *
 * Sequence: write the 4 address bytes, issue the address command, poll
 * the access status, then fetch the 4 result bytes (big-endian).
 *
 * Returns 0 on success (*data filled), -1 if the slave reports an
 * access error or timeout.
 */
int
dal_i2c_read_reg(void *p_cfg, unsigned int addr, unsigned int *data)
{
    int ret;   /* Fix: was unsigned int, but dal_i2c_status() returns -1 */
    unsigned char buf[4] = {0x00, 0x00, 0x00, 0x00};
    dal_kernel_asw_dev_t *cfg = p_cfg;

    cfg->i2c_addr = 0x3c;
    i2c_write_byte(cfg, DAL_I2C_WR_DATA3, (addr >> 24) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_WR_DATA2, (addr >> 16) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_WR_DATA1, (addr >> 8) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_WR_DATA0, (addr >> 0) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_ADDR_CMD, CHIPACCESS_CMDWRITE | CHIPACCESS_ADDRCMD);

    ret = dal_i2c_status(cfg);
    if (ret)
        return -1;

    i2c_write_byte(cfg, DAL_I2C_ADDR_CMD, CHIPACCESS_CMDREAD | CHIPACCESS_RDDATE);
    buf[0] = i2c_read_byte(cfg, DAL_I2C_RD_DATA3);
    buf[1] = i2c_read_byte(cfg, DAL_I2C_RD_DATA2);
    buf[2] = i2c_read_byte(cfg, DAL_I2C_RD_DATA1);
    buf[3] = i2c_read_byte(cfg, DAL_I2C_RD_DATA0);

    /* status after the data phase is polled only for its debug output */
    dal_i2c_status(cfg);

    *data = buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3];
    CTC_PRINTK("ctc2118_i2c_read_reg, addr is 0x%x, data is 0x%x\n", addr, *data);
    return 0;
}

/*
 * Write one 32-bit chip register through the I2C slave interface.
 *
 * Sequence: stage the 4 data bytes, issue the write-data command, stage
 * the 4 address bytes, then issue the address command and poll status.
 * NOTE(review): the low address byte is OR'ed with 0x1 — presumably the
 * LSB flags a write request; confirm against the chip access spec.
 *
 * Returns 0 on success, -1 if the slave reports an error or timeout.
 */
int
dal_i2c_write_reg(void *p_cfg, unsigned int addr, unsigned int data)
{
    int ret;   /* Fix: was unsigned int, but dal_i2c_status() returns -1 */
    dal_kernel_asw_dev_t *cfg = p_cfg;

    cfg->i2c_addr = 0x3c;
    i2c_write_byte(cfg, DAL_I2C_WR_DATA3, (data >> 24) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_WR_DATA2, (data >> 16) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_WR_DATA1, (data >> 8) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_WR_DATA0, (data >> 0) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_ADDR_CMD, CHIPACCESS_CMDWRITE | CHIPACCESS_WRDATA);

    i2c_write_byte(cfg, DAL_I2C_WR_DATA3, (addr >> 24) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_WR_DATA2, (addr >> 16) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_WR_DATA1, (addr >> 8) & 0xff);
    i2c_write_byte(cfg, DAL_I2C_WR_DATA0, ((addr >> 0) & 0xff) | 0x1);
    i2c_write_byte(cfg, DAL_I2C_ADDR_CMD, CHIPACCESS_CMDWRITE | CHIPACCESS_ADDRCMD);

    ret = dal_i2c_status(cfg);
    if (ret)
        return -1;

    CTC_PRINTK("ctc2118_i2c_write_reg, addr is 0x%x, data is 0x%x\n", addr, data);
    return 0;
}

/* Register accessor vtable for the I2C transport. */
static struct dal_reg_ops i2c_reg_ops = {
    .read_reg = dal_i2c_read_reg,
    .write_reg = dal_i2c_write_reg,
};

dal_kernel_asw_dev_t *
dal_i2c_probe_of(struct platform_device *pdev, dal_kernel_asw_dev_t *cfg)
{
    struct device_node *np = pdev->dev.of_node;
    struct device_node *i2c_node;
    unsigned int ret,tmp;

    i2c_node = of_parse_phandle(np, "i2c-bus", 0);
    if (!i2c_node)
    {
        dev_err(&pdev->dev, "cannot find i2c node phandle");
        return NULL;
    }

    cfg->i2c_adapt = of_find_i2c_adapter_by_node(i2c_node);
    if (!cfg->i2c_adapt)
    {
        dev_err(&pdev->dev, "cannot find i2c bus from bus handle (yet)");
        return NULL;
    } 
    else 
    {
        of_node_put(i2c_node);
    }

    ret = of_property_read_u32(np, "slave-addr", &tmp);
    if (ret) {
    	dev_err(&pdev->dev, "has no valid 'slave-addr' property (%d)\n", ret);
    	return NULL;
    } else {
    	cfg->i2c_addr = tmp;
    }

    dal_i2c_device_id(cfg);
    cfg->reg_ops = &i2c_reg_ops;

    return cfg;
}


/*
 * Read one 32-bit chip register over the SPI slave interface.
 *
 * Frame layout: 1 command byte, 4 address bytes (big-endian), 1 dummy
 * byte for turnaround, then 4 result bytes (big-endian).
 *
 * Returns 0 when the SPI transfer completed (the slave may still signal
 * ERR/TIMEOUT/NODATA sentinel values in *data, which are logged),
 * -1 when the SPI transfer itself failed.
 */
int
dal_spi_read_reg(void *p_cfg, unsigned int addr, unsigned int *data)
{
    struct spi_transfer t[4] = {};
    struct spi_message m;
    unsigned char tx_cmd[1];
    unsigned char tx_addr[4];
    unsigned char dummy[1];
    unsigned char rx_data[4] = {0};  /* Fix: was uninitialized stack data */
    int ret;

    dal_kernel_asw_dev_t *cfg = p_cfg;
    spi_message_init(&m);

    tx_cmd[0] = CTC_SPI_CMD_READ;
    t[0].tx_buf = tx_cmd;
    t[0].len = 1;
    spi_message_add_tail(&t[0], &m);

    tx_addr[0] = addr >> 24;
    tx_addr[1] = addr >> 16;
    tx_addr[2] = addr >> 8;
    tx_addr[3] = addr >> 0;
    t[1].tx_buf = tx_addr;
    t[1].len = 4;
    spi_message_add_tail(&t[1], &m);

    dummy[0] = 0xff;
    t[2].tx_buf = dummy;
    t[2].len = 1;
    spi_message_add_tail(&t[2], &m);

    t[3].rx_buf = rx_data;
    t[3].len = 4;
    spi_message_add_tail(&t[3], &m);

    /* Fix: spi_sync() result was previously ignored; on failure the
     * caller would have consumed uninitialized rx_data */
    ret = spi_sync(cfg->spi_device, &m);
    if (ret)
    {
        CTC_PRINTK("%s: spi_sync failed (%d)\n", __func__, ret);
        return -1;
    }

    *data = rx_data[0] << 24 | rx_data[1] << 16 | rx_data[2] << 8 | rx_data[3];
    if (*data == CTC_SPI_STATUS_ERR)
    {
        CTC_PRINTK("%s: spislave read error\n", __func__);
    }
    if (*data == CTC_SPI_STATUS_TIMEOUT)
    {
        CTC_PRINTK("%s: spislave read timeout\n", __func__);
    }
    if (*data == CTC_SPI_STATUS_NODATA)
    {
        CTC_PRINTK("%s: spislave read no data\n", __func__);
    }
    CTC_PRINTK("0x%x, 0x%x, 0x%x, 0x%x\n", rx_data[0], rx_data[1], rx_data[2], rx_data[3]);
    CTC_PRINTK("ctc2118_spi_read_reg, addr is 0x%x, data is 0x%x\n", addr, *data);

    return 0;
}

/*
 * Write one 32-bit chip register over the SPI slave interface.
 *
 * Frame layout: 1 command byte, 4 address bytes (big-endian),
 * 4 data bytes (big-endian).
 *
 * Returns 0 on success, -1 when the SPI transfer itself failed.
 */
int
dal_spi_write_reg(void *p_cfg, unsigned int addr, unsigned int data)
{
    struct spi_transfer t[3] = {};
    struct spi_message m;
    unsigned char tx_cmd[1];
    unsigned char tx_addr[4];
    unsigned char tx_data[4];
    int ret;
    dal_kernel_asw_dev_t *cfg = p_cfg;

    spi_message_init(&m);

    tx_cmd[0] = CTC_SPI_CMD_WRITE;
    t[0].tx_buf = tx_cmd;
    t[0].len = 1;
    spi_message_add_tail(&t[0], &m);

    tx_addr[0] = addr >> 24;
    tx_addr[1] = addr >> 16;
    tx_addr[2] = addr >> 8;
    tx_addr[3] = addr >> 0;
    t[1].tx_buf = tx_addr;
    t[1].len = 4;
    spi_message_add_tail(&t[1], &m);

    tx_data[0] = data >> 24;
    tx_data[1] = data >> 16;
    tx_data[2] = data >> 8;
    tx_data[3] = data >> 0;
    t[2].tx_buf = tx_data;
    t[2].len = 4;
    spi_message_add_tail(&t[2], &m);

    /* Fix: spi_sync() result was previously ignored — a failed transfer
     * was silently reported as a successful register write */
    ret = spi_sync(cfg->spi_device, &m);
    if (ret)
    {
        CTC_PRINTK("%s: spi_sync failed (%d)\n", __func__, ret);
        return -1;
    }

    CTC_PRINTK("ctc2118_spi_write_reg, addr is 0x%x, data is 0x%x\n", addr, data);
    return 0;
}

/* Register accessor vtable for the SPI transport. */
static struct dal_reg_ops spi_reg_ops = {
    .read_reg = dal_spi_read_reg,
    .write_reg = dal_spi_write_reg,
};

/*
 * Bind the SPI register backend from the device tree: look up the SPI
 * master by "bus-num", allocate and register a slave device on chip
 * select 3 running in SPI mode 3, and install the SPI reg_ops.
 *
 * Returns <cfg> on success, NULL on any failure (properties missing,
 * master absent, allocation or registration failure).
 */
dal_kernel_asw_dev_t *
dal_spi_probe_of(struct platform_device *pdev, dal_kernel_asw_dev_t *cfg)
{
    struct spi_master *master;
    unsigned int value, ret;
    struct device_node *np = pdev->dev.of_node;

    ret = of_property_read_u32(np, "bus-num", &value);
    if (ret) {
        dev_err(&pdev->dev, "has no valid 'bus-num' property (%d)\n", ret);
        return NULL;
    }

    master = spi_busnum_to_master(value);
    /* Fix: the master lookup result was never checked (NULL deref) */
    if (!master) {
        dev_err(&pdev->dev, "cannot find spi master for bus %u\n", value);
        return NULL;
    }

    cfg->spi_device = spi_alloc_device(master);
    /* Fix: allocation result was never checked (NULL deref) */
    if (!cfg->spi_device) {
        dev_err(&pdev->dev, "cannot allocate spi device\n");
        return NULL;
    }
    cfg->spi_device->mode = SPI_MODE_3;
    cfg->spi_device->dev.of_node = pdev->dev.of_node;

    ret = of_property_read_u32(np, "spi-max-frequency", &value);
    if (ret) {
        dev_err(&pdev->dev, "has no valid 'spi-max-frequency' property (%d)\n", ret);
        /* Fix: the allocated spi_device leaked on this path */
        spi_dev_put(cfg->spi_device);
        cfg->spi_device = NULL;
        return NULL;
    }

    cfg->spi_device->max_speed_hz = value;
    cfg->spi_device->chip_select = 3;

    /* Fix: registration failure was previously ignored */
    if (spi_add_device(cfg->spi_device)) {
        dev_err(&pdev->dev, "cannot add spi device\n");
        spi_dev_put(cfg->spi_device);
        cfg->spi_device = NULL;
        return NULL;
    }

    cfg->reg_ops = &spi_reg_ops;
    return cfg;
}

/*
 * Platform-driver probe for an ASW-attached chip: claim the first free
 * dal_master slot, allocate the per-chip control structures, and bind
 * the register access backend (SMI / I2C / SPI) selected by
 * dal_glb.dal_io_mode (overridable via the dal_io_user_mode parameter).
 *
 * Returns 0 on success, -1 on failure (with all allocations rolled back).
 */
static int
linux_dal_asw_probe(struct platform_device *pdev)
{
    dal_kernel_asw_dev_t* dev = NULL;
    unsigned char lchip = 0;
    int32 ret = 0;

    CTC_PRINTK(KERN_WARNING "********found dal asw device deviceid:%d*****\n", dal_glb.dal_chip_num);

    /* find the first free dal_master slot */
    for (lchip = 0; lchip < CTC_MAX_LOCAL_CHIP_NUM; lchip++)
    {
        if (NULL == dal_master[lchip])
        {
            break;
        }
    }

    if (lchip >= CTC_MAX_LOCAL_CHIP_NUM)
    {
        CTC_PRINTK("Exceed max local chip num\n");
        return -1;
    }

    dal_master[lchip] = kmalloc(sizeof(dal_kernel_master_t), GFP_ATOMIC);
    if (NULL == dal_master[lchip])
    {
        printk("no memory for dal cpu dev, lchip %d\n", lchip);
        return -1;
    }
    memset(dal_master[lchip], 0, sizeof(dal_kernel_master_t));

    /* optional module parameter overrides the configured io mode */
    if (dal_io_user_mode)
    {
        dal_glb.dal_io_mode = simple_strtoul(dal_io_user_mode, NULL, 0);
        CTC_PRINTK("dal io mode: 0x%x \n", dal_glb.dal_io_mode);
    }

    if (NULL == dal_dev[lchip])
    {
        dal_dev[lchip] = kmalloc(sizeof(dal_kernel_asw_dev_t), GFP_ATOMIC);
        if (NULL == dal_dev[lchip])
        {
            CTC_PRINTK("no memory for dal soc dev, lchip %d\n", lchip);
            ret = -1;
            goto roll_back_0;
        }
    }
    dev = dal_dev[lchip];
    dal_glb.dal_chip_num++;

    dev->p_dev = pdev;

    /* bind the register access backend for the configured io mode */
    if (dal_glb.dal_io_mode == DAL_SMI_IO)
        dev = dal_smi_probe_of(pdev, dev);
    else if (dal_glb.dal_io_mode == DAL_I2C_IO)
        dev = dal_i2c_probe_of(pdev, dev);
    else if (dal_glb.dal_io_mode == DAL_SPI_IO)
        dev = dal_spi_probe_of(pdev, dev);
    else
    {
        CTC_PRINTK("%s: invalid dal_io_mode node\n", __func__);
        ret = -1;
        goto roll_back_1;
    }

    /* Fix: the probe helpers return NULL on failure — previously this was
     * ignored and the probe reported success with a half-initialized dev */
    if (NULL == dev)
    {
        ret = -1;
        goto roll_back_1;
    }

    dal_master[lchip]->active_type = active_type[lchip] = DAL_CPU_MODE_TYPE_ASW;
    return ret;

roll_back_1:
    kfree(dal_dev[lchip]);
    dal_dev[lchip] = NULL;          /* Fix: avoid dangling pointer on re-probe */
    dal_glb.dal_chip_num--;         /* Fix: undo the count taken above */
roll_back_0:
    kfree(dal_master[lchip]);
    dal_master[lchip] = NULL;       /* Fix: slot must read as free again */
    return ret;
}

/*
 * Platform-driver remove: find the dal_dev entry bound to <pdev>,
 * unhook it from the global table and free it.
 *
 * Always returns 0.
 */
static int
linux_dal_asw_remove(struct platform_device *pdev)
{
    unsigned int lchip = 0;
    dal_kernel_asw_dev_t* dev = NULL;

    for (lchip = 0; lchip < CTC_MAX_LOCAL_CHIP_NUM; lchip++)
    {
        dev = dal_dev[lchip];
        if ((NULL != dev) && (pdev == dev->p_dev))
        {
            /* Fix: clear bookkeeping BEFORE freeing — the original wrote
             * dev->p_dev after kfree(dev) (use-after-free) and left
             * dal_dev[lchip] dangling */
            dev->p_dev = NULL;
            dal_dev[lchip] = NULL;
            kfree(dev);
            break;
        }
    }

    printk("linux_dal_remove end \n");
    return 0;
}
#endif
/*
 * Read len/4 consecutive 32-bit words starting at register <offset>
 * through the device's registered reg_ops backend (SMI/I2C/SPI).
 * No-op (returns 0) when ASW support is not compiled in.
 */
int
dal_smi_read(unsigned char ldev, unsigned int offset, unsigned int len, unsigned int* p_value)
{
#ifdef ASW_ACTIVE
    dal_kernel_asw_dev_t* dev = NULL;
    unsigned int word;

    if (!VERIFY_CHIP_INDEX(ldev))
    {
        WARN_ON(1);
        return -1;
    }

    dev = dal_dev[ldev];
    for (word = 0; word < len / 4; word++)
    {
        unsigned int tmp = 0;
        dev->reg_ops->read_reg(dev, offset + word * 4, &tmp);
        p_value[word] = tmp;
    }
#endif
    return 0;
}
/*
 * Write len/4 consecutive 32-bit words starting at register <offset>
 * through the device's registered reg_ops backend (SMI/I2C/SPI).
 * No-op (returns 0) when ASW support is not compiled in.
 */
int
dal_smi_write(unsigned char ldev, unsigned int offset, unsigned int len, unsigned int* p_value)
{
#ifdef ASW_ACTIVE
    dal_kernel_asw_dev_t* dev = NULL;
    unsigned int word;

    if (!VERIFY_CHIP_INDEX(ldev))
    {
        WARN_ON(1);
        return -1;
    }

    dev = dal_dev[ldev];
    for (word = 0; word < len / 4; word++)
    {
        dev->reg_ops->write_reg(dev, offset + word * 4, p_value[word]);
    }
#endif
    return 0;
}

/*
 * Return the number of packet processors for logical device <ldev>.
 *
 * Uses the cached g_dal_dev_pp_num[] value when available; otherwise,
 * for Arctic devices, reads the part number from chip register
 * 0x00890320 and maps it to a pp count (other devices report 1).
 * Returns 0 for an out-of-range <ldev>.
 */
uint8
dal_device_get_pp_num(uint8 ldev)
{
    uint32 part_num = 0;
    uint8 slot_idx;

    if (ldev >= dal_glb.dal_chip_num)
    {
        return 0;
    }

    /* cached value available? */
    if (g_dal_dev_pp_num[ldev] != 0)
    {
        return g_dal_dev_pp_num[ldev];
    }

    /* ldev may not be initialized yet: fall back to ldev as the slot */
    slot_idx = g_dal_ldev_init[ldev] ? g_dal_ldev_2_slot[ldev] : ldev;

    if (DAL_ARCTIC_DEVICE_ID != ((struct pci_dev*)(dal_master[slot_idx]->dal_dev.pci_dev))->device)
    {
        return 1;
    }

    dal_pci_read_ext(ldev, 0x00890320, 1, &part_num);

    switch (part_num)
    {
    case 4:     /* CTC9262 */
        return 3;
    case 2:     /* CTC9260 */
        return 4;
    case 12:    /* CTC9282 */
        return 6;
    default:    /* CTC9280 */
        return 8;
    }
}


#define _KERNEL_DAL_IO
/*
 * Read one 32-bit word from the chip's memory-mapped register window.
 * Always returns 0.
 */
int
dal_pci_read(unsigned char ldev, unsigned int offset, unsigned int* value)
{
    /* logical-device -> slot conversion is mandatory before indexing
     * dal_master[] */
    uint8 slot = g_dal_ldev_2_slot[ldev];
    volatile unsigned int* reg;

    reg = (volatile unsigned int*)(dal_master[slot]->dal_dev.logic_address + offset);
    *value = *reg;

    return 0;
}

/*
 * Write one 32-bit word into the chip's memory-mapped register window.
 * Always returns 0.
 */
int
dal_pci_write(unsigned char ldev, unsigned int offset, unsigned int value)
{
    /* logical-device -> slot conversion is mandatory before indexing
     * dal_master[] */
    uint8 slot = g_dal_ldev_2_slot[ldev];
    volatile unsigned int* reg;

    reg = (volatile unsigned int*)(dal_master[slot]->dal_dev.logic_address + offset);
    *reg = value;

    return 0;
}

/*
 * Indirect register read through the chip's command/status mailbox.
 *
 * <addr> is the chip register address (bits up to 39 are used on the
 * PCIe path); <len> is the number of 32-bit words to read into <value>.
 * NOTE(review): the copy loop is a do/while on --len, so callers must
 * pass len >= 1 (len == 0 would wrap) — confirm at the call sites.
 *
 * Returns 0 on success, -1 when the chip does not assert reqProcDone
 * within ~2000 polls or reports reqProcError.
 */
int
dal_pci_read_ext(unsigned char ldev, unsigned long long addr, unsigned int len, unsigned int* value)
{
    
    ldev = g_dal_ldev_2_slot[ldev];
    /*Notice: must do ldev2slot convert, because of access to dal_master*/

    if (DAL_CPU_MODE_TYPE_PCIE == dal_master[ldev]->active_type)
    {
        /* build the command word: read request, entry size, data length */
        dal_master[ldev]->pci_cmd_status.val = 0;
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdReadType = 1;
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdEntryWords = (len == 16)? 0 : len;   /* normal operate only support 1 entry */
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdDataLen = len;
        /* pack high address bits: addr[37:32]->val[21:16], addr[39:38]->val[27:26] */
        dal_master[ldev]->pci_cmd_status.val |= (((addr >> 32) & 0x3F) << 16) | (((addr >> 38) & 0x3) << 26);
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) = dal_master[ldev]->pci_cmd_status.val;
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_ADDR) = addr & 0xFFFFFFFF;

        /* bounded busy-poll until the chip marks the request done */
        dal_master[ldev]->pci_cmd_timeout = 0;
        do{
            dal_master[ldev]->pci_cmd_status.val = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS);
        }while (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone) && (++dal_master[ldev]->pci_cmd_timeout < 2000));

        if(!dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone|| dal_master[ldev]->pci_cmd_status.cmd_status.reqProcError)
        {
          return -1;
        }

        /* copy the result words out of the chip's data buffer */
        dal_master[ldev]->pci_cmd_offset = 0;
        /* this mode is usual mode, for support mmap device access */
        do{
            *(value + dal_master[ldev]->pci_cmd_offset) = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_DATA_BUF + (dal_master[ldev]->pci_cmd_offset << 2));
            dal_master[ldev]->pci_cmd_offset += 1;
        }while (--len);

    }
#if defined(SOC_ACTIVE)
    if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[ldev]->active_type)
    {
        /* SoC path: same mailbox protocol but the full address is
         * written directly (no high-address bit packing) */
        dal_master[ldev]->pci_cmd_status.val = 0;
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdReadType = 1;
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdEntryWords = (len == 16)? 0 : len;   /* normal operate only support 1 entry */
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdDataLen = len;
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) = dal_master[ldev]->pci_cmd_status.val;
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_ADDR) = addr;

        dal_master[ldev]->pci_cmd_timeout = 0;
        do{
            dal_master[ldev]->pci_cmd_status.val = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS);
        }while (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone) && (++dal_master[ldev]->pci_cmd_timeout < 2000));

        if(!dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone || dal_master[ldev]->pci_cmd_status.cmd_status.reqProcError)
        {
          return -1;
        }

        dal_master[ldev]->pci_cmd_offset = 0;
        /* this mode is usual mode, for support mmap device access */
        do{
            *(value + dal_master[ldev]->pci_cmd_offset) = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_DATA_BUF + (dal_master[ldev]->pci_cmd_offset << 2));
            dal_master[ldev]->pci_cmd_offset += 1;
        }while (--len);
    }
#endif

    return 0;
}

/**
 * dal_pci_write_ext - write 'len' 32-bit words to the chip at 'addr'
 * through the memory-mapped command/data window.
 *
 * @ldev  logical device id (translated to a slot index internally)
 * @addr  chip address; on the PCIe path bits above 31 are folded into the
 *        command word and only the low 32 bits go to the address register
 * @len   number of 32-bit words to write; must be >= 1 (the do/while below
 *        decrements before testing, so len == 0 would wrap around)
 * @value source buffer holding 'len' words
 *
 * @return 0 on success (or when the device is in neither PCIE nor LOCAL
 *         mode), non-zero when reqProcDone was not seen within the
 *         polling budget.
 *
 * NOTE(review): unlike the read paths, reqProcError is not examined after
 * the write completes - confirm whether that is intentional.
 */
int
dal_pci_write_ext(unsigned char ldev, unsigned long long addr, unsigned int len, unsigned int* value)
{
    
    ldev = g_dal_ldev_2_slot[ldev];
    /*Notice: must do ldev2slot convert, because of access to dal_master*/

    if (DAL_CPU_MODE_TYPE_PCIE == dal_master[ldev]->active_type)
    {
        /* build the command word: write request, entry words, data length */
        dal_master[ldev]->pci_cmd_status.val = 0;
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdReadType = 0;
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdEntryWords = (len == 16)? 0 : len;   /* normal operate only support 1 entry */
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdDataLen = len;
        /* fold the high address bits into the command word */
        dal_master[ldev]->pci_cmd_status.val |= (((addr >> 32) & 0x3F) << 16) | (((addr >> 38) & 0x3) << 26);
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) = dal_master[ldev]->pci_cmd_status.val;
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_ADDR) = addr&0xFFFFFFFF;

        /* copy the payload into the device data buffer, one word at a time */
        dal_master[ldev]->pci_cmd_offset = 0;
        do{
            *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_DATA_BUF + (dal_master[ldev]->pci_cmd_offset << 2)) = value[dal_master[ldev]->pci_cmd_offset];
            dal_master[ldev]->pci_cmd_offset += 1;
        }while (--len);

        /* busy-poll (at most 2000 reads) for the device to ack the request */
        dal_master[ldev]->pci_cmd_timeout = 0;
        do{
            dal_master[ldev]->pci_cmd_status.val = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) ;
        }while (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone) && (++dal_master[ldev]->pci_cmd_timeout < 2000));

        return (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone));
    }
#if defined(SOC_ACTIVE)
    if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[ldev]->active_type)
    {
        /* local-bus variant: same protocol, but the address register takes
           the address as-is (no high-bit folding into the command word) */
        dal_master[ldev]->pci_cmd_status.val = 0;
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdReadType = 0;
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdEntryWords = (len == 16)? 0 : len;   /* normal operate only support 1 entry */
        dal_master[ldev]->pci_cmd_status.cmd_status.cmdDataLen = len;
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) = dal_master[ldev]->pci_cmd_status.val;
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_ADDR) = addr;

        dal_master[ldev]->pci_cmd_offset = 0;
        do{
            *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_DATA_BUF + (dal_master[ldev]->pci_cmd_offset << 2)) = value[dal_master[ldev]->pci_cmd_offset];
            dal_master[ldev]->pci_cmd_offset += 1;
        }while (--len);

        dal_master[ldev]->pci_cmd_timeout = 0;
        do{
            dal_master[ldev]->pci_cmd_status.val = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) ;
        }while (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone) && (++dal_master[ldev]->pci_cmd_timeout < 2000));

        return (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone));
    }
#endif

   return 0;
}


/**
 * dal_pci_read_ext2 - raw-command variant of the chip read: the command
 * word is assembled from literal bit fields instead of the cmd_status
 * bit-field struct.
 *
 * @ldev     logical device id (translated to a slot index internally)
 * @addr     chip address (high bits folded into the command word on PCIe)
 * @len      data length encoded into bits [11:4] of the command word
 * @value    destination buffer; receives 'real_len' words
 * @offset   starting BYTE offset inside the device data buffer
 * @real_len number of 32-bit words to copy out; must be >= 1 (do/while
 *           decrements before testing)
 *
 * @return 0 on success, -1 if the device never raised reqProcDone within
 *         2000 polls or flagged reqProcError.
 *
 * NOTE(review): command word = (1<<0 read) | (1<<3) | (len<<4) | (1<<12);
 * the meaning of bits 3 and 12 is not visible here - confirm against the
 * chip register spec.
 */
int
dal_pci_read_ext2(unsigned char ldev, unsigned long long addr, unsigned int len, unsigned int* value, uint8 offset, uint16 real_len)
{
    
    ldev = g_dal_ldev_2_slot[ldev];
    /*Notice: must do ldev2slot convert, because of access to dal_master*/

    if (DAL_CPU_MODE_TYPE_PCIE == dal_master[ldev]->active_type)
    {
        /* assemble the raw command word and issue the request */
        dal_master[ldev]->pci_cmd_status.val = 0;
        dal_master[ldev]->pci_cmd_status.val = ((1 << 0) | (1 << 3) | ((len & 0xff) << 4) | (1 << 12));
        dal_master[ldev]->pci_cmd_status.val |= (((addr >> 32) & 0x3F) << 16) | (((addr >> 38) & 0x3) << 26);
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) = dal_master[ldev]->pci_cmd_status.val;
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_ADDR) = addr & 0xFFFFFFFF;

        /* busy-poll for completion */
        dal_master[ldev]->pci_cmd_timeout = 0;
        do{
            dal_master[ldev]->pci_cmd_status.val = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS);
        }while (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone) && (++dal_master[ldev]->pci_cmd_timeout < 2000));

        if(!dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone || dal_master[ldev]->pci_cmd_status.cmd_status.reqProcError)
        {
          return -1;
        }

        /* copy out 'real_len' words starting at byte 'offset' of the buffer */
        dal_master[ldev]->pci_cmd_offset = 0;
        /* this mode is usual mode, for support mmap device access */
        do{
            *(value + (dal_master[ldev]->pci_cmd_offset++)) = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_DATA_BUF + offset);
            offset += 4;
        }while (--real_len);
    }
#if defined(SOC_ACTIVE)
    if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[ldev]->active_type)
    {
        /* local-bus variant: full address goes to the address register */
        dal_master[ldev]->pci_cmd_status.val = 0;
        dal_master[ldev]->pci_cmd_status.val = ((1 << 0) | (1 << 3) | ((len & 0xff) << 4) | (1 << 12));
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) = dal_master[ldev]->pci_cmd_status.val;
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_ADDR) = addr;

        dal_master[ldev]->pci_cmd_timeout = 0;
        do{
            dal_master[ldev]->pci_cmd_status.val = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS);
        }while (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone) && (++dal_master[ldev]->pci_cmd_timeout < 2000));

        if(!dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone || dal_master[ldev]->pci_cmd_status.cmd_status.reqProcError)
        {
          return -1;
        }

        dal_master[ldev]->pci_cmd_offset = 0;
        /* this mode is usual mode, for support mmap device access */
        do{
            *(value + (dal_master[ldev]->pci_cmd_offset++)) = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_DATA_BUF + offset);
            offset += 4;
        }while (--real_len);
    }
#endif
    return 0;
}

/**
 * dal_pci_write_ext2 - raw-command variant of the chip write; the command
 * word is assembled from literal bit fields (bit 0 clear = write request).
 *
 * @ldev  logical device id (translated to a slot index internally)
 * @addr  chip address (high bits folded into the command word on PCIe)
 * @len   number of 32-bit words to write AND the length field of the
 *        command word; must be >= 1 (do/while decrements before testing)
 * @value source buffer of 'len' words
 *
 * @return 0 on success, non-zero when reqProcDone was not observed within
 *         the polling budget. reqProcError is not checked here.
 */
int
dal_pci_write_ext2(unsigned char ldev, unsigned long long addr, unsigned int len, unsigned int* value)
{
    
    ldev = g_dal_ldev_2_slot[ldev];

    if (DAL_CPU_MODE_TYPE_PCIE == dal_master[ldev]->active_type)
    {
        /* assemble the raw command word: write, length, mode bits 3 and 12 */
        dal_master[ldev]->pci_cmd_status.val = 0;
        dal_master[ldev]->pci_cmd_status.val = ((0 << 0) | (1 << 3) | ((len & 0xff) << 4) | (1 << 12));
        dal_master[ldev]->pci_cmd_status.val |= (((addr >> 32) & 0x3F) << 16) | (((addr >> 38) & 0x3) << 26);
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) = dal_master[ldev]->pci_cmd_status.val;
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_ADDR) = addr & 0xFFFFFFFF;

        /* stage the payload into the device data buffer */
        dal_master[ldev]->pci_cmd_offset = 0;
        do{
            *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_DATA_BUF + (dal_master[ldev]->pci_cmd_offset << 2)) = value[dal_master[ldev]->pci_cmd_offset];
            dal_master[ldev]->pci_cmd_offset += 1;
        }while (--len);

        /* busy-poll (at most 2000 reads) for completion */
        dal_master[ldev]->pci_cmd_timeout = 0;
        do{
            dal_master[ldev]->pci_cmd_status.val = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) ;
        }while (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone) && (++dal_master[ldev]->pci_cmd_timeout < 2000));

        return (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone));
    }
#if defined(SOC_ACTIVE)
    if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[ldev]->active_type)
    {
        /* local-bus variant: full address goes to the address register */
        dal_master[ldev]->pci_cmd_status.val = 0;
        dal_master[ldev]->pci_cmd_status.val = ((0 << 0) | (1 << 3) | ((len & 0xff) << 4) | (1 << 12));
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) = dal_master[ldev]->pci_cmd_status.val;
        *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_ADDR) = addr;

        dal_master[ldev]->pci_cmd_offset = 0;
        do{
            *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_DATA_BUF + (dal_master[ldev]->pci_cmd_offset << 2)) = value[dal_master[ldev]->pci_cmd_offset];
            dal_master[ldev]->pci_cmd_offset += 1;
        }while (--len);

        dal_master[ldev]->pci_cmd_timeout = 0;
        do{
            dal_master[ldev]->pci_cmd_status.val = *(volatile unsigned int*)(dal_master[ldev]->dal_dev.logic_address + DAL_PCI_CMD_STATUS) ;
        }while (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone) && (++dal_master[ldev]->pci_cmd_timeout < 2000));

        return (!(dal_master[ldev]->pci_cmd_status.cmd_status.reqProcDone));
    }
#endif

    return 0;
}

/**
 * Read one dword from the PCI configuration space of the chip mapped to
 * logical device @ldev. Devices that are not in PCIe mode are skipped
 * silently (the call still reports success).
 *
 * @return 0 on success, -1 when the resolved slot index is invalid.
 */
int
dal_pci_conf_read(unsigned char ldev, unsigned int offset, unsigned int* value)
{
    uint8 slot = g_dal_ldev_2_slot[ldev];

    if (!VERIFY_CHIP_INDEX(slot))
    {
        return -1;
    }

    if (dal_master[slot]->active_type == DAL_CPU_MODE_TYPE_PCIE)
    {
        pci_read_config_dword(dal_master[slot]->dal_dev.pci_dev, offset, value);
    }

    return 0;
}

/**
 * Write one dword to the PCI configuration space of the chip mapped to
 * logical device @ldev. Devices that are not in PCIe mode are skipped
 * silently (the call still reports success).
 *
 * @return 0 on success, -1 when the resolved slot index is invalid.
 */
int
dal_pci_conf_write(unsigned char ldev, unsigned int offset, unsigned int value)
{
    uint8 slot = g_dal_ldev_2_slot[ldev];

    if (!VERIFY_CHIP_INDEX(slot))
    {
        return -1;
    }

    if (dal_master[slot]->active_type == DAL_CPU_MODE_TYPE_PCIE)
    {
        pci_write_config_dword(dal_master[slot]->dal_dev.pci_dev, offset, value);
    }

    return 0;
}

/**
 * IOCTL backend: copy a dal_pci_cfg_ioctl_t request from user space,
 * perform the PCI config-space read, and copy the result back.
 *
 * @return 0 on success, -EFAULT on copy or read failure.
 */
int
dal_user_read_pci_conf(unsigned long arg)
{
    dal_pci_cfg_ioctl_t cfg;

    if (copy_from_user(&cfg, (void*)arg, sizeof(cfg)))
    {
        return -EFAULT;
    }

    if (dal_pci_conf_read(cfg.lchip, cfg.offset, &cfg.value) != 0)
    {
        printk("dal_pci_conf_read failed.\n");
        return -EFAULT;
    }

    if (copy_to_user((dal_pci_cfg_ioctl_t*)arg, (void*)&cfg, sizeof(cfg)))
    {
        return -EFAULT;
    }

    return 0;
}

int
linux_dal_mmap0(struct file* flip, struct vm_area_struct* vma)
{
    size_t size = vma->vm_end - vma->vm_start;
    unsigned long pfn = 0;

    printk("linux_dal0_mmap begin.\n");

#if defined(SOC_ACTIVE)
    if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[0]->active_type)
    {
        pfn = (dal_master[0]->dal_dev.phys_address) >> PAGE_SHIFT;
    }
#endif
    if (DAL_CPU_MODE_TYPE_PCIE == dal_master[0]->active_type)
    {
        pfn = (dal_master[0]->dal_dev.phys_address) >> PAGE_SHIFT;
    }

    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot))
    {
        return -EAGAIN;
    }

    printk("linux_dal_mmap0 finish.\n");

    return 0;
}

/**
 * IOCTL backend: copy a dal_pci_cfg_ioctl_t request from user space and
 * forward it to the PCI config-space write helper.
 *
 * @return -EFAULT on copy failure, otherwise dal_pci_conf_write()'s result.
 */
int
dal_user_write_pci_conf(unsigned long arg)
{
    dal_pci_cfg_ioctl_t cfg;

    if (copy_from_user(&cfg, (void*)arg, sizeof(cfg)))
    {
        return -EFAULT;
    }

    return dal_pci_conf_write(cfg.lchip, cfg.offset, cfg.value);
}

/**
 * Invalidate CPU cache lines covering a DMA region before the CPU reads
 * data the device wrote. Compiles to a no-op unless the build disables
 * DMA_MEM_MODE_PLATFORM and enables DMA_CACHE_COHERENCE_EN.
 *
 * NOTE(review): dma_sync_single_for_cpu() is called with a NULL struct
 * device; recent kernels require a real device here - confirm the target
 * kernel version tolerates NULL.
 */
int32
dal_dma_cache_inval(uint8 ldev, unsigned long long ptr, int length)
{
#ifndef DMA_MEM_MODE_PLATFORM
#ifdef DMA_CACHE_COHERENCE_EN

    /*dma_cache_wback_inv((unsigned long)ptr, length);*/

    dma_sync_single_for_cpu(NULL, ptr, length, DMA_FROM_DEVICE);

    /*dma_cache_sync(NULL, (void*)bus_to_virt(ptr), length, DMA_FROM_DEVICE);*/
#endif
#endif
    return 0;
}

/**
 * Flush (write back) CPU cache lines covering a DMA region before the
 * device reads data the CPU wrote. Compiles to a no-op unless the build
 * disables DMA_MEM_MODE_PLATFORM and enables DMA_CACHE_COHERENCE_EN.
 *
 * NOTE(review): dma_sync_single_for_device() is called with a NULL struct
 * device; recent kernels require a real device here - confirm the target
 * kernel version tolerates NULL.
 */
int32
dal_dma_cache_flush(uint8 ldev, unsigned long long ptr, int length)
{
#ifndef DMA_MEM_MODE_PLATFORM
#ifdef DMA_CACHE_COHERENCE_EN

    /*dma_cache_wback_inv(ptr, length);*/

    dma_sync_single_for_device(NULL, ptr, length, DMA_TO_DEVICE);

    /*dma_cache_sync(NULL, (void*)bus_to_virt(ptr), length, DMA_TO_DEVICE);*/
#endif
#endif
    return 0;
}

#if defined(SOC_ACTIVE)
/**
 * Probe a local-bus (SoC) DAL device: claim a free master slot, map the
 * register window, collect the interrupt lines and carve out the DMA pool.
 *
 * Fixes vs. original:
 *  - allocate sizeof(dal_kernel_master_t), matching the memset and actual
 *    use (the original allocated sizeof(dal_kernel_dev_t), under-sizing
 *    the buffer);
 *  - error paths free dal_master[lchip] itself instead of the interior
 *    pointer &dal_master[lchip]->dal_dev (undefined behavior), and reset
 *    the slot to NULL so it can be reused;
 *  - dal_glb.dal_chip_num is only incremented once probing has succeeded,
 *    keeping the count consistent with the remove path;
 *  - the platform_get_resource() result is NULL-checked.
 */
static int linux_dal_local_probe(struct platform_device *pdev)
{
    dal_kernel_dev_t* dev = NULL;
    unsigned int temp = 0;
    unsigned char lchip = 0;
    int i = 0;
    int irq = 0;
    int ret = 0;
    struct resource * res = NULL;

    printk(KERN_WARNING "********found dal soc device deviceid:%d*****\n", dal_glb.dal_chip_num);

    /* find the first free master slot */
    for (lchip = 0; lchip < CTC_MAX_LOCAL_CHIP_NUM; lchip ++)
    {
        if (NULL == dal_master[lchip])
        {
            break;
        }
    }

    if (lchip >= CTC_MAX_LOCAL_CHIP_NUM)
    {
        printk("Exceed max local chip num\n");
        return -1;
    }

    dal_master[lchip] = kmalloc(sizeof(dal_kernel_master_t), GFP_ATOMIC);
    if (NULL == dal_master[lchip])
    {
        printk("no memory for dal soc dev, lchip %d\n", lchip);
        return -1;
    }
    memset(dal_master[lchip], 0, sizeof(dal_kernel_master_t));

    dev = &dal_master[lchip]->dal_dev;
    dev->pci_dev = (void*)pdev;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (NULL == res)
    {
        printk("can't get memory resource\n");
        ret = -ENODEV;
        goto err_free;
    }
    dev->phys_address = res->start;
    dev->logic_address = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(dev->logic_address))
    {
        ret = PTR_ERR(dev->logic_address);
        goto err_free;
    }

    /* fetch all interrupt vectors declared for this device */
    for (i = 0; i < DAL_MAX_INTR_NUM; i++)
    {
        irq = platform_get_irq(pdev, i);
        if (irq < 0)
        {
            printk( "can't get irq number\n");
            ret = irq;
            goto err_free;
        }
        dal_master[lchip]->msi_irq_base[i] = irq;
        printk( "irq %d vector %d\n", i, irq);
    }
    dal_master[lchip]->msi_irq_num = DAL_MAX_INTR_NUM;

    dal_master[lchip]->active_type = DAL_CPU_MODE_TYPE_LOCAL;

    /* probe the endianness marker at register 0x48; clear it once seen */
    temp = *(volatile unsigned int*)(dal_master[lchip]->dal_dev.logic_address + 0x48);
    if (((temp >> 8) & 0xffff) == 0x3412)
    {
        *(volatile unsigned int*)(dal_master[lchip]->dal_dev.logic_address + 0x48) = 0xFFFFFFFF;
    }
    printk("Little endian Cpu detected!!! \n");

    if (dal_glb.dma_mem_size)
    {
        dal_alloc_dma_pool(lchip, dal_glb.dma_mem_size);
        /* the DMA memory pool must not cross a 4G boundary */
        if ((dal_master[lchip]->dma_phy_base >> 32) != ((dal_master[lchip]->dma_phy_base + dal_glb.dma_mem_size) >> 32))
        {
            printk("Dma malloc memory cross 4G space!!!!!! \n");
            dal_free_dma_pool(lchip);
            ret = -1;
            goto err_free;
        }
    }

    dal_glb.dal_chip_num++;

    printk(KERN_WARNING "linux_dal_probe end \n");

    return 0;

err_free:
    kfree(dal_master[lchip]);
    dal_master[lchip] = NULL;
    return ret;
}
#endif

int linux_dal_pcie_probe(struct pci_dev* pdev, const struct pci_device_id* id)
{
    dal_kernel_dev_t* dev = NULL;
    unsigned int temp = 0;
    unsigned char lchip = 0;
    int bar = 0;
    int ret = 0;
    int endian_mode = 0;

    printk(KERN_WARNING "********found dal cpu device deviceid:%d*****\n", dal_glb.dal_chip_num);

    for (lchip = 0; lchip < CTC_MAX_LOCAL_CHIP_NUM; lchip ++)
    {
        if (NULL == dal_master[lchip])
        {
            break;
        }
    }

    if (lchip >= CTC_MAX_LOCAL_CHIP_NUM)
    {
        printk("Exceed max local chip num\n");
        return -1;
    }

    dal_master[lchip] = kmalloc(sizeof(dal_kernel_dev_t), GFP_ATOMIC);
    if (NULL == dal_master[lchip])
    {
        printk("no memory for dal soc dev, lchip %d\n", lchip);
        return -1;
    }
    memset(dal_master[lchip], 0, sizeof(dal_kernel_master_t));

    dev = &dal_master[lchip]->dal_dev;
    dal_glb.dal_chip_num++;
    dev->pci_dev = (void*)pdev;

    if (pci_enable_device(pdev) < 0)
    {
        printk("Cannot enable PCI device: vendor id = %x, device id = %x\n",
               pdev->vendor, pdev->device);
    }

    ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
    if (ret)
    {
        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
        printk("Could not set PCI DMA Mask\n");
        kfree(dev);
        return ret;
        }
    }

    if (pci_request_regions(pdev, DAL_NAME) < 0)
    {
        printk("Cannot obtain PCI resources\n");
    }

    if (pdev->device == 0x5236)
    {
        printk("use bar2 to config memory space\n");
        bar = 2;
    }

    dev->phys_address = pci_resource_start(pdev, bar);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
    dev->logic_address = ioremap(dev->phys_address,
                                                pci_resource_len(pdev, bar));
#else                                                
    dev->logic_address = ioremap_nocache(dev->phys_address,
                                                pci_resource_len(pdev, bar));
#endif

    /*0: little endian 1: big endian*/
    endian_mode = ((DAL_TSINGMA_DEVICE_ID == pdev->device) || (DAL_TSINGMA_MX_DEVICE_ID == pdev->device) || (DAL_TSINGMA_GX_DEVICE_ID == pdev->device)
                  || (DAL_ARCTIC_DEVICE_ID == pdev->device))?0:1;
    dal_master[lchip]->active_type = DAL_CPU_MODE_TYPE_PCIE;

    temp = *(volatile unsigned int*)(dal_master[lchip]->dal_dev.logic_address + 0x48);
    if (((temp >> 8) & 0xffff) == 0x3412)
    {
        endian_mode = ((DAL_TSINGMA_DEVICE_ID == pdev->device) || (DAL_TSINGMA_MX_DEVICE_ID == pdev->device) || (DAL_TSINGMA_GX_DEVICE_ID == pdev->device)
                       || (DAL_ARCTIC_DEVICE_ID == pdev->device))?1:0;
        *(volatile unsigned int*)(dal_master[lchip]->dal_dev.logic_address + 0x48) = 0xFFFFFFFF;                       
    }

    if (endian_mode)
    {
        printk("Big endian Cpu detected!!! \n");
    }
    else
    {
        printk("Little endian Cpu detected!!! \n");
    }

    pci_set_master(pdev);

    if (dal_glb.dma_mem_size)
    {
        dal_alloc_dma_pool(lchip , dal_glb.dma_mem_size);
        /*add check Dma memory pool cannot cross 4G space*/
        if ((dal_master[lchip]->dma_phy_base>>32) != ((dal_master[lchip]->dma_phy_base+dal_glb.dma_mem_size)>>32))
        {
            printk("Dma malloc memory cross 4G space!!!!!! \n");
            kfree(dev);
            return -1;
        }
    }

    printk(KERN_WARNING "linux_dal_probe end \n");

    return 0;
}

static int
linux_dal_get_device(unsigned long arg)
{
    dal_user_dev_t user_dev;
    int slot_idx = 0;

    if (copy_from_user(&user_dev, (void*)arg, sizeof(user_dev)))
    {
        return -EFAULT;
    }

    user_dev.chip_num = dal_glb.dal_chip_num;
    slot_idx = g_dal_ldev_2_slot[user_dev.lchip];

    if (slot_idx < CTC_MAX_LOCAL_CHIP_NUM)
    {
        if (DAL_CPU_MODE_TYPE_PCIE == dal_master[slot_idx]->active_type)
        {
            user_dev.phy_base0 = (unsigned int)(dal_master[slot_idx]->dal_dev.phys_address);
#ifdef PHYS_ADDR_IS_64BIT
            user_dev.phy_base1 = (unsigned int)(dal_master[slot_idx]->dal_dev.phys_address >> 32);
#endif
            user_dev.domain_no = pci_domain_nr(((struct pci_dev*)(dal_master[slot_idx]->dal_dev.pci_dev))->bus);
            user_dev.bus_no = ((struct pci_dev*)(dal_master[slot_idx]->dal_dev.pci_dev))->bus->number;
            user_dev.dev_no = ((struct pci_dev*)(dal_master[slot_idx]->dal_dev.pci_dev))->device;
            user_dev.fun_no = ((struct pci_dev*)(dal_master[slot_idx]->dal_dev.pci_dev))->devfn;
        }
#if defined(SOC_ACTIVE)
        if (DAL_CPU_MODE_TYPE_LOCAL == dal_master[slot_idx]->active_type)
        {
            user_dev.phy_base0 = (unsigned int)(dal_master[slot_idx]->dal_dev.phys_address);
            user_dev.phy_base1 = (unsigned int)(dal_master[slot_idx]->dal_dev.phys_address >> 32);
            user_dev.bus_no = 0;
            user_dev.dev_no = DAL_TSINGMA_DEVICE_ID;
            user_dev.fun_no = 0;
        }
#endif
    }

    if (copy_to_user((dal_user_dev_t*)arg, (void*)&user_dev, sizeof(user_dev)))
    {
        return -EFAULT;
    }

    return 0;
}

/* get dma information */
/**
 * Fill a dma_info_t with the physical base address (split into 32-bit
 * low/high halves) and total size of the DMA pool owned by logical device
 * 'ldev'. Always returns 0.
 *
 * NOTE(review): assumes dal_master[slot_idx] is populated for this ldev -
 * confirm callers only invoke this after a successful probe.
 */
int32
dal_get_dma_info(unsigned int ldev, void* p_info)
{
    dma_info_t* p_dma = NULL;
    uint8 slot_idx = 0;
    
    slot_idx = g_dal_ldev_2_slot[ldev];
    p_dma = (dma_info_t*)p_info;

    /* low 32 bits of the pool's physical base */
    p_dma->phy_base = (unsigned int)dal_master[slot_idx]->dma_phy_base;
#ifdef PHYS_ADDR_IS_64BIT
    p_dma->phy_base_hi = dal_master[slot_idx]->dma_phy_base >> 32;
#else
    p_dma->phy_base_hi = 0;
#endif

    p_dma->size = dal_glb.dma_mem_size;

    return 0;
}

/**
 * ioctl dispatcher for the DAL character device. The prototype differs by
 * kernel version: long-returning unlocked/compat ioctl on newer kernels,
 * the legacy inode-taking form before 2.6.36.
 *
 * Cleanup vs. original: removed the dead '#if 0' CMD_READ_CHIP/
 * CMD_WRITE_CHIP branch and an unreachable stray 'break;' that followed a
 * 'return' statement.
 *
 * Unknown commands are silently accepted and report success.
 */
#ifdef CONFIG_COMPAT
static long
linux_dal_ioctl(struct file* file,
                unsigned int cmd, unsigned long arg)
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
static long
linux_dal_ioctl(struct file* file,
                unsigned int cmd, unsigned long arg)
#else
static int
linux_dal_ioctl(struct inode* inode, struct file* file,
                unsigned int cmd, unsigned long arg)
#endif
#endif
{
    switch (cmd)
    {
    case CMD_GET_DEVICES:
        return linux_dal_get_device(arg);

    case CMD_PCI_CONFIG_READ:
        return dal_user_read_pci_conf(arg);

    case CMD_PCI_CONFIG_WRITE:
        return dal_user_write_pci_conf(arg);

    default:
        break;
    }

    return 0;
}

#if defined(SOC_ACTIVE)
/**
 * Platform-driver remove hook: locate the master slot that owns this
 * platform device, release its DMA pool and free the slot.
 */
static int
linux_dal_local_remove(struct platform_device *pdev)
{
    unsigned int lchip;

    for (lchip = 0; lchip < CTC_MAX_LOCAL_CHIP_NUM; lchip++)
    {
        if ((NULL == dal_master[lchip]) || (pdev != dal_master[lchip]->dal_dev.pci_dev))
        {
            continue;
        }

        /* found the owning slot: tear it down and stop searching */
        dal_free_dma_pool(lchip);
        dal_glb.dal_chip_num--;
        dal_master[lchip]->active_type = DAL_CPU_MODE_TYPE_NONE;
        kfree(dal_master[lchip]);
        dal_master[lchip] = NULL;
        break;
    }

    return 0;
}
#endif

/**
 * PCI-driver remove hook: locate the master slot that owns this pci_dev,
 * tear down MSI/MSI-X interrupts, release the DMA pool and PCI resources,
 * and free the slot.
 */
void
linux_dal_pcie_remove(struct pci_dev* pdev)
{
    unsigned char lchip = 0;
    unsigned int flag = 0;
    unsigned char index = 0;

    /* find the master slot bound to this pci_dev */
    for (lchip = 0; lchip < CTC_MAX_LOCAL_CHIP_NUM; lchip ++)
    {
        if ((NULL != dal_master[lchip])&& (pdev == dal_master[lchip]->dal_dev.pci_dev))
        {
            flag = 1;
            break;
        }
    }

    if (1 == flag)
    {
        /* unhook all registered MSI/MSI-X vectors before disabling them;
           msi_used == 1 means plain MSI, anything else means MSI-X */
        if(dal_master[lchip]->msi_used)
        {
            for (index = 0; index < dal_master[lchip]->msi_irq_num; index++)
            {
                dal_interrupt_unregister(dal_master[lchip]->msi_irq_base[index]);
            }
            _dal_set_msi_disable(lchip, (1 == dal_master[lchip]->msi_used)?DAL_MSI_TYPE_MSI:DAL_MSI_TYPE_MSIX);
        }
        dal_free_dma_pool(lchip);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        dal_glb.dal_chip_num--;
        dal_master[lchip]->active_type = DAL_CPU_MODE_TYPE_NONE;
        kfree(dal_master[lchip]);
        dal_master[lchip] = NULL;
    }
}

/* File operations for the DAL character device. The ioctl entry point
 * depends on the kernel configuration: compat + unlocked ioctl when
 * CONFIG_COMPAT is set, unlocked ioctl alone on >= 2.6.36, and the legacy
 * .ioctl slot on older kernels. mmap maps device 0's register window. */
static struct file_operations fops_dal =
{
    .owner = THIS_MODULE,
#ifdef CONFIG_COMPAT
    .compat_ioctl = linux_dal_ioctl,
    .unlocked_ioctl = linux_dal_ioctl,
    .mmap = linux_dal_mmap0,
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
    .unlocked_ioctl = linux_dal_ioctl,
    .mmap = linux_dal_mmap0,
#else
    .ioctl = linux_dal_ioctl,
    .mmap = linux_dal_mmap0,
#endif
#endif
};

/* PCIe driver registration: matches the device ids in dal_id_table. */
static struct pci_driver linux_dal_pcie_driver =
{
    .name = DAL_NAME,
    .id_table = dal_id_table,
    .probe = linux_dal_pcie_probe,
    .remove = linux_dal_pcie_remove,
};
#if defined(SOC_ACTIVE)
/* Platform (local-bus / SoC) driver registration; devices are matched via
 * the device-tree table linux_dal_of_match. */
static struct platform_driver linux_dal_local_driver =
{
    .probe = linux_dal_local_probe,
    .remove = linux_dal_local_remove,
    .driver = {
        .name = DAL_NAME,
        .of_match_table = of_match_ptr(linux_dal_of_match),
    },
};
#endif
#ifdef ASW_ACTIVE
/* ASW platform driver registration; devices are matched via the
 * device-tree table linux_dal_asw_of_match. */
static struct platform_driver linux_dal_asw_driver =
{
    .probe = linux_dal_asw_probe,
    .remove = linux_dal_asw_remove,
    .driver = {
        .name = DAL_NAME,
        .owner      = THIS_MODULE,
        .of_match_table = of_match_ptr(linux_dal_asw_of_match),
    },
};
#endif

/**
 * Module init: parse the dma_pool_size parameter, register the character
 * device and the PCIe/platform drivers, and install the interrupt
 * trampolines.
 *
 * Fixes vs. original:
 *  - an empty dma_pool_size string caused an out-of-bounds read via
 *    dma_pool_size[strlen(...) - 1]; it is now guarded;
 *  - when both SOC_ACTIVE and ASW_ACTIVE are built, a failure to register
 *    the ASW driver leaked the LOCALBUS driver registration; a dedicated
 *    rollback label now unregisters it;
 *  - the ASW failure message no longer misreports itself as "LOCALBUS".
 */
static int __init
linux_kernel_init(void)
{
    int ret;

    memset(&dal_glb, 0, sizeof(dal_glb));
#ifdef ASW_ACTIVE
    dal_glb.dal_io_mode = DAL_SMI_IO;
#endif
    dal_glb.dma_mem_size = 0x2000000;   /* default DMA pool: 32 MB */

    /* Parse "dma_pool_size=<n>M" (case-insensitive suffix via & ~0x20).
       Guard against an empty string before indexing strlen()-1. */
    if (dma_pool_size && (dma_pool_size[0] != '\0'))
    {
        if ((dma_pool_size[strlen(dma_pool_size) - 1] & ~0x20) == 'M')
        {
            dal_glb.dma_mem_size = simple_strtoul(dma_pool_size, NULL, 0);
            dal_glb.dma_mem_size *= DAL_MB_SIZE;
        }
        else
        {
            printk("DMA memory pool size must be specified as e.g. dma_pool_size=8M\n");
        }

        if (dal_glb.dma_mem_size & (dal_glb.dma_mem_size - 1))
        {
            printk("dma_mem_size must be a power of 2 (1M, 2M, 4M, 8M etc.)\n");
            dal_glb.dma_mem_size = 0;
        }
    }

    ret = register_chrdev(DAL_DEV_MAJOR, "linux_dal0", &fops_dal);
    if (ret < 0)
    {
        printk(KERN_WARNING "Register linux_dal device, ret %d\n", ret);
        return ret;
    }

    ret = pci_register_driver(&linux_dal_pcie_driver);
    if (ret < 0)
    {
        printk(KERN_WARNING "Register ASIC PCI driver failed, ret %d\n", ret);
        goto error_rollback0;
    }
#if defined(SOC_ACTIVE)
    ret = platform_driver_register(&linux_dal_local_driver);
    if (ret < 0)
    {
        printk(KERN_WARNING "Register ASIC LOCALBUS driver failed, ret %d\n", ret);
        goto error_rollback1;
    }
#endif
#if defined(ASW_ACTIVE)
    ret = platform_driver_register(&linux_dal_asw_driver);
    if (ret < 0)
    {
        printk(KERN_WARNING "Register ASIC ASW driver failed, ret %d\n", ret);
        goto error_rollback2;
    }
#endif

    /* init interrupt function */
    dal_glb.dal_isr[0].intr_handler_fun = intr0_handler;
    dal_glb.dal_isr[1].intr_handler_fun = intr1_handler;
    dal_glb.dal_isr[2].intr_handler_fun = intr2_handler;
    dal_glb.dal_isr[3].intr_handler_fun = intr3_handler;
    dal_glb.dal_isr[4].intr_handler_fun = intr4_handler;
    dal_glb.dal_isr[5].intr_handler_fun = intr5_handler;
    dal_glb.dal_isr[6].intr_handler_fun = intr6_handler;
    dal_glb.dal_isr[7].intr_handler_fun = intr7_handler;

    return ret;

#if defined(ASW_ACTIVE)
error_rollback2:
#if defined(SOC_ACTIVE)
    /* undo the LOCALBUS registration as well (the original leaked it) */
    platform_driver_unregister(&linux_dal_local_driver);
#endif
#endif
#if (defined(ASW_ACTIVE)) || (defined(SOC_ACTIVE))
error_rollback1:
    pci_unregister_driver(&linux_dal_pcie_driver);
#endif
error_rollback0:
    unregister_chrdev(DAL_DEV_MAJOR, "linux_dal0");
    return ret;
}


/**
 * Module exit: unhook all registered interrupts, tear down every remaining
 * master slot (MSI disable + DMA pool release), then unregister the
 * character device and the PCIe/platform drivers.
 */
static void __exit
linux_kernel_exit(void)
{
    int intr_idx = 0;
    int intr_num = 0;
    uint8 lchip = 0;

    intr_num = dal_glb.dal_intr_num;

    /* release every IRQ that was registered through the DAL */
    for (intr_idx = 0; intr_idx < intr_num; intr_idx++)
    {
        dal_interrupt_unregister(dal_glb.dal_isr[intr_idx].irq);
    }

    /* free all master slots that are still alive */
    for (lchip = 0; lchip < CTC_MAX_LOCAL_CHIP_NUM; lchip ++)
    {
        if (NULL == dal_master[lchip])
        {
            continue;
        }
        /* msi_used == 1 means plain MSI, anything else means MSI-X */
        if (dal_master[lchip]->msi_used)
        {
            _dal_set_msi_disable(lchip, (1 == dal_master[lchip]->msi_used)?DAL_MSI_TYPE_MSI:DAL_MSI_TYPE_MSIX);
        }
        dal_free_dma_pool(lchip);
        dal_glb.dal_chip_num--;
        dal_master[lchip]->active_type = DAL_CPU_MODE_TYPE_NONE;
        

        kfree(dal_master[lchip]);
        dal_master[lchip] = NULL;
    }
    unregister_chrdev(DAL_DEV_MAJOR, "linux_dal0");
    pci_unregister_driver(&linux_dal_pcie_driver);
#if defined(SOC_ACTIVE)
    platform_driver_unregister(&linux_dal_local_driver);
#endif
#if defined(ASW_ACTIVE)
    platform_driver_unregister(&linux_dal_asw_driver);
#endif

}

/**
 * Select the global device access type. Must be configured before
 * dal_op_init is called.
 *
 * @return 0 on success, DAL_E_INVALID_ACCESS for an out-of-range type.
 */
int32_t
dal_set_device_access_type(dal_access_type_t device_type)
{
    if (DAL_MAX_ACCESS_TYPE <= device_type)
    {
        return DAL_E_INVALID_ACCESS;
    }

    g_dal_access = device_type;

    return 0;
}

/** Report the currently configured device access type; always returns 0. */
int32_t
dal_get_device_access_type(dal_access_type_t* p_device_type)
{
    *p_device_type = g_dal_access;

    return 0;
}

/** Return the kernel virtual base address of ldev's register window. */
void*
dal_get_dev_addr(uint8 ldev)
{
    return dal_master[g_dal_ldev_2_slot[ldev]]->dal_dev.logic_address;
}


/** Report the number of currently probed local chips; always returns 0. */
int32_t
dal_get_chip_number(uint8* p_num)
{
    *p_num = dal_glb.dal_chip_num;
    return 0;
}

/**
 * Validate the lchip layout for logical device 'ldev':
 *  - GreatBelt / GoldenGate / TsingMa-AX devices must be laid out so that
 *    their lchip id equals their ldev id (they must precede Arctic chips
 *    in a multi-chip system);
 *  - the lchip ranges claimed by all earlier initialized logical devices
 *    must not overlap.
 *
 * Cleanup vs. original: removed a redundant sal_memset() of
 * lchip_activ_bmp, which is already zero-initialized by '= {0}'.
 *
 * @return 0 when the layout is consistent, -1 on conflict.
 */
int32_t
dal_check_ldev_lchip(uint8 ldev)
{
    uint8 index = 0;
    uint8 lchip = 0;
    uint8 pp_num = 0;
    int32 ret = 0;
    uint32 lchip_activ_bmp[8] = {0};   /* bitmap of lchip ids already claimed */
    uint32 dev_id = 0;
    uint8 slot_idx = 0;

    slot_idx = g_dal_ldev_2_slot[ldev];
    dev_id = ((struct pci_dev*)(dal_master[slot_idx]->dal_dev.pci_dev))->device;

    if ((dev_id == DAL_GREATBELT_DEVICE_ID || dev_id == DAL_GOLDENGATE_DEVICE_ID || dev_id == 
                DAL_GOLDENGATE_DEVICE_ID1 || dev_id == DAL_TSINGMA_AX_DEVICE_ID))
    {
        if (g_dal_dev_lchip[ldev] != ldev)
        {
          /*Notice: in multi-chip env, GreatBelt & GoldenGate & AX Chip Series must layout before Arctic Chip*/
          return -1;
        }
    }

    /* walk all earlier initialized logical devices and mark the lchip ids
       they occupy; any id seen twice is an allocation conflict */
    for (index = 0; index < ldev; index++)
    {
        if (g_dal_ldev_init[index] == 0)
        {
            continue;
        }

        pp_num = g_dal_dev_pp_num[index];
        for (lchip = g_dal_dev_lchip[index]; lchip < g_dal_dev_lchip[index] + pp_num; lchip++)
        {
            if (lchip_activ_bmp[lchip/32] & (1 << (lchip%32)))
            {
                /*lchip alloc conflict!*/
                return -1;
            }
            lchip_activ_bmp[lchip/32] |= (1 << (lchip%32));
        }
    }

    return ret;
}

/**
 * Fill @p_ldev with the per-logical-device information (packet-processor
 * count, base lchip id, PCI device id) for an initialized logical device.
 *
 * @return 0 on success, -1 when p_ldev is NULL or ldev is uninitialized.
 */
int32_t
dal_get_ldev_info(uint8 ldev, dal_ldev_info_t* p_ldev)
{
    uint8 slot;

    if ((NULL == p_ldev) || (0 == g_dal_ldev_init[ldev]))
    {
        return -1;
    }

    slot = g_dal_ldev_2_slot[ldev];

    p_ldev->pp_num = g_dal_dev_pp_num[ldev];
    p_ldev->lchip  = g_dal_dev_lchip[ldev];
    p_ldev->dev_id = ((struct pci_dev*)(dal_master[slot]->dal_dev.pci_dev))->device;

    return 0;
}


/* get local chip device id */
/**
 * Report the chip device id of logical device 'ldev'. PCIe devices report
 * their real PCI device id; local-bus and ASW devices report fixed ids.
 *
 * @return 0 on success, -1 when ldev is uninitialized.
 */
int32_t
dal_get_chip_dev_id(uint8 ldev, uint32* dev_id)
{
    uint8 slot;

    if (0 == g_dal_ldev_init[ldev])
    {
        return -1;
    }

    slot = g_dal_ldev_2_slot[ldev];

    switch (dal_master[slot]->active_type)
    {
    case DAL_CPU_MODE_TYPE_PCIE:
        *dev_id = ((struct pci_dev*)(dal_master[slot]->dal_dev.pci_dev))->device;
        break;
#if defined(SOC_ACTIVE)
    case DAL_CPU_MODE_TYPE_LOCAL:
        *dev_id = DAL_TSINGMA_DEVICE_ID;
        break;
#endif
#if defined(ASW_ACTIVE)
    case DAL_CPU_MODE_TYPE_ASW:
        *dev_id = DAL_TSINGMA_AX_DEVICE_ID;
        break;
#endif
    default:
        break;
    }

    return 0;
}

/**
 * Return 1 when logical device 'ldev' is driven through the local (SoC)
 * bus, 0 otherwise.
 *
 * NOTE(review): the function is declared bool yet returns -1 for an
 * uninitialized ldev; under C99 _Bool that -1 collapses to 'true', which
 * callers cannot distinguish from "soc active" - confirm the intended
 * error semantics.
 */
bool
dal_get_soc_active(uint8 ldev)
{
    uint8 slot_idx = 0;
    if (0 == g_dal_ldev_init[ldev])
    {
        return -1;
    }

    slot_idx = g_dal_ldev_2_slot[ldev];
    return (DAL_CPU_MODE_TYPE_LOCAL == dal_master[slot_idx]->active_type)?1:0;
}

/** True when logical device 'ldev' has been initialized. */
bool
dal_get_ldev_active(uint8 ldev)
{
    return (0 != g_dal_ldev_init[ldev]) ? 1 : 0;
}


module_init(linux_kernel_init);
module_exit(linux_kernel_exit);
