/*
 * Driver for XDMA for Xilinx XDMA IP core
 *
 * Copyright (C) 2007-2015 Sidebranch
 * Copyright (C) 2015 Xilinx, Inc.
 *
 * Leon Woestenberg <leon@sidebranch.com>
 * Richard Tobin <richard.tobin@xilinx.com>
 * Sonal Santan <sonal.santan@xilinx.com>
 */

/* SECTION: Header includes */

// rm -rf build/ && mkdir build && cd build && cmake .. && make

#include <linux/ioctl.h>
#include <linux/types.h>
#include <asm/io.h>

/* include early, to verify it depends only on the headers above */
#include "xdma-core.h"
#include "xdma-ioctl.h"
#include "xdma-sgm.h"
// #include "xbar_sys_parameters.h"
#include "version.h"

/* SECTION: Module licensing */

#pragma GCC diagnostic ignored "-Wdate-time"

#if XDMA_GPL
MODULE_LICENSE("GPL v2");
#else
MODULE_LICENSE("Copyright (C) 2018-2022 SZFHE, Inc.");
#endif
MODULE_AUTHOR("Rockyhoon Zhang <3214274@qq.com>");

MODULE_VERSION(DRV_MODULE_VERSION);

/* SECTION: Module parameters */

/* poll_mode: 1 = hardware polling, 0 = interrupt driven (default) */
static unsigned int poll_mode = 0;
module_param(poll_mode, uint, 0644);
MODULE_PARM_DESC(poll_mode, "Set 1 for hw polling, default is 0 (interrupts)");

/* enable_credit_mp: 1 = enable the credit feature, 0 = off (default) */
static unsigned int enable_credit_mp;
module_param(enable_credit_mp, uint, 0644);
MODULE_PARM_DESC(enable_credit_mp, "Set 1 to enable credit feature, default is 0 (no credit control)");

/* SECTION: Module global variables */

/* device class backing the sysfs entries for the character devices */
struct class *g_xdma_class; /* sys filesystem */
/* char-device region (major/minor base) allocated for this driver */
dev_t dev;

/* PCI vendor:device IDs this driver binds to (0x0709:0x9202) */
static const struct pci_device_id pci_ids[] = {
	{
		PCI_DEVICE(0x0709, 0x9202),
	},
	{
		0,
	}};
MODULE_DEVICE_TABLE(pci, pci_ids);

/* SECTION: Function prototypes */

/* PCIe HW register access */

/* upper bound on boards handled simultaneously; dev_present[i] marks
 * card slot i as in use — presumably set in probe() — TODO confirm */
#define MAX_XDMA_DEVICES 64
static char dev_present[MAX_XDMA_DEVICES];

/* SECTION: Callback tables */

/*
 * character device file operations for SG DMA engine
 */

/* index of the next card to be named; presumably advanced per probe —
 * TODO confirm against probe() (not visible in this chunk) */
static int cardId = 0;

/* character device node names, one per card, indexed by card number */
const char *cdev_name[10] = {
	"decd1",
	"decd2",
	"decd3",
	"decd4",
	"decd5",
	"decd6",
	"decd7",
	"decd8",
	"decd9",
	"decd10",
};


/* file operations for the SG DMA character device nodes (decdN) */
static const struct file_operations fileOps = {
	.owner = THIS_MODULE,
	.open = char_sgdma_open,
	.release = char_sgdma_close,
	.read = char_sgdma_read,
	.write = char_sgdma_write,
	.unlocked_ioctl = decd_ioctl,
};

/* PCI driver registration: binds probe/remove to the IDs in pci_ids */
static struct pci_driver pci_driver = {
	.name = DRV_NAME,
	.id_table = pci_ids,
	.probe = probe,
	.remove = remove,
};

/* SECTION: Function definitions */

/* write_register() - 32-bit write to a memory-mapped device register.
 * NOTE(review): parameter would normally be void __iomem * — TODO confirm
 * against xdma-core.h before changing the signature. */
inline void write_register(u32 value, void *iomem)
{
	iowrite32(value, iomem);
}

/* read_register() - 32-bit read from a memory-mapped device register.
 * NOTE(review): parameter would normally be void __iomem * — TODO confirm. */
inline u32 read_register(void *iomem)
{
	return ioread32(iomem);
}

/* build_u32() - pack two 16-bit halves into one 32-bit word:
 * @hi goes to bits [31:16], @lo to bits [15:0]. */
static inline u32 build_u32(u32 hi, u32 lo)
{
	u32 upper = hi & 0xFFFFUL;
	u32 lower = lo & 0xFFFFUL;

	return (upper << 16) | lower;
}


// static void interrupt_status(struct xdma_dev *lro)
// {
// 	struct interrupt_regs *reg = (struct interrupt_regs *)(lro->bar[lro->config_bar_idx] + XDMA_OFS_INT_CTRL);
// 	u32 w;

// 	pr_info(DRV_NAME "reg = %p\n", reg);
// 	pr_info(DRV_NAME "&reg->user_int_enable = %p\n", &reg->user_int_enable);

// 	w = read_register(&reg->user_int_enable);
// 	pr_info(DRV_NAME "user_int_enable = 0x%08x\n", w);
// 	w = read_register(&reg->channel_int_enable);
// 	pr_info(DRV_NAME "channel_int_enable = 0x%08x\n", w);

// 	w = read_register(&reg->user_int_request);
// 	pr_info(DRV_NAME "user_int_request = 0x%08x\n", w);
// 	w = read_register(&reg->channel_int_request);
// 	pr_info(DRV_NAME "channel_int_request = 0x%08x\n", w);

// 	w = read_register(&reg->user_int_pending);
// 	pr_info(DRV_NAME "user_int_pending = 0x%08x\n", w);
// 	w = read_register(&reg->channel_int_pending);
// 	pr_info(DRV_NAME "channel_int_pending = 0x%08x\n", w);
// }


/* channel_interrupts_enable -- Enable interrupts we are interested in.
 * Writes @mask to the write-1-to-set enable register, so only the bits
 * set in @mask are enabled; other channels are untouched. */
static void channel_interrupts_enable(struct xdma_dev *lro, u32 mask)
{
	struct interrupt_regs *reg = (struct interrupt_regs *)(lro->bar[lro->config_bar_idx] + XDMA_OFS_INT_CTRL);

	write_register(mask, &reg->channel_int_enable_w1s);
}

/* channel_interrupts_disable -- Disable interrupts we are not interested in.
 * Writes @mask to the write-1-to-clear enable register, so only the bits
 * set in @mask are disabled; other channels are untouched. */
static void channel_interrupts_disable(struct xdma_dev *lro, u32 mask)
{
	struct interrupt_regs *reg = (struct interrupt_regs *)(lro->bar[lro->config_bar_idx] + XDMA_OFS_INT_CTRL);

	write_register(mask, &reg->channel_int_enable_w1c);
}


/* read_interrupts -- Print the interrupt controller status.
 * Logs the pending user/channel interrupt requests and the channel
 * interrupt enables, and returns both request registers packed into one
 * word: user bits in [31:16], channel bits in [15:0]. */
static u32 read_interrupts(struct xdma_dev *lro)
{
	struct interrupt_regs *reg = (struct interrupt_regs *)(lro->bar[lro->config_bar_idx] + XDMA_OFS_INT_CTRL);
	u32 lo;
	u32 hi;

	/* extra debugging; inspect complete engine set of registers */
	hi = read_register(&reg->user_int_request);
	pr_info(DRV_NAME "ioread32(0x%p) returned 0x%08x (user_int_request).\n", &reg->user_int_request, hi);
	lo = read_register(&reg->channel_int_request);
	pr_info(DRV_NAME "ioread32(0x%p) returned 0x%08x (channel_int_request)\n", &reg->channel_int_request, lo);

	pr_info(DRV_NAME "ioread32(0x%p) returned 0x%08x (channel_int_enable)\n", &reg->channel_int_enable, read_register(&reg->channel_int_enable));

	/* return interrupts: user in upper 16-bits, channel in lower 16-bits */
	return build_u32(hi, lo);
}


/**
 * engine_status_read() - read status of SG DMA engine (optionally reset)
 *
 * Stores the value in engine->status and logs a decoded form.
 *
 * @engine engine whose status register is read
 * @clear  non-zero to read via status_rc (read-to-clear variant)
 *
 * @return the status register value (this function has no failure path;
 * an earlier comment claiming "-1 on failure" was inaccurate)
 */
static u32 engine_status_read(struct xdma_engine *engine, int clear)
{
	u32 value;

	BUG_ON(!engine);

	/* read status register */
	pr_info(DRV_NAME "Status of SG DMA %s engine:\n", engine->name);
	pr_info(DRV_NAME "ioread32(0x%p).\n", &engine->regs->status);
	if (clear)
	{
		/* status_rc clears the status bits as a side effect of the read */
		value = engine->status =
			read_register(&engine->regs->status_rc);
	}
	else
	{
		value = engine->status = read_register(&engine->regs->status);
	}
	/* decode the individual status bits for the log */
	pr_info(DRV_NAME "status = 0x%08x: %s%s%s%s%s%s%s%s%s\n", (u32)engine->status,
			(value & XDMA_STAT_BUSY) ? "BUSY " : "IDLE ",
			(value & XDMA_STAT_DESC_STOPPED) ? "DESC_STOPPED " : "",
			(value & XDMA_STAT_DESC_COMPLETED) ? "DESC_COMPLETED " : "",
			(value & XDMA_STAT_ALIGN_MISMATCH) ? "ALIGN_MISMATCH " : "",
			(value & XDMA_STAT_MAGIC_STOPPED) ? "MAGIC_STOPPED " : "",
			(value & XDMA_STAT_FETCH_STOPPED) ? "FETCH_STOPPED " : "",
			(value & XDMA_STAT_READ_ERROR) ? "READ_ERROR " : "",
			(value & XDMA_STAT_DESC_ERROR) ? "DESC_ERROR " : "",
			(value & XDMA_STAT_IDLE_STOPPED) ? "IDLE_STOPPED " : "");

	return value;
}

#if XDMA_STATUS_DUMPS
/* dump_engine_status() - log engine status without clearing it */
static inline void dump_engine_status(struct xdma_engine *engine)
{
	engine_status_read(engine, 0);
}
#else
/* status dumps compiled out: no-op stub */
static inline void dump_engine_status(struct xdma_engine *engine)
{
}
#endif


/**
 * engine_start() - start an idle engine with its first transfer on queue
 *
 * The engine will run and process all transfers that are queued using
 * transfer_queue() and thus have their descriptor lists chained.
 *
 * During the run, new transfers will be processed if transfer_queue() has
 * chained the descriptors before the hardware fetches the last descriptor.
 * A transfer that was chained too late will invoke a new run of the engine
 * initiated from the engine_service() routine.
 *
 * The engine must be idle and at least one transfer must be queued.
 * NOTE(review): contrary to an earlier comment, this function DOES take
 * engine->lock itself (spin_lock_irqsave below) — callers must NOT hold it.
 */
static int engine_start(struct xdma_engine *engine, struct xdma_transfer *transfer)
{
	u32 w;
	unsigned long flags;

	/* engine must be idle */
	BUG_ON(engine->running);
	BUG_ON(!transfer);

	/* engine is no longer shutdown */
	engine->shutdown = ENGINE_SHUTDOWN_NONE;

	/* initialize number of descriptors of dequeued transfers */
	engine->desc_dequeued = 0;
	
	spin_lock_irqsave(&engine->lock, flags);

	engine->running = 1;

	w = (u32)XDMA_CTRL_RUN_STOP;
	/* enable the channel interrupt bit for this engine's direction;
	 * masks 2 (H2C) and 8 (C2H) are hardware-specific bit positions —
	 * TODO confirm mapping and name these constants */
	if(engine->dir_to_dev == 1) {
		channel_interrupts_enable(engine->lro, 2);
	} else {
		channel_interrupts_enable(engine->lro, 8);
	}
	/* set RUN via the write-1-to-set control register */
	write_register(w, &engine->regs->control_w1s);

	spin_unlock_irqrestore(&engine->lock, flags);

	return 0;
}



/* sm1_engine_start() - start an idle engine for the sm1 (channel 0) DMA
 * pair. Same as engine_start() but does not touch the channel interrupt
 * enables. Takes engine->lock itself; callers must not hold it. */
static int sm1_engine_start(struct xdma_engine *engine, struct xdma_transfer *transfer)
{
	u32 w;
	unsigned long flags;

	/* engine must be idle */
	BUG_ON(engine->running);
	BUG_ON(!transfer);

	/* engine is no longer shutdown */
	engine->shutdown = ENGINE_SHUTDOWN_NONE;

	/* initialize number of descriptors of dequeued transfers */
	engine->desc_dequeued = 0;

	spin_lock_irqsave(&engine->lock, flags);

	engine->running = 1;

	/* set RUN via the write-1-to-set control register */
	w = (u32)XDMA_CTRL_RUN_STOP;
	write_register(w, &engine->regs->control_w1s);

	spin_unlock_irqrestore(&engine->lock, flags);

	return 0;
}


/**
 * engine_service() - service an SG DMA engine
 *
 * must be called with engine->lock already acquired
 * (engine_service_work() does this)
 *
 * @engine pointer to struct xdma_engine
 *
 * Marks the engine's current transfer COMPLETED and wakes the waiter
 * for the engines whose completion is waited on. Returns 0.
 */
static int engine_service(struct xdma_engine *engine)
{
	u32 desc_count;
	int rc = 0;

	BUG_ON(!engine);

	/* Service the engine */

	/* engine already stopped: nothing to complete */
	if (!engine->running)
	{
		pr_info(DRV_NAME "Engine was not running!!! Clearing status\n");
		return 0;
	}

	/* no transfer attached to this engine: nothing to complete */
	if(!engine->transfer) {
		pr_info(DRV_NAME "there is no transfer\n");
		return 0;
	}

	/*
	 * If called from the ISR, or if an error occurred, the descriptor
	 * count will be zero.  In this scenario, read the descriptor count
	 * from HW.  In polled mode descriptor completion, this read is
	 * unnecessary and is skipped to reduce latency
	 */
	desc_count = read_register(&engine->regs->completed_desc_count);

	/* account for already dequeued transfers during this engine run.
	 * NOTE(review): desc_count is not used after this adjustment; the
	 * register read itself may matter to the hardware — TODO confirm
	 * before removing. */
	desc_count -= engine->desc_dequeued;

	engine->transfer->state = TRANSFER_STATE_COMPLETED;

	/* wake the waiter only for channel-1 engines or the C2H side of
	 * channel 0 (the sm1 pair waits on its C2H completion) */
	if(engine->channel == 1 || (engine->channel == 0 && engine->dir_to_dev == 0)) {
		wake_up(&engine->transfer->wq);
	}

	return rc;
}

/* engine_service_work() - deferred (workqueue) half of the interrupt
 * handler. Runs engine_service() under the engine lock, then re-enables
 * the channel interrupt that xdma_isr() masked. */
static void engine_service_work(struct work_struct *work)
{
	struct xdma_engine *engine = NULL;
	struct xdma_dev *lro;
	u32 *usr_regs;	/* NOTE(review): assigned below but never used here */

	engine = container_of(work, struct xdma_engine, work);
	lro = engine->lro;
	BUG_ON(engine->magic != MAGIC_ENGINE);
	BUG_ON(lro->magic != MAGIC_DEVICE);

	usr_regs = (u32 *)(engine->lro->bar[engine->lro->user_bar_idx]);

	spin_lock_irq(&engine->lock);

	/* channel 0 is the paired "sm1" DMA: service its H2C engine first,
	 * then the engine this work item belongs to */
	if (engine->channel == 0) //that's for sm1 dma channel
	{
		engine_service(lro->engine[0][DIR_H2C]);
		engine_service(engine);
	}
	else
		engine_service(engine);

	/* re-enable channel interrupt mask 4 (bit 2) — hardware-specific;
	 * TODO confirm which engine this bit maps to */
	channel_interrupts_enable(engine->lro, 4);

	spin_unlock_irq(&engine->lock);
}


/*
 * xdma_isr() - Interrupt handler
 *
 * Reads the pending channel interrupts, clears channel-0 engine status
 * (via the read-to-clear registers), masks the interrupting channels
 * and schedules the per-engine deferred work.
 *
 * @dev_id pointer to xdma_dev
 */
static irqreturn_t xdma_isr(int irq, void *dev_id)
{
	u32 ch_irq;
	struct xdma_dev *lro;
	struct interrupt_regs *irq_regs;
	struct xdma_engine *engine;
	int channel;

	BUG_ON(!dev_id);
	lro = (struct xdma_dev *)dev_id;
	/* NOTE(review): redundant — BUG_ON above already fired if NULL */
	if (!lro) {
		WARN_ON(!lro);
		pr_info(DRV_NAME "xdma_isr(irq=%d) lro=%p ??\n", irq, lro);
		return IRQ_NONE;
	}

	irq_regs = (struct interrupt_regs *)(lro->bar[lro->config_bar_idx] + XDMA_OFS_INT_CTRL);

	/* which channel interrupts are pending? */
	ch_irq = read_register(&irq_regs->channel_int_request);

	if(ch_irq != 0) {
		/* read-to-clear the channel-0 engine status registers */
		read_register(&(lro->engine[0][DIR_H2C]->regs->status_rc));
		read_register(&(lro->engine[0][DIR_C2H]->regs->status_rc));
		/* mask the interrupting channels (plus bit 0) until the
		 * deferred work re-enables them */
		write_register(ch_irq | 1, &irq_regs->channel_int_enable_w1c);
	}

	/* spurious interrupt: nothing pending, not ours */
	if(ch_irq == 0) {
		pr_info(DRV_NAME "EmptyIRQ ch_irq = %d\n", ch_irq);
		return IRQ_NONE;
	}

	/* iterate over H2C (PCIe read) */
	for (channel = 0; channel < XDMA_CHANNEL_NUM_MAX; channel++)
	{
		engine = lro->engine[channel][DIR_H2C];
		/* engine present and its interrupt fired? */
		if (engine && (engine->irq_bitmask & ch_irq))
		{
			schedule_work(&engine->work);
		}
	}

	/* iterate over C2H (PCIe write) */
	for (channel = 0; channel < XDMA_CHANNEL_NUM_MAX; channel++)
	{
		engine = lro->engine[channel][DIR_C2H];
		/* engine present and its interrupt fired? */
		if (engine && (engine->irq_bitmask & ch_irq))
		{
			schedule_work(&engine->work);
		}
	}

	lro->irq_count++;
	return IRQ_HANDLED;
}


/*
 * unmap_bars() - undo map_bars(): release every BAR mapping that is
 * still active and reset the corresponding lro->bar[] slot to NULL.
 */
static void unmap_bars(struct xdma_dev *lro, struct pci_dev *dev)
{
	int bar;

	for (bar = 0; bar < XDMA_BAR_NUM; bar++)
	{
		/* skip BARs that were never mapped */
		if (!lro->bar[bar])
			continue;

		pci_iounmap(dev, lro->bar[bar]);
		/* mark slot as unmapped so a second call is harmless */
		lro->bar[bar] = NULL;
	}
}


/* map_single_bar() - map one PCI BAR into kernel virtual address space.
 *
 * @lro driver state; lro->bar[idx] receives the mapping (or NULL)
 * @dev the PCI device
 * @idx BAR index to map
 *
 * Returns the mapped length in bytes; 0 when the BAR is absent (not an
 * error), -1 when pci_iomap() fails.
 */
static int map_single_bar(struct xdma_dev *lro, struct pci_dev *dev, int idx)
{
	resource_size_t bar_start;
	resource_size_t bar_len;
	resource_size_t map_len;

	bar_start = pci_resource_start(dev, idx);
	bar_len = pci_resource_len(dev, idx);
	map_len = bar_len;

	lro->bar[idx] = NULL;

	/* do not map BARs with length 0. Note that start MAY be 0! */
	if (!bar_len)
	{
		pr_info(DRV_NAME "BAR #%d is not present - skipping\n", idx);
		return 0;
	}

	/* BAR size exceeds maximum desired mapping? */
	if (bar_len > INT_MAX)
	{
		pr_info(DRV_NAME "Limit BAR %d mapping from %llu to %d bytes\n", idx, (u64)bar_len, INT_MAX);
		map_len = (resource_size_t)INT_MAX;
	}
	/*
	 * map the full device memory or IO region into kernel virtual address space
	 */
	pr_info(DRV_NAME "BAR%d: %llu bytes to be mapped.\n", idx, (u64)map_len);
	lro->bar[idx] = pci_iomap(dev, idx, map_len);

	if (!lro->bar[idx])
	{
		/* fix: log line was missing its terminating newline */
		pr_info(DRV_NAME "Could not map BAR %d\n", idx);
		return -1;
	}

	pr_info(DRV_NAME "BAR%d at 0x%llx mapped at 0x%p, length=%llu(/%llu)\n", idx, (u64)bar_start, lro->bar[idx], (u64)map_len, (u64)bar_len);

	return (int)map_len;
}


/* is_config_bar() - probe whether BAR @idx is the XDMA config BAR by
 * checking the IRQ-block and config-block identifier registers at their
 * expected offsets. Returns 1 if both match, 0 otherwise. */
static int is_config_bar(struct xdma_dev *lro, int idx)
{
	u32 irq_id = 0;
	u32 cfg_id = 0;
	int flag = 0;
	u32 mask = 0xffff0000; /* Compare only XDMA ID's not Version number */
	struct interrupt_regs *irq_regs = (struct interrupt_regs *)(lro->bar[idx] + XDMA_OFS_INT_CTRL);
	struct config_regs *cfg_regs = (struct config_regs *)(lro->bar[idx] + XDMA_OFS_CONFIG);

	irq_id = read_register(&irq_regs->identifier);
	cfg_id = read_register(&cfg_regs->identifier);

	if (((irq_id & mask) == IRQ_BLOCK_ID) && ((cfg_id & mask) == CONFIG_BLOCK_ID))
	{
		pr_info(DRV_NAME "BAR %d is the XDMA config BAR\n", idx);
		flag = 1;
	}
	else
	{
		pr_info(DRV_NAME "BAR %d is not XDMA config BAR, irq_id = %x, cfg_id = %x\n", idx, irq_id, cfg_id);
		flag = 0;
	}

	return flag;
}


/* identify_bars() - assign user/bypass BAR indices from the position of
 * the XDMA config BAR within the detected BARs.
 *
 * The following logic identifies which BARs contain what functionality
 * based on the position of the XDMA config BAR and the number of BARs
 * detected. The rules are that the user logic and bypass logic BARs
 * are optional.  When both are present, the XDMA config BAR will be the
 * 2nd BAR detected (config_bar_pos = 1), with the user logic being
 * detected first and the bypass being detected last. When one is
 * omitted, the type of BAR present can be identified by whether the
 * XDMA config BAR is detected first or last.  When both are omitted,
 * only the XDMA config BAR is present.  This somewhat convoluted
 * approach is used instead of relying on BAR numbers in order to work
 * correctly with both 32-bit and 64-bit BARs.
 *
 * @lro driver state (user_bar_idx / bypass_bar_idx are written)
 * @bar_id_list BAR indices in detection order
 * @num_bars number of valid entries in @bar_id_list
 * @config_bar_pos position of the config BAR within @bar_id_list
 */
static void identify_bars(struct xdma_dev *lro, int *bar_id_list, int num_bars, int config_bar_pos)
{
	BUG_ON(!lro);
	BUG_ON(!bar_id_list);

	switch (num_bars)
	{
	case 1:
		/* Only one BAR present - no extra work necessary */
		break;

	case 2:
		if (config_bar_pos == 0)
		{
			lro->bypass_bar_idx = bar_id_list[1];
		}
		else if (config_bar_pos == 1)
		{
			lro->user_bar_idx = bar_id_list[0];
		}
		else
		{
			pr_info(DRV_NAME "case 2\n");
			/* fix: log line was missing its terminating newline */
			pr_info(DRV_NAME "XDMA config BAR in unexpected position (%d)\n", config_bar_pos);
		}
		break;

	case 3:
		if (config_bar_pos == 1)
		{
			lro->user_bar_idx = bar_id_list[0];
			lro->bypass_bar_idx = bar_id_list[2];
		}
		else
		{
			pr_info(DRV_NAME "case 3\n");
			/* fix: log line was missing its terminating newline */
			pr_info(DRV_NAME "XDMA config BAR in unexpected position (%d)\n", config_bar_pos);
		}
		break;

	default:
		/* Should not occur - warn user but safe to continue */
		pr_info(DRV_NAME "Unexpected number of BARs (%d)\n", num_bars);
		pr_info(DRV_NAME "Only XDMA config BAR accessible\n");
		break;
	}
}

/* map_bars() -- map device regions into kernel virtual address space
 *
 * Map the device memory regions into kernel virtual address space after
 * verifying their sizes respect the minimum sizes needed.
 *
 * Returns 0 on success, -1 on failure (with any already-created
 * mappings unwound via unmap_bars()).
 */
static int map_bars(struct xdma_dev *lro, struct pci_dev *dev)
{
	int rc;
	int i;
	int bar_id_list[XDMA_BAR_NUM];	/* BAR indices in detection order */
	int bar_id_idx = 0;		/* number of valid entries above */
	int config_bar_pos = 0;

	/* iterate through all the BARs */
	for (i = 0; i < XDMA_BAR_NUM; i++)
	{
		int bar_len;

		bar_len = map_single_bar(lro, dev, i);
		if (bar_len == 0)
		{
			/* BAR absent: not an error, just not recorded */
			continue;
		}
		else if (bar_len < 0)
		{
			rc = -1;
			goto fail;
		}

		/* Try to identify BAR as XDMA control BAR */
		if ((bar_len >= XDMA_BAR_SIZE) && (lro->config_bar_idx < 0))
		{

			if (is_config_bar(lro, i))
			{
				lro->config_bar_idx = i;
				config_bar_pos = bar_id_idx;
			}
		}

		bar_id_list[bar_id_idx] = i;
		bar_id_idx++;
	}

	/* The XDMA config BAR must always be present */
	if (lro->config_bar_idx < 0)
	{
		pr_info(DRV_NAME "Failed to detect XDMA config BAR\n");
		rc = -1;
		goto fail;
	}

	identify_bars(lro, bar_id_list, bar_id_idx, config_bar_pos);
	pr_info(DRV_NAME "lro->config_bar_idx = %x\n", lro->config_bar_idx);
	pr_info(DRV_NAME "lro->user_bar_idx = %x\n", lro->user_bar_idx);
	pr_info(DRV_NAME "lro->bypass_bar_idx = %x\n", lro->bypass_bar_idx);

	/* successfully mapped all required BAR regions */
	rc = 0;
	goto success;
fail:
	/* unwind; unmap any BARs that we did map */
	unmap_bars(lro, dev);
success:
	return rc;
}

// static void dump_desc(struct xdma_desc *desc_virt)
// {
// 	int j;
// 	u32 *p = (u32 *)desc_virt;
// 	static char *const field_name[] = {
// 		"magic|extra_adjacent|control", "bytes", "src_addr_lo",
// 		"src_addr_hi", "dst_addr_lo", "dst_addr_hi", "next_addr",
// 		"next_addr_pad"};
// 	char *dummy;

// 	/* remove warning about unused variable when debug printing is off */
// 	dummy = field_name[0];

// 	for (j = 0; j < 8; j += 1)
// 	{
// 		// dbg_desc("0x%08lx/0x%02lx: 0x%08x 0x%08x %s\n",
// 		// 		 (uintptr_t)p, (uintptr_t)p & 15, (int)*p,
// 		// 		 le32_to_cpu(*p), field_name[j]);
// 		p++;
// 	}
// 	// dbg_desc("\n");
// }


// static void transfer_dump(struct xdma_transfer *transfer)
// {
// 	int i;
// 	struct xdma_desc *desc_virt = transfer->desc_virt;

// 	dbg_desc("Descriptor Entry (Pre-Transfer)\n");
// 	for (i = 0; i < transfer->desc_num; i += 1)
// 		dump_desc(desc_virt + i);
// }


/* xdma_desc_free - Free cache-coherent linked list of N descriptors.
 *
 * @dev Pointer to pci_dev
 * @number Number of descriptors to be freed (must match the allocation)
 * @desc_virt Pointer to (i.e. virtual address of) first descriptor in list
 * @desc_bus Bus address of first descriptor in list
 *
 * Uses dma_free_coherent() on >= 6.5 kernels where the legacy
 * pci_free_consistent() wrapper was removed.
 */
static void xdma_desc_free(struct pci_dev *dev, int number, struct xdma_desc *desc_virt, dma_addr_t desc_bus)
{
	BUG_ON(!desc_virt);
	BUG_ON(number < 0);
	/* free contiguous list */
	
	#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
		dma_free_coherent(&(dev->dev), number * sizeof(struct xdma_desc), desc_virt, desc_bus);
	#else
		pci_free_consistent(dev, number * sizeof(struct xdma_desc), desc_virt, desc_bus);
	#endif
}


/* engine_alignments() - read the engine's alignments register and record
 * address alignment, length granularity and address width; falls back to
 * defaults (1 / 1 / 64) when the register reads zero. */
static void engine_alignments(struct xdma_engine *engine)
{
	u32 reg;
	u32 align_bytes;
	u32 granularity_bytes;
	u32 address_bits;

	reg = read_register(&engine->regs->alignments);
	pr_info(DRV_NAME "engine %p name %s alignments=0x%08x\n", engine, engine->name, (int)reg);

	/* field layout: [23:16] align, [15:8] granularity, [7:0] addr bits */
	align_bytes = (reg >> 16) & 0xffU;
	granularity_bytes = (reg >> 8) & 0xffU;
	address_bits = reg & 0xffU;

	pr_info(DRV_NAME "align_bytes = %d\n", align_bytes);
	pr_info(DRV_NAME "granularity_bytes = %d\n", granularity_bytes);
	pr_info(DRV_NAME "address_bits = %d\n", address_bits);

	/* a zero register means alignments are unspecified: use defaults */
	engine->addr_align = reg ? align_bytes : 1;
	engine->len_granularity = reg ? granularity_bytes : 1;
	engine->addr_bits = reg ? address_bits : 64;
}


/* engine_destroy() - tear down one SG DMA engine: mask its interrupts,
 * free the bookkeeping structure and decrement the device engine count.
 * NOTE(review): does not cancel pending work (engine->work) — assumed to
 * be handled by the caller / shutdown path — TODO confirm. */
static void engine_destroy(struct xdma_dev *lro, struct xdma_engine *engine)
{
	BUG_ON(!lro);
	BUG_ON(!engine);

	/* Disable interrupts to stop processing new events during shutdown */
	write_register(0x0, &engine->regs->interrupt_enable_mask);

	/* Release memory for the engine */
	kfree(engine);

	/* Decrement the number of engines available */
	lro->engines_num--;
}


/* engine_create() - Create an SG DMA engine bookkeeping data structure
 *
 * An SG DMA engine consists of the resources for a single-direction transfer
 * queue; the SG DMA hardware, the software queue and interrupt handling.
 *
 * @lro parent XDMA device
 * @offset byte address offset in BAR[lro->config_bar_idx] resource for the
 * SG DMA controller registers.
 * @dir_to_dev Whether the engine transfers to the device (PCIe Rd).
 * @channel DMA channel index this engine belongs to.
 *
 * Returns the new engine, or NULL on allocation failure.
 */
static struct xdma_engine *engine_create(struct xdma_dev *lro, int offset, int dir_to_dev, int channel)
{
	u32 reg_value;
	int sgdma_offset = offset + SGDMA_OFFSET_FROM_CHANNEL;
	/* allocate data structure for engine book keeping */
	struct xdma_engine *engine;

	engine = kzalloc(sizeof(struct xdma_engine), GFP_KERNEL);

	/* memory allocation failure? */
	if (!engine)
		return NULL;

	/* set magic */
	engine->magic = MAGIC_ENGINE;

	/* indices */
	engine->channel = channel;
	engine->number_in_channel = !dir_to_dev;

	engine->non_incr_addr = 1;

	/* engine interrupt request bits: one contiguous group per engine */
	engine->irq_bitmask = (1 << XDMA_ENG_IRQ_NUM) - 1;
	engine->irq_bitmask <<= (lro->engines_num * XDMA_ENG_IRQ_NUM);
	/* h2c dma of channel_0 (sm1 dma) must not interrupt (ZJH 2019/8/1) */
	engine->irq_bitmask &= 0xfffffffeUL;

	engine->bypass_offset = lro->engines_num * BYPASS_MODE_SPACING;

	/* initialize spinlock */
	spin_lock_init(&engine->lock);
	/* parent */
	engine->lro = lro;
	/* register addresses */
	engine->regs = (lro->bar[lro->config_bar_idx] + offset);
	engine->sgdma_regs = (lro->bar[lro->config_bar_idx] + sgdma_offset);
	/* remember SG DMA direction */
	engine->dir_to_dev = dir_to_dev;
	engine->name = engine->dir_to_dev ? "H2C" : "C2H";
	engine->streaming = get_engine_type(engine->regs);

	pr_info(DRV_NAME "engine %p name %s irq_bitmask=0x%08x\n", engine, engine->name, (int)engine->irq_bitmask);

	/* initialize the deferred work for transfer completion */
	INIT_WORK(&engine->work, engine_service_work);

	lro->engines_num++;

	engine_alignments(engine);

	/* enable all interrupt sources, then start bits via W1S registers */
	reg_value = XDMA_CTRL_IE_ALL;
	write_register(reg_value, &engine->regs->interrupt_enable_mask_w1s);
	write_register(reg_value, &engine->regs->control_w1s);

	/* all engine setup completed successfully
	 * (fix: removed a pointless `goto success; success:` pair) */
	return engine;
}

/* transfer_destroy() - free a transfer: unmap the scatterlist, release
 * (and dirty) any pinned user pages, free the descriptor list and the
 * transfer structure itself. */
static void transfer_destroy(struct xdma_dev *lro, struct xdma_transfer *transfer)
{
	/* user space buffer was locked in on account of transfer? */
	if (transfer->sgm)
	{
		/* unmap scatterlist */
		/* the direction is needed to synchronize caches */
		
		#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
			dma_unmap_sg(&(lro->pci_dev->dev), transfer->sgm->sgl, transfer->sgm->mapped_pages, transfer->dir_to_dev ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		#else
			pci_unmap_sg(lro->pci_dev, transfer->sgm->sgl, transfer->sgm->mapped_pages, transfer->dir_to_dev ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		#endif

		if (transfer->userspace) {
			/* dirty and unlock the pages (dirty only for C2H reads) */
			sgm_put_user_pages(transfer->sgm, transfer->dir_to_dev ? 0 : 1);
		}
		transfer->sgm->mapped_pages = 0;
		sg_destroy_mapper(transfer->sgm);
	}

	/* free descriptors */
	xdma_desc_free(lro->pci_dev, transfer->sgl_nents, transfer->desc_virt, transfer->desc_bus);
	/* free transfer */
	kfree(transfer);
}


/* transfer_create() - build a DMA transfer: pin the pages backing
 * @start/@cnt, map them for DMA, allocate and fill the hardware
 * descriptor list and point the engine's SGDMA registers at it.
 *
 * @engine engine that will execute the transfer
 * @lro parent device
 * @start buffer start (user or kernel address depending on @userspace)
 * @cnt transfer length in bytes
 * @ep_addr, @non_incr_addr, @force_new_desc currently unused here
 * @dir_to_dev 1 = H2C (host reads into device), 0 = C2H
 * @userspace 1 when @start is a user-space pointer
 *
 * Returns the new transfer, or NULL on failure. Fix: the original leaked
 * the transfer struct, the sg mapper and pinned pages on every error
 * path, and ignored the return code of sgm_get_user_pages()/
 * sgm_kernel_pages(); all error paths now unwind.
 */
static struct xdma_transfer *transfer_create(struct xdma_engine *engine, struct xdma_dev *lro, const char *start, size_t cnt, u64 ep_addr, int dir_to_dev, int non_incr_addr, int force_new_desc, int userspace)
{
	int rc;
	struct xdma_transfer *transfer;
	u32 w;
	dma_addr_t desc_bus;
	int i;

	/* allocate transfer data structure */
	transfer = kzalloc(sizeof(struct xdma_transfer), GFP_KERNEL);
	if (!transfer)
		return NULL;

	transfer->dir_to_dev = dir_to_dev;
	transfer->userspace = userspace;
	transfer->sgm = sg_create_mapper(cnt);
	if (!transfer->sgm) {
		pr_info(DRV_NAME "sg_create_mapper(%zu) failed\n", cnt);
		goto fail_transfer;
	}

	/* lock user pages in memory and create a scatter gather list */
	if (userspace)
		rc = sgm_get_user_pages(transfer->sgm, start, cnt, !dir_to_dev);
	else
		rc = sgm_kernel_pages(transfer->sgm, start, cnt, !dir_to_dev);
	if (rc < 0) {
		pr_info(DRV_NAME "could not pin pages for transfer, rc = %d\n", rc);
		goto fail_mapper;
	}

	#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
		transfer->sgl_nents = dma_map_sg(&(lro->pci_dev->dev), transfer->sgm->sgl, transfer->sgm->mapped_pages, dir_to_dev ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	#else
		transfer->sgl_nents = pci_map_sg(lro->pci_dev, transfer->sgm->sgl, transfer->sgm->mapped_pages, dir_to_dev ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	#endif

	if(transfer->sgl_nents == 0) {
		pr_info(DRV_NAME "transfer->sgl_nents = 0\n");
		goto fail_pages;
	}

	transfer->desc_num = transfer->sgl_nents;

	/* allocate the coherent descriptor list the hardware will fetch */
	#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
		transfer->desc_virt = (struct xdma_desc *)dma_alloc_coherent(&(lro->pci_dev->dev), transfer->sgl_nents * sizeof(struct xdma_desc), &(transfer->desc_bus), GFP_ATOMIC);
	#else
		transfer->desc_virt = (struct xdma_desc *)pci_alloc_consistent(lro->pci_dev, transfer->sgl_nents * sizeof(struct xdma_desc), &(transfer->desc_bus));
	#endif

	if (!transfer->desc_virt) {
		pr_info(DRV_NAME "transfer->desc_virt = NULL\n");
		goto fail_unmap;
	}

	/* point the engine's SGDMA block at the first descriptor */
	w = cpu_to_le32(PCI_DMA_L(transfer->desc_bus));
	write_register(w, &engine->sgdma_regs->first_desc_lo);
	w = cpu_to_le32(PCI_DMA_H(transfer->desc_bus));
	write_register(w, &engine->sgdma_regs->first_desc_hi);
	write_register(0, &engine->sgdma_regs->first_desc_adjacent);

	/* chain one descriptor per scatterlist entry; the last one stops
	 * the engine and raises completion */
	desc_bus = transfer->desc_bus;
	for (i = 0; i < transfer->sgl_nents; i++) {
		desc_bus += sizeof(struct xdma_desc);

		if(i == (transfer->sgl_nents - 1)) {
			transfer->desc_virt[i].control = cpu_to_le32(XDMA_DESC_MAGIC | XDMA_DESC_STOP_BIT | XDMA_DESC_COMPLETED_BIT | XDMA_DESC_EOP_BIT);
			transfer->desc_virt[i].next_lo = 0;
			transfer->desc_virt[i].next_hi = 0;
		} else {
			transfer->desc_virt[i].control = XDMA_DESC_MAGIC;
			transfer->desc_virt[i].next_lo = cpu_to_le32(PCI_DMA_L(desc_bus));
			transfer->desc_virt[i].next_hi = cpu_to_le32(PCI_DMA_H(desc_bus));
		}
		transfer->desc_virt[i].bytes = cpu_to_le32(sg_dma_len(&transfer->sgm->sgl[i]));
		if(dir_to_dev == 1) {
			transfer->desc_virt[i].src_addr_lo = cpu_to_le32(PCI_DMA_L(sg_dma_address(&transfer->sgm->sgl[i])));
			transfer->desc_virt[i].src_addr_hi = cpu_to_le32(PCI_DMA_H(sg_dma_address(&transfer->sgm->sgl[i])));
		} else {
			transfer->desc_virt[i].dst_addr_lo = cpu_to_le32(PCI_DMA_L(sg_dma_address(&transfer->sgm->sgl[i])));
			transfer->desc_virt[i].dst_addr_hi = cpu_to_le32(PCI_DMA_H(sg_dma_address(&transfer->sgm->sgl[i])));
		}
	}

	/* initialize wait queue */
	init_waitqueue_head(&transfer->wq);

	return transfer;

fail_unmap:
	/* undo the DMA mapping created above */
	#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
		dma_unmap_sg(&(lro->pci_dev->dev), transfer->sgm->sgl, transfer->sgm->mapped_pages, dir_to_dev ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	#else
		pci_unmap_sg(lro->pci_dev, transfer->sgm->sgl, transfer->sgm->mapped_pages, dir_to_dev ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	#endif
fail_pages:
	/* unlock pinned user pages (nothing transferred: not dirty) */
	if (userspace)
		sgm_put_user_pages(transfer->sgm, 0);
	transfer->sgm->mapped_pages = 0;
fail_mapper:
	sg_destroy_mapper(transfer->sgm);
fail_transfer:
	kfree(transfer);
	return NULL;
}


/* transfer_monitor() - block until @transfer leaves SUBMITTED state,
 * then stop the engine: clear running, read-and-clear status, and write
 * bit 0 (RUN_STOP — presumably XDMA_CTRL_RUN_STOP, TODO confirm) to the
 * control W1C register. Returns 0. */
static int transfer_monitor(struct xdma_engine *engine, struct xdma_transfer *transfer)
{
	int rc = 0;
	u32 w = 1;

	wait_event(transfer->wq, transfer->state != TRANSFER_STATE_SUBMITTED);

	pr_info(DRV_NAME "Finally back! dma has completed!\n");

	engine->running = 0;
	engine_status_read(engine, 1);
	write_register(w, &engine->regs->control_w1c);

	return rc;
}


/* sm1_transfer_monitor() - like transfer_monitor() but for the paired
 * sm1 channel: waits on @transfer (queued on the rx side), then stops
 * BOTH engines by clearing running and writing bit 0 to each control
 * W1C register. Returns 0. */
static int sm1_transfer_monitor(struct xdma_engine *tx_engine, struct xdma_engine *rx_engine, struct xdma_transfer *transfer)
{
	int rc = 0;
	u32 w = 1;

	wait_event(transfer->wq, transfer->state != TRANSFER_STATE_SUBMITTED);

	tx_engine->running = 0;
	write_register(w, &tx_engine->regs->control_w1c);
	rx_engine->running = 0;
	write_register(w, &rx_engine->regs->control_w1c);

	return rc;
}


/* transfer_queue() - Queue a DMA transfer on the engine
 *
 * @engine DMA engine doing the transfer
 * @transfer DMA transfer submitted to the engine
 *
 * Marks the transfer SUBMITTED and starts the engine (engine_start()
 * takes the engine spinlock itself).
 *
 * Returns 0 on success, -1 when the engine is shutting down.
 */
static int transfer_queue(struct xdma_engine *engine, struct xdma_transfer *transfer)
{
	int rc = 0;

	BUG_ON(!engine);
	BUG_ON(!transfer);

	/* engine is being shutdown; do not accept new transfers */
	if (engine->shutdown & ENGINE_SHUTDOWN_REQUEST)
	{
		rc = -1;
		goto shutdown;
	}

	/* mark the transfer as submitted */
	transfer->state = TRANSFER_STATE_SUBMITTED;

	engine_start(engine, transfer);

shutdown:
	return rc;
}
/* fix: removed stray ';' after the function body (invalid in strict ISO C) */


/* sm1_transfer_queue() - queue a DMA transfer on an sm1 (channel 0)
 * engine: marks the transfer SUBMITTED and starts the engine via
 * sm1_engine_start() (which takes the engine spinlock itself).
 *
 * Returns 0 on success, -1 when the engine is shutting down.
 */
static int sm1_transfer_queue(struct xdma_engine *engine, struct xdma_transfer *transfer)
{
	int rc = 0;

	BUG_ON(!engine);
	BUG_ON(!transfer);
	BUG_ON(transfer->desc_num == 0);

	/* engine is being shutdown; do not accept new transfers */
	if (engine->shutdown & ENGINE_SHUTDOWN_REQUEST)
	{
		rc = -1;
		goto shutdown;
	}

	/* mark the transfer as submitted */
	transfer->state = TRANSFER_STATE_SUBMITTED;

	/* start engine */
	sm1_engine_start(engine, transfer);

shutdown:
	return rc;
}
/* fix: removed stray ';' after the function body (invalid in strict ISO C) */


/* transfer_data() - Run one DMA request, chunked into XDMA_TRANSFER_MAX_BYTES
 * sized transfers.
 *
 * @engine engine carrying the transfers (direction taken from the engine)
 * @transfer_addr start address of the buffer to transfer
 * @remaining number of bytes requested
 *
 * Returns the number of bytes transferred, or a negative error code
 * (-EIO on descriptor/transfer failure, -ERESTARTSYS when interrupted
 * while the transfer is still in flight).
 *
 * Fixed: the original assigned "rc = 0" immediately after transfer_monitor(),
 * masking its result and making the -ERESTARTSYS branch unreachable; the
 * monitor result is now honored.
 */
static ssize_t transfer_data(struct xdma_engine *engine, char *transfer_addr, ssize_t remaining)
{
	int rc;
	ssize_t res = 0;
	ssize_t done = 0;
	struct xdma_dev *lro;
	struct xdma_transfer *transfer;
	size_t transfer_len;

	BUG_ON(!engine);
	lro = engine->lro;
	BUG_ON(!lro);

	/* still good and anything left to transfer? */
	while ((res == 0) && (remaining > 0))
	{
		/* DMA transfer size, multiple if necessary */
		if (remaining > XDMA_TRANSFER_MAX_BYTES)
			transfer_len = XDMA_TRANSFER_MAX_BYTES;
		else
			transfer_len = remaining;

		/* build device-specific descriptor tables */
		transfer = transfer_create(engine, lro, transfer_addr, transfer_len, 0, engine->dir_to_dev, engine->non_incr_addr, 0, 1);

		if (!transfer)
		{
			remaining = 0;
			res = -EIO;
			break;
		}

		engine->transfer = transfer;

		/* last transfer for the given request? */
		if (transfer_len >= remaining)
		{
			transfer->last_in_request = 1;
			transfer->size_of_request = done + transfer_len;
		}

		/* let the device read from / write to the host */
		transfer_queue(engine, transfer);

		/* wait for completion; a non-zero rc means we were interrupted */
		rc = transfer_monitor(engine, transfer);

		/* transfer was taken off the engine? */
		if (transfer->state != TRANSFER_STATE_SUBMITTED)
		{
			/* transfer failed? */
			if (transfer->state != TRANSFER_STATE_COMPLETED)
			{
				pr_info(DRV_NAME "transfer %p failed\n", transfer);
				res = -EIO;
			}
			transfer_destroy(lro, transfer);
			transfer = NULL;
			/* interrupted by a signal / polling detected error */
		}
		else if (rc != 0)
		{
			/* transfer can still be in-flight */
			engine_status_read(engine, 0);
			read_interrupts(lro);

			res = -ERESTARTSYS;
		}

		/* If an error has occurred, clear counts tracking progress */
		if (res != 0)
		{
			transfer_len = 0;
			remaining = 0;
		}

		/* calculate the next transfer */
		transfer_addr += transfer_len;
		remaining -= transfer_len;
		done += transfer_len;
	}
	/* return error or else number of bytes */
	res = res ? res : done;

	return res;
}


/* sm1_dma() - Run a paired H2C/C2H DMA operation through the SM1 block.
 *
 * @lro the XDMA device
 * @tx_buf host buffer sent to the device (H2C, channel 0)
 * @rx_buf host buffer receiving device output (C2H, channel 0)
 * @inputLength number of bytes to send
 * @outputLength number of bytes expected back
 * @ssx_cmd command word written to user register 0x8004 before the transfer
 *
 * Returns bytes processed (loop progress counter) or a negative error code.
 *
 * Both engines are queued, then completion is awaited on the RX transfer only
 * (sm1_transfer_monitor stops both engines afterwards).
 *
 * NOTE(review): transfer_create() is called with inputLength/outputLength
 * rather than the chunked transfer_len; for requests larger than
 * XDMA_TRANSFER_MAX_BYTES the chunking bookkeeping and the descriptor size
 * disagree — confirm callers never exceed one chunk.
 */
static ssize_t sm1_dma(struct xdma_dev *lro, u8 *tx_buf, u8 *rx_buf, ssize_t inputLength, ssize_t outputLength, u32 ssx_cmd)
{
	ssize_t res = 0;
	ssize_t done = 0;
	struct xdma_engine *tx_engine, *rx_engine;
	size_t transfer_len;
	u32 *usr_regs;
	struct interrupt_regs *irq_regs;

	BUG_ON(!lro);
	tx_engine = lro->engine[0][DIR_H2C];
	rx_engine = lro->engine[0][DIR_C2H];
	BUG_ON(!tx_engine);
	BUG_ON(!rx_engine);

	// pr_info(DRV_NAME "%s tx_engine channel %d direction %d engine name %s (engine num %d)= 0x%p\n", tx_engine->name, tx_engine->channel, tx_engine->dir_to_dev, tx_engine->name, tx_engine->number_in_channel, tx_engine);
	// pr_info(DRV_NAME "%s rx_engine channel %d direction %d engine name %s (engine num %d)= 0x%p\n", rx_engine->name, rx_engine->channel, rx_engine->dir_to_dev, rx_engine->name, rx_engine->number_in_channel, rx_engine);

	usr_regs = (u32 *)(lro->bar[lro->user_bar_idx]);
	irq_regs = (struct interrupt_regs *)(lro->bar[lro->config_bar_idx] + XDMA_OFS_INT_CTRL);

	/* program the SM1 command register before starting the DMA pair */
	// pr_info(DRV_NAME "sm1_dma ssx_cmd %d \n", ssx_cmd);
	write_register(ssx_cmd, usr_regs + (0x00008004 >> 2));

	/* still good and anything left to transfer? */
	while ((res == 0) && (inputLength > 0))
	{
		/* DMA transfer size, multiple if necessary */
		if (inputLength > XDMA_TRANSFER_MAX_BYTES)
			transfer_len = XDMA_TRANSFER_MAX_BYTES;
		else
			transfer_len = inputLength;

		/* tell the SM1 block the input/output byte counts */
		// pr_info(DRV_NAME "sm1_dma inputLength = %ld", inputLength);
		// pr_info(DRV_NAME "sm1_dma outputLength = %ld", outputLength);
		write_register(inputLength, usr_regs + (0x00008010 >> 2));
		write_register(outputLength, usr_regs + (0x00008014 >> 2));
		

		/* build device-specific descriptor tables */
		tx_engine->transfer = transfer_create(tx_engine, lro, tx_buf, inputLength, 0, tx_engine->dir_to_dev, tx_engine->non_incr_addr, 0, 1);
		// pr_info(DRV_NAME "%s tx_engine channel %d direction %d transfer=0x%p.\n", tx_engine->name, tx_engine->channel, tx_engine->dir_to_dev, tx_engine->transfer);

		rx_engine->transfer = transfer_create(rx_engine, lro, rx_buf, outputLength, 0, rx_engine->dir_to_dev, rx_engine->non_incr_addr, 0, 1);
		// pr_info(DRV_NAME "%s rx_engine channel %d direction %d transfer=0x%p.\n", rx_engine->name, rx_engine->channel, rx_engine->dir_to_dev, rx_engine->transfer);

		if ((!tx_engine->transfer) || (!rx_engine->transfer)) {
			inputLength = 0;
			res = -EIO;
			break;
		}

		// transfer_dump(tx_engine->transfer);
		// transfer_dump(rx_engine->transfer);

		/* last transfer for the given request? */
		if (transfer_len >= inputLength) {
			tx_engine->transfer->last_in_request = 1;
			rx_engine->transfer->last_in_request = 1;
			tx_engine->transfer->size_of_request = done + transfer_len;
			rx_engine->transfer->size_of_request = done + transfer_len;
		}

		/* queue both directions, then wait on the RX side only */
		sm1_transfer_queue(tx_engine, tx_engine->transfer);  // 0920

		sm1_transfer_queue(rx_engine, rx_engine->transfer);	 // 0920

		res = sm1_transfer_monitor(tx_engine, rx_engine, rx_engine->transfer);

		/* transfer was taken off the engine? */
		if (rx_engine->transfer->state != TRANSFER_STATE_SUBMITTED)
		{
			/* transfer failed? */
			if (rx_engine->transfer->state != TRANSFER_STATE_COMPLETED) {
				pr_info(DRV_NAME "transfer %p failed\n", rx_engine->transfer);
				res = -EIO;
			}
			// pr_info(DRV_NAME "%s tx_engine channel %d direction %d transfer=0x%p completed.\n", tx_engine->name, tx_engine->channel, tx_engine->dir_to_dev, tx_engine->transfer);
			// pr_info(DRV_NAME "%s rx_engine channel %d direction %d transfer=0x%p completed.\n", rx_engine->name, rx_engine->channel, rx_engine->dir_to_dev, rx_engine->transfer);
			transfer_destroy(lro, tx_engine->transfer);
			transfer_destroy(lro, rx_engine->transfer);
			tx_engine->transfer = NULL;
			rx_engine->transfer = NULL;
			/* interrupted by a signal / polling detected error */
		}
		else if (res != 0)
		{
			/* transfer can still be in-flight */
			engine_status_read(tx_engine, 0);
			engine_status_read(rx_engine, 0);
			read_interrupts(lro);

			res = -ERESTARTSYS;
		}

		/* If an error has occurred, clear counts tracking progress */
		if (res != 0)
		{
			transfer_len = 0;
			inputLength = 0;
		}

		/* calculate the next transfer */
		tx_buf += transfer_len;
		rx_buf += transfer_len;
		inputLength -= transfer_len;
		done += transfer_len;
		
		// pr_info(DRV_NAME "remain=%lld, done=%lld\n", (s64)inputLength, (s64)done);
	}

	/* return error or else number of bytes */
	// pr_info(DRV_NAME "res = %x \n", (unsigned int)res);
	res = res ? res : done;

	return res;
}


/* char_sgdma_read_write() - common path for reads and writes on channel 1.
 *
 * @file open file; private_data holds the xdma_dev set up in open()
 * @buf buffer to transfer (callers in this file pass kernel buffers)
 * @count number of bytes requested
 * @dir_to_dev non-zero selects the H2C engine, zero the C2H engine
 *
 * Returns bytes transferred or a negative error code.
 */
static ssize_t char_sgdma_read_write(struct file *file, char __user *buf, size_t count, int dir_to_dev)
{
	struct xdma_dev *lro = (struct xdma_dev *)file->private_data;
	struct xdma_engine *engine;

	BUG_ON(!lro);
	BUG_ON(lro->magic != MAGIC_DEVICE);

	/* pick the engine matching the requested direction */
	engine = dir_to_dev ? lro->engine[1][DIR_H2C] : lro->engine[1][DIR_C2H];

	/* XXX detect non-supported directions XXX */
	BUG_ON(!engine);
	BUG_ON(engine->magic != MAGIC_ENGINE);

	/* data direction does not match engine? */
	if (dir_to_dev != engine->dir_to_dev)
	{
		if (dir_to_dev)
			pr_info(DRV_NAME "FAILURE: Cannot write to C2H engine.\n");
		else
			pr_info(DRV_NAME "FAILURE: Cannot read from H2C engine.\n");

		return -EINVAL;
	}

	return transfer_data(engine, (char *)buf, count);
}

static long decd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct xdma_dev *lro;
	struct decd_ioctl_ctx *nfhe_km;
	struct Write_Usr_ioctl_ctx *nfhe_write_usr;
	struct sm1_ioctl_ctx nfhe_sm1;
	
	u32 *usr_regs;
	int rc = 0, i;
	unsigned char *buf;
	u32 *kmbuf;
	unsigned char outbuf[128];
	unsigned int plaindata = 0;
	unsigned int value;

	// pr_info(DRV_NAME "Entering decd_ioctl \n");

	/* fetch device specific data stored earlier during open */
	lro = (struct xdma_dev *)file->private_data;
	BUG_ON(!lro);
	BUG_ON(lro->magic != MAGIC_DEVICE);

	//mutex_lock(&lro->decd_ioctl_mutex);

	usr_regs = (u32 *)(lro->bar[lro->user_bar_idx]);

	switch (cmd)
	{
	case IOCTL_RESET:
		write_register(0, usr_regs + (0x00008050 >> 2));
		write_register(1, usr_regs + (0x00008050 >> 2));
		break;

	case IOCTL_GETCARDNUM:
		if (  (rc = put_user(cardId, (unsigned int __user *)arg)) ) {
			printk("Failed to copy user space data to kernel space.\n");
			return -EINVAL;
		}

		break;

	case IOCTL_WRITE_USR:
		// pr_info(DRV_NAME "Entering IOCTL_WRITE_USR \n");
		nfhe_write_usr = kzalloc(sizeof(struct Write_Usr_ioctl_ctx), GFP_KERNEL);
		rc = copy_from_user(nfhe_write_usr, (struct Write_Usr_ioctl_ctx *)arg, sizeof(struct Write_Usr_ioctl_ctx));
		// pr_info(DRV_NAME "Input addr = %d, data = %d\n", nfhe_write_usr->addr, nfhe_write_usr->data);
		write_register(nfhe_write_usr->data, usr_regs + (nfhe_write_usr->addr >> 2));
		if (nfhe_write_usr)
			kfree(nfhe_write_usr);
		break;

	case IOCTL_FHE_GENKEY:
		// pr_info(DRV_NAME "Entering IOCTL_FHE_GENKEY \n");
		write_register(1, usr_regs + (0x00000000 >> 2));
		while ((read_register(usr_regs + (0x00000100 >> 2)) & 0x01) == 0x00);
		write_register(0, usr_regs + (0x00000000 >> 2));
		read_register(usr_regs + (0x00000100 >> 2));
		read_register(usr_regs + (0x00000100 >> 2));
		break;
	
	case IOCTL_NFHE_EXPORT_PUBKEY:
		// pr_info(DRV_NAME "Entering IOCTL_NFHE_EXPORT_PUBKEY \n");
		write_register(0, usr_regs + (0x00008008>>2));
		write_register(FHE_PUBKEY_BYTES, usr_regs + (0x00008024>>2));

		rc = char_sgdma_read_write(file, (char __user *)arg, FHE_PUBKEY_BYTES, 0);
		if(rc != FHE_PUBKEY_BYTES)
			return -EINVAL;

		break;

	case IOCTL_NFHE_IMPORT_PUBKEY:
		// pr_info(DRV_NAME "Entering IOCTL_NFHE_IMPORT_PUBKEY \n");
		write_register(0, usr_regs + (0x00008008>>2));
		write_register(4, usr_regs + (0x00000000>>2));

		rc = char_sgdma_read_write(file, (char __user *)arg, FHE_PUBKEY_BYTES, 1);
		if(rc != FHE_PUBKEY_BYTES)
			return -EINVAL;

		break;

	case IOCTL_FHE_ENC:
		// pr_info(DRV_NAME "Entering IOCTL_FHE_ENC \n");
		
		write_register(0b00000, usr_regs + (0x80FC>>2));
		write_register(16, usr_regs + (0x8310>>2));

		
		if ( (rc = get_user(plaindata, (unsigned int __user *)arg)) ) {
			printk("Failed to copy user space data to kernel space.\n");
			return -EINVAL;
		}

		pr_info(DRV_NAME "%u\n", plaindata);
		write_register(plaindata, usr_regs + (0x00000014>>2));
		write_register(0x0001, usr_regs + (0x00008008>>2));
		write_register(0x0008, usr_regs + (0x00000000>>2));
		// pr_info(DRV_NAME "before while\n");
		while ((read_register(usr_regs + (0x00000100>>2)) & 0x40) == 0x00);
		// pr_info(DRV_NAME "after while\n");
		write_register(0x0000, usr_regs + (0x00000000>>2));
		write_register(FHE_CYPHER_BYTES, usr_regs + (0x00008024>>2));

		rc = char_sgdma_read_write(file, (char __user *)arg, FHE_CYPHER_BYTES, 0);

		break;
	
	case IOCTL_FHE_DEC:
		
		// pr_info(DRV_NAME "Entering IOCTL_FHE_DEC \n");

		write_register(0b00000, usr_regs + (0x80FC>>2));
		write_register(16, usr_regs + (0x8310>>2));
		
		write_register(0x0001, usr_regs + (0x00008008>>2));		//Choose dma_1 mux channel

		rc = char_sgdma_read_write(file, (char __user *)arg, FHE_CYPHER_BYTES, 1);

		write_register(0x0001, usr_regs + (0x00008008>>2));
		write_register(0x0010, usr_regs + (0x00000000>>2));
		while ((read_register(usr_regs + (0x00000100>>2)) & 0x100) == 0);
		write_register(0x0000, usr_regs + (0x00000000>>2));

		value = read_register(usr_regs + (0x00000114>>2));
		pr_info(DRV_NAME "IOCTL_FHE_DEC %u\n", value);
		if (  (rc = put_user(value, (unsigned int __user *)arg)) ) {
			printk("Failed to copy user space data to kernel space.\n");
			return -EINVAL;
		}

		break;

	case IOCTL_DECD:
		// pr_info(DRV_NAME "Entering IOCTL_DECD \n");
		nfhe_km = kzalloc(sizeof(struct decd_ioctl_ctx), GFP_KERNEL);
		rc = copy_from_user(nfhe_km, (struct decd_ioctl_ctx *)arg, sizeof(struct decd_ioctl_ctx));

		kmbuf = kzalloc(max(nfhe_km->inLen, nfhe_km->outLen), GFP_KERNEL);
		if (!kmbuf)
		{
			pr_info(DRV_NAME "Failed to allocate kernel space for buffer.\n");
			return -EINVAL;
		}

		rc = copy_from_user(kmbuf, (u8 *)(nfhe_km->pDataBuf), nfhe_km->inLen);
		if (rc < 0)
		{
			pr_info(DRV_NAME "Failed to copy from user space 0x%lx\n", arg);
			return -EINVAL;
		}

		// pr_info(DRV_NAME "nfhe_km->inLen %ld\n", nfhe_km->inLen);
		// for (i = 0; i < nfhe_km->inLen / 4; i++)
		// 	pr_info(DRV_NAME "Input kmbuf[%d] = 0x%08x\n", i, kmbuf[i]);

		for (i = 0; i < nfhe_km->inLen / 4; i++)
			write_register(kmbuf[i], usr_regs + (0x00008038 >> 2));
		write_register(nfhe_km->inLen, usr_regs + (0x00008030 >> 2));

		while ((read_register(usr_regs + (0x00008044 >> 2)) & 0x1) == 0);

		// pr_info(DRV_NAME "nfhe_km->outLen %ld\n", nfhe_km->outLen);
		for (i = 0; i < nfhe_km->outLen / 4; i++)
		{
			kmbuf[i] = read_register(usr_regs + (0x0000803C >> 2));
			// pr_info(DRV_NAME "Output kmbuf[%d] = 0x%08x\n", i, kmbuf[i]);
		}

		rc = copy_to_user((unsigned char *)nfhe_km->pDataBuf, kmbuf, nfhe_km->outLen);
		if (rc < 0)
		{
			pr_info(DRV_NAME "Failed to copy to user space 0x%lx\n", arg);
			return -EINVAL;
		}

		if (kmbuf)
			kfree(kmbuf);

		if (nfhe_km)
			kfree(nfhe_km);

		break;

	case IOCTL_NFHE_DMA_SM1:
		// pr_info(DRV_NAME "Entering IOCTL_NFHE_DMA_SM1 \n");

		if ((rc = copy_from_user(&nfhe_sm1, (struct sm1_ioctl_ctx *)arg, sizeof(struct sm1_ioctl_ctx))) < 0)
		{ //??09/07/20, 1. should it call copy_from_user here?
			//2. nfhe_sm_ioctl -> sm1_ioctl_ctx ??
			dbg_init("Failed to copy user space data to kernel space.\n");
			return -EINVAL;
		}


		if ((rc = sm1_dma(lro, nfhe_sm1.inbuf, nfhe_sm1.outbuf, nfhe_sm1.inputLength, nfhe_sm1.outputLength, nfhe_sm1.ssx_cmd)) < 0)
		{
			dbg_init("Failed to copy to run sm1_dma 0x%lx\n", arg);
			return -EINVAL;
		}
		else
			dbg_init("Successed to copy to run sm1_dma 0x%lx\n", arg);
		break;


	case IOCTL_NFHE_TEST:
		// pr_info(DRV_NAME "Entering IOCTL_NFHE_TEST \n");
		nfhe_km = kzalloc(sizeof(struct decd_ioctl_ctx), GFP_KERNEL);
		rc = copy_from_user(nfhe_km, (struct decd_ioctl_ctx *)arg, sizeof(struct decd_ioctl_ctx));

		buf = kzalloc(max(nfhe_km->inLen, nfhe_km->outLen), GFP_KERNEL);
		if (!buf)
		{
			pr_info(DRV_NAME "Failed to allocate kernel space for buffer.\n");
			return -EINVAL;
		}

		rc = copy_from_user(buf, (unsigned char *)(nfhe_km->pDataBuf), nfhe_km->inLen);
		if (rc < 0)
		{
			pr_info(DRV_NAME "Failed to copy from user space 0x%lx\n", arg);
			return -EINVAL;
		}

		pr_info(DRV_NAME "nfhe_km->inLen 0x%lx\n", nfhe_km->inLen);
		for (i = 0; i < nfhe_km->inLen; i++)
		{
			pr_info(DRV_NAME "buf[%d] = 0x%x\n", i, buf[i]);
			outbuf[i] = buf[nfhe_km->inLen - 1 - i];
		}

		rc = copy_to_user((unsigned char *)nfhe_km->pDataBuf, outbuf, sizeof(struct decd_ioctl_ctx));
		if (rc < 0)
		{
			pr_info(DRV_NAME "Failed to copy to user space 0x%lx\n", arg);
			return -EINVAL;
		}

		if (buf)
			kfree(buf);

		if (nfhe_km)
			kfree(nfhe_km);

		break;

	default:
		// dbg_perf("Unsupported operation\n");
		rc = -EINVAL;
		break;
	}

	//mutex_unlock(&lro->decd_ioctl_mutex);
	// pr_info(DRV_NAME "decd_ioctl(), returns: %d\n", rc);
	return rc;
}

/* sg_write() -- Write to the device
 *
 * @buf userspace buffer
 * @count number of bytes in the userspace buffer
 * @pos byte-address in device
 */
/* char_sgdma_write() -- Write to the device
 *
 * @file open file; private_data holds the xdma_dev
 * @buf userspace buffer
 * @count number of bytes in the userspace buffer
 * @pos byte-address in device (unused)
 *
 * Copies the user buffer into kernel memory and runs an H2C DMA transfer.
 * Returns bytes written or a negative error code.
 *
 * Fixed: rv was declared int, truncating the ssize_t result of
 * char_sgdma_read_write(); memdup_user replaces the hand-rolled
 * kmalloc + copy_from_user sequence.
 */
static ssize_t char_sgdma_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t rv;
	char *kernel_buf;

	kernel_buf = memdup_user(buf, count);
	if (IS_ERR(kernel_buf))
		return PTR_ERR(kernel_buf);

	rv = char_sgdma_read_write(file, kernel_buf, count, 1);

	kfree(kernel_buf);

	return rv;
}

/* char_sgdma_read() - Read from the device
 *
 * @buf userspace buffer
 * @count number of bytes in the userspace buffer
 *
 * Iterate over the userspace buffer, taking at most 255 * PAGE_SIZE bytes for
 * each DMA transfer.
 *
 * For each transfer, get the user pages, build a sglist, map, build a
 * descriptor table, submit the transfer, wait for the interrupt handler
 * to wake us on completion, free the sglist and descriptors.
 */
/* char_sgdma_read() - Read from the device
 *
 * @file open file; private_data holds the xdma_dev
 * @buf userspace buffer
 * @count number of bytes requested
 * @pos byte-address in device (unused)
 *
 * Runs a C2H DMA transfer into a kernel bounce buffer, then copies the
 * result to userspace. Returns bytes read or a negative error code.
 *
 * Fixed: the original copied the full, possibly uninitialized kmalloc
 * buffer to userspace even when the DMA failed (kernel memory disclosure).
 * The buffer is now zero-initialized and only the bytes actually
 * transferred are copied, and only on success.
 */
static ssize_t char_sgdma_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
	ssize_t rv;
	char *kernel_buf;

	kernel_buf = kzalloc(count, GFP_KERNEL);
	if (!kernel_buf)
		return -ENOMEM;

	rv = char_sgdma_read_write(file, kernel_buf, count, 0);

	if (rv > 0 && copy_to_user(buf, kernel_buf, rv)) {
		kfree(kernel_buf);
		return -EFAULT;
	}

	kfree(kernel_buf);

	return rv;
}

/*
 * Called when the device goes from unused to used.
 */
static int char_sgdma_open(struct inode *inode, struct file *file)
{
	int rc = 0;
	struct xdma_dev *lro;

	/* pointer to containing structure of the character device inode */
	pr_info(DRV_NAME "Entering char_sgdma_open\n");
	lro = container_of(inode->i_cdev, struct xdma_dev, cdev);
	if (lro->magic != MAGIC_DEVICE) {
		pr_err("xcdev 0x%p inode 0x%lx magic mismatch 0x%lx\n", lro, inode->i_ino, lro->magic);
		return -EINVAL;
	}

	/* create a reference to our char device in the opened file */
	file->private_data = lro;

	// pr_info(DRV_NAME "char_sgdma_open(0x%p, 0x%p)\n", inode, file);

	/* AXI ST C2H? Set up RX ring buffer on host with a cyclic transfer */
	// if (engine->streaming && !engine->dir_to_dev)
	// 	rc = cyclic_transfer_setup(engine);
	return rc;
}

/*
 * Called when the device goes from used to unused.
 */
/*
 * char_sgdma_close() - called when the device goes from used to unused.
 *
 * Only validates the device reference; no per-open state is torn down.
 *
 * Fixed: removed the unused local that cast lro->engine (a 2D array of
 * engine pointers) to a single struct xdma_engine * — the cast was bogus
 * and the variable was never read.
 */
static int char_sgdma_close(struct inode *inode, struct file *file)
{
	struct xdma_dev *lro = (struct xdma_dev *)file->private_data;

	BUG_ON(!lro);
	BUG_ON(lro->magic != MAGIC_DEVICE);

	pr_info(DRV_NAME "char_sgdma_close(0x%p, 0x%p)\n", inode, file);

	return 0;
}

/* alloc_dev_instance() - allocate and initialize the per-device bookkeeping
 * structure and cross-link it with the PCI device.
 *
 * @pdev the probed PCI device
 *
 * Returns the new xdma_dev, or NULL on allocation failure.
 */
static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev)
{
	struct xdma_dev *lro;
	int i;

	BUG_ON(!pdev);

	/* allocate zeroed device book keeping structure */
	lro = kzalloc(sizeof(*lro), GFP_KERNEL);
	if (!lro)
	{
		pr_info(DRV_NAME "Could not kzalloc(xdma_dev).\n");
		return NULL;
	}

	lro->magic = MAGIC_DEVICE;
	/* BAR indices and IRQ line start out unknown */
	lro->config_bar_idx = -1;
	lro->user_bar_idx = -1;
	lro->bypass_bar_idx = -1;
	lro->irq_line = -1;

	/* link device -> driver data and driver data -> device */
	dev_set_drvdata(&pdev->dev, lro);
	lro->pci_dev = pdev;
	pr_info(DRV_NAME "probe() lro = 0x%p\n", lro);

	/* Set up data user IRQ data structures */
	for (i = 0; i < MAX_USER_IRQ; i++)
	{
		lro->user_irq[i].lro = lro;
		spin_lock_init(&lro->user_irq[i].events_lock);
		init_waitqueue_head(&lro->user_irq[i].events_wq);
	}

	return lro;
}

/* request_regions() - claim the device's PCI BAR regions.
 *
 * @lro per-device bookkeeping; got_regions / regions_in_use flags updated
 * @pdev the PCI device
 *
 * Returns 0 on success, otherwise the pci_request_regions() error.
 */
static int request_regions(struct xdma_dev *lro, struct pci_dev *pdev)
{
	int rc;

	BUG_ON(!lro);
	BUG_ON(!pdev);

	pr_info(DRV_NAME "pci_request_regions()\n");
	rc = pci_request_regions(pdev, DRV_NAME);
	if (!rc) {
		lro->got_regions = 1;
		return 0;
	}

	/* could not request all regions? */
	pr_info(DRV_NAME "pci_request_regions() = %d, device in use?\n", rc);
	/* assume device is in use so do not disable it later */
	lro->regions_in_use = 1;

	return rc;
}

/* set_dma_mask() - configure DMA addressing for the device.
 *
 * @pdev the PCI device
 *
 * Tries a 64-bit streaming DMA mask first (descriptors/coherent memory kept
 * at 32-bit), falls back to a full 32-bit mask, and returns -1 if neither is
 * accepted. Uses the dma_* API on kernels >= 6.5 where the pci_* DMA mask
 * wrappers were removed.
 *
 * Returns 0 on success, -1 when no usable mask could be set.
 */
static int set_dma_mask(struct pci_dev *pdev)
{
	int rc = 0;

	BUG_ON(!pdev);

	pr_info(DRV_NAME "sizeof(dma_addr_t) == %ld\n", sizeof(dma_addr_t));
	/* 64-bit addressing capability for XDMA? */

	#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
		if (!dma_set_mask(&(pdev->dev), DMA_BIT_MASK(64)))
		{
			/* query for DMA transfer */
			/* @see Documentation/DMA-mapping.txt */
			pr_info(DRV_NAME "pci_set_dma_mask()\n");
			/* use 64-bit DMA */
			pr_info(DRV_NAME "Using a 64-bit DMA mask.\n");
			/* use 32-bit DMA for descriptors */
			dma_set_coherent_mask(&(pdev->dev), DMA_BIT_MASK(32));
			/* use 64-bit DMA, 32-bit for consistent */
		}
		else if (!dma_set_mask(&(pdev->dev), DMA_BIT_MASK(32)))
		{
			pr_info(DRV_NAME "Could not set 64-bit DMA mask.\n");
			dma_set_coherent_mask(&(pdev->dev), DMA_BIT_MASK(32));
			/* use 32-bit DMA */
			pr_info(DRV_NAME "Using a 32-bit DMA mask.\n");
		}
		else
		{
			pr_info(DRV_NAME "No suitable DMA possible.\n");
			rc = -1;
		}
	#else
		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
		{
			/* query for DMA transfer */
			/* @see Documentation/DMA-mapping.txt */
			pr_info(DRV_NAME "pci_set_dma_mask()\n");
			/* use 64-bit DMA */
			pr_info(DRV_NAME "Using a 64-bit DMA mask.\n");
			/* use 32-bit DMA for descriptors */
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			/* use 64-bit DMA, 32-bit for consistent */
		}
		else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		{
			pr_info(DRV_NAME "Could not set 64-bit DMA mask.\n");
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			/* use 32-bit DMA */
			pr_info(DRV_NAME "Using a 32-bit DMA mask.\n");
		}
		else
		{
			pr_info(DRV_NAME "No suitable DMA possible.\n");
			rc = -1;
		}
	#endif


	

	return rc;
}

/* irq_teardown() - release the IRQ line if one was successfully requested
 * (irq_line stays -1 until irq_setup() records a granted line). */
static void irq_teardown(struct xdma_dev *lro)
{
	BUG_ON(!lro);

	if (lro->irq_line == -1)
		return;

	pr_info(DRV_NAME "Releasing IRQ#%d\n", lro->irq_line);
	free_irq(lro->irq_line, lro);
}

/* number of IRQs requested so far; indexes cdev_name for the IRQ label */
static int irqId = 0;

/* irq_setup() - program legacy-interrupt vectors and request the IRQ line.
 *
 * @lro per-device bookkeeping
 * @pdev the PCI device
 *
 * Returns 0 on success, otherwise the request_irq() error.
 *
 * Fixes over the original:
 *  - lro->irq_line is recorded only after request_irq() succeeds, so
 *    irq_teardown() can no longer free an IRQ that was never granted
 *  - the cdev_name index is bounded (the table has 10 entries while up to
 *    MAX_XDMA_DEVICES devices may probe) and only advances on success
 */
static int irq_setup(struct xdma_dev *lro, struct pci_dev *pdev)
{
	int rc = 0;
	u8 val;
	void *reg;
	u32 w;
	const char *irq_name;

	BUG_ON(!lro);

	pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &val);
	pr_info(DRV_NAME "Legacy Interrupt register value = %d\n", val);
	if (val > 1)
	{
		val--;
		w = (val << 24) | (val << 16) | (val << 8) | val;
		// Program IRQ Block Channel vactor and IRQ Block User vector with Legacy interrupt value
		reg = lro->bar[lro->config_bar_idx] + 0x2080; // IRQ user
		write_register(w, reg);
		write_register(w, reg + 0x4);
		write_register(w, reg + 0x8);
		write_register(w, reg + 0xC);
		reg = lro->bar[lro->config_bar_idx] + 0x20A0; // IRQ Block
		write_register(w, reg);
		write_register(w, reg + 0x4);
	}

	/* clamp the label index: cdev_name holds only 10 entries */
	irq_name = cdev_name[irqId < 10 ? irqId : 9];

	rc = request_irq(pdev->irq, xdma_isr, IRQF_SHARED, irq_name, lro);
	if (rc)
	{
		pr_info(DRV_NAME "Couldn't use IRQ#%d, rc=%d\n", pdev->irq, rc);
	}
	else
	{
		/* record the line only now, so irq_teardown is safe on failure */
		lro->irq_line = (int)pdev->irq;
		irqId++;
		pr_info(DRV_NAME "Using IRQ#%d with 0x%p\n", pdev->irq, lro);
	}

	return rc;
}

/* enable_credit_feature() - turn on the SGDMA credit mechanism by writing
 * the enable mask to the common register block. */
static void enable_credit_feature(struct xdma_dev *lro)
{
	struct sgdma_common_regs *reg;

	reg = (struct sgdma_common_regs *)(lro->bar[lro->config_bar_idx] + (0x6 * TARGET_SPACING));
	pr_info(DRV_NAME "credit_feature_enable addr = %p", &reg->credit_feature_enable);

	write_register(0xF0000U, &reg->credit_feature_enable);
}

/* get_engine_type() - read bit 15 of the engine identifier register;
 * returns 1 when set, 0 otherwise. */
static u32 get_engine_type(struct engine_regs *regs)
{
	u32 id;

	BUG_ON(!regs);

	id = read_register(&regs->identifier);
	if (id & 0x8000U)
		return 1;
	return 0;
}

/* get_engine_channel_id() - extract the channel ID (bits 11:8) from the
 * engine identifier register. */
static u32 get_engine_channel_id(struct engine_regs *regs)
{
	u32 id;

	BUG_ON(!regs);

	id = read_register(&regs->identifier) >> 8;

	return id & 0x0fU;
}

/* get_engine_id() - extract the engine ID (upper 16 bits) from the engine
 * identifier register. */
static u32 get_engine_id(struct engine_regs *regs)
{
	BUG_ON(!regs);

	return read_register(&regs->identifier) >> 16;
}

static void remove_engines(struct xdma_dev *lro)
{
	int channel;
	struct xdma_engine *engine;

	BUG_ON(!lro);

	/* iterate over channels */
	for (channel = 0; channel < XDMA_CHANNEL_NUM_MAX; channel++)
	{
		engine = lro->engine[channel][0];
		if (engine)
		{
			dbg_sg("Remove %s%d", engine->name, channel);
			engine_destroy(lro, engine);
			dbg_sg("%s%d removed", engine->name, channel);
		}

		engine = lro->engine[channel][1];
		if (engine)
		{
			dbg_sg("Remove %s%d", engine->name, channel);
			engine_destroy(lro, engine);
			dbg_sg("%s%d removed", engine->name, channel);
		}
	}
}

/* destroy_interfaces() - tear down the character-device interface:
 * sysfs device node, cdev, and the reserved chrdev region. */
static void destroy_interfaces(struct xdma_dev *lro)
{
	BUG_ON(!lro);
	BUG_ON(lro->magic != MAGIC_DEVICE);

	pr_info("Entering destroy_sg_char.\n");

	if (g_xdma_class)
		device_destroy(g_xdma_class, lro->cdevno);

	cdev_del(&lro->cdev);
	unregister_chrdev_region(lro->cdevno, 1);
}

/* probe_for_engine() - look for a DMA engine at its expected register
 * offset and, if the identifier matches, create it.
 *
 * @lro per-device bookkeeping
 * @dir_to_dev non-zero probes an H2C engine, zero a C2H engine
 * @channel channel index being probed
 *
 * Returns 0 when the engine is absent (not an error) or was created,
 * -1 when engine creation failed.
 */
static int probe_for_engine(struct xdma_dev *lro, int dir_to_dev, int channel)
{
	struct engine_regs *regs;
	int dir_from_dev = !dir_to_dev;
	int offset;
	u32 engine_id;
	u32 engine_id_expected;
	u32 channel_id;

	/* register offset for the engine:
	 * read channels at 0x0000, write channels at H2C_CHANNEL_OFFSET,
	 * channels spaced CHANNEL_SPACING apart */
	offset = (dir_from_dev * H2C_CHANNEL_OFFSET) + (channel * CHANNEL_SPACING);
	regs = lro->bar[lro->config_bar_idx] + offset;

	if (dir_to_dev)
	{
		pr_info(DRV_NAME "Probing for H2C %d engine at %p\n", channel, regs);
		engine_id_expected = XDMA_ID_H2C;
	}
	else
	{
		pr_info(DRV_NAME "Probing for C2H %d engine at %p\n", channel, regs);
		engine_id_expected = XDMA_ID_C2H;
	}

	engine_id = get_engine_id(regs);
	channel_id = get_engine_channel_id(regs);
	pr_info(DRV_NAME "engine ID = 0x%x\n", engine_id);
	pr_info(DRV_NAME "engine channel ID = 0x%x\n", channel_id);

	/* identifier mismatch means no engine here — not an error */
	if (engine_id != engine_id_expected)
	{
		pr_info(DRV_NAME "Incorrect engine ID - skipping\n");
		return 0;
	}

	if (channel_id != channel)
	{
		pr_info(DRV_NAME "Expected ch ID%d, read %d\n", channel, channel_id);
		return 0;
	}

	if (dir_to_dev)
		pr_info(DRV_NAME "Found H2C %d AXI engine at %p\n", channel, regs);
	else
		pr_info(DRV_NAME "Found C2H %d AXI engine at %p\n", channel, regs);

	/* allocate and initialize engine */
	lro->engine[channel][dir_from_dev] = engine_create(lro, offset, dir_to_dev, channel);
	if (lro->engine[channel][dir_from_dev])
		return 0;

	pr_info(DRV_NAME "Error creating channel\n");
	return -1;
}

/* probe_engines() - probe all H2C then all C2H engines on the device.
 *
 * @lro per-device bookkeeping
 *
 * Returns 0 on success, -1 on failure (after unwinding already-created
 * engines).
 *
 * Fixed: a C2H probe failure previously only broke out of its loop and the
 * function still returned 0 with no cleanup, silently reporting success;
 * both loops now share the same failure path.
 */
static int probe_engines(struct xdma_dev *lro)
{
	int channel;
	int rc = 0;

	BUG_ON(!lro);

	/* iterate over channels: H2C engines first */
	for (channel = 0; channel < XDMA_CHANNEL_NUM_MAX; channel++)
	{
		rc = probe_for_engine(lro, 1, channel);
		if (rc)
			goto fail;
	}

	/* then C2H engines */
	for (channel = 0; channel < XDMA_CHANNEL_NUM_MAX; channel++)
	{
		rc = probe_for_engine(lro, 0, channel);
		if (rc)
			goto fail;
	}

	return 0;

fail:
	pr_info(DRV_NAME "Engine probing failed - unwinding\n");
	remove_engines(lro);

	return -1;
}

/* enable_pcie_relaxed_ordering() - set the Relaxed Ordering enable bit in
 * the PCIe Device Control register. Kernels >= 3.5 provide the
 * pcie_capability_* helpers; older kernels locate the capability and
 * read-modify-write the register by hand. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
#else
static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	u16 v;
	int pos;

	pos = pci_pcie_cap(dev);
	if (pos > 0)
	{
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
		v |= PCI_EXP_DEVCTL_RELAX_EN;
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
	}
}
#endif

/* pcie_check_extended_tag() - if PCIe extended tags are disabled in the
 * Device Control register, patch the XDMA config register at offset
 * XDMA_OFS_CONFIG + 0x4C to limit tags (value 32 written to bits 15:8).
 *
 * @lro per-device bookkeeping; needs a known config BAR
 * @pdev the PCI device
 */
static void pcie_check_extended_tag(struct xdma_dev *lro, struct pci_dev *pdev)
{
	u16 cap;
	u32 v;
	void *__iomem reg;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
#else
	int pos;

	pos = pci_pcie_cap(pdev);
	if (pos > 0)
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &cap);
	else
	{
		pr_info("pdev 0x%p, unable to access pcie cap.\n", pdev);
		return;
	}
#endif

	if ((cap & PCI_EXP_DEVCTL_EXT_TAG))
		return;

	/* extended tag not enabled */
	pr_info("0x%p EXT_TAG disabled.\n", pdev);

	if (lro->config_bar_idx < 0)
	{
		pr_info("pdev 0x%p, config bar UNKNOWN.\n", pdev);
		return;
	}

	/* keep low byte, set tag count field (bits 15:8) to 32 */
	reg = lro->bar[lro->config_bar_idx] + XDMA_OFS_CONFIG + 0x4C;
	v = read_register(reg);
	v = (v & 0xFF) | (((u32)32) << 8);
	write_register(v, reg);
}

static int setup_chrdev(struct xdma_dev *lro)
{
	int result;

	//动态分配设备号
	result = alloc_chrdev_region(&lro->cdevno, 0, 1, cdev_name[cardId]);
	if (result < 0)
	{
		pr_info(DRV_NAME "Err:failed in alloc_chrdev_region!\n");
		return result;
	}

	device_create(g_xdma_class, &lro->pci_dev->dev, lro->cdevno, NULL, cdev_name[cardId]);

	cdev_init(&lro->cdev, &fileOps);
	lro->cdev.owner = THIS_MODULE;
	lro->cdev.ops = &fileOps; //Create Dev and file_operations Connected
	result = cdev_add(&lro->cdev, lro->cdevno, 1);

	cardId++;
	return result;
}

/* probe() - PCI probe: bring up one XDMA device.
 *
 * @pdev the matched PCI device
 * @id the matching table entry
 *
 * Returns 0 on success, a negative error code otherwise (goto-chain unwinds
 * whatever was already set up).
 *
 * Fixes over the original:
 *  - dead "rc = 0; if (rc == 0) return 0;" collapsed to a plain return
 *  - enable_credit_mp test uses logical && (the bitwise & only worked for
 *    the value 1; any other non-zero module-param value was ignored)
 *  - the failure path returns the real error code instead of a bare -1
 */
static int probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = 0;
	struct xdma_dev *lro = NULL;
	void *reg = NULL;
	u32 w;
	int stream;

	pr_info(DRV_NAME "probe(pdev = 0x%p, pci_id = 0x%p)\n", pdev, id);

	/* allocate zeroed device book keeping structure */
	lro = alloc_dev_instance(pdev);
	if (!lro)
	{
		pr_info("%s: OOM.\n", __func__);
		return -ENOMEM;
	}

	rc = pci_enable_device(pdev);
	if (rc)
	{
		pr_info(DRV_NAME "pci_enable_device() failed, rc = %d.\n", rc);
		goto free_alloc;
	}

	/* enable relaxed ordering */
	enable_pcie_relaxed_ordering(pdev);

	/* enable bus master capability */
	pr_info(DRV_NAME "pci_set_master()\n");
	pci_set_master(pdev);

	rc = request_regions(lro, pdev);
	if (rc)
		goto rel_region;

	rc = map_bars(lro, pdev);
	if (rc)
		goto unmap_bar;

	pcie_check_extended_tag(lro, pdev);

	rc = set_dma_mask(pdev);
	if (rc)
		goto unmap_bar;

	rc = irq_setup(lro, pdev);
	if (rc)
		goto disable_irq;

	/* enable credit system only in AXI-Stream mode */
	reg = lro->bar[lro->config_bar_idx];
	w = read_register(reg);
	stream = (w & 0x8000U) ? 1 : 0;
	if (enable_credit_mp && stream)
	{
		pr_info(DRV_NAME KERN_DEBUG "Design in Steaming mode enable Credit feature \n");
		enable_credit_feature(lro);
	}

	rc = probe_engines(lro);
	if (rc)
		goto rmv_engine;

	rc = setup_chrdev(lro);
	pr_info(DRV_NAME " setup_chrdev returns %x\n", rc);
	if (rc)
		goto rmv_interface;

	mutex_init(&lro->decd_ioctl_mutex);

	/* If not running in polled mode, enable engine interrupts */
	if (!poll_mode)
		//Modified by ZJH 2019/08/02
		channel_interrupts_enable(lro, 0xfffffffeUL);

	/* Flush writes */
	read_interrupts(lro);

	return 0;

rmv_interface:
	destroy_interfaces(lro);
rmv_engine:
	remove_engines(lro);
disable_irq:
	irq_teardown(lro);
unmap_bar:
	unmap_bars(lro, pdev);
rel_region:
	if (lro->got_regions)
	{
		pci_release_regions(pdev);
		lro->got_regions = 0;
	}

	if (!lro->regions_in_use)
		pci_disable_device(pdev);
free_alloc:
	kfree(lro);

	pr_info(DRV_NAME "probe() returning %d\n", rc);
	return rc;
}

/* remove() - PCI remove: tear down one XDMA device.
 *
 * @pdev the departing PCI device; its drvdata holds the xdma_dev
 *
 * Teardown order matters: interrupts are masked and flushed before the
 * engines and IRQ line are released, and BARs are unmapped before the PCI
 * regions are released.
 */
static void remove(struct pci_dev *pdev)
{
	struct xdma_dev *lro;

	dbg_sg("remove(0x%p)\n", pdev);
	if ((pdev == 0) || (dev_get_drvdata(&pdev->dev) == 0))
	{
		dbg_sg("remove(dev = 0x%p) pdev->dev.driver_data = 0x%p\n", pdev, dev_get_drvdata(&pdev->dev));
		return;
	}
	lro = (struct xdma_dev *)dev_get_drvdata(&pdev->dev);
	dbg_sg("remove(dev = 0x%p) where pdev->dev.driver_data = 0x%p\n", pdev, lro);
	if (lro->pci_dev != pdev)
	{
		dbg_sg("pdev->dev.driver_data->pci_dev(0x%lx) != pdev(0x%lx)\n", (unsigned long)lro->pci_dev, (unsigned long)pdev);
	}

	/* mask all channel interrupts and flush the write */
	channel_interrupts_disable(lro, ~0);

	read_interrupts(lro);

	remove_engines(lro);
	irq_teardown(lro);
	unmap_bars(lro, pdev);

	if (lro->got_regions)
	{
		pr_info(DRV_NAME KERN_DEBUG "pci_release_regions \n");
		pci_release_regions(pdev);
	}

	if (!lro->regions_in_use)
	{
		pr_info(DRV_NAME KERN_DEBUG "pci_disable_device \n");
		pci_disable_device(pdev);
	}

	destroy_interfaces(lro);

	kfree(lro);
}

/* xdma_init() - module init: create the device class and register the PCI
 * driver.
 *
 * Returns 0 on success, a negative/-1 error code otherwise.
 *
 * Fixed: a pci_register_driver() failure previously left the created class
 * behind (leaked until module unload crashed on a half-initialized state);
 * the class is now destroyed on that path.
 */
static int __init xdma_init(void)
{
	int rc = 0;
	int i;

	pr_info(DRV_NAME " v" DRV_MODULE_VERSION "\n");

	pr_info(DRV_NAME DRV_NAME " init()\n");
	pr_info(DRV_NAME DRV_NAME " built " __DATE__ " " __TIME__ "\n");

	#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
		g_xdma_class = class_create(DRV_NAME);
	#else
		g_xdma_class = class_create(THIS_MODULE, DRV_NAME);
	#endif

	if (IS_ERR(g_xdma_class))
	{
		pr_info(DRV_NAME DRV_NAME ": failed to create class");
		rc = -1;
		goto err_class;
	}

	rc = pci_register_driver(&pci_driver);
	pr_info(DRV_NAME " pci_register_driver returns %x\n", rc);
	if (rc)
	{
		/* unwind the class; nothing else was set up yet */
		class_destroy(g_xdma_class);
		g_xdma_class = NULL;
		goto err_class;
	}

	for (i = 0; i < MAX_XDMA_DEVICES; i++)
	{
		dev_present[i] = 0;
	}
err_class:
	return rc;
}

/* xdma_exit() - module exit: unregister the PCI driver and destroy the
 * device class created in xdma_init(). */
static void __exit xdma_exit(void)
{
	pr_info(DRV_NAME DRV_NAME " exit()\n");

	/* unregister this driver from the PCI bus driver */
	pci_unregister_driver(&pci_driver);

	if (!g_xdma_class)
		return;

	pr_info("class_destroy.\n");
	class_destroy(g_xdma_class);
}

module_init(xdma_init);
module_exit(xdma_exit);
