// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
 * Generated on: 2020/8/26
 * Function description: PCI MSI interrupt initialization
 */
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include "include/mach/pcie_msi.h"
#include "include/mach/pcie_core.h"
#include "include/mach/pcie_chip.h"

/* locks: read lock & write lock. */
static DEFINE_SPINLOCK(dbi_rlock); /* serializes DBI reads and the PERI_PCIE7_5116 bit toggle */
static DEFINE_SPINLOCK(dbi_wlock); /* serializes DBI writes (read path uses dbi_rlock) */
/* per-controller MSI "enhance" state: base irq, split point, pending irq */
static struct pcie_msi_enhance g_msi_enhance[PCIE_INDEX_MAX];
/* controller index used by the hardware-timer callback when no arg is passed */
static uint32_t g_current_index;

/* function declaration */
static int hal_pcie_msi_map(struct irq_domain *domain, uint32_t irq, irq_hw_number_t hwirq);
static int32_t hal_msi_setup_irqs(struct msi_controller *chip,
	struct pci_dev *pdev, int32_t nvec, int32_t type);
static void hal_msi_teardown_irq(struct msi_controller *chip, uint32_t irq);

/* irq-domain ops: .map hooks every newly created mapping up to g_msi_irq_chip */
static const struct irq_domain_ops g_msi_domain_ops = {
	.map = hal_pcie_msi_map,
};

/* MSI irq_chip built on the generic PCI MSI mask/unmask helpers */
static struct irq_chip g_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* MSI controller exported via hal_msi_get_msi_ctrl() */
static struct msi_controller g_msi_ctrl = {
	.setup_irqs = hal_msi_setup_irqs,
	.teardown_irq = hal_msi_teardown_irq,
};

/*
 * Read @size bytes (1, 2 or 4) from the controller's DBI space at @where.
 * The whole access is done under dbi_rlock with PERI_PCIE7_5116 bit 13 set
 * to select the DBI space; the bit is cleared again before returning.
 *
 * Returns 0 on success, -EIO on an unsupported access size.
 *
 * Fix: the original returned -EIO on the unknown-size branch while still
 * holding dbi_rlock (with irqs disabled) and with PCIE_BIT_13 left set,
 * deadlocking the next DBI access. All paths now unwind through the
 * common clear/unlock tail.
 */
static int hal_pcie_read_from_dbi(const struct pcie_info *info,
	int32_t where, int32_t size, uint32_t *value)
{
	uint32_t v;
	unsigned long flag = 0;
	int ret = 0;
	struct pcie_info *pcie_ctrl = NULL;

	pcie_get_ctrl_info(&pcie_ctrl);
	spin_lock_irqsave(&dbi_rlock, flag);
	/* select DBI space */
	set_reg_bit(pcie_ctrl[info->controller - 1].misc_base + PERI_PCIE7_5116, PCIE_BIT_13);

	/* 32-bit aligned read; sub-word sizes extract the addressed lane */
	v = (uint32_t)readl((void *)(pcie_ctrl[info->controller - 1].dbi_base + (where & (~0x3))));
	if (size == 4) {
		*value = v;
	} else if (size == 2) {
		*value = (v >> ((where & 0x3) << 3)) & 0xffff;
	} else if (size == 1) {
		*value = (v >> ((where & 0x3) << 3)) & 0xff;
	} else {
		pr_info("Unknown size for config read operation!\n");
		ret = -EIO; /* fall through so the bit is cleared and the lock dropped */
	}

	/* deselect DBI space */
	clear_reg_bit(pcie_ctrl[info->controller - 1].misc_base + PERI_PCIE7_5116, PCIE_BIT_13);
	spin_unlock_irqrestore(&dbi_rlock, flag);
	return ret;
}

/*
 * Read-modify-write @size bytes (1, 2 or 4) of the controller's DBI space
 * at @where. The current 32-bit word is fetched first, the addressed lane
 * is replaced by @value, and the word is written back under dbi_wlock with
 * the DBI space selected.
 *
 * Returns 0 on success, -EIO on a failed read-back or unsupported size.
 *
 * Fixes vs. original:
 *  - the unknown-size branch returned -EIO while holding dbi_wlock (irqs
 *    off) and with PCIE_BIT_13 still set; size is now validated before the
 *    lock is taken, so no unwind is needed;
 *  - mask literals are unsigned (0xffffU << 16 on a signed int is UB).
 */
static int hal_pcie_write_to_dbi(const struct pcie_info *info,
	int32_t where, int32_t size, uint32_t value)
{
	unsigned long flag;
	uint32_t org;
	uint32_t shift;
	struct pcie_info *pcie_ctrl = NULL;

	pcie_get_ctrl_info(&pcie_ctrl);
	if (hal_pcie_read_from_dbi(info, where, 4, &org)) {
		pr_info("Cannot read from dbi! 0x%x:0x%x:0x%x!\n", 0, 0, where);
		return -EIO;
	}

	/* validate before locking so the error exit needs no unwind */
	if (size != 4 && size != 2 && size != 1) {
		pr_info("Unknown size(%d) for read ops\n", size);
		return -EIO;
	}

	shift = (uint32_t)(where & 0x3) << 3;
	if (size == 4) {
		org = value;
	} else if (size == 2) {
		org &= ~(0xffffU << shift);
		org |= (value << shift);
	} else {
		org &= ~(0xffU << shift);
		org |= (value << shift);
	}

	spin_lock_irqsave(&dbi_wlock, flag);
	set_reg_bit(pcie_ctrl[info->controller - 1].misc_base + PERI_PCIE7_5116, PCIE_BIT_13);

	writel(org, (void *)(pcie_ctrl[info->controller - 1].dbi_base + (where & (~0x3))));

	/* switch between accessed spaces */
	clear_reg_bit(pcie_ctrl[info->controller - 1].misc_base + PERI_PCIE7_5116, PCIE_BIT_13);
	spin_unlock_irqrestore(&dbi_wlock, flag);
	return 0;
}

/*
 * irq_domain .map callback: attach the MSI irq_chip and simple-irq flow
 * handler to a freshly mapped virtual irq and make it requestable.
 * Always reports success; a chip-data failure is only logged.
 */
static int hal_pcie_msi_map(struct irq_domain *domain, uint32_t irq, irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &g_msi_irq_chip, handle_simple_irq);
	if (irq_set_chip_data(irq, domain->host_data) != 0)
		pr_info("Failed to set irq chip data.\n");
	irq_clear_status_flags(irq, IRQ_NOREQUEST);
	return 0;
}

/*
 * Disable one MSI vector: clear its bit in the 32-vector-wide
 * PCIE_MSI_INTR0_ENABLE register bank that contains it.
 */
static void hal_pcie_clear_msi_irq(const struct pcie_info *info, int irq)
{
	uint32_t enable = 0;
	uint32_t reg_off = (irq / BIT_MOVE_32) * PCIE_MSI_EP_SHIFT;
	uint32_t bit_pos = irq % BIT_MOVE_32;

	hal_pcie_read_from_dbi(info, PCIE_MSI_INTR0_ENABLE + reg_off, sizeof(uint32_t), &enable);
	enable &= ~(1 << bit_pos);
	hal_pcie_write_to_dbi(info, PCIE_MSI_INTR0_ENABLE + reg_off, sizeof(uint32_t), enable);
}

/*
 * Enable one MSI vector: set its bit in the matching
 * PCIE_MSI_INTR0_ENABLE bank, then pulse the same pattern through the
 * INTR0_MASK register (set, then clear) to avoid controller trouble.
 */
static void hal_pcie_set_msi_irq(const struct pcie_info *info, int irq)
{
	uint32_t enable = 0;
	uint32_t reg_off = (irq / BIT_MOVE_32) * PCIE_MSI_EP_SHIFT;
	uint32_t bit_pos = irq % BIT_MOVE_32;

	hal_pcie_read_from_dbi(info, PCIE_MSI_INTR0_ENABLE + reg_off, sizeof(uint32_t), &enable);
	enable |= 1 << bit_pos;
	hal_pcie_write_to_dbi(info, PCIE_MSI_INTR0_ENABLE + reg_off, sizeof(uint32_t), enable);

	/* avoid controller trouble: mask with the same pattern, then unmask */
	hal_pcie_write_to_dbi(info, PCIE_MSI_INTR0_MASK + reg_off, sizeof(uint32_t), enable);
	hal_pcie_write_to_dbi(info, PCIE_MSI_INTR0_MASK + reg_off, sizeof(uint32_t), 0);
}

/*
 * Tear down @nvec sub-vectors starting at hardware position @pos: detach
 * the msi_desc from each offset of @irq_base, disable the vector in the
 * MSI controller, then release the bitmap region.
 *
 * NOTE(review): if the kallsyms lookup fails the bitmap region is NOT
 * released either — presumably deliberate, since the vectors could not be
 * disabled; confirm against callers.
 */
static void hal_clear_irq_range(struct pcie_info *info,
	uint32_t irq_base, uint32_t nvec, uint32_t pos)
{
	uint32_t idx;
	pcie_irq_set_msi_desc_off set_desc_off =
		(void *)hw_kallsyms_lookup_name("irq_set_msi_desc_off");

	if (set_desc_off == NULL) {
		pr_info("Can not find symbol irq_set_msi_desc_off!\n");
		return;
	}

	for (idx = 0; idx < nvec; idx++) {
		set_desc_off(irq_base, idx, NULL);
		/* Disable corresponding interrupt on MSI controller */
		hal_pcie_clear_msi_irq(info, pos + idx);
	}
	bitmap_release_region(info->msi_irq_in_use, pos, order_base_2(nvec));
}

/*
 * Claim a power-of-two region of @nvec MSI vectors, attach @desc to each
 * sub-vector of the base virq, and enable the vectors in the controller.
 *
 * Returns the base virq on success, -ENOSPC on any failure; *@pos gets
 * the bitmap position found (may be <0 on allocation failure).
 *
 * Fixes vs. original:
 *  - if irq_find_mapping() failed, the freshly claimed bitmap region was
 *    never released (permanent vector leak);
 *  - on a mid-loop desc failure, the old code released only
 *    order_base_2(i) bits of an order_base_2(nvec)-sized region, leaking
 *    the remainder; the whole region is now released after undoing the
 *    sub-vectors already set up.
 */
static int hal_assign_irq(struct pcie_info *info, int nvec, struct msi_desc *desc, int *pos)
{
	int32_t pos0;
	int32_t irq;
	int32_t i;
	pcie_irq_set_msi_desc_off func_irq_set_msi_desc_off = NULL;

	func_irq_set_msi_desc_off = (void *)hw_kallsyms_lookup_name("irq_set_msi_desc_off");
	if (func_irq_set_msi_desc_off == NULL) {
		pr_info("Can not find symbol irq_set_msi_desc_off!\n");
		return -ENOSPC;
	}

	pos0 = bitmap_find_free_region(info->msi_irq_in_use, MAX_MSI_IRQS, order_base_2(nvec));
	*pos = pos0;
	if (pos0 < 0)
		return -ENOSPC;

	irq = irq_find_mapping(info->pcie_irq_domain, pos0);
	if (!irq) {
		/* give the claimed region back, or it leaks forever */
		bitmap_release_region(info->msi_irq_in_use, pos0, order_base_2(nvec));
		return -ENOSPC;
	}

	/* enable sub-interrupt */
	for (i = 0; i < nvec; i++) {
		if (func_irq_set_msi_desc_off(irq, i, desc) != 0) {
			/* undo the i sub-vectors already set up, then free the region */
			while (--i >= 0) {
				func_irq_set_msi_desc_off(irq, i, NULL);
				hal_pcie_clear_msi_irq(info, pos0 + i);
			}
			bitmap_release_region(info->msi_irq_in_use, pos0, order_base_2(nvec));
			return -ENOSPC;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		hal_pcie_set_msi_irq(info, pos0 + i);
	}

	desc->nvec_used = nvec;
	desc->msi_attrib.multiple = order_base_2(nvec);
	pr_info("assign irq: %d, pos=%d\n", irq, pos0);
	return irq;
}

/*
 * msi_controller .setup_irqs callback: allocate @nvec MSI vectors for
 * @pdev and program its MSI capability with the doorbell address/data.
 *
 * Returns 0 on success, -EINVAL for MSI-X requests, -ENXIO when no
 * controller matches the device's bus, or a negative error from
 * hal_assign_irq().
 */
static int32_t hal_msi_setup_irqs(struct msi_controller *chip,
	struct pci_dev *pdev, int32_t nvec, int32_t type)
{
	struct pcie_info *info = NULL;
	struct msi_desc *desc = NULL;
	int32_t pos = 0;
	struct msi_msg msg = {0};
	uint64_t msi_target;
	int32_t irq;

	pr_info("msi_setup_irqs nvec:%d, type:%d\n", nvec, type);

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	info = bus_to_info(pdev->bus->number);
	if (info == NULL) {
		pr_info("Cannot find corresponding controller for appointed device!\n");
		return -ENXIO;
	}

	/* adopt the controller's domain if the device has none yet */
	if (pdev->dev.msi_domain == NULL)
		pdev->dev.msi_domain = info->pcie_irq_domain;

	/* multi-message MSI uses a single desc covering all vectors */
	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
	irq = hal_assign_irq(info, nvec, desc, &pos);
	if (irq < 0)
		return irq;

	/* doorbell address is the physical address of the controller's msi_data
	 * page (programmed into PCIE_MSI_ADDR_* at init); data is the bitmap
	 * position, which the status-register dispatch maps back to a virq */
	msi_target = virt_to_phys((void *)info->msi_data);
	msg.address_lo = (uint32_t)(msi_target & BITMASK_LOW32);
	msg.address_hi = (uint32_t)((msi_target >> BIT_MOVE_32) & BITMASK_LOW32);
	msg.data = pos;

	pr_info("msg.address_lo =%#x msg.data=%u\n", msg.address_lo, msg.data);
	pci_write_msi_msg(irq, &msg);
	return 0;
}

/*
 * msi_controller .teardown_irq callback: release a single MSI vector.
 *
 * Fix: the original dereferenced irq_get_irq_data(), its msi_desc and
 * bus_to_info() unconditionally; any of them can be NULL (bus_to_info is
 * NULL-checked everywhere else in this file), so bail out instead of
 * oopsing on a stale or foreign irq.
 */
static void hal_msi_teardown_irq(struct msi_controller *chip, uint32_t irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = NULL;
	struct pci_dev *dev = NULL;
	struct pcie_info *info = NULL;

	if (data == NULL)
		return;

	msi = irq_data_get_msi_desc(data);
	if (msi == NULL)
		return;

	dev = msi_desc_to_pci_dev(msi);
	info = bus_to_info(dev->bus->number);
	if (info == NULL)
		return;

	hal_clear_irq_range(info, irq, 1, data->hwirq);
}

/*
 * Read the "interrupts" property of @np and fill @irq_map with the
 * physical interrupt numbers (big-endian property value + IRQ_OFFSET),
 * capped at @map_length entries. *@map_cnt receives the count written.
 * Returns 0 on success, -EIO when the property is absent.
 */
static uint32_t hal_msi_get_irq_map(struct device_node *np,
	uint32_t *irq_map, uint32_t map_length, uint32_t *map_cnt)
{
	uint32_t prop_len = 0;
	uint32_t cnt;
	uint32_t i;
	struct irq_np *entry = (struct irq_np *)of_get_property(np, "interrupts", &prop_len);

	if (entry == NULL) {
		pr_info("get [interrupts] fail\n");
		return -EIO;
	}

	cnt = prop_len / sizeof(struct irq_np);
	if (cnt > map_length)
		cnt = map_length;

	for (i = 0; i < cnt; i++)
		irq_map[i] = be32_to_cpu(entry[i].irq) + IRQ_OFFSET;

	*map_cnt = cnt;

	return 0;
}

/*
 * Translate the physical interrupt number @phy_num into a logical (kernel)
 * irq number via the "hw,sd51xx-irqs" device-tree node. *@irq_num is set
 * to the mapped number, or 0 when @phy_num has no mapping.
 * Returns 0 on success, -EIO / map-read error otherwise.
 */
uint32_t hal_msi_get_irq_num(uint32_t phy_num, uint32_t *irq_num)
{
	uint32_t ret;
	uint32_t map_cnt = 0;
	uint32_t irq_map[MAX_IRQS] = {0};
	uint32_t logical = INVALID_IRQ;
	uint32_t idx;
	struct device_node *np = of_find_compatible_node(NULL, NULL, "hw,sd51xx-irqs");

	if (np == NULL) {
		pr_info("get [hw,sd51xx-irqs] fail\n");
		return -EIO;
	}

	ret = hal_msi_get_irq_map(np, irq_map, MAX_IRQS, &map_cnt);
	if (ret != HW_RET_SUCCESS) {
		pr_info("hal_msi_get_irq_map fail\n");
		return ret;
	}

	for (idx = 0; idx < map_cnt; idx++) {
		if (irq_map[idx] == phy_num) {
			logical = irq_of_parse_and_map(np, idx);
			break;
		}
	}

	if (logical == INVALID_IRQ)
		logical = 0;
	*irq_num = logical;
	pr_info("pcie msi irp_num= %d !\n", logical);

	return 0;
}

/* hardware timer */
void hal_msi_hardware_proc(void *args)
{
	uint32_t index;
	uint32_t irq;
	uint32_t basic_irq;

	index = g_current_index;
	irq = g_msi_enhance[index].wait_proc_irq;
	basic_irq = g_msi_enhance[index].basic_irq;

	if (args != NULL) {
		index = *(uint32_t *)args;
		kfree(args);
	}

	/* verify the validity of irq */
	if ((irq < basic_irq) || (irq >= (basic_irq + MAX_MSI_IRQS)))
		return;

	generic_handle_irq(irq);
	g_msi_enhance[index].wait_proc_irq = 0;
}

#define HAL_MSI_TIMER_MAX   8

/* hardware timer */
/*
 * Dispatch an MSI irq either inline or deferred through a one-shot 1us
 * hardware timer, depending on the per-controller "split point": irqs at
 * or below msi_split_irq are handled in the current context, higher ones
 * are parked in g_msi_enhance[] and re-dispatched from the timer callback
 * (hal_msi_hardware_proc), presumably so they run on a different core —
 * TODO confirm against the timer driver.
 */
void hal_msi_affinity_enhance(struct pcie_info *info, uint32_t irq)
{
	uint32_t ret;
	uint32_t timer_id;
	uint32_t *index;
	uint32_t pcie_index;
	struct dev_drv_func *dev_fun;
	struct hardware_timer_attr st_attr = {
		.enable = 1,                           /* enable hardware timer */
		.period = 0,                           /* single mode, start once */
		.usec = 1,                             /* delay 1us */
		.pf_call_back = hal_msi_hardware_proc, /* call back */
	};

	/*
	* 1. enabled pcie msi feature, devide the cores
	* 2. base on the separation point solution, deploy the separation points on different cores
	*/
	if ((info->controller >= PCIE_CTRL_TYPE_BUTT) ||
	    (info->controller == PCIE_CTRL_TYPE_NONE))
		return;

	/* below the split point (or feature off): handle inline */
	pcie_index = info->controller - PCIE_CTRL_TYPE_0;
	if ((!pcie_is_support_msi_enhance(pcie_index)) ||
	    (irq <= g_msi_enhance[pcie_index].msi_split_irq)) {
		generic_handle_irq(irq);
		return;
	}

	timer_id = 0;
	/* check the interface */
	dev_fun = get_dev_drv_func();
	if ((dev_fun == NULL) || (dev_fun->set_hardware_time == NULL)) {
		pr_err("<%s: %d>g_dev_drv_hook or set_hardware_time is NULL\n",
			__FILE__, __LINE__);
		return;
	}

	/* park the irq; the timer callback reads these back */
	g_current_index = pcie_index;
	g_msi_enhance[pcie_index].wait_proc_irq = irq;

	/* index ownership passes to the callback, which kfrees it */
	index = kmalloc(sizeof(uint32_t), GFP_ATOMIC);
	if (index == NULL)
		return;
	*index = g_current_index;
	st_attr.para = (void *)index;

	ret = dev_fun->set_hardware_time(&st_attr, &timer_id);
	if (ret != 0) {
		pr_info("PCIE start hardware time fail, ret:%#x\n", ret);
		kfree(st_attr.para); /* callback will not run, reclaim the index */
	}

	/* all timers in use: disable every slot (ids start from 1) —
	 * NOTE(review): set_hardware_time may write back into &i; confirm */
	if (timer_id == HAL_MSI_TIMER_MAX) {
		uint32_t i;

		st_attr.enable = 0;
		for (i = 1; i <= HAL_MSI_TIMER_MAX; i++) /* timer_id start from 1 */
			dev_fun->set_hardware_time(&st_attr, &i);
	}
}

/* pcie msi call back, queries and distributes sub-interrupt status */
/* pcie msi call back, queries and distributes sub-interrupt status */
irqreturn_t hal_handle_msi_irq(int msi_irq, void *data)
{
	/* val is unsigned long (zero-initialized) so find_next_bit can scan it;
	 * only the low 32 bits are filled by the uint32_t* DBI read */
	unsigned long val = 0;
	uint32_t pos;
	struct pcie_info *info = (struct pcie_info *)data;

	hal_pcie_read_from_dbi(info, PCIE_MSI_INTR0_STATUS, sizeof(uint32_t), (uint32_t *)&val);
	if (val == 0)
		return IRQ_NONE; /* not ours — the line is IRQF_SHARED */

	/* walk every pending bit: ack it in the status register (write-1-to-clear
	 * before dispatch, so a re-trigger during handling is not lost), then
	 * route the mapped virq through the affinity-enhance path */
	pos = 0;
	while ((pos = find_next_bit(&val, BIT_MOVE_32, pos)) != BIT_MOVE_32) {
		int irq = irq_find_mapping(info->pcie_irq_domain, pos);

		hal_pcie_write_to_dbi(info, PCIE_MSI_INTR0_STATUS, sizeof(uint32_t), 1 << pos);
		pr_debug("irq=%d, pos=%d\n", irq, pos);
		hal_msi_affinity_enhance(info, irq);
		pos++;
	}

	return IRQ_HANDLED;
}

/* init pcie interrupt, enable msi, disable INTx */
/*
 * Program the MSI hardware for one controller: allocate the doorbell
 * page, write its physical address into PCIE_MSI_ADDR_{LO,HI}, disable
 * INTx, enable MSI, and record the controller's base virq for the
 * affinity-enhance feature.
 *
 * Fix: __get_free_pages() can fail; the original memset() a NULL page
 * and programmed address 0 into the controller. Bail out on OOM instead.
 */
static void hal_pcie_init_msi_hardware_irq(struct pcie_info *info)
{
	uint64_t msi_target;
	uint32_t pcie_index;
	uint32_t val = 0;

	if ((info->controller >= PCIE_CTRL_TYPE_BUTT) ||
	    (info->controller == PCIE_CTRL_TYPE_NONE))
		return;

	info->msi_data = __get_free_pages(GFP_KERNEL, 0);
	if (info->msi_data == 0) {
		pr_info("[MSI]failed to allocate msi doorbell page\n");
		return;
	}
	msi_target = virt_to_phys((void *)info->msi_data);
	pr_info("[MSI]msi_target 0x%llx, msi_data 0x%lx\n", msi_target, info->msi_data);
	(void)memset((void *)info->msi_data, 0, PAGE_SIZE);

	/* program the msi_data */
	hal_pcie_write_to_dbi(info, PCIE_MSI_ADDR_LO, sizeof(uint32_t),
		(uint32_t)(msi_target & BITMASK_LOW32));
	hal_pcie_write_to_dbi(info, PCIE_MSI_ADDR_HI, sizeof(uint32_t),
		(uint32_t)((msi_target >> BIT_MOVE_32) & BITMASK_LOW32));

	/* the pcie only support INTx or MSI interrupt mode */
	/* disable INTx */
	hal_pcie_read_from_dbi(info, PCIE_CFG_HDR1, sizeof(uint32_t), &val);
	val |= 1 << PCIE_MSI_INTX_ENABLE;
	hal_pcie_write_to_dbi(info, PCIE_CFG_HDR1, sizeof(uint32_t), val);

	/* enable MSI */
	hal_pcie_read_from_dbi(info, PCIE_MSI_CAP0, sizeof(uint32_t), &val);
	val |= 1 << PCIE_MSI_SMI_ENABLE;
	hal_pcie_write_to_dbi(info, PCIE_MSI_CAP0, sizeof(uint32_t), val);

	/* get the basic msi irq number that controls the pcie */
	pcie_index = info->controller - PCIE_CTRL_TYPE_0;
	if (g_msi_enhance[pcie_index].basic_irq == 0) {
		g_msi_enhance[pcie_index].basic_irq = irq_find_mapping(info->pcie_irq_domain, 0);
		/* the split point is the largest and no core is divided by default */
		g_msi_enhance[pcie_index].msi_split_irq =
			g_msi_enhance[pcie_index].basic_irq + MAX_MSI_IRQS;
		pr_info("PCIE%u msi irq num start %d!\n",
			pcie_index, g_msi_enhance[pcie_index].basic_irq);
	}
}

/* pcie msi configuration init entry */
/*
 * pcie msi configuration init entry: create a linear irq domain of
 * MAX_MSI_IRQS vectors, pre-create all mappings, resolve and request the
 * top-level MSI line, then program the MSI hardware.
 *
 * Returns the created irq domain, or NULL on failure.
 *
 * Fix: every failure path after irq_domain_add_linear() leaked the
 * domain and its MAX_MSI_IRQS mappings; a goto-based unwind now disposes
 * the mappings and removes the domain, leaving info->pcie_irq_domain
 * NULL so callers do not see a dangling pointer.
 */
struct irq_domain *hal_pcie_init_msi_irq(struct pcie_info *info)
{
	uint32_t i;
	uint32_t ret;
	int rc;
	/*
	 * this is module entry, and check the validity of
	 * input paras. other inner interfaces will not check
	 */
	if (info == NULL)
		return NULL;

	info->pcie_irq_domain = irq_domain_add_linear(NULL, MAX_MSI_IRQS,
		&g_msi_domain_ops, &g_msi_ctrl);
	if (info->pcie_irq_domain == NULL) {
		pr_info("irq domain init failed\n");
		return NULL;
	}

	for (i = 0; i < MAX_MSI_IRQS; i++)
		irq_create_mapping(info->pcie_irq_domain, i);

	/* get kernel interrupt id based on physical interrupt id */
	info->msi_irq = INVALID_IRQ;
	ret = hal_msi_get_irq_num(info->hardware_irq, &(info->msi_irq));
	if (ret != HW_RET_SUCCESS)
		goto err_free_domain;

	rc = snprintf(info->irq_name, sizeof(info->irq_name), "pcie_msi:%u", info->hardware_irq);
	if (rc <= 0)
		goto err_free_domain;

	ret = request_irq(info->msi_irq, hal_handle_msi_irq, IRQF_SHARED, info->irq_name, info);
	if (ret) {
		pr_info("failed to request msi irq(%d), ret=%d\n", info->msi_irq, ret);
		goto err_free_domain;
	}

	/* init hardware control */
	hal_pcie_init_msi_hardware_irq(info);
	pr_info("PCIE: init MSI interrupt success!\n");
	return info->pcie_irq_domain;

err_free_domain:
	/* dispose the pre-created mappings before removing the domain */
	for (i = 0; i < MAX_MSI_IRQS; i++)
		irq_dispose_mapping(irq_find_mapping(info->pcie_irq_domain, i));
	irq_domain_remove(info->pcie_irq_domain);
	info->pcie_irq_domain = NULL;
	return NULL;
}

/* Accessor for the file-scope MSI controller descriptor (g_msi_ctrl). */
struct msi_controller *hal_msi_get_msi_ctrl(void)
{
	return &g_msi_ctrl;
}

/* get pcie index id based on proc file name */
/*
 * Map a proc file back to its PCIe controller index by comparing the
 * dentry name against the per-controller proc file names.
 * Returns PCIE_INDEX_MAX when the name matches neither file.
 */
uint32_t pcie_msi_enhance_get_index(struct file *file)
{
	const char *name = file_dentry(file)->d_iname;

	if (strcmp(PCIE0_MSI_ENHANCE_PROC_FILE, name) == 0)
		return PCIE_INDEX_0;

	if (strcmp(PCIE1_MSI_ENHANCE_PROC_FILE, name) == 0)
		return PCIE_INDEX_1;

	return PCIE_INDEX_MAX;
}

/* proc .open hook: no per-open state to set up */
int32_t pcie_msi_enhance_proc_open(struct inode *inode, struct file *file)
{
	return 0;
}

/* proc .release hook: nothing to tear down */
int32_t pcie_msi_enhance_proc_release(struct inode *inode, struct file *file)
{
	return 0;
}

/* write into proc file, core division interrupt split point */
int32_t pcie_msi_enhance_proc_write(struct file *file,
	const char __user *user_buffer, size_t buf_len, loff_t *fpos)
{
	int len;
	char ker_buf[PCIE_MSI_ENHANCE_PROC_LEN] = {0};

	len = (buf_len > PCIE_MSI_ENHANCE_PROC_LEN) ? PCIE_MSI_ENHANCE_PROC_LEN : buf_len;
	if ((user_buffer != NULL) && (copy_from_user(ker_buf, user_buffer, len) == 0)) {
		unsigned long msi_split_irq;
		uint32_t pcie_index;
		int ret;

		ret = kstrtoul(ker_buf, 10, &msi_split_irq);
		pcie_index = pcie_msi_enhance_get_index(file);
		if (pcie_index >= PCIE_INDEX_MAX) {
			pr_info("write msi_split_irq index err, (%u)\n", pcie_index);
			return -EFAULT;
		}

		g_msi_enhance[pcie_index].msi_split_irq =
			g_msi_enhance[pcie_index].basic_irq + msi_split_irq;
		pr_info("write msi_split_irq value(%lu)\n", msi_split_irq);
		return buf_len;
	}

	return -EFAULT;
}

/* read proc file, core division interrupt split point */
ssize_t pcie_msi_enhance_proc_read(struct file *file,
	char __user *user_buffer, size_t buflen, loff_t *fpos)
{
	ssize_t ret;
	uint32_t pcie_index;
	/* output cfg for now */
	int32_t rc;
	char ker_buf[PCIE_MSI_ENHANCE_PROC_LEN] = {0};

	pcie_index = pcie_msi_enhance_get_index(file);
	if (pcie_index >= PCIE_INDEX_MAX) {
		pr_info("write msi_split_irq index err, (%u)\n", pcie_index);
		return -EFAULT;
	}

	rc = snprintf(ker_buf, sizeof(ker_buf), "%u\n",
		g_msi_enhance[pcie_index].msi_split_irq - g_msi_enhance[pcie_index].basic_irq);
	if (rc <= 0)
		return -EFAULT;

	ret = simple_read_from_buffer(user_buffer, buflen, fpos, ker_buf, sizeof(buflen));

	return ret;
}

/* proc_ops table for the per-controller msi-enhance proc files */
struct proc_ops config_proc_op = {
	.proc_open = pcie_msi_enhance_proc_open,
	.proc_read = pcie_msi_enhance_proc_read,
	.proc_release = pcie_msi_enhance_proc_release,
	.proc_write = pcie_msi_enhance_proc_write,
};

/* create proc file of pcie msi enhance feature */
void pcie_msi_create_enhance_proc(void)
{
	struct proc_dir_entry *file;
	struct proc_dir_entry *dir;

	/* make dir */
	if (pcie_is_support_msi_enhance(PCIE_INDEX_0) ||
	    pcie_is_support_msi_enhance(PCIE_INDEX_1)) {
		dir = proc_mkdir(PCIE0_MSI_ENHANCE_PROC, NULL);
		if (dir == NULL) {
			pr_info("pcie msi create proc dir fail\n");
			return;
		}
	}

	/* create pcie0 msi enhance file */
	if (pcie_is_support_msi_enhance(PCIE_INDEX_0))  {
		file = proc_create_data(PCIE0_MSI_ENHANCE_PROC_FILE,
			0640, dir, &config_proc_op, NULL);
		if (file == NULL) {
			pr_info("pcie msi create proc file %s fail\n",
				PCIE0_MSI_ENHANCE_PROC_FILE);
			return;
		}
	}

	/* create pcie1 msi enhance file */
	if (pcie_is_support_msi_enhance(PCIE_INDEX_1))  {
		file = proc_create_data(PCIE1_MSI_ENHANCE_PROC_FILE,
			0640, dir, &config_proc_op, NULL);
		if (file == NULL) {
			pr_info("pcie msi create proc file %s fail\n",
				PCIE1_MSI_ENHANCE_PROC_FILE);
		}
	}
}
