/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2021. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2019-10-15
 */

#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/rtc.h>

#include "vpc_interface.h"
#include "devdrv_interface.h"
#include "devdrv_util.h"
#include "devdrv_dma.h"
#include "devdrv_pci.h"
#include "devdrv_ctrl.h"
#include "devdrv_vpc.h"

/* Resolve the per-device DMA context through the top-half PCI controller.
 * Returns NULL (with a log) when either lookup step fails. */
STATIC struct devdrv_dma_dev *devdrv_get_dma_dev_by_devid(u32 dev_id)
{
    struct devdrv_pci_ctrl *pci_ctrl = devdrv_get_top_half_pci_ctrl_by_id(dev_id);

    if (pci_ctrl == NULL) {
        devdrv_err("Get pci_ctrl failed. (devid=%u)\n", dev_id);
        return NULL;
    }

    if (pci_ctrl->dma_dev == NULL) {
        devdrv_err("Get dma_dev failed. (devid=%u)\n", dev_id);
        return NULL;
    }

    return pci_ctrl->dma_dev;
}

/* Find the remote DMA channel with the given id; NULL when no channel matches. */
STATIC struct devdrv_dma_channel *devdrv_vpc_get_dma_chan_by_id(struct devdrv_dma_dev *dma_dev, u32 chan_id)
{
    struct devdrv_dma_channel *chan = &dma_dev->dma_chan[0];
    u32 idx;

    /* Linear scan: the remote channel count is small. */
    for (idx = 0; idx < dma_dev->remote_chan_num; idx++, chan++) {
        if (chan->chan_id == chan_id) {
            return chan;
        }
    }

    return NULL;
}

STATIC int devdrv_vpc_dma_iova_addr_check(u32 dev_id, struct devdrv_dma_node *dma_node_base, u32 node_cnt)
{
    struct devdrv_dma_node *dma_node = NULL;
    struct devdrv_pci_ctrl *pci_ctrl = NULL;
    u64 dma_src_start_addr;
    u64 dma_src_end_addr;
    int i;

    pci_ctrl = devdrv_get_top_half_pci_ctrl_by_id(dev_id);
    if ((pci_ctrl == NULL) || (pci_ctrl->iova_range == NULL)) {
        devdrv_err("Get pci_ctrl failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    if (pci_ctrl->iova_range->init_flag != DEVDRV_DMA_IOVA_RANGE_INIT) {
        return 0;
    }

    dma_src_start_addr = pci_ctrl->iova_range->start_addr;
    dma_src_end_addr = pci_ctrl->iova_range->end_addr;

    for (i = 0; i < node_cnt; i++) {
        dma_node = &dma_node_base[i];
        if (dma_node->direction == DEVDRV_DMA_HOST_TO_DEVICE) {
            if (((dma_node->src_addr >= dma_src_start_addr) && (dma_node->src_addr < dma_src_end_addr)) ||
                ((dma_node->src_addr < dma_src_start_addr) &&
                ((dma_node->src_addr + dma_node->size > dma_src_start_addr) ||
                (dma_node->src_addr + dma_node->size <= dma_node->src_addr)))) {
                devdrv_err("DMA H2D, sq src addr check fail. (devid=%u)\n", dev_id);
                return -EINVAL;
            }
        } else if (dma_node->direction == DEVDRV_DMA_DEVICE_TO_HOST) {
            if (((dma_node->dst_addr >= dma_src_start_addr) && (dma_node->dst_addr < dma_src_end_addr)) ||
                ((dma_node->dst_addr < dma_src_start_addr) &&
                ((dma_node->dst_addr + dma_node->size > dma_src_start_addr) ||
                (dma_node->dst_addr + dma_node->size <= dma_node->dst_addr)))) {
                devdrv_err("DMA D2H, sq dst addr check fail. (devid=%u)\n", dev_id);
                return -EINVAL;
            }
        } else {
            devdrv_err("DMA direction is invalid. (devid=%u)\n", dev_id);
            return -EINVAL;
        }
    }

    return 0;
}

/*
 * Validate a VM-submitted SQ request and kick off the asynchronous DMA copy.
 * Returns 0 on success, -EINVAL on any validation or submit failure.
 */
STATIC int devdrv_vpc_dma_fill_sq_desc_and_submit(u32 dev_id, struct devdrv_vpc_cmd_sq_submit *sq_cmd)
{
    struct devdrv_asyn_dma_para_info asyn_info = {0};
    struct devdrv_dma_copy_para para = {0};
    struct devdrv_dma_channel *dma_chan = NULL;
    struct devdrv_dma_dev *dma_dev = NULL;
    int ret;

    dma_dev = devdrv_get_dma_dev_by_devid(dev_id);
    if (dma_dev == NULL) {
        devdrv_err("Get dma_dev failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    /* The VM-supplied node count must fit within one SQ submission. */
    if (sq_cmd->node_cnt > DEVDRV_VPC_MAX_SQ_DMA_NODE_COUNT) {
        devdrv_err("Node cnt is invalid. (devid=%u, chan_id=%u, node_cnt=%u)\n",
            dev_id, sq_cmd->chan_id, sq_cmd->node_cnt);
        return -EINVAL;
    }

    dma_chan = devdrv_vpc_get_dma_chan_by_id(dma_dev, sq_cmd->chan_id);
    if (dma_chan == NULL) {
        devdrv_err("Chan id is invalid. (devid=%u, chan_id=%u)\n", dev_id, sq_cmd->chan_id);
        return -EINVAL;
    }

    /* No node may touch the protected host iova window. */
    if (devdrv_vpc_dma_iova_addr_check(dev_id, &sq_cmd->dma_node[0], sq_cmd->node_cnt) != 0) {
        devdrv_err("Dma sq addr check failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    para.instance = sq_cmd->instance;
    para.type = sq_cmd->type;
    para.wait_type = sq_cmd->wait_type;
    para.copy_type = DEVDRV_DMA_ASYNC;
    para.pa_va_flag = DEVDRV_DMA_VA_COPY;
    if (sq_cmd->asyn_info_flag == DEVDRV_VPC_DMA_ASYN_INFO_IS_NULL) {
        para.asyn_info = NULL;
    } else {
        /* Copy the caller's async completion parameters field by field.
         * NOTE(review): asyn_info is stack-local; devdrv_dma_chan_copy is
         * assumed to copy it before returning — confirm. */
        asyn_info.trans_id = sq_cmd->asyn_info.trans_id;
        asyn_info.remote_msi_vector = sq_cmd->asyn_info.remote_msi_vector;
        asyn_info.interrupt_and_attr_flag = sq_cmd->asyn_info.interrupt_and_attr_flag;
        asyn_info.priv = sq_cmd->asyn_info.priv;
        asyn_info.finish_notify = sq_cmd->asyn_info.finish_notify;
        para.asyn_info = &asyn_info;
    }

    ret = devdrv_dma_chan_copy(dev_id, dma_chan, &sq_cmd->dma_node[0], sq_cmd->node_cnt, &para);
    if (ret != 0) {
        devdrv_err("Dma copy failed. (devid=%u; ret=%d)\n", dev_id, ret);
        return -EINVAL;
    }

    return 0;
}

/*
 * Apply a VM-reported SQ/CQ head update: cache the SQ head in the channel
 * and write the CQ head back to hardware. Heads are range-checked against
 * the ring depths before use.
 */
STATIC int devdrv_vpc_dma_sqcq_head_update(u32 dev_id, struct devdrv_vpc_cmd_sqcq_update *update_cmd)
{
    struct devdrv_dma_channel *dma_chan = NULL;
    struct devdrv_dma_dev *dma_dev = devdrv_get_dma_dev_by_devid(dev_id);

    if (dma_dev == NULL) {
        devdrv_err("Get dma_dev failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    dma_chan = devdrv_vpc_get_dma_chan_by_id(dma_dev, update_cmd->chan_id);
    if (dma_chan == NULL) {
        devdrv_err("Chan id is invalid. (devid=%u, chan_id=%u)\n", dev_id, update_cmd->chan_id);
        return -EINVAL;
    }

    /* Both heads must lie inside their ring depths. */
    if ((update_cmd->sq_head >= dma_chan->sq_depth) || (update_cmd->cq_head >= dma_chan->cq_depth)) {
        devdrv_err("Sq head or cq head is invalid. (devid=%u, sq_head=%u, cq_head=%u)\n",
            dev_id, update_cmd->sq_head, update_cmd->cq_head);
        return -EINVAL;
    }

    dma_chan->sq_head = update_cmd->sq_head;
    devdrv_set_dma_cq_head(dma_chan->io_base, update_cmd->cq_head);

    return 0;
}

/*
 * Initialize the SR-IOV DMA channels, allocate the SQ/CQ rings of the
 * requested channel, and publish the SQ descriptor DMA address to the
 * shared parameter area so the VM side can locate it.
 * Returns 0 on success, -EINVAL on any failure.
 */
STATIC int devdrv_vpc_dma_init_and_alloc_sq_cq(u32 dev_id, u32 chan_id)
{
    struct devdrv_dma_dev *dma_dev = NULL;
    struct devdrv_dma_channel *dma_chan = NULL;
    struct devdrv_pci_ctrl *pci_ctrl = NULL;
    int ret;

    pci_ctrl = devdrv_get_top_half_pci_ctrl_by_id(dev_id);
    if (pci_ctrl == NULL) {
        devdrv_err("Get pci_ctrl failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    dma_dev = pci_ctrl->dma_dev;
    if (dma_dev == NULL) {
        devdrv_err("Get dma_dev failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    /* Fix: shr_para was dereferenced below without validation. */
    if (pci_ctrl->shr_para == NULL) {
        devdrv_err("Get shr_para failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    (void)devdrv_sriov_dma_init_chan(dma_dev);

    dma_chan = devdrv_vpc_get_dma_chan_by_id(dma_dev, chan_id);
    if (dma_chan == NULL) {
        devdrv_err("Chan id is invalid. (devid=%u, chan_id=%u)\n", dev_id, chan_id);
        return -EINVAL;
    }

    ret = devdrv_alloc_dma_sq_cq(dma_chan);
    if (ret != 0) {
        devdrv_err("Alloc dma_sq_cq failed. (devid=%u; ret=%d)\n", dev_id, ret);
        return -EINVAL;
    }
    pci_ctrl->shr_para->sq_desc_dma = dma_chan->sq_desc_dma;

    return 0;
}

/*
 * Release the SQ/CQ rings of the given channel and clear the SQ descriptor
 * DMA address from the shared parameter area.
 * Returns 0 on success, -EINVAL when any required context is missing.
 */
STATIC int devdrv_vpc_free_dma_sq_cq(u32 dev_id, u32 chan_id)
{
    struct devdrv_dma_dev *dma_dev = NULL;
    struct devdrv_dma_channel *dma_chan = NULL;
    struct devdrv_pci_ctrl *pci_ctrl = NULL;

    pci_ctrl = devdrv_get_top_half_pci_ctrl_by_id(dev_id);
    if (pci_ctrl == NULL) {
        devdrv_err("Get pci_ctrl failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    dma_dev = pci_ctrl->dma_dev;
    if (dma_dev == NULL) {
        devdrv_err("Get dma_dev failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    /* Fix: shr_para was dereferenced below without validation. */
    if (pci_ctrl->shr_para == NULL) {
        devdrv_err("Get shr_para failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    dma_chan = devdrv_vpc_get_dma_chan_by_id(dma_dev, chan_id);
    if (dma_chan == NULL) {
        devdrv_err("Chan id is invalid. (devid=%u, chan_id=%u)\n", dev_id, chan_id);
        return -EINVAL;
    }

    devdrv_free_dma_sq_cq(dma_chan);
    pci_ctrl->shr_para->sq_desc_dma = 0;

    return 0;
}

/*
 * Build linked SQ/CQ descriptors for the VM's DMA nodes and hand the
 * resulting DMA addresses/sizes back through dma_info->dma_desc_info.
 */
STATIC int devdrv_vpc_dma_link_prepare(u32 dev_id, struct devdrv_vpc_cmd_dma_desc_info *dma_info)
{
    struct devdrv_dma_prepare *dma_prepare = NULL;

    /* Bound the VM-supplied node count before building descriptors. */
    if (dma_info->node_cnt > DEVDRV_VPC_MAX_SQ_DMA_NODE_COUNT) {
        devdrv_err("Dma node cnt is invalid. (devid=%u; node_cnt=%u)\n", dev_id, dma_info->node_cnt);
        return -EINVAL;
    }

    dma_prepare = devdrv_dma_link_prepare(dev_id, dma_info->type, &dma_info->dma_node[0],
        dma_info->node_cnt, dma_info->fill_status);
    if (dma_prepare == NULL) {
        devdrv_err("Get dma_link_prepare failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    /* Report the prepared descriptor locations back to the requester. */
    dma_info->dma_desc_info.sq_dma_addr = dma_prepare->sq_dma_addr;
    dma_info->dma_desc_info.sq_size = dma_prepare->sq_size;
    dma_info->dma_desc_info.cq_dma_addr = dma_prepare->cq_dma_addr;
    dma_info->dma_desc_info.cq_size = dma_prepare->cq_size;

    return 0;
}

/*
 * Free linked SQ/CQ descriptors previously produced by link-prepare,
 * reconstructing the prepare record from the VM-provided addresses.
 */
STATIC int devdrv_vpc_dma_link_free(u32 dev_id, struct devdrv_vpc_cmd_dma_desc_info *dma_info)
{
    struct devdrv_dma_prepare dma_prepare = {0};

    dma_prepare.devid = dev_id;
    dma_prepare.sq_dma_addr = dma_info->dma_desc_info.sq_dma_addr;
    dma_prepare.sq_size = dma_info->dma_desc_info.sq_size;
    dma_prepare.cq_dma_addr = dma_info->dma_desc_info.cq_dma_addr;
    dma_prepare.cq_size = dma_info->dma_desc_info.cq_size;

    if (devdrv_dma_link_free(&dma_prepare) != 0) {
        devdrv_err("Set dma_link_free failed. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    return 0;
}

/*
 * Send a command message over the VPC channel, retrying while the channel
 * reports -ENOSPC (full) with a short backoff between attempts.
 * Compiled out (always 0) when SR-IOV support is disabled.
 */
int devdrv_vpc_msg_send(u32 dev_id, u32 cmd_type, struct devdrv_vpc_msg *vpc_msg, u32 data_len, u32 timeout)
{
#ifdef CFG_FEATURE_SRIOV
    struct vmng_tx_msg_proc_info tx_info;
    u32 retry;
    int ret = 0;

    vpc_msg->cmd = cmd_type;
    vpc_msg->error_code = -1;

    tx_info.data = vpc_msg;
    tx_info.in_data_len = data_len;
    tx_info.out_data_len = sizeof(struct devdrv_vpc_msg);
    tx_info.real_out_len = 0;

    /* Retry only when the channel is full; any other result is final. */
    for (retry = DEVDRV_VPC_RETRY_TIMES; retry != 0; retry--) {
        ret = vpc_msg_send(dev_id, VPC_VM_FID, VMNG_VPC_TYPE_PCIE, &tx_info, timeout);
        if (ret != -ENOSPC) {
            break;
        }
        usleep_range(100, 200);
    }

    return ret;
#else
    return 0;
#endif
}

/*
 * Validate an incoming VPC message descriptor: all pointers present, the
 * payload large enough for a full message, device id in range, fid zero.
 * Returns 0 when the message may be processed, -EINVAL otherwise.
 */
STATIC int devdrv_vpc_msg_para_check(u32 dev_id, u32 fid, const struct vmng_rx_msg_proc_info *proc_info)
{
    bool bad_ptr;
    bool bad_para;

    bad_ptr = (proc_info == NULL) || (proc_info->real_out_len == NULL) || (proc_info->data == NULL);
    if (bad_ptr) {
        devdrv_err("Vpc_msg_para_check. (devid=%u)\n", dev_id);
        return -EINVAL;
    }

    bad_para = (proc_info->in_data_len < sizeof(struct devdrv_vpc_msg)) || (dev_id >= MAX_DEV_CNT) || (fid != 0);
    if (bad_para) {
        devdrv_err("Vpc_msg_para_check. (devid=%u; fid=%u; in_data_len=%u)\n", dev_id, fid, proc_info->in_data_len);
        return -EINVAL;
    }

    return 0;
}

/* Dispatch a validated VM request to the matching DMA service routine. */
STATIC int devdrv_vpc_msg_handle(u32 dev_id, u32 cmd, union devdrv_vpc_cmd *cmd_data)
{
    switch (cmd) {
        case DEVDRV_VPC_MSG_TYPE_SQ_SUBMIT:
            return devdrv_vpc_dma_fill_sq_desc_and_submit(dev_id, &cmd_data->sq_cmd);
        case DEVDRV_VPC_MSG_TYPE_SQCQ_HEAD_UPDATE:
            return devdrv_vpc_dma_sqcq_head_update(dev_id, &cmd_data->update_cmd);
        case DEVDRV_VPC_MSG_TYPE_DMA_INIT_AND_ALLOC_SQCQ:
            return devdrv_vpc_dma_init_and_alloc_sq_cq(dev_id, cmd_data->dma_init.chan_id);
        case DEVDRV_VPC_MSG_TYPE_FREE_DMA_SQCQ:
            return devdrv_vpc_free_dma_sq_cq(dev_id, cmd_data->dma_init.chan_id);
        case DEVDRV_VPC_MSG_TYPE_DMA_LINK_PREPARE:
            return devdrv_vpc_dma_link_prepare(dev_id, &cmd_data->dma_info);
        case DEVDRV_VPC_MSG_TYPE_DMA_LINK_FREE:
            return devdrv_vpc_dma_link_free(dev_id, &cmd_data->dma_info);
        default:
            devdrv_err("Vpc msg type is illegal. (cmd=%u)\n", cmd);
            return -EINVAL;
    }
}

/*
 * VPC receive callback: validate the request, dispatch it, and report the
 * handler's status back to the VM through the message's error_code field.
 * Returns 0 once the message buffer itself is valid (handler failures
 * travel via error_code); returns -EINVAL when the descriptor is unusable.
 */
STATIC int devdrv_vpc_msg_recv(u32 dev_id, u32 fid, struct vmng_rx_msg_proc_info *proc_info)
{
    struct devdrv_vpc_msg *msg = NULL;
    int ret;

    /*
     * Fix: the original jumped to a common exit label on a failed parameter
     * check and then dereferenced proc_info->real_out_len and msg, both of
     * which may be NULL on exactly that path. Bail out before touching them.
     */
    if (devdrv_vpc_msg_para_check(dev_id, fid, proc_info) != 0) {
        devdrv_err("Vpc msg check is fail. (dev_id=%u)\n", dev_id);
        return -EINVAL;
    }

    msg = (struct devdrv_vpc_msg *)proc_info->data;
    ret = devdrv_vpc_msg_handle(dev_id, msg->cmd, &msg->cmd_data);
    if (ret != 0) {
        devdrv_err("Vpc msg handle fail. (dev_id=%u; ret=%d)\n", dev_id, ret);
    }

    /* Always report the full message size and the handler status back. */
    *(proc_info->real_out_len) = (u32)sizeof(struct devdrv_vpc_msg);
    msg->error_code = ret;
    return 0;
}

/* VPC client registered with the vmng framework for the PCIe channel.
 * No init hook is needed; all work happens in the msg_recv callback. */
struct vmng_vpc_client devdrv_vpc_client = {
    .vpc_type = VMNG_VPC_TYPE_PCIE,
    .init = NULL,
    .msg_recv = devdrv_vpc_msg_recv,
};

/*
 * Register the PCIe VPC client for the given device (fid 0).
 * A no-op success when SR-IOV support is compiled out.
 */
int devdrv_vpc_client_init(u32 devid)
{
#ifdef CFG_FEATURE_SRIOV
    int ret = vpc_register_client(devid, 0, &devdrv_vpc_client);

    if (ret != 0) {
        devdrv_err("Calling vpc_register_client fail. (ret=%d)\n", ret);
        return ret;
    }
    devdrv_info("devdrv_vpc_client init success\n");
#endif
    return 0;
}
EXPORT_SYMBOL(devdrv_vpc_client_init);

/*
 * Unregister the PCIe VPC client for the given device (fid 0).
 * A no-op success when SR-IOV support is compiled out.
 */
int devdrv_vpc_client_uninit(u32 devid)
{
#ifdef CFG_FEATURE_SRIOV
    int ret = vpc_unregister_client(devid, 0, &devdrv_vpc_client);

    if (ret != 0) {
        devdrv_err("Calling vpc_unregister_client fail. (ret=%d)\n", ret);
        return ret;
    }
    devdrv_info("devdrv_vpc_client uninit success\n");
#endif
    return 0;
}
EXPORT_SYMBOL(devdrv_vpc_client_uninit);

STATIC int devdrv_mdev_dma_iova_addr_range_init(struct devdrv_pci_ctrl *pci_ctrl)
{
    pci_ctrl->iova_range = kzalloc(sizeof(struct devdrv_dma_iova_addr_range), GFP_KERNEL);
    if (pci_ctrl->iova_range == NULL) {
        devdrv_err("Alloc iova_range fail. (devid=%u)\n", pci_ctrl->dev_id);
        return -EINVAL;
    }

    pci_ctrl->iova_range->init_flag = DEVDRV_DMA_IOVA_RANGE_UNINIT;
    pci_ctrl->iova_range->start_addr = 0;
    pci_ctrl->iova_range->end_addr = 0;

    return 0;
}

/* Release the iova range descriptor and clear the pointer; safe to call
 * when it was never allocated. */
STATIC void devdrv_mdev_dma_iova_addr_range_uninit(struct devdrv_pci_ctrl *pci_ctrl)
{
    if (pci_ctrl->iova_range != NULL) {
        kfree(pci_ctrl->iova_range);
        pci_ctrl->iova_range = NULL;
    }
}

int devdrv_mdev_pm_init(struct devdrv_pci_ctrl *pci_ctrl)
{
    int ret;

    if ((pci_ctrl->env_boot_mode != DEVDRV_MDEV_FULL_SPEC_VF_PM_BOOT) &&
        (pci_ctrl->env_boot_mode != DEVDRV_MDEV_VF_PM_BOOT)) {
        return 0;
    }

    ret = devdrv_mdev_dma_iova_addr_range_init(pci_ctrl);
    if (ret != 0) {
        devdrv_err("Iova addr range init fail. (dev_id=%u)\n", pci_ctrl->dev_id);
        return ret;
    }

    devdrv_info("Calling mdev_pm_init success. (dev_id=%u)\n", pci_ctrl->dev_id);
    return 0;
}

/*
 * VM-side mdev initialization: describe this VF's MMIO window and MSI-X
 * resources to the vmng VPC layer. Only runs for the VM-boot mediated
 * modes; a no-op success otherwise or when SR-IOV is compiled out.
 */
int devdrv_mdev_vm_init(struct devdrv_pci_ctrl *pci_ctrl)
{
#ifdef CFG_FEATURE_SRIOV
    /* Fix: zero-initialize so any vmng_vpc_unit fields not assigned below do
     * not pass stack garbage to vmng_vpc_init (matches devdrv_mdev_vm_uninit). */
    struct vmng_vpc_unit vpc_info = {0};
    int ret;

    if ((pci_ctrl->env_boot_mode != DEVDRV_MDEV_FULL_SPEC_VF_VM_BOOT) &&
        (pci_ctrl->env_boot_mode != DEVDRV_MDEV_VF_VM_BOOT)) {
        return 0;
    }

    vpc_info.pdev = pci_ctrl->pdev;
    vpc_info.dev_id = pci_ctrl->dev_id;
    vpc_info.fid = 0;

    /* Only BAR0 (the reserved memory window) is exposed; BAR2/BAR4 unused. */
    vpc_info.mmio.bar0_base = pci_ctrl->mdev_rsv_mem_phy_base;
    vpc_info.mmio.bar0_size = pci_ctrl->mdev_rsv_mem_phy_size;
    vpc_info.mmio.bar2_base = 0;
    vpc_info.mmio.bar2_size = 0;
    vpc_info.mmio.bar4_base = 0;
    vpc_info.mmio.bar4_size = 0;

    vpc_info.msix_info.entries = &pci_ctrl->msix_ctrl.entries[0];
    vpc_info.msix_info.msix_irq_num = DEVDRV_VPC_MSI_NUM;
    vpc_info.msix_info.msix_irq_offset = DEVDRV_VPC_MSI_BASE;

    ret = vmng_vpc_init(&vpc_info, SERVER_TYPE_VM_PCIE);
    if (ret != 0) {
        devdrv_err("Calling mdev_vm_init fail. (dev_id=%u;ret=%d)\n", pci_ctrl->dev_id, ret);
        return ret;
    }

    devdrv_info("Calling mdev_vm_init success. (dev_id=%u)\n", pci_ctrl->dev_id);
#endif
    return 0;
}

/* Top-level mdev VPC bring-up: PM side first, then VM side; each helper
 * self-filters by boot mode, so calling both is always safe. */
int devdrv_mdev_vpc_init(struct devdrv_pci_ctrl *pci_ctrl)
{
    int ret = devdrv_mdev_pm_init(pci_ctrl);

    if (ret != 0) {
        devdrv_err("Calling mdev_pm_init fail. (ret=%d)\n", ret);
        return ret;
    }

    ret = devdrv_mdev_vm_init(pci_ctrl);
    if (ret != 0) {
        devdrv_err("Calling mdev_vm_init fail. (ret=%d)\n", ret);
        return ret;
    }

    devdrv_info("Calling mdev_vpc_init success\n");
    return 0;
}

void devdrv_mdev_pm_uninit(struct devdrv_pci_ctrl *pci_ctrl)
{
    if ((pci_ctrl->env_boot_mode != DEVDRV_MDEV_FULL_SPEC_VF_PM_BOOT) &&
        (pci_ctrl->env_boot_mode != DEVDRV_MDEV_VF_PM_BOOT)) {
        return;
    }

    devdrv_mdev_dma_iova_addr_range_uninit(pci_ctrl);
    devdrv_info("Calling mdev_pm_uninit success\n");
}

/* Tear down the VM-side VPC unit; no-op outside the VM-boot modes or when
 * SR-IOV support is compiled out. */
void devdrv_mdev_vm_uninit(struct devdrv_pci_ctrl *pci_ctrl)
{
#ifdef CFG_FEATURE_SRIOV
    struct vmng_vpc_unit vpc_info = {0};
    bool vm_boot = (pci_ctrl->env_boot_mode == DEVDRV_MDEV_FULL_SPEC_VF_VM_BOOT) ||
        (pci_ctrl->env_boot_mode == DEVDRV_MDEV_VF_VM_BOOT);
    int ret;

    if (!vm_boot) {
        return;
    }

    /* Only the identity fields are needed for teardown. */
    vpc_info.pdev = pci_ctrl->pdev;
    vpc_info.dev_id = pci_ctrl->dev_id;
    vpc_info.fid = 0;

    ret = vmng_vpc_uninit(&vpc_info, SERVER_TYPE_VM_PCIE);
    if (ret != 0) {
        devdrv_err("Calling vmng_vpc_uninit fail. (ret=%d)\n", ret);
        return;
    }
    devdrv_info("Calling mdev_vm_uninit success\n");
#endif
}

/* Top-level mdev VPC teardown: both halves self-filter by boot mode. */
void devdrv_mdev_vpc_uninit(struct devdrv_pci_ctrl *pci_ctrl)
{
    devdrv_mdev_pm_uninit(pci_ctrl);
    devdrv_mdev_vm_uninit(pci_ctrl);
}