/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2021. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2019-10-15
 */

#include <linux/device.h>
#include <linux/iommu.h>

#include "devdrv_ctrl.h"
#include "devdrv_util.h"
#include "devdrv_msg.h"
#include "devdrv_smmu.h"

#ifdef CFG_FEATURE_AGENT_SMMU
/*
 * Translate one batch of host DMA addresses to physical addresses via the
 * agent-SMMU message channel. The batch is the addr_index-th group of up to
 * DEVDRV_AGENT_SMMU_SUPPORT_MAX_NUM entries inside dma_node; addr_cnt is the
 * number of valid entries in this batch. cmd_data is a caller-provided scratch
 * buffer large enough for addr_cnt u64 addresses.
 *
 * On success the translated addresses are written back into dma_node
 * (src_addr for HOST_TO_DEVICE nodes, dst_addr otherwise) and 0 is returned;
 * -EINVAL on message-send failure or an oversized reply.
 */
STATIC int devdrv_dma_to_pa_by_agent_smmu(struct devdrv_msg_dev *msg_dev,
    struct devdrv_host_dma_addr_to_pa_cmd *cmd_data, struct devdrv_dma_node *dma_node, int addr_index, u32 addr_cnt)
{
    /* base: offset of this batch within the caller's dma_node array */
    u32 base = (u32)addr_index * DEVDRV_AGENT_SMMU_SUPPORT_MAX_NUM;
    u32 real_out_len = 0;
    u32 data_len;
    int ret;
    u32 j; /* u32 to match addr_cnt; avoids signed/unsigned comparison */

    cmd_data->cnt = addr_cnt;

    /* Gather the host-side DMA addresses of this batch into the command. */
    for (j = 0; j < addr_cnt; j++) {
        if (dma_node[base + j].direction == DEVDRV_DMA_HOST_TO_DEVICE) {
            cmd_data->dma_addr[j] = dma_node[base + j].src_addr;
        } else {
            cmd_data->dma_addr[j] = dma_node[base + j].dst_addr;
        }
    }

    /* The device answers in place: reply length must not exceed the request. */
    data_len = sizeof(struct devdrv_host_dma_addr_to_pa_cmd) + sizeof(u64) * addr_cnt;
    ret = devdrv_sync_msg_send(msg_dev->agent_smmu_chan, cmd_data, data_len, data_len, &real_out_len);
    if ((ret != 0) || (real_out_len > data_len)) {
        devdrv_err("Dma addr map fail. (devid=%u, ret=%d, i=%d, cnt=%u, data_len=%u, real_out_len=%u)\n",
            msg_dev->pci_ctrl->dev_id, ret, addr_index, addr_cnt, data_len, real_out_len);
        return -EINVAL;
    }

    /* Scatter the translated physical addresses back into the node array. */
    for (j = 0; j < addr_cnt; j++) {
        if (dma_node[base + j].direction == DEVDRV_DMA_HOST_TO_DEVICE) {
            dma_node[base + j].src_addr = cmd_data->dma_addr[j];
        } else {
            dma_node[base + j].dst_addr = cmd_data->dma_addr[j];
        }
    }

    return 0;
}

/*
 * Receive callback for the SMMU non-transparent control channel.
 * Placeholder implementation: traces the callback and reports success
 * without consuming the payload.
 */
STATIC int devdrv_smmu_non_trans_ctrl_msg_recv(void *msg_chan, void *data, u32 in_data_len,
    u32 out_data_len, u32 *real_out_len)
{
    /* Parameters are required by the rx_msg_process prototype but unused here. */
    (void)msg_chan;
    (void)data;
    (void)in_data_len;
    (void)out_data_len;
    (void)real_out_len;

    devdrv_info("Smmu rx_msg_process\n");
    return 0;
}

/*
 * Channel descriptor for the agent-SMMU non-transparent message queue,
 * passed to devdrv_pcimsg_alloc_non_trans_queue() in
 * devdrv_smmu_init_msg_chan().
 */
STATIC struct devdrv_non_trans_msg_chan_info devdrv_smmu_non_trans_msg_chan_info = {
    .msg_type = devdrv_msg_client_smmu,
    .flag = 0,
    .level = DEVDRV_MSG_CHAN_LEVEL_HIGH,          /* high-priority channel */
    .s_desc_size = DEVDRV_SMMU_NON_TRANS_MSG_DESC_SIZE,  /* server-side descriptor size */
    .c_desc_size = DEVDRV_SMMU_NON_TRANS_MSG_DESC_SIZE,  /* client-side descriptor size */
    .rx_msg_process = devdrv_smmu_non_trans_ctrl_msg_recv,
};

/*
 * Translate the DMA addresses in dma_node to physical addresses by asking the
 * device-side agent SMMU, batching the requests in groups of
 * DEVDRV_AGENT_SMMU_SUPPORT_MAX_NUM.
 *
 * Returns:
 *   - dma_node itself when no translation is needed (PCIe link, physical
 *     match enabled, or VF VM boot) — caller keeps ownership of dma_node;
 *   - a newly allocated, translated copy on success — the caller must release
 *     it with devdrv_host_dma_node_free();
 *   - NULL on failure.
 */
struct devdrv_dma_node *devdrv_host_dma_addr_to_pa(int dev_id, struct devdrv_dma_node *dma_node, u32 node_cnt)
{
    struct devdrv_pci_ctrl *pci_ctrl = NULL;
    struct devdrv_host_dma_addr_to_pa_cmd *cmd = NULL;
    struct devdrv_dma_node *real_dma_node = NULL;
    u32 real_dma_node_len;
    u32 send_times;
    u32 last_send_num;
    u32 cmd_len;
    int ret;
    u32 i; /* u32 to match send_times; avoids signed/unsigned comparison */

    pci_ctrl = devdrv_get_bottom_half_pci_ctrl_by_id((u32)dev_id);
    if (pci_ctrl == NULL) {
        devdrv_err("Get pci_ctrl failed. (dev_id=%d)\n", dev_id);
        return NULL;
    }

    /* pcie link no need transform dma addr to pa by agent smmu */
    if (pci_ctrl->connect_protocol == CONNECT_PROTOCOL_PCIE) {
        return dma_node;
    }

    /* only hccs peh's virt pass-through transform dma addr to pa by agent smmu */
    if ((pci_ctrl->shr_para->phy_match_flag != 0) || (pci_ctrl->env_boot_mode == DEVDRV_MDEV_VF_VM_BOOT)) {
        return dma_node;
    }

    /* One command buffer sized for a full batch, reused for every send. */
    cmd_len = sizeof(struct devdrv_host_dma_addr_to_pa_cmd) + sizeof(u64) * DEVDRV_AGENT_SMMU_SUPPORT_MAX_NUM;
    cmd = (struct devdrv_host_dma_addr_to_pa_cmd *)kzalloc(cmd_len, GFP_KERNEL | __GFP_ACCOUNT);
    if (cmd == NULL) {
        devdrv_err("Alloc cmd_data fail. (dev_id=%u)\n", pci_ctrl->dev_id);
        return NULL;
    }

    /* Work on a copy so the caller's dma_node stays untouched on failure. */
    real_dma_node_len = sizeof(struct devdrv_dma_node) * node_cnt;
    real_dma_node = (struct devdrv_dma_node *)kzalloc(real_dma_node_len, GFP_KERNEL | __GFP_ACCOUNT);
    if (real_dma_node == NULL) {
        devdrv_err("Alloc real_dma_node fail. (dev_id=%u)\n", pci_ctrl->dev_id);
        goto free_cmd_data;
    }

    if (memcpy_s(real_dma_node, real_dma_node_len, dma_node, real_dma_node_len) != 0) {
        devdrv_err("Memcpy failed. (dev_id=%d)\n", dev_id);
        goto free_real_dma_node;
    }

    /* Translate full batches, then the remainder (skipped when it is empty,
     * i.e. node_cnt is an exact multiple of the batch size). */
    send_times = node_cnt / DEVDRV_AGENT_SMMU_SUPPORT_MAX_NUM;
    last_send_num = node_cnt % DEVDRV_AGENT_SMMU_SUPPORT_MAX_NUM;
    for (i = 0; i < send_times; i++) {
        ret = devdrv_dma_to_pa_by_agent_smmu(pci_ctrl->msg_dev, cmd, real_dma_node, (int)i,
            DEVDRV_AGENT_SMMU_SUPPORT_MAX_NUM);
        if (ret != 0) {
            devdrv_err("Devdrv host dma remap fail.\n");
            goto free_real_dma_node;
        }
    }

    if (last_send_num != 0) {
        ret = devdrv_dma_to_pa_by_agent_smmu(pci_ctrl->msg_dev, cmd, real_dma_node, (int)i, last_send_num);
        if (ret != 0) {
            devdrv_err("Devdrv last host dma remap fail.\n");
            goto free_real_dma_node;
        }
    }

    kfree(cmd);
    cmd = NULL;
    return real_dma_node;

free_real_dma_node:
    kfree(real_dma_node);
    real_dma_node = NULL;
free_cmd_data:
    kfree(cmd);
    cmd = NULL;

    return NULL;
}

/*
 * Release a dma_node array returned by devdrv_host_dma_addr_to_pa().
 * kfree() is only performed when that function actually allocated a copy;
 * in the pass-through cases (PCIe link, physical match enabled, VF VM boot)
 * it returned the caller's own array, which must not be freed here.
 */
void devdrv_host_dma_node_free(int dev_id, struct devdrv_dma_node *real_dma_node)
{
    struct devdrv_pci_ctrl *pci_ctrl = NULL;

    if (real_dma_node == NULL) {
        return;
    }

    pci_ctrl = devdrv_get_bottom_half_pci_ctrl_by_id((u32)dev_id);
    if (pci_ctrl == NULL) {
        devdrv_err("Get pci_ctrl failed. (dev_id=%d)\n", dev_id);
        return;
    }

    /* Free only when a translated copy was allocated: non-PCIe link, no
     * physical match and not a VF VM boot (mirrors devdrv_host_dma_addr_to_pa). */
    if ((pci_ctrl->connect_protocol != CONNECT_PROTOCOL_PCIE) &&
        (pci_ctrl->shr_para->phy_match_flag == 0) &&
        (pci_ctrl->env_boot_mode != DEVDRV_MDEV_VF_VM_BOOT)) {
        kfree(real_dma_node);
    }
}
#endif

/*
 * Read the IOMMU stream id (sid) of the PCI device from the kernel's iommu
 * fwspec and publish it to the device through the shared parameter area.
 * Only meaningful for HCCS-connected devices with the agent-SMMU feature on;
 * a no-op otherwise. The fwspec location moved between kernel versions,
 * hence the version-gated access below.
 */
void devdrv_pdev_sid_init(struct devdrv_pci_ctrl *pci_ctrl)
{
#ifdef CFG_FEATURE_AGENT_SMMU
    if (pci_ctrl->connect_protocol != CONNECT_PROTOCOL_HCCS) {
        return;
    }
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)
    /* Pre-4.1 kernels expose no iommu fwspec on struct device. */
    devdrv_warn("kernel version is low, not support pdev sid (devid=%u)\n", pci_ctrl->dev_id);
    return;
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    /* 4.1 .. 5.9: fwspec lives directly on struct device. */
    if (pci_ctrl->pdev->dev.iommu_fwspec != NULL) {
        pci_ctrl->shr_para->sid = pci_ctrl->pdev->dev.iommu_fwspec->ids[0];
    } else {
        devdrv_warn("iommu not enabled, can not get host pdev sid (devid=%u)\n", pci_ctrl->dev_id);
        return;
    }
#else
    /* 5.10+: fwspec moved behind dev->iommu (struct dev_iommu). */
    if ((pci_ctrl->pdev->dev.iommu != NULL) && (pci_ctrl->pdev->dev.iommu->fwspec != NULL)) {
        pci_ctrl->shr_para->sid = pci_ctrl->pdev->dev.iommu->fwspec->ids[0];
    } else {
        devdrv_warn("iommu not enabled, can not get host pdev sid (devid=%u)\n", pci_ctrl->dev_id);
        return;
    }
#endif
    devdrv_info("Get host pdev sid=%u, devid=%u.\n", pci_ctrl->shr_para->sid, pci_ctrl->dev_id);
#endif
    return;
}

/*
 * Clear the stream id published by devdrv_pdev_sid_init().
 * Only HCCS-connected devices ever have a sid set, so others are untouched.
 */
void devdrv_pdev_sid_uninit(struct devdrv_pci_ctrl *pci_ctrl)
{
#ifdef CFG_FEATURE_AGENT_SMMU
    if (pci_ctrl->connect_protocol == CONNECT_PROTOCOL_HCCS) {
        pci_ctrl->shr_para->sid = 0;
    }
#endif
}
/*
 * Allocate the non-transparent message channel used to ask the device-side
 * agent SMMU for DMA-address-to-PA translation.
 * Needed only for HCCS links without physical match and outside VF VM boot;
 * all other configurations return 0 without allocating anything.
 * Returns 0 on success (or when no channel is needed), -EINVAL on failure.
 */
int devdrv_smmu_init_msg_chan(struct devdrv_pci_ctrl *pci_ctrl)
{
    /* Entry trace: info level — this is normal flow, not an error. */
    devdrv_info("devdrv_smmu_init_msg_chan.\n");
#ifdef CFG_FEATURE_AGENT_SMMU
    /* pcie link no need transform dma addr to pa by agent smmu */
    if (pci_ctrl->connect_protocol != CONNECT_PROTOCOL_HCCS) {
        return 0;
    }

    /* only hccs peh's virt pass-through transform dma addr to pa by agent smmu
     * (connect_protocol is already known to be HCCS here) */
    if ((pci_ctrl->shr_para->phy_match_flag != 0) || (pci_ctrl->env_boot_mode == DEVDRV_MDEV_VF_VM_BOOT)) {
        return 0;
    }

    pci_ctrl->msg_dev->agent_smmu_chan =
        devdrv_pcimsg_alloc_non_trans_queue(pci_ctrl->dev_id, &devdrv_smmu_non_trans_msg_chan_info);
    if (pci_ctrl->msg_dev->agent_smmu_chan == NULL) {
        devdrv_err("Calling agent_smmu_chan failed. (dev_id=%u)\n", pci_ctrl->dev_id);
        return -EINVAL;
    }
#endif
    return 0;
}

/*
 * Free the agent-SMMU message channel allocated by devdrv_smmu_init_msg_chan().
 * Mirrors the init-path guards so it is a no-op for configurations where no
 * channel was created; safe to call when the channel pointer is already NULL.
 */
void devdrv_smmu_uninit_msg_chan(struct devdrv_pci_ctrl *pci_ctrl)
{
#ifdef CFG_FEATURE_AGENT_SMMU
    /* pcie link no need transform dma addr to pa by agent smmu */
    if (pci_ctrl->connect_protocol != CONNECT_PROTOCOL_HCCS) {
        return;
    }

    /* only hccs peh's virt pass-through transform dma addr to pa by agent smmu
     * (connect_protocol is already known to be HCCS here) */
    if ((pci_ctrl->shr_para->phy_match_flag != 0) || (pci_ctrl->env_boot_mode == DEVDRV_MDEV_VF_VM_BOOT)) {
        return;
    }

    if (pci_ctrl->msg_dev->agent_smmu_chan != NULL) {
        devdrv_pcimsg_free_non_trans_queue(pci_ctrl->msg_dev->agent_smmu_chan);
        pci_ctrl->msg_dev->agent_smmu_chan = NULL;
    }
#endif
}
