/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2019-10-15
 */

#include "devmm_common.h"
#include "svm_msg_client.h"
#include "devmm_channel.h"
#include "svm_proc_mng.h"
#include "svm_master_dev_capability.h"

void devmm_set_dev_mem_size_info(u32 did, struct devmm_chan_exchange_pginfo *info)
{
    /*
     * ddr_size and hbm_size are used for VA distribution and must be a
     * power-of-two size, so round each reported size up via get_order().
     */
    u64 ddr_bytes = (1ul << (u32)get_order(info->dev_mem[DEVMM_EXCHANGE_DDR_SIZE])) * PAGE_SIZE;
    u64 hbm_bytes = (1ul << (u32)get_order(info->dev_mem[DEVMM_EXCHANGE_HBM_SIZE])) * PAGE_SIZE;

    /* Record the per-device memory layout reported by the device side. */
    devmm_svm->device_info.ddr_size[did][0] = ddr_bytes;
    devmm_svm->device_info.hbm_size[did][0] = hbm_bytes;
    devmm_svm->device_info.p2p_ddr_size[did][0] = info->dev_mem_p2p[DEVMM_EXCHANGE_DDR_SIZE];
    devmm_svm->device_info.p2p_hbm_size[did][0] = info->dev_mem_p2p[DEVMM_EXCHANGE_HBM_SIZE];
    devmm_svm->device_info.cluster_id[did] = info->cluster_id;

    /* Fold this device's contribution into the host-wide totals. */
    atomic64_add(ddr_bytes, &devmm_svm->device_info.total_ddr);
    atomic64_add(hbm_bytes, &devmm_svm->device_info.total_hbm);

    devmm_drv_info("Memory info. (did=%u; cluster_id=%u; "
        "ddr_size=%llu; p2p_ddr_size=%llu; hbm_size=%llu; p2p_hbm_size=%llu; "
        "host_ddr=%llu; total_ddr=%llu; total_hbm=%llu)\n",
        did, info->cluster_id,
        ddr_bytes, devmm_svm->device_info.p2p_ddr_size[did][0],
        hbm_bytes, devmm_svm->device_info.p2p_hbm_size[did][0],
        devmm_svm->device_info.host_ddr,
        (u64)atomic64_read(&devmm_svm->device_info.total_ddr), (u64)atomic64_read(&devmm_svm->device_info.total_hbm));
}

void devmm_clear_dev_mem_size_info(u32 devid)
{
    u64 ddr_bytes = devmm_svm->device_info.ddr_size[devid][0];
    u64 hbm_bytes = devmm_svm->device_info.hbm_size[devid][0];

    /* Remove this device's contribution from the host-wide totals first. */
    atomic64_sub(ddr_bytes, &devmm_svm->device_info.total_ddr);
    atomic64_sub(hbm_bytes, &devmm_svm->device_info.total_hbm);

    /* Then reset every per-device memory-size bookkeeping slot. */
    devmm_svm->device_info.ddr_size[devid][0] = 0;
    devmm_svm->device_info.hbm_size[devid][0] = 0;
    devmm_svm->device_info.p2p_ddr_size[devid][0] = 0;
    devmm_svm->device_info.p2p_hbm_size[devid][0] = 0;
    devmm_svm->device_info.p2p_ddr_hugepage_size[devid][0] = 0;
    devmm_svm->device_info.p2p_hbm_hugepage_size[devid][0] = 0;
    devmm_svm->device_info.ts_ddr_size[devid][0] = 0;
    devmm_svm->device_info.ts_ddr_hugepage_size[devid][0] = 0;
}

int devmm_set_dev_capability(const u32 did, const u32 vfid, struct devmm_chan_exchange_pginfo *info)
{
    if (info->device_capability.dvpp_memsize != DEVMM_MAX_HEAP_MEM_FOR_DVPP_16G &&
        info->device_capability.dvpp_memsize != DEVMM_MAX_HEAP_MEM_FOR_DVPP_4G) {
        devmm_drv_err("Synchronize dvpp_memsize failed. (dvpp_memsize=%llu)\n", info->device_capability.dvpp_memsize);
        return -EINVAL;
    }

    devmm_svm->dev_capability[did].svm_offset_num = info->ts_shm_data_num;
    devmm_svm->dev_capability[did].dvpp_memsize = info->device_capability.dvpp_memsize;
    /*
     * device support alloc p2p mem,
     * if host PAGE_SIZE bigger than device PAGE_SIZE, remap continuously bar to user, will out-of-bounds memory access
     * vm can not use write combine page table properties, otherwise vm will hang
     */
    devmm_svm->dev_capability[did].feature_bar_mem =
        ((devmm_svm->device_page_size == devmm_svm->host_page_size) &&
        (devmm_get_host_run_mode(did) == DEVMM_HOST_IS_PHYS)) ?
        info->device_capability.feature_bar_mem : 0;
    devmm_svm->dev_capability[did].feature_bar_huge_mem =
        (devmm_get_host_run_mode(did) == DEVMM_HOST_IS_PHYS) ?
        info->device_capability.feature_bar_mem : 0;
    devmm_svm->dev_capability[did].feature_dev_mem_map_host =
        ((devmm_svm->device_page_size == devmm_svm->host_page_size) &&
        (vfid == 0)) ? info->device_capability.feature_dev_mem_map_host : 0;
    devmm_svm->dev_capability[did].feature_phycial_address = info->device_capability.feature_phycial_address;
    devmm_svm->dev_capability[did].feature_pcie_th = info->device_capability.feature_pcie_th;
    devmm_svm->dev_capability[did].feature_dev_read_only = info->device_capability.feature_dev_read_only;
    devmm_svm->dev_capability[did].feature_pcie_dma_support_sva = info->device_capability.feature_pcie_dma_support_sva;

    devmm_drv_info("Device capability info. (did=%u; vfid=%u; cluster_id=%u; ts_shm_map_bar=%u; ts_shm_data_num=%u; "
        "feature_phycial_address=0x%x; feature_pcie_th=%u; feature_bar_mem=%x; "
        "dvpp_memsize=%llu; svm_offset_num=%u; feature_read_mem=%u; feature_pcie_dma_support_sva=%u; "
        "feature_dev_mem_map_host=%u; feature_bar_huge_mem=%u)\n",
        did, vfid, info->cluster_id, info->ts_shm_support_bar_write, info->ts_shm_data_num,
        devmm_svm->dev_capability[did].feature_phycial_address,
        devmm_svm->dev_capability[did].feature_pcie_th, devmm_svm->dev_capability[did].feature_bar_mem,
        devmm_svm->dev_capability[did].dvpp_memsize, devmm_svm->dev_capability[did].svm_offset_num,
        devmm_svm->dev_capability[did].feature_dev_read_only,
        devmm_svm->dev_capability[did].feature_pcie_dma_support_sva,
        devmm_svm->dev_capability[did].feature_dev_mem_map_host,
        devmm_svm->dev_capability[did].feature_bar_huge_mem);
    return 0;
}

void devmm_clear_dev_capability(const u32 did)
{
    /*
     * Drop the cached capability state for this device.
     * NOTE(review): only these three fields are reset; the remaining
     * feature_* flags keep their last values until the next capability
     * exchange rewrites them — confirm this partial reset is intended.
     */
    devmm_svm->dev_capability[did].dvpp_memsize = 0;
    devmm_svm->dev_capability[did].svm_offset_num = 0;
    devmm_svm->dev_capability[did].feature_phycial_address = 0;
}

