/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2023-02-06
 */

#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/spinlock_types.h>
#include <linux/preempt.h>

#include "devdrv_interface.h"

#include "svm_proc_mng.h"
#include "svm_srcu_work.h"
#include "svm_dev_res_mng.h"
#include "svm_master_proc_mng.h"
#include "svm_master_convert.h"
#include "svm_task_dev_res_mng.h"
#include "svm_master_dma_desc_mng.h"

/*
 * Payload describing one registered DMA descriptor set: the device
 * identity, the prepared DMA addresses, the copied VA range, the
 * (key, subkey) lookup pair and the owning host pid.
 */
struct devmm_dma_desc_node_info {
    struct svm_id_inst id_inst;   /* device id/instance packed by svm_id_inst_pack() */
    struct DMA_ADDR dma_addr;     /* SQ/CQ DMA addresses; phyAddr.priv stores the devmm_copy_res */
    u64 src_va;                   /* source virtual address of the copy */
    u64 dst_va;                   /* destination virtual address of the copy */
    u64 size;                     /* copy length in bytes */

    u32 key;                      /* primary lookup key (high 32 bits of the rb handle) */
    u32 subkey;                   /* secondary lookup key (low 32 bits of the rb handle) */

    pid_t host_pid;               /* host-side pid taken from the caller's handle */
};

/*
 * Reference-counted rb-tree node tracking one DMA descriptor set for a
 * process. Keyed in the tree by rb_handle = ((u64)key << 32) | subkey.
 */
struct devmm_dma_desc_node {
    struct devmm_svm_process *svm_proc;  /* owning SVM process */

    struct rb_node task_node;            /* linkage into dma_desc_rb_info.root */
    u64 rb_handle;                       /* rb-tree key derived from info.key/info.subkey */

    struct kref ref;                     /* lifetime; freed by devmm_dma_desc_node_release() */

    struct devmm_dma_desc_node_info info;  /* descriptor payload (copied in at create time) */
};

static int devmm_dma_desc_res_create(struct devmm_svm_process *svm_proc,
    struct svm_dma_desc_addr_info *addr_info, struct devmm_copy_res **res)
{
    struct devmm_mem_convrt_addr_para convert_para = {0};
    struct devmm_copy_res *tmp = NULL;
    int ret;

    convert_para.pSrc = addr_info->src_va;
    convert_para.pDst = addr_info->dst_va;
    convert_para.len = addr_info->size;
    convert_para.direction = DEVMM_COPY_INVILED_DIRECTION;
    ret = devmm_convert_one_addr(svm_proc, &convert_para);
    if (ret != 0) {
        devmm_drv_err("Convert one addr failed.\n");
        return ret;
    }

    tmp = (struct devmm_copy_res *)convert_para.dmaAddr.phyAddr.priv;
    tmp->dma_prepare = devdrv_dma_link_prepare((u32)tmp->dev_id, DEVDRV_DMA_DATA_TRAFFIC,
        tmp->dma_node, tmp->dma_node_num, DEVDRV_DMA_DESC_FILL_FINISH);
    if (tmp->dma_prepare == NULL) {
        devmm_drv_err("Dma_link_prepare alloc failed.\n");
        devmm_destroy_one_addr(tmp);
        return -ENOMEM;
    }

    *res = tmp;
    return 0;
}

/*
 * SRCU work callback: free the devdrv_dma_prepare whose pointer was
 * captured into the work argument by devmm_dma_desc_res_destroy().
 */
static void devmm_dma_prepare_destroy_srcu_work(u64 *arg, u64 arg_size)
{
    struct devdrv_dma_prepare **dma_prepare = (struct devdrv_dma_prepare **)arg;

    (void)arg_size; /* fixed callback signature; the size is not needed here */

    devdrv_dma_link_free(*dma_prepare);
}

/*
 * Release a DMA copy resource created by devmm_dma_desc_res_create().
 *
 * In softirq context the dma_prepare pointer is handed off to the SRCU
 * work queue and freed later by devmm_dma_prepare_destroy_srcu_work();
 * otherwise it is freed inline. The converted address resource itself
 * is destroyed last in both cases.
 */
static void devmm_dma_desc_res_destroy(struct devmm_svm_process *svm_proc, struct devmm_copy_res *res)
{
    if (in_softirq()) {
        /*
         * The work argument is the POINTER stored in res->dma_prepare (the
         * callback casts arg to devdrv_dma_prepare ** and dereferences it),
         * so the argument size must be the pointer size. The previous
         * sizeof(struct devdrv_dma_prepare) over-stated the payload and
         * risked copying past the 8-byte field.
         * NOTE(review): assumes devmm_srcu_subwork_add() copies arg_size
         * bytes from arg at enqueue time — confirm against its definition.
         */
        devmm_srcu_subwork_add(&svm_proc->srcu_work, DEVMM_SRCU_SUBWORK_ENSURE_EXEC_TYPE,
            devmm_dma_prepare_destroy_srcu_work, (u64 *)&res->dma_prepare, sizeof(res->dma_prepare));
    } else {
        devdrv_dma_link_free(res->dma_prepare);
    }
    devmm_destroy_one_addr(res);
}

/* Compose the 64-bit rb-tree handle: key in the high 32 bits, subkey in the low 32. */
static inline u64 keys_to_rb_handle(u32 key, u32 subkey)
{
    u64 handle = (u64)key;

    handle <<= 32;
    handle |= (u64)subkey;
    return handle;
}

/* rb-tree key accessor: map an rb_node back to its devmm_dma_desc_node handle. */
static u64 rb_handle_of_dma_desc_node(struct rb_node *node)
{
    struct devmm_dma_desc_node *desc_node =
        rb_entry(node, struct devmm_dma_desc_node, task_node);

    return desc_node->rb_handle;
}

/*
 * Allocate a dma_desc node, copy *info into it and insert it into the
 * per-process rb-tree keyed by (key, subkey). The node starts with one
 * kref reference. GFP_ATOMIC is used because creation may run in atomic
 * context.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the rb-insert
 * error (e.g. duplicate handle), in which case the node is freed.
 */
static int devmm_dma_desc_node_create(struct devmm_svm_process *svm_proc, struct devmm_dma_desc_node_info *info)
{
    struct devmm_svm_proc_master *master = (struct devmm_svm_proc_master *)svm_proc->priv_data;
    struct devmm_dma_desc_node_rb_info *rb_info = &master->dma_desc_rb_info;
    struct devmm_dma_desc_node *new_node = NULL;
    int err;

    new_node = kzalloc(sizeof(*new_node), __GFP_ACCOUNT | GFP_ATOMIC);
    if (new_node == NULL) {
        devmm_drv_err("Kzalloc failed.\n");
        return -ENOMEM;
    }

    new_node->svm_proc = svm_proc;
    new_node->info = *info;
    kref_init(&new_node->ref);
    new_node->rb_handle = keys_to_rb_handle(new_node->info.key, new_node->info.subkey);
    RB_CLEAR_NODE(&new_node->task_node);

    spin_lock_bh(&rb_info->spinlock);
    err = devmm_rb_insert(&rb_info->root, &new_node->task_node, rb_handle_of_dma_desc_node);
    spin_unlock_bh(&rb_info->spinlock);

    if (err != 0) {
        devmm_drv_err("Insert fail. (key=%u; subkey=%u)\n", info->key, info->subkey);
        kfree(new_node);
    }
    return err;
}

/* kref release callback: last reference dropped, reclaim the node memory. */
static void devmm_dma_desc_node_release(struct kref *kref)
{
    kfree(container_of(kref, struct devmm_dma_desc_node, ref));
}

/*
 * Tear down a dma_desc node: unlink it from the per-process rb-tree,
 * destroy its DMA copy resource, and drop the node's initial reference.
 * Callers that looked the node up (and therefore hold an extra
 * reference) must still call devmm_dma_desc_node_put() afterwards.
 */
static void devmm_dma_desc_node_destroy(struct devmm_dma_desc_node *node)
{
    struct devmm_svm_proc_master *master =
        (struct devmm_svm_proc_master *)node->svm_proc->priv_data;
    struct devmm_dma_desc_node_rb_info *rb_info = &master->dma_desc_rb_info;

    spin_lock_bh(&rb_info->spinlock);
    (void)devmm_rb_erase(&rb_info->root, &node->task_node);
    spin_unlock_bh(&rb_info->spinlock);

    devmm_dma_desc_res_destroy(node->svm_proc,
        (struct devmm_copy_res *)node->info.dma_addr.phyAddr.priv);

    kref_put(&node->ref, devmm_dma_desc_node_release);
}

/* Take an extra reference on a node; both callers invoke this while holding rb_info->spinlock. */
static void _devmm_dma_desc_node_get(struct devmm_dma_desc_node *node)
{
    kref_get(&node->ref);
}

/*
 * Look up a node by its 64-bit handle and take a reference on it under
 * the rb-tree lock. Returns NULL when no node with that handle exists.
 * Pair a successful lookup with devmm_dma_desc_node_put().
 */
static struct devmm_dma_desc_node *devmm_dma_desc_node_get(struct devmm_svm_process *svm_proc, u64 handle)
{
    struct devmm_svm_proc_master *master = (struct devmm_svm_proc_master *)svm_proc->priv_data;
    struct devmm_dma_desc_node_rb_info *rb_info = &master->dma_desc_rb_info;
    struct devmm_dma_desc_node *found = NULL;
    struct rb_node *rb_node = NULL;

    spin_lock_bh(&rb_info->spinlock);
    rb_node = devmm_rb_search(&rb_info->root, handle, rb_handle_of_dma_desc_node);
    if (rb_node != NULL) {
        found = rb_entry(rb_node, struct devmm_dma_desc_node, task_node);
        _devmm_dma_desc_node_get(found);
    }
    spin_unlock_bh(&rb_info->spinlock);

    return found;
}

/* Drop a node reference; frees the node via devmm_dma_desc_node_release() when it hits zero. */
static void devmm_dma_desc_node_put(struct devmm_dma_desc_node *node)
{
    kref_put(&node->ref, devmm_dma_desc_node_release);
}

/*
 * Find ANY node whose primary key matches (subkey ignored) and take a
 * reference on it. Used by batch destroy to drain all nodes under one
 * key. Returns NULL when no node with that key remains.
 */
static struct devmm_dma_desc_node *devmm_dma_desc_node_get_one_by_key(
    struct devmm_svm_process *svm_proc, u32 key)
{
    struct devmm_svm_proc_master *master = (struct devmm_svm_proc_master *)svm_proc->priv_data;
    struct devmm_dma_desc_node_rb_info *rb_info = &master->dma_desc_rb_info;
    struct devmm_dma_desc_node *found = NULL;
    struct devmm_dma_desc_node *pos = NULL;
    struct devmm_dma_desc_node *next = NULL;

    spin_lock_bh(&rb_info->spinlock);
    rbtree_postorder_for_each_entry_safe(pos, next, &rb_info->root, task_node) {
        if (pos->info.key == key) {
            _devmm_dma_desc_node_get(pos);
            found = pos;
            break;
        }
    }
    spin_unlock_bh(&rb_info->spinlock);

    return found;
}

/*
 * Fill a devmm_dma_desc_node_info from the caller's address range, the
 * (key, subkey, pid) handle and the freshly created copy resource.
 */
static void devmm_dma_desc_node_info_pack(struct svm_dma_desc_addr_info *addr_info,
    struct svm_dma_desc_handle *handle, struct devmm_copy_res *res, struct devmm_dma_desc_node_info *info)
{
    /* Device identity of the resource (instance fixed at 0). */
    svm_id_inst_pack(&info->id_inst, res->dev_id, 0);

    /* DMA addresses: priv keeps the copy_res so destroy can find it again. */
    info->dma_addr.phyAddr.priv = (void *)res;
    info->dma_addr.phyAddr.src = (void *)(uintptr_t)res->dma_prepare->sq_dma_addr;
    info->dma_addr.phyAddr.dst = (void *)(uintptr_t)res->dma_prepare->cq_dma_addr;
    info->dma_addr.phyAddr.len = res->dma_node_alloc_num;
    info->dma_addr.phyAddr.flag = 1;

    /* The user-visible copy range. */
    info->src_va = addr_info->src_va;
    info->dst_va = addr_info->dst_va;
    info->size = addr_info->size;

    /* Lookup identity: (key, subkey) plus the owning host pid. */
    info->key = handle->key;
    info->subkey = handle->subkey;
    info->host_pid = handle->pid;
}

static int devmm_dma_desc_create(struct devmm_svm_process *svm_proc, struct svm_dma_desc_addr_info *addr_info,
    struct svm_dma_desc_handle *handle, struct svm_dma_desc *dma_desc)
{
    struct devmm_dma_desc_node_info info = {{0}};
    struct devmm_copy_res *res = NULL;
    int ret;

    ret = devmm_dma_desc_res_create(svm_proc, addr_info, &res);
    if (ret != 0) {
        return ret;
    }

    devmm_dma_desc_node_info_pack(addr_info, handle, res, &info);
    ret = devmm_dma_desc_node_create(svm_proc, &info);
    if (ret != 0) {
        devmm_dma_desc_res_destroy(svm_proc, res);
        return ret;
    }

    dma_desc->sq_addr = (void *)(uintptr_t)res->dma_prepare->sq_dma_addr;
    dma_desc->sq_tail = res->dma_node_num;
    devmm_drv_debug("Dma desc info. (sq_addr=0x%llx; sq_tail=%u)\n", (u64)dma_desc->sq_addr, dma_desc->sq_tail);
    return 0;
}

/*
 * Destroy every dma_desc node that shares the given primary key,
 * yielding the CPU periodically via devmm_try_cond_resched().
 */
static void devmm_dma_desc_destroy_batch(struct devmm_svm_process *svm_proc, u32 key)
{
    struct devmm_dma_desc_node *node = NULL;
    u32 stamp = (u32)jiffies;
    u32 destroyed = 0;

    for (;;) {
        node = devmm_dma_desc_node_get_one_by_key(svm_proc, key);
        if (node == NULL) {
            break;
        }

        devmm_dma_desc_node_destroy(node);
        devmm_dma_desc_node_put(node); /* drop the lookup reference */
        devmm_try_cond_resched(&stamp);
        destroyed++;
    }

    devmm_drv_debug("Destroy dma_desc info. (num=%u)\n", destroyed);
}

/*
 * Destroy the single dma_desc node identified by (key, subkey).
 * Logs an error when no matching node exists (bad key pair, or the
 * node was already destroyed).
 */
static void devmm_dma_desc_destroy_one(struct devmm_svm_process *svm_proc, u32 key, u32 subkey)
{
    struct devmm_dma_desc_node *node = NULL;

    node = devmm_dma_desc_node_get(svm_proc, keys_to_rb_handle(key, subkey));
    if (node == NULL) {
        /* Log message typo fixed: "invaild" -> "invalid". */
        devmm_drv_err("Key is invalid or node has been destroyed. (pid=%d; key=%u; subkey=%u)\n",
            svm_proc->process_id.hostpid, key, subkey);
        return;
    }

    devmm_dma_desc_node_destroy(node);
    devmm_dma_desc_node_put(node); /* drop the lookup reference */
}

/* A concrete subkey targets one descriptor; the invalid-subkey sentinel means
 * "destroy all descriptors under this key". */
static bool is_destroy_one_dma_desc(u32 subkey)
{
    return (subkey != SVM_DMA_DESC_INVALID_SUB_KEY);
}

/*
 * Dispatch a destroy request: a concrete subkey removes one node, the
 * invalid-subkey sentinel drains every node under the key.
 */
static void devmm_dma_desc_destroy(struct devmm_svm_process *svm_proc, u32 key, u32 subkey)
{
    if (!is_destroy_one_dma_desc(subkey)) {
        devmm_dma_desc_destroy_batch(svm_proc, key);
        return;
    }

    devmm_dma_desc_destroy_one(svm_proc, key, subkey);
}

/*
 * Detach an arbitrary node from the rb-tree under the lock and return
 * it, or NULL when the tree is empty. Used to drain the tree at task
 * release time.
 */
static struct devmm_dma_desc_node *devmm_erase_one_dma_desc_node(struct devmm_dma_desc_node_rb_info *rb_info)
{
    struct rb_node *erased = NULL;

    spin_lock_bh(&rb_info->spinlock);
    erased = devmm_rb_erase_one_node(&rb_info->root, NULL);
    spin_unlock_bh(&rb_info->spinlock);

    if (erased == NULL) {
        return NULL;
    }
    return rb_entry(erased, struct devmm_dma_desc_node, task_node);
}

void devmm_dma_desc_nodes_destroy_by_task_release(struct devmm_svm_process *svm_proc)
{
    struct devmm_svm_proc_master *master_data = (struct devmm_svm_proc_master *)svm_proc->priv_data;
    struct devmm_dma_desc_node_rb_info *rb_info = &master_data->dma_desc_rb_info;
    struct devmm_dma_desc_node *node = NULL;
    u32 stamp = (u32)jiffies;
    u32 num = 0;

    while (1) {
        node = devmm_erase_one_dma_desc_node(rb_info);
        if (node == NULL) {
            break;
        }

        num++;
        devmm_dma_desc_node_destroy(node);
        devmm_try_cond_resched(&stamp);
    }

    if (num != 0) {
        devmm_drv_info("Destroy dma_desc nodes info. (destroyed_num=%u)\n", num);
    }
}

/*
 * Exported entry point: create a DMA descriptor set for the host process
 * identified by handle->pid covering addr_info's src/dst/size range.
 * On success fills dma_desc with the SQ DMA address and tail index.
 *
 * Returns 0 on success, -EINVAL for NULL arguments or a reserved subkey,
 * -ESRCH when the process is unknown, or the underlying creation error.
 */
int hal_kernel_svm_dma_desc_create(struct svm_dma_desc_addr_info *addr_info,
    struct svm_dma_desc_handle *handle, struct svm_dma_desc *dma_desc)
{
    struct devmm_svm_process_id process_id = {0};
    struct devmm_svm_process *svm_proc = NULL;
    int ret;

    /* Exported interface: validate the pointers before dereferencing them
     * (the old code read handle->pid in an initializer before any check). */
    if (addr_info == NULL || handle == NULL || dma_desc == NULL) {
        return -EINVAL;
    }

    /* The invalid-subkey sentinel is reserved for batch destroy. */
    if (handle->subkey == SVM_DMA_DESC_INVALID_SUB_KEY) {
        return -EINVAL;
    }

    process_id.hostpid = handle->pid;
    process_id.devid = 0;
    process_id.vfid = 0;

    svm_proc = devmm_svm_proc_get_by_process_id(&process_id);
    if (svm_proc == NULL) {
        return -ESRCH;
    }

    ret = devmm_dma_desc_create(svm_proc, addr_info, handle, dma_desc);
    devmm_svm_proc_put(svm_proc);
    return ret;
}
EXPORT_SYMBOL(hal_kernel_svm_dma_desc_create);

/*
 * Exported entry point: destroy the descriptor(s) named by handle —
 * one node for a concrete subkey, or all nodes under handle->key when
 * subkey is the invalid sentinel. May be called from tasklet context
 * (the resource teardown defers the DMA-link free via SRCU work when
 * in softirq). Silently returns when the process is unknown.
 */
void hal_kernel_svm_dma_desc_destroy(struct svm_dma_desc_handle *handle)
{
    struct devmm_svm_process_id process_id = {0};
    struct devmm_svm_process *svm_proc = NULL;

    /* Exported interface: guard against a NULL handle before dereferencing
     * it (the old code read handle->pid in an initializer unconditionally). */
    if (handle == NULL) {
        return;
    }

    process_id.hostpid = handle->pid;
    process_id.devid = 0;
    process_id.vfid = 0;

    svm_proc = devmm_svm_proc_get_by_process_id(&process_id);
    if (svm_proc == NULL) {
        return;
    }

    devmm_dma_desc_destroy(svm_proc, handle->key, handle->subkey);
    devmm_svm_proc_put(svm_proc);
}
EXPORT_SYMBOL(hal_kernel_svm_dma_desc_destroy);

