/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2023-10-10
 */

#include "svm_msg_client.h"
#include "svm_define.h"
#include "devmm_channel.h"
#include "svm_rbtree.h"
#include "svm_dev_res_mng.h"
#include "svm_master_proc_mng.h"
#include "svm_phy_addr_blk_mng.h"
#include "svm_master_mem_create.h"
#include "svm_master_mem_share.h"

/* One pid that the exporter has authorized to import a shared memory blk. */
struct devmm_pid_list_node {
    struct rb_node node; /* linkage into devmm_pid_list_mng.rbtree, keyed by pid */
    int pid;
    u64 set_time; /* ns timestamp of the latest set-pid call; used to detect recycled pids */
    bool is_share[DEVMM_MAX_DEVICE_NUM]; /* per-device flag: true while this pid has an active import */
    u32 permission; /* NOTE(review): not read or written anywhere in this file -- confirm external users */
};

/* Container for the authorized-pid tree of one agent blk. */
struct devmm_pid_list_mng {
    struct rw_semaphore rw_sem; /* protects rbtree and pid_cnt */
    struct rb_root rbtree; /* devmm_pid_list_node entries, keyed by pid */
    u32 pid_cnt; /* node count, capped at DEVMM_SHARE_MEM_MAX_PID_CNT */
};

/* Snapshot of an exported memory blk's attributes, filled from the export reply. */
struct devmm_share_mem_info {
    int id; /* exporter-side blk id */
    int share_id; /* device-assigned share handle */
    int hostpid; /* exporting process host pid */
    u32 devid; /* physical device the memory lives on */

    u64 pg_num; /* for record p2p msg alloc size */
    u32 module_id;
    u32 side;
    u32 pg_type;
    u32 mem_type;
};

/*
 * Per-device bookkeeping for one exported memory blk. Lives in the device
 * resource manager's rbtree keyed by share_id; refcounted via kref, with an
 * additional occupied_cnt that counts the exporter plus active importers.
 */
struct devmm_share_phy_addr_agent_blk {
    struct rb_node dev_res_mng_node; /* linkage in share_agent_blk_mng.rbtree, keyed by share_id */

    struct kref ref; /* lookup references (get/put) */
    int share_id;

    u32 devid;
    int id;

    u64 pg_num; /* for record p2p msg alloc size */
    u32 module_id;
    u32 side;
    u32 type;
    u32 pg_type;
    u32 mem_type;

    spinlock_t status_lock; /* protects occupied_cnt and status */
    u32 occupied_cnt; /* exporter + importers currently holding the blk */
    u32 status; /* SHARE_AGENT_BLK_STATUS_* */

    int export_pid; /* host pid of the exporting process */
    struct devmm_pid_list_mng pid_list_mng; /* pids allowed to import this blk */
};

/* Agent blk lifecycle states, guarded by status_lock. */
#define SHARE_AGENT_BLK_STATUS_IDLE 0 /* freshly initialized, no holder yet */
#define SHARE_AGENT_BLK_STATUS_OCCUPIED 1 /* exporter and/or importers hold the blk */
#define SHARE_AGENT_BLK_STATUS_RELEASING 2 /* occupied_cnt hit zero; teardown in progress */

static void devmm_share_agent_blk_put(struct devmm_share_phy_addr_agent_blk *blk);
static int devmm_share_mem_release(u32 share_devid, int share_id, u32 total_pg_num, u32 free_type);

/* rbtree key extractor: a pid_list_node is keyed by its pid. */
static u64 rb_handle_of_pid_list_node(struct rb_node *node)
{
    return (u64)(rb_entry(node, struct devmm_pid_list_node, node)->pid);
}

/*
 * Add @pid to the allow-list, or just refresh its set_time if it is already
 * present. The node is allocated before taking the write lock to keep the
 * critical section short; the spare node is freed on the non-insert paths.
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when the
 * list has reached DEVMM_SHARE_MEM_MAX_PID_CNT entries.
 */
static int devmm_pid_list_insert(struct devmm_pid_list_mng *mng, int pid)
{
    u64 current_time = (u64)ktime_to_ns(ktime_get());
    struct devmm_pid_list_node *new_node = NULL;
    struct devmm_pid_list_node *exist_node = NULL;
    struct rb_node *node = NULL;
    u32 i;

    new_node = kvzalloc(sizeof(struct devmm_pid_list_node), GFP_KERNEL | __GFP_ACCOUNT);
    if (new_node == NULL) {
        devmm_drv_err("Alloc pid_list_node fail.\n");
        return -ENOMEM;
    }
    new_node->pid = pid;
    new_node->set_time = current_time;
    for (i = 0; i < DEVMM_MAX_DEVICE_NUM; i++) {
        new_node->is_share[i] = false;
    }

    /* same pid insert, should update set time. */
    down_write(&mng->rw_sem);
    if (mng->pid_cnt >= DEVMM_SHARE_MEM_MAX_PID_CNT) {
        devmm_drv_err("Pid_num is larger than 32768. (pid_num=%u; pid=%d)\n", mng->pid_cnt, pid);
        up_write(&mng->rw_sem);
        kvfree(new_node);
        return -EINVAL;
    }
    node = devmm_rb_search(&mng->rbtree, (u64)pid, rb_handle_of_pid_list_node);
    if (node != NULL) {
        /* Already tracked: bump the timestamp and discard the spare node. */
        exist_node = rb_entry(node, struct devmm_pid_list_node, node);
        exist_node->set_time = current_time;
        up_write(&mng->rw_sem);
        kvfree(new_node);
        devmm_drv_debug("Update pid set time. (pid=%d)\n", pid);
        return 0;
    }

    mng->pid_cnt++;
    (void)devmm_rb_insert(&mng->rbtree, &new_node->node, rb_handle_of_pid_list_node);
    up_write(&mng->rw_sem);
    return 0;
}

/*
 * Set (import) or clear (release) the per-device share flag for @pid.
 * On import, rejects a double import on the same device (-EEXIST) and a
 * pid whose allow-list entry predates the importing process's start time
 * (-EACCES) -- that guards against a recycled pid inheriting a stale grant.
 * Returns -EACCES when the pid is not in the allow-list at all.
 * NOTE(review): @devid indexes is_share[] without a bounds check here;
 * callers appear to validate devid upstream -- confirm.
 */
static int devmm_pid_set_share_status(struct devmm_pid_list_mng *mng, int pid, u32 devid, bool is_share)
{
    u64 start_time = devmm_get_tgid_start_time();
    struct devmm_pid_list_node *list_node = NULL;
    struct rb_node *node = NULL;

    down_write(&mng->rw_sem);
    node = devmm_rb_search(&mng->rbtree, (u64)pid, rb_handle_of_pid_list_node);
    if (node == NULL) {
        up_write(&mng->rw_sem);
        return -EACCES;
    }

    list_node = rb_entry(node, struct devmm_pid_list_node, node);
    if (is_share) {
        if (list_node->is_share[devid]) {
            up_write(&mng->rw_sem);
            devmm_drv_err("Can not import multi times. (pid=%d; devid=%u)\n", pid, devid);
            return -EEXIST;
        }

        /* Entry set before this process started => stale grant for a recycled pid. */
        if (list_node->set_time < start_time) {
            up_write(&mng->rw_sem);
            devmm_drv_err("Time check fail. (thread=%d; thread_start_time=%llu)\n", pid, start_time);
            return -EACCES;
        }
    }

    list_node->is_share[devid] = is_share;
    up_write(&mng->rw_sem);
    return 0;
}

/* Remove @pid from the allow-list and free its node; no-op if absent. */
static void devmm_pid_list_erase(struct devmm_pid_list_mng *mng, int pid)
{
    struct rb_node *found = NULL;

    down_write(&mng->rw_sem);
    found = devmm_rb_search(&mng->rbtree, (u64)pid, rb_handle_of_pid_list_node);
    if (found != NULL) {
        struct devmm_pid_list_node *entry = rb_entry(found, struct devmm_pid_list_node, node);

        mng->pid_cnt--;
        (void)devmm_rb_erase(&mng->rbtree, found);
        kvfree(entry);
    }
    up_write(&mng->rw_sem);
}

/* rbtree bulk-erase callback: free one pid_list_node. */
static void devmm_pid_list_release_func(struct rb_node *node)
{
    kvfree(rb_entry(node, struct devmm_pid_list_node, node));
}

/* Drop every node in the allow-list; release_func frees each entry. */
static void devmm_pid_list_erase_all(struct devmm_pid_list_mng *mng)
{
    down_write(&mng->rw_sem);
    devmm_rb_erase_all_node(&mng->rbtree, devmm_pid_list_release_func);
    up_write(&mng->rw_sem);
}

/* Initialize the occupancy-tracking state of a freshly allocated blk. */
static void devmm_share_agent_blk_status_init(struct devmm_share_phy_addr_agent_blk *blk)
{
    blk->status = SHARE_AGENT_BLK_STATUS_IDLE;
    blk->occupied_cnt = 0;
    spin_lock_init(&blk->status_lock);
}

/*
 * Mark the blk occupied and bump the holder count.
 * Lock-free variant: caller must either hold status_lock or own the blk
 * exclusively (e.g. during creation, before it is published in the rbtree).
 */
static void _devmm_share_agent_blk_occupied_cnt_inc(struct devmm_share_phy_addr_agent_blk *blk)
{
    blk->status = SHARE_AGENT_BLK_STATUS_OCCUPIED;
    blk->occupied_cnt++;
}

/*
 * Take an occupancy reference on the blk.
 * Fails with -EBUSY once the blk has entered the RELEASING state so that
 * no new importer can grab a blk that is being torn down.
 */
static int devmm_share_agent_blk_occupied_cnt_inc(struct devmm_share_phy_addr_agent_blk *blk)
{
    spin_lock(&blk->status_lock);
    if (blk->status == SHARE_AGENT_BLK_STATUS_RELEASING) {
        /* Bug fix: status_lock was previously leaked on this error path. */
        spin_unlock(&blk->status_lock);
        return -EBUSY;
    }
    _devmm_share_agent_blk_occupied_cnt_inc(blk);
    spin_unlock(&blk->status_lock);
    return 0;
}

/*
 * Drop one occupancy reference. When the count reaches zero the blk is
 * switched to RELEASING (blocking further imports) and *is_blk_no_occupied
 * is set so the caller performs the actual teardown outside the spinlock.
 */
static void devmm_share_agent_blk_occupied_cnt_dec(struct devmm_share_phy_addr_agent_blk *blk,
    bool *is_blk_no_occupied)
{
    spin_lock(&blk->status_lock);
    blk->occupied_cnt--;
    if (blk->occupied_cnt == 0) {
        blk->status = SHARE_AGENT_BLK_STATUS_RELEASING;
        *is_blk_no_occupied = true;
    }
    spin_unlock(&blk->status_lock);
}

/* rbtree key extractor: a share_id_map_node is keyed by its local id. */
static u64 rb_handle_of_share_id_map_node(struct rb_node *node)
{
    return (u64)(rb_entry(node, struct devmm_share_id_map_node, proc_node)->shid_map_node_info.id);
}

/*
 * Detach one map node matching @condition (NULL condition = any node) from
 * the per-process tree and return it, or NULL when the tree is empty.
 * Ownership of the returned node passes to the caller.
 */
static struct devmm_share_id_map_node *devmm_erase_one_share_id_map_node(struct devmm_share_id_map_mng *mng,
    rb_erase_condition condition)
{
    struct rb_node *node = NULL;

    down_write(&mng->sem);
    node = devmm_rb_erase_one_node(&mng->rbtree, condition);
    up_write(&mng->sem);

    return ((node == NULL) ? NULL : rb_entry(node, struct devmm_share_id_map_node, proc_node));
}

/*
 * Process-exit cleanup: for every device, drain the process's share-id map.
 * Each mapping releases its hold on the underlying agent blk (with pid-list
 * erase, since the process is going away) before the map node itself is
 * destroyed. Periodically yields the CPU via devmm_try_cond_resched.
 */
void devmm_share_id_map_node_destroy_all(struct devmm_svm_process *svm_proc)
{
    struct devmm_svm_proc_master *master_data = svm_proc->priv_data;
    struct devmm_share_id_map_mng *mng = NULL;
    struct devmm_share_id_map_node *map_node = NULL;
    u32 stamp = (u32)jiffies;
    u32 devid;

    for (devid = 0; devid < DEVMM_MAX_DEVICE_NUM; devid++) {
        mng = &master_data->share_id_map_mng[devid];

        while (1) {
            map_node = devmm_erase_one_share_id_map_node(mng, NULL);
            if (map_node == NULL) {
                break;
            }

            (void)devmm_share_agent_blk_put_with_share_id(map_node->shid_map_node_info.share_devid,
                map_node->shid_map_node_info.share_id, map_node->hostpid, map_node->shid_map_node_info.devid, true);
            devmm_share_id_map_node_destroy(svm_proc, map_node->shid_map_node_info.devid, map_node);
            devmm_try_cond_resched(&stamp);
        }
    }
}

/* kref release callback: free the map node once the last reference drops. */
static void devmm_share_id_map_node_release(struct kref *kref)
{
    kvfree(container_of(kref, struct devmm_share_id_map_node, ref));
}

/* Drop a lookup reference on a map node obtained via devmm_share_id_map_node_get. */
void devmm_share_id_map_node_put(struct devmm_share_id_map_node *map_node)
{
    kref_put(&map_node->ref, devmm_share_id_map_node_release);
}

/*
 * Look up the map node for (@devid, @id) in the calling process's tree.
 * Takes a kref on the node while still under the read lock, so the caller
 * must balance with devmm_share_id_map_node_put. Returns NULL if absent.
 */
struct devmm_share_id_map_node *devmm_share_id_map_node_get(struct devmm_svm_process *svm_proc, u32 devid, int id)
{
    struct devmm_svm_proc_master *master_data = svm_proc->priv_data;
    struct devmm_share_id_map_mng *mng = &master_data->share_id_map_mng[devid];
    struct devmm_share_id_map_node *map_node = NULL;
    struct rb_node *node = NULL;

    down_read(&mng->sem);
    node = devmm_rb_search(&mng->rbtree, (u64)id, rb_handle_of_share_id_map_node);
    if (node != NULL) {
        map_node = rb_entry(node, struct devmm_share_id_map_node, proc_node);
        kref_get(&map_node->ref);
    }
    up_read(&mng->sem);
    return map_node;
}

/*
 * Unlink @map_node from the process's per-device tree and drop the
 * creation reference; the node is freed once all lookup refs are gone.
 */
void devmm_share_id_map_node_destroy(struct devmm_svm_process *svm_proc,
    u32 devid, struct devmm_share_id_map_node *map_node)
{
    struct devmm_svm_proc_master *master_data = svm_proc->priv_data;
    struct devmm_share_id_map_mng *mng = &master_data->share_id_map_mng[devid];

    down_write(&mng->sem);
    devmm_rb_erase(&mng->rbtree, &map_node->proc_node);
    up_write(&mng->sem);

    kref_put(&map_node->ref, devmm_share_id_map_node_release);
}

/*
 * Allocate a map node recording the (local id -> share_id) mapping for the
 * calling process and insert it into the per-device tree. Insertion fails
 * (duplicate id => repeated export/import) with the node freed and NULL
 * returned; on success the node carries one creation reference.
 */
static struct devmm_share_id_map_node *devmm_share_id_map_node_create(struct devmm_svm_process *svm_proc,
    struct devmm_shid_map_node_info *info, u32 blk_type)
{
    struct devmm_svm_proc_master *master_data = svm_proc->priv_data;
    struct devmm_share_id_map_mng *mng = &master_data->share_id_map_mng[info->devid];
    struct devmm_share_id_map_node *map_node = NULL;
    int ret;

    map_node = kvzalloc(sizeof(struct devmm_share_id_map_node), GFP_KERNEL | __GFP_ACCOUNT);
    if (map_node == NULL) {
        devmm_drv_err("Alloc devmm_share_id_map_node fail.\n");
        return NULL;
    }
    kref_init(&map_node->ref);
    map_node->shid_map_node_info = *info;
    map_node->blk_type = blk_type;
    map_node->hostpid = svm_proc->process_id.hostpid;

    down_write(&mng->sem);
    ret = devmm_rb_insert(&mng->rbtree, &map_node->proc_node, rb_handle_of_share_id_map_node);
    up_write(&mng->sem);
    if (ret != 0) {
        devmm_drv_err("Current proc export or import repeatly. (devid=%u; id=%d; blk_type=%u)\n",
            info->devid, info->id, blk_type);
        kvfree(map_node);
        map_node = NULL;
    }
    return map_node;
}

static u64 rb_handle_of_share_agent_blk_node(struct rb_node *node)
{
    struct devmm_share_phy_addr_agent_blk *blk = rb_entry(node, struct devmm_share_phy_addr_agent_blk,
        dev_res_mng_node);
    return (u64)blk->share_id;
}

/* rbtree bulk-erase callback: free one agent blk. */
static void devmm_share_agent_blk_release_func(struct rb_node *node)
{
    kvfree(rb_entry(node, struct devmm_share_phy_addr_agent_blk, dev_res_mng_node));
}

/* Device-teardown path: free every agent blk still registered on the device. */
void devmm_share_agent_blk_destroy_all(struct devmm_share_phy_addr_agent_blk_mng *blk_mng)
{
    down_write(&blk_mng->rw_sem);
    devmm_rb_erase_all_node(&blk_mng->rbtree, devmm_share_agent_blk_release_func);
    up_write(&blk_mng->rw_sem);
}

/* kref release callback: free the agent blk once the last reference drops. */
static void devmm_share_agent_blk_release(struct kref *kref)
{
    kvfree(container_of(kref, struct devmm_share_phy_addr_agent_blk, ref));
}

/*
 * Unlink @blk from the device's agent-blk tree and drop the creation
 * reference; the blk is freed once all lookup refs (from _get) are gone.
 */
static void devmm_share_agent_blk_destroy(struct devmm_share_phy_addr_agent_blk_mng *blk_mng,
    struct devmm_share_phy_addr_agent_blk *blk)
{
    down_write(&blk_mng->rw_sem);
    devmm_rb_erase(&blk_mng->rbtree, &blk->dev_res_mng_node);
    up_write(&blk_mng->rw_sem);

    kref_put(&blk->ref, devmm_share_agent_blk_release);
}

/*
 * Populate a freshly allocated agent blk from the export info: identity
 * and memory attributes, occupancy state, and an empty importer pid list.
 * The current (exporting) process becomes the blk's export_pid.
 */
static void devmm_share_agent_blk_node_init(struct devmm_share_mem_info *info,
    struct devmm_share_phy_addr_agent_blk *blk)
{
    kref_init(&blk->ref);

    /* identity and memory attributes copied from the export reply */
    blk->devid = info->devid;
    blk->id = info->id;
    blk->share_id = info->share_id;
    blk->module_id = info->module_id;
    blk->side = info->side;
    blk->pg_num = info->pg_num;
    blk->pg_type = info->pg_type;
    blk->mem_type = info->mem_type;

    devmm_share_agent_blk_status_init(blk);

    /* importer tracking starts empty; the exporter is the current process */
    blk->export_pid = devmm_get_current_pid();
    init_rwsem(&blk->pid_list_mng.rw_sem);
    blk->pid_list_mng.rbtree = RB_ROOT;
    blk->pid_list_mng.pid_cnt = 0;
}

/*
 * Allocate an agent blk for a newly exported memory and register it in the
 * device's share_agent_blk_mng tree, keyed by share_id. The exporter's own
 * occupancy reference is taken (lock-free: the blk is not yet published)
 * before insertion. Returns 0, -ENOMEM, -ENODEV, or the insert error when
 * the share_id already exists (blk is freed in that case).
 */
static int devmm_share_agent_blk_create(struct devmm_share_mem_info *info)
{
    struct devmm_share_phy_addr_agent_blk *blk = NULL;
    struct devmm_dev_res_mng *dev_res_mng = NULL;
    struct svm_id_inst id_inst;
    int ret;

    blk = kvzalloc(sizeof(struct devmm_share_phy_addr_agent_blk), GFP_KERNEL | __GFP_ACCOUNT);
    if (blk == NULL) {
        devmm_drv_err("Alloc devmm_share_phy_addr_agent_blk fail.\n");
        return -ENOMEM;
    }
    devmm_share_agent_blk_node_init(info, blk);

    svm_id_inst_pack(&id_inst, blk->devid, 0);
    dev_res_mng = devmm_dev_res_mng_get(&id_inst);
    if (dev_res_mng == NULL) {
        devmm_drv_err("Get dev res mng fail. (devid=%u)\n", blk->devid);
        kvfree(blk);
        return -ENODEV;
    }

    /* exporter holds the first occupancy ref; blk not yet visible to others */
    _devmm_share_agent_blk_occupied_cnt_inc(blk);
    down_write(&dev_res_mng->share_agent_blk_mng.rw_sem);
    ret = devmm_rb_insert(&dev_res_mng->share_agent_blk_mng.rbtree, &blk->dev_res_mng_node,
        rb_handle_of_share_agent_blk_node);
    up_write(&dev_res_mng->share_agent_blk_mng.rw_sem);
    devmm_dev_res_mng_put(dev_res_mng);
    if (ret != 0) {
        devmm_drv_err("Share handle already exists. (devid=%u; share_id=%d)\n",
            blk->devid, blk->share_id);
        kvfree(blk);
    }
    return ret;
}

/*
 * Look up the agent blk for (@devid, @share_id) and take a kref on it while
 * still under the tree's read lock; caller must balance with
 * devmm_share_agent_blk_put. Returns NULL when the device or handle is unknown.
 */
static struct devmm_share_phy_addr_agent_blk *devmm_share_agent_blk_get(u32 devid, int share_id)
{
    struct devmm_share_phy_addr_agent_blk *blk = NULL;
    struct devmm_dev_res_mng *dev_res_mng = NULL;
    struct rb_node *node = NULL;
    struct svm_id_inst id_inst;

    svm_id_inst_pack(&id_inst, devid, 0);
    dev_res_mng = devmm_dev_res_mng_get(&id_inst);
    if (dev_res_mng == NULL) {
        devmm_drv_err("Get dev res mng fail. (devid=%u)\n", devid);
        return NULL;
    }

    down_read(&dev_res_mng->share_agent_blk_mng.rw_sem);
    node = devmm_rb_search(&dev_res_mng->share_agent_blk_mng.rbtree, (u64)share_id,
        rb_handle_of_share_agent_blk_node);
    if (node == NULL) {
        /* Bug fix: share_id is a signed int, so print it with %d (was %u). */
        devmm_drv_err("Share handle doesn't exist. (share_id=%d; devid=%u)\n", share_id, devid);
        goto get_from_dev_fail;
    }

    blk = rb_entry(node, struct devmm_share_phy_addr_agent_blk, dev_res_mng_node);
    kref_get(&blk->ref);

get_from_dev_fail:
    up_read(&dev_res_mng->share_agent_blk_mng.rw_sem);
    devmm_dev_res_mng_put(dev_res_mng);
    return blk;
}

/* Drop a lookup reference taken by devmm_share_agent_blk_get. */
static void devmm_share_agent_blk_put(struct devmm_share_phy_addr_agent_blk *blk)
{
    kref_put(&blk->ref, devmm_share_agent_blk_release);
}

/*
 * Release one occupancy reference on the agent blk identified by
 * (@share_devid, @share_id) on behalf of @hostpid: clears the pid's
 * per-device share flag, optionally removes the pid from the allow-list
 * (@need_pid_erase, used on process exit for non-exporters), and when the
 * last occupant leaves, frees the device-side memory and destroys the blk.
 * Returns 0 on success, -ENXIO when the device or handle is unknown.
 */
int devmm_share_agent_blk_put_with_share_id(u32 share_devid, int share_id, int hostpid,
    u32 devid, bool need_pid_erase)
{
    struct devmm_share_phy_addr_agent_blk *blk = NULL;
    struct devmm_dev_res_mng *dev_res_mng = NULL;
    struct svm_id_inst id_inst;
    bool is_blk_no_occupied = false;

    devmm_drv_debug("Share agent blk release. (devid=%u; share_id=%d)\n", share_devid, share_id);
    svm_id_inst_pack(&id_inst, share_devid, 0);
    dev_res_mng = devmm_dev_res_mng_get(&id_inst);
    if (dev_res_mng == NULL) {
        devmm_drv_err("Get dev res mng fail. (devid=%u)\n", share_devid);
        return -ENXIO;
    }

    blk = devmm_share_agent_blk_get(share_devid, share_id);
    if (blk == NULL) {
        devmm_dev_res_mng_put(dev_res_mng);
        /* Bug fix: share_id is a signed int, so print it with %d (was %u). */
        devmm_drv_err("Share handle doesn't exist. (share_id=%d; devid=%u)\n", share_id, share_devid);
        return -ENXIO;
    }

    /* Best-effort: the exporter itself is not in the pid allow-list. */
    (void)devmm_pid_set_share_status(&blk->pid_list_mng, hostpid, devid, false);
    if (need_pid_erase && (hostpid != blk->export_pid)) {
        /* only erase in process release */
        devmm_pid_list_erase(&blk->pid_list_mng, hostpid);
    }

    devmm_share_agent_blk_occupied_cnt_dec(blk, &is_blk_no_occupied);
    if (is_blk_no_occupied) {
        /* Last occupant gone: free device memory and tear the blk down. */
        int ret = devmm_share_mem_release(blk->devid, blk->share_id, (u32)blk->pg_num, SVM_PYH_ADDR_BLK_NORMAL_FREE);
        if (ret != 0) {
            devmm_drv_err("Share mem release fail. (devid=%u; share_id=%d)\n", blk->devid, blk->share_id);
        }
        devmm_pid_list_erase_all(&blk->pid_list_mng);
        devmm_share_agent_blk_destroy(&dev_res_mng->share_agent_blk_mng, blk);
    }

    devmm_share_agent_blk_put(blk);
    devmm_dev_res_mng_put(dev_res_mng);
    return 0;
}

/*
 * Translate a container-visible logical device id into the physical device
 * id via devmm_container_vir_to_phs_devid. Returns -ENODEV on out-of-range
 * input, otherwise the translator's result.
 * NOTE(review): @svm_proc is currently unused here -- kept for interface
 * symmetry with the other ioctl helpers.
 */
static int devmm_get_share_devid(struct devmm_svm_process *svm_proc, u32 share_logic_devid, u32 *share_devid)
{
    u32 share_vfid;

    if (share_logic_devid >= DEVMM_MAX_DEVICE_NUM) {
        devmm_drv_err("Invalid devid. (share_logic_devid=%u)\n", share_logic_devid);
        return -ENODEV;
    }
    return devmm_container_vir_to_phs_devid(share_logic_devid, share_devid, &share_vfid);
}

/*
 * Drive the device-side import of a shared blk, DEVMM_PAGE_NUM_PER_MSG
 * pages per channel message. The first message creates a new blk on the
 * device (is_create_to_new_blk); later messages append to it. On a partial
 * failure, everything created so far is rolled back with
 * SVM_PYH_ADDR_BLK_FREE_NO_PAGE. On success *id holds the importer-side id
 * returned in the (reused) reply message.
 */
static int devmm_agent_mem_import(struct devmm_svm_process *svm_proc, struct devmm_devid *devids,
    struct devmm_mem_import_para *para, struct devmm_share_phy_addr_agent_blk *blk, int *id)
{
    struct devmm_chan_mem_import msg = {{{0}}};
    u64 total_pg_num, created_num, tmp_num;
    int ret;

    msg.head.msg_id = DEVMM_CHAN_MEM_IMPORT_H2D_ID;
    msg.head.process_id.hostpid = svm_proc->process_id.hostpid;
    msg.head.process_id.vfid = (u16)devids->vfid;
    msg.head.dev_id = (u16)devids->devid;
    msg.share_id = para->share_id;
    msg.host_did = blk->devid;
    msg.total_pg_num = (u32)blk->pg_num;
    msg.module_id = blk->module_id;
    msg.pg_type = blk->pg_type;
    msg.mem_type = blk->mem_type;

    total_pg_num = blk->pg_num;
    for (created_num = 0; created_num < total_pg_num; created_num += tmp_num) {
        tmp_num = min((u64)DEVMM_PAGE_NUM_PER_MSG, (total_pg_num - created_num));

        msg.to_create_pg_num = (u32)tmp_num;
        msg.is_create_to_new_blk = (created_num == 0) ? 1 : 0;
        ret = devmm_chan_msg_send(&msg, sizeof(struct devmm_chan_mem_import), sizeof(struct devmm_chan_mem_import));
        if (ret != 0) {
            devmm_drv_err("Import msg send failed. (ret=%d; hostpid=%d; devid=%u; id=%d; host_did=%u)\n",
                ret, svm_proc->process_id.hostpid, devids->devid, para->share_id, blk->devid);
            goto agent_mem_release;
        }
        /* reply carries the importer-side id; same value on every chunk */
        *id = msg.id;
    }
    return 0;

agent_mem_release:
    if (created_num != 0) {
        /* roll back the chunks that were created before the failure */
        (void)_devmm_agent_mem_release(svm_proc, devids, total_pg_num, *id, SVM_PYH_ADDR_BLK_FREE_NO_PAGE);
    }
    return ret;
}

/* Fill a shid map-node info struct from its four components. */
static void devmm_shid_map_node_info_pack(struct devmm_shid_map_node_info *info, u32 devid, int id,
    u32 share_devid, int share_id)
{
    info->share_id = share_id;
    info->share_devid = share_devid;
    info->id = id;
    info->devid = devid;
}

/*
 * Import ioctl: attach the calling process to an exported shared blk.
 * Steps: resolve the physical share device, look up the agent blk, reject
 * self-import by the exporter, take an occupancy ref (fails once the blk is
 * RELEASING), perform the device-side import, record the id<->share_id
 * mapping, and mark the caller's per-device share flag. Each failure path
 * unwinds exactly the steps completed before it. On success para->id,
 * module_id, pg_num and side are returned to user space.
 */
int devmm_ioctl_mem_import(struct devmm_svm_process *svm_proc, struct devmm_ioctl_arg *arg)
{
    struct devmm_mem_import_para *para = &arg->data.mem_import_para;
    struct devmm_share_phy_addr_agent_blk *blk = NULL;
    struct devmm_share_id_map_node *map_node = NULL;
    struct devmm_shid_map_node_info info;
    bool is_blk_no_occupied = false;
    u32 share_devid;
    int ret, id = -1;

    devmm_drv_debug("Mem import enter. (devid=%u; share_logic_devid=%u; share_id=%d)\n",
        arg->head.devid, para->share_devid, para->share_id);
    ret = devmm_get_share_devid(svm_proc, para->share_devid, &share_devid);
    if (ret != 0) {
        return ret;
    }

    blk = devmm_share_agent_blk_get(share_devid, para->share_id);
    if (blk == NULL) {
        return -ENXIO;
    }

    /* exporters must not import their own blk */
    if (blk->export_pid == devmm_get_current_pid()) {
        devmm_drv_err("Export process can not import. (pid=%d)\n", blk->export_pid);
        ret = -EACCES;
        goto share_agent_blk_put;
    }

    ret = devmm_share_agent_blk_occupied_cnt_inc(blk);
    if (ret != 0) {
        devmm_drv_err("Share handle is released. (share_id=%u; share_logic_devid=%u)\n",
            para->share_id, para->share_devid);
        goto share_agent_blk_put;
    }

    ret = devmm_agent_mem_import(svm_proc, &arg->head, para, blk, &id);
    if (ret != 0) {
        goto mem_import_fail;
    }

    devmm_shid_map_node_info_pack(&info, arg->head.devid, id, share_devid, blk->share_id);
    map_node = devmm_share_id_map_node_create(svm_proc, &info, SVM_PYH_ADDR_BLK_IMPORT_TYPE);
    if (map_node == NULL) {
        ret = -EINVAL;
        goto share_id_map_node_fail;
    }

    /* fails unless the exporter pre-authorized this pid via set-pid */
    ret = devmm_pid_set_share_status(&blk->pid_list_mng, devmm_get_current_pid(), arg->head.devid, true);
    if (ret != 0) {
        devmm_drv_err("Current process not add in pid list. (pid=%d)\n", devmm_get_current_pid());
        goto set_share_status_fail;
    }

    para->id = id;
    para->module_id = blk->module_id;
    para->pg_num = blk->pg_num;
    para->side = blk->side;
    devmm_share_agent_blk_put(blk);
    devmm_drv_debug("Mem import success. (devid=%u; share_devid=%u; share_id=%d; id=%d)\n",
        arg->head.devid, share_devid, para->share_id, id);
    return 0;

set_share_status_fail:
    devmm_share_id_map_node_destroy(svm_proc, info.devid, map_node);
share_id_map_node_fail:
    (void)_devmm_agent_mem_release(svm_proc, &arg->head, blk->pg_num, id, SVM_PYH_ADDR_BLK_FREE_NO_PAGE);
mem_import_fail:
    devmm_share_agent_blk_occupied_cnt_dec(blk, &is_blk_no_occupied);
share_agent_blk_put:
    devmm_share_agent_blk_put(blk);
    return ret;
}

/* Assemble a share-mem info record from the export request and its reply. */
static void devmm_share_mem_info_pack(struct devmm_share_mem_info *info,
    struct devmm_devid *devids, struct devmm_mem_export_para *para, struct devmm_chan_mem_export *msg)
{
    /* identity: who exported what, and where */
    info->devid = devids->devid;
    info->hostpid = msg->head.process_id.hostpid;
    info->id = para->id;
    info->share_id = msg->share_id;

    /* memory attributes reported by the device side */
    info->mem_type = msg->mem_type;
    info->pg_type = msg->pg_type;
    info->module_id = msg->module_id;
    info->side = msg->side;
    info->pg_num = msg->pg_num;
}

/*
 * Send the export request for blk @para->id to the device; the reply
 * message (written in place by devmm_chan_msg_send) carries the assigned
 * share_id and memory attributes, which are packed into @info on success.
 */
static int devmm_agent_mem_export(struct devmm_svm_process *svm_proc, struct devmm_devid *devids,
    struct devmm_mem_export_para *para, struct devmm_share_mem_info *info)
{
    struct devmm_chan_mem_export msg = {{{0}}};
    int ret;

    msg.head.msg_id = DEVMM_CHAN_MEM_EXPORT_H2D_ID;
    msg.head.process_id.hostpid = svm_proc->process_id.hostpid;
    msg.head.process_id.vfid = (u16)devids->vfid;
    msg.head.dev_id = (u16)devids->devid;
    msg.id = para->id;
    ret = devmm_chan_msg_send(&msg, sizeof(struct devmm_chan_mem_export), sizeof(struct devmm_chan_mem_export));
    if (ret != 0) {
        devmm_drv_err("Msg send failed. (ret=%d; hostpid=%d; devid=%u; id=%d)\n",
            ret, info->hostpid, info->devid, info->id);
        return ret;
    }

    devmm_share_mem_info_pack(info, devids, para, &msg);
    return 0;
}

int devmm_ioctl_mem_export(struct devmm_svm_process *svm_proc, struct devmm_ioctl_arg *arg)
{
    struct devmm_mem_export_para *para = &arg->data.mem_export_para;
    struct devmm_share_id_map_node *map_node = NULL;
    struct devmm_share_mem_info info = {0};
    struct devmm_shid_map_node_info node_info;
    int ret;

    devmm_drv_debug("Mem export enter. (logic_devid=%u; devid=%u; id=%d)\n",
        arg->head.logical_devid, arg->head.devid, para->id);
    ret = devmm_agent_mem_export(svm_proc, &arg->head, para, &info);
    if (ret != 0) {
        return ret;
    }

    devmm_shid_map_node_info_pack(&node_info, info.devid, info.id, info.devid, info.share_id);
    map_node = devmm_share_id_map_node_create(svm_proc, &node_info, SVM_PYH_ADDR_BLK_EXPORT_TYPE);
    if (map_node == NULL) {
#ifndef EMU_ST
        (void)devmm_share_mem_release(info.devid, info.share_id, (u32)info.pg_num,
            SVM_PYH_ADDR_BLK_FREE_NO_PAGE);
        return ret;
#endif
    }

    ret = devmm_share_agent_blk_create(&info);
    if (ret != 0) {
#ifndef EMU_ST
        devmm_share_id_map_node_destroy(svm_proc, info.devid, map_node);
        (void)devmm_share_mem_release(info.devid, info.share_id, (u32)info.pg_num,
            SVM_PYH_ADDR_BLK_FREE_NO_PAGE);
        return ret;
#endif
    }
    devmm_drv_debug("Mem export success. (hostpid=%d; devid=%u; id=%d; share_id=%d)\n",
        info.hostpid, info.devid, info.id, info.share_id);
    para->share_id = info.share_id;
    return 0;
}

/* Ask the device side to free the pages of shared blk @share_id. */
static int devmm_share_mem_release(u32 share_devid, int share_id, u32 total_pg_num, u32 free_type)
{
    struct devmm_chan_mem_release msg = {{{0}}};

    msg.head.dev_id = (u16)share_devid;
    msg.head.msg_id = DEVMM_CHAN_SHARE_MEM_RELEASE_H2D_ID;
    msg.to_free_pg_num = total_pg_num;
    msg.free_type = free_type;
    msg.id = share_id;

    return devmm_agent_mem_release_public(&msg);
}

/*
 * Validate the user-supplied set-pid parameters: a non-zero pid count,
 * within the DEVMM_SHARE_MEM_MAX_PID_CNT cap, and a non-NULL user pointer.
 * Returns 0 when valid, -EINVAL otherwise.
 */
static int devmm_set_pid_para_check(struct devmm_mem_set_pid_para *para)
{
    if (para->pid_num == 0) {
        devmm_drv_err("Pid_num is zero.\n");
        return -EINVAL;
    }

    if (para->pid_num > DEVMM_SHARE_MEM_MAX_PID_CNT) {
        devmm_drv_err("Pid_num is invalid. (pid_num=%u)\n", para->pid_num);
        return -EINVAL;
    }

    if (para->pid_list == NULL) {
        devmm_drv_err("Pid list is NULL.\n");
        return -EINVAL;
    }
    return 0;
}

/*
 * Insert each pid from @pid_list into the blk's allow-list. The exporter's
 * own pid is rejected with -EACCES. Stops at the first failure; pids that
 * were already inserted remain (devmm_pid_list_erase_all on blk teardown
 * recycles them). Yields the CPU periodically for large lists.
 */
static int devmm_set_pids(struct devmm_share_phy_addr_agent_blk *blk, int *pid_list, u32 pid_num)
{
    u32 stamp = (u32)jiffies;
    u32 i;

    for (i = 0; i < pid_num; i++) {
        int ret;

        if (pid_list[i] == blk->export_pid) {
            ret = -EACCES;
        } else {
            ret = devmm_pid_list_insert(&blk->pid_list_mng, pid_list[i]);
            devmm_try_cond_resched(&stamp);
        }
        if (ret != 0) {
            /* cannot erase pid, will delete pid which is set.
               devmm_pid_list_erase_all will recycle resource. */
            devmm_drv_err("Set pid fail. (export_pid=%d; ret=%d; i=%u; pid=%d)\n",
                blk->export_pid, ret, i, pid_list[i]);
            return ret;
        }
    }
    return 0;
}

/*
 * Set-pid ioctl: the exporting process authorizes a list of pids to import
 * blk @para->share_id. Copies the pid array from user space, looks up the
 * agent blk, verifies the caller is the exporter (-EACCES otherwise), and
 * inserts each pid into the blk's allow-list.
 */
int devmm_ioctl_mem_set_pid(struct devmm_svm_process *svm_proc, struct devmm_ioctl_arg *arg)
{
    struct devmm_mem_set_pid_para *para = &arg->data.mem_set_pid_para;
    struct devmm_share_phy_addr_agent_blk *blk = NULL;
    int *pid_list = NULL;
    u64 size;
    int ret;

    ret = devmm_set_pid_para_check(para);
    if (ret != 0) {
        return ret;
    }

    /* pid_num <= DEVMM_SHARE_MEM_MAX_PID_CNT (checked above), so no overflow */
    size = para->pid_num * (u64)sizeof(int);
    pid_list = kvzalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
    if (pid_list == NULL) {
        devmm_drv_err("Alloc pid_list fail.\n");
        return -ENOMEM;
    }

    if (copy_from_user(pid_list, (void __user *)para->pid_list, size) != 0) {
        devmm_drv_err("Copy_from_user fail. (size=%llu)\n", size);
        ret = -EFAULT;
        goto free_pid_list;
    }

    blk = devmm_share_agent_blk_get(arg->head.devid, para->share_id);
    if (blk == NULL) {
        ret = -EINVAL;
        goto free_pid_list;
    }

    /* only the exporting process may grant import permission */
    if (blk->export_pid != devmm_get_current_pid()) {
        devmm_drv_err("Not export process, can not set pid. (export_pid=%d)\n", blk->export_pid);
        ret = -EACCES;
        goto blk_put;
    }
    ret = devmm_set_pids(blk, pid_list, para->pid_num);

blk_put:
    devmm_share_agent_blk_put(blk);
free_pid_list:
    kvfree(pid_list);
    return ret;
}
