/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2022-09-23
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include "securec.h"
#include "ts_agent_common.h"
#include "tsch/task_struct.h"
#include "tsch/mb_struct.h"
#include "ts_agent_log.h"
#include "ts_agent_dvpp.h"
#include "pcie_host/devdrv_pci.h"
#ifndef TS_AGENT_UT
#include <linux/io.h>
#include "trs_adapt.h"
#include "devdrv_interface.h"
#endif
#include "ts_agent_update_sqe.h"
#include "dbl/uda.h"

typedef int (*sqe_hook_proc_t)(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe);
static sqe_hook_proc_t g_sqe_proc_fn[TS_STARS_SQE_TYPE_END] = {NULL};
static ts_agent_dvpp_ops_t g_dvpp_ops = {NULL};
#if defined(CFG_SOC_PLATFORM_CLOUD_V2)
void __iomem *g_warning_bit_addr[TSAGENT_MAX_DEV_ID] = {NULL};
#endif

/*
 * Register the DVPP SQE-update hook.
 * A NULL ops pointer is silently ignored; only dvpp_sqe_update is captured.
 */
void tsagent_dvpp_register(ts_agent_dvpp_ops_t *ops)
{
    if (ops == NULL) {
        return;
    }
    g_dvpp_ops.dvpp_sqe_update = ops->dvpp_sqe_update;
    ts_agent_info("dvpp reg successful.");
}
#ifndef TS_AGENT_UT
EXPORT_SYMBOL(tsagent_dvpp_register);
#endif

/*
 * Detach the DVPP SQE-update hook; subsequent DVPP SQEs are accepted
 * without modification (see sqe_proc_dvpp's NULL check).
 */
void tsagent_dvpp_unregister(void)
{
    g_dvpp_ops.dvpp_sqe_update = NULL;
    ts_agent_info("dvpp unreg successful.");
}
#ifndef TS_AGENT_UT
EXPORT_SYMBOL(tsagent_dvpp_unregister);
#endif

/*
 * Dump every 32-bit word of a malformed SQE to the error log for triage.
 * The SQE is only read; the word view is kept const-qualified (the
 * previous (u32 *) cast discarded the const qualifier of the parameter).
 */
static void sqe_error_dump(u32 devid, const ts_stars_sqe_t *sqe)
{
    size_t i;
    const u32 *words = (const u32 *)sqe;

    ts_agent_err("dump error sqe, dev_id=%u, stream_id=%u, task_id=%u, sqe_type=%u",
        devid, sqe->stream_id, sqe->task_id, sqe->type);
    for (i = 0U; i < (sizeof(ts_stars_sqe_t) / sizeof(u32)); i++) {
        ts_agent_err("sqe addr[0x%p] word[%zu]: 0x%08x.", &words[i], i, words[i]);
    }
}

/*
 * Forward a DVPP-class SQE (VPC/JPEGE/JPEGD) to the registered DVPP hook.
 * When no hook is registered the SQE passes through untouched (EOK);
 * otherwise the hook's return code is propagated after logging failures.
 */
static int sqe_proc_dvpp(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
    int rc;

    if (g_dvpp_ops.dvpp_sqe_update == NULL) {
        ts_agent_warn("dvpp ops is not register!");
        return EOK;
    }
    rc = g_dvpp_ops.dvpp_sqe_update(devid, tsid, pid, sqe);
    if (rc != EOK) {
        ts_agent_err("dvpp update is error, ret=%d, devid=%u, stream_id=%u, task_id=%u, pid=%d, sqe_type=%u",
            rc, devid, sqe->stream_id, sqe->task_id, pid, sqe->type);
    }
    return rc;
}

/*
 * Validate an event-record SQE: the submitting process must own the
 * referenced event id.  Returns EOK on success, -EINVAL when ownership
 * check fails.  Under TS_AGENT_UT the check is compiled out.
 */
static int sqe_proc_event(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
#ifndef TS_AGENT_UT
    /* declarations live inside the guard so UT builds have no unused locals */
    bool is_match;
    ts_stars_event_sqe_t *evt_sqe = (ts_stars_event_sqe_t *)sqe;

    is_match = trs_is_proc_has_res(devid, tsid, pid, TRS_EVENT, evt_sqe->event_id);
    if (is_match == false) {
        ts_agent_err("event sqe check failed, ret=%d, devid=%u, stream_id=%u, task_id=%u, pid=%d, event_id=%u",
            is_match, devid, sqe->stream_id, sqe->task_id, pid, evt_sqe->event_id);
        return -EINVAL;
    }
#endif
    return EOK;
}

/*
 * Validate a notify-record SQE: the submitting process must own the
 * referenced notify id.  Returns EOK on success, -EINVAL on failure.
 * Under TS_AGENT_UT the check is compiled out.
 */
static int sqe_proc_notify(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
#ifndef TS_AGENT_UT
    /* declarations live inside the guard so UT builds have no unused locals */
    bool is_match;
    ts_stars_notify_sqe_t *nty_sqe = (ts_stars_notify_sqe_t *)sqe;

    is_match = trs_is_proc_has_res(devid, tsid, pid, TRS_NOTIFY, nty_sqe->notify_id);
    if (is_match == false) {
        ts_agent_err("notify sqe check failed, ret=%d, devid=%u, stream_id=%u, task_id=%u, pid=%d, notify_id=%u",
            is_match, devid, sqe->stream_id, sqe->task_id, pid, nty_sqe->notify_id);
        return -EINVAL;
    }
#endif
    return EOK;
}

/*
 * Validate a CDQM SQE: the submitting process must own the referenced
 * CDQ id.  Returns EOK on success, -EINVAL on failure.  Under
 * TS_AGENT_UT the check is compiled out.
 */
static int sqe_proc_cdqm(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
#ifndef TS_AGENT_UT
    /* declarations live inside the guard so UT builds have no unused locals */
    bool is_match;
    ts_stars_cdqm_sqe_t *cdq_sqe = (ts_stars_cdqm_sqe_t *)sqe;

    is_match = trs_is_proc_has_res(devid, tsid, pid, TRS_CDQ, cdq_sqe->cdq_id);
    if (is_match == false) {
        ts_agent_err("cdq sqe check failed, ret=%d, devid=%u, stream_id=%u, task_id=%u, pid=%d, cdq_id=%u",
            is_match, devid, sqe->stream_id, sqe->task_id, pid, cdq_sqe->cdq_id);
        return -EINVAL;
    }
#endif
    return EOK;
}

#ifndef CFG_DEVICE_ENV
// vm vpc agent should check dma desc after converted, while pf will not get here
/*
 * Re-validate a PCIE DMA descriptor that arrived already converted: the
 * DMA SQ address/size embedded in the SQE must match what devdrv issued.
 * The check body is only compiled for the host physical-machine build
 * (not VM, not device, not UT); all other builds accept unconditionally.
 * Returns EOK on success or devdrv's error code on mismatch.
 */
static int pcie_dma_desc_check(u32 devid, ts_stars_pciedma_sqe_t *sqe)
{
#if (!defined CFG_HOST_VIRTUAL_MACHINES) && (!defined CFG_DEVICE_ENV) && (!defined TS_AGENT_UT)
    int ret;
    /* reassemble the 64-bit DMA SQ address from the two 32-bit SQE fields */
    struct devdrv_dma_desc_info check_dma = {
        .sq_dma_addr = ((u64)sqe->sq_addr_high << 32U) | (u64)sqe->sq_addr_low,
        .sq_size = (u64)sqe->sq_tail_ptr,
    };
    ret = devdrv_dma_sqcq_desc_check(devid, &check_dma);
    if (ret != 0) {
        ts_agent_err("pciedma desc check failed, ret=%d, devid=%u, stream_id=%u, task_id=%u, src=%#llx, dst=%#llx, "
            "len=%#llx, dma sq addr=%#llx, dma sq tail=%llu.", ret, devid, sqe->header.rt_stream_id,
            sqe->header.task_id, sqe->src, sqe->dst, sqe->length, check_dma.sq_dma_addr, check_dma.sq_size);
        return ret;
    }
#endif
    return EOK;
}
#endif

#if (defined CFG_SOC_PLATFORM_CLOUD_V2) && (!defined TS_AGENT_UT)
/*
 * VM-side conversion step for a DSA pcie-dma SQE (cloud v2 + VM build
 * only; otherwise a no-op returning EOK).  Resolves and stamps the
 * process's passid into the SQE, marks it converted, and rewrites the
 * high word of dst from the virtual sq id to the physical sq id.
 * Returns EOK on success, -EINVAL on any lookup/translation failure.
 */
static int sqe_proc_dsa_vm_pciedma(u32 devid, u32 tsid, int pid, u32 sqid, ts_stars_pciedma_sqe_t *pcie_sqe)
{
#if (defined CFG_SOC_PLATFORM_CLOUD_V2) && (defined CFG_HOST_VIRTUAL_MACHINES)
    int ret;
    u32 passid;
    u32 phy_sq_id;

    ret = hal_kernel_trs_get_ssid(devid, tsid, pid, &passid);
    if (ret != 0) {
        ts_agent_err("get ssid failed, ret=%d, devid=%u, pid=%d, tsid=%u, sq_id=%u",
            ret, devid, pid, tsid, sqid);
        return -EINVAL;
    }
    /* mark converted so the PM side (sqe_proc_dsa_pm_pciedma) reuses this passid */
    pcie_sqe->is_converted = 1U;
    pcie_sqe->passid = passid;

    ts_agent_info("get ssid success passid =%u", passid);
    ret = trs_res_trans_v2p(devid, tsid, TRS_HW_SQ, sqid, &phy_sq_id);
    if (ret != 0) {
        ts_agent_err("trans v2p failed, ret=%d, devid=%u, tsid=%d, sq_id=%u",
            ret, devid, tsid, sqid);
        return -EINVAL;
    }
    ts_agent_info("physical sq_id=%u", phy_sq_id);
    /* dst layout: high 32 bits = sq id, low 32 bits preserved (sqe position) */
    pcie_sqe->dst = ((u64)phy_sq_id << 32U) + (pcie_sqe->dst & 0x00000000FFFFFFFFULL);
#endif
    return EOK;
}

/*
 * Physical-machine conversion step for a DSA pcie-dma SQE (cloud v2,
 * non-VM build only; otherwise a no-op returning EOK).  Resolves the
 * passid (from the SQE if a VM already converted it, otherwise via
 * hal_kernel_trs_get_ssid), validates the fixed DSA copy size, builds a
 * DMA descriptor and patches its address/tail back into the SQE.
 * Returns EOK on success, a negative errno on failure.
 */
static int sqe_proc_dsa_pm_pciedma(u32 devid, u32 tsid, int pid, u32 sqid, ts_stars_pciedma_sqe_t *pcie_sqe)
{
#if (defined CFG_SOC_PLATFORM_CLOUD_V2) && (!defined CFG_HOST_VIRTUAL_MACHINES)
    int ret;
    u32 passid;
    u32 pos;
    u64 cnt;
    u64 dsa_cp_size = 40ULL;
    struct trs_dma_desc_addr_info addr_info = {};
    struct trs_dma_desc dma = {};
    // if vm kernel, vm kernel has been convert to 1
    if (pcie_sqe->is_converted == 1U) {
        ts_agent_info("get passid from pcie sqe");
        passid = pcie_sqe->passid;
    } else if (pcie_sqe->is_converted == 0U) {
        ret = hal_kernel_trs_get_ssid(devid, tsid, pid, &passid);
        if (ret != 0) {
            ts_agent_err("get ssid failed, ret=%d, devid=%u, pid=%d, tsid=%u, sq_id=%u",
                ret, devid, pid, tsid, sqid);
            return -EINVAL;
        }
        ts_agent_info("get passid success, passid = %u", passid);
    } else {
        /* defensive: any other flag value would otherwise leave passid
           uninitialized before it is copied into addr_info below */
        ts_agent_err("invalid is_converted flag=%u, devid=%u, pid=%d, sq_id=%u",
            pcie_sqe->is_converted, devid, pid, sqid);
        return -EINVAL;
    }

    /* low 32 bits of dst carry the sqe position inside the target sq */
    pos = (u32)(pcie_sqe->dst & 0x00000000FFFFFFFFULL);
    cnt = (u16)(pcie_sqe->length);
    if (cnt != dsa_cp_size) {
        ts_agent_err("dsa cp size is invalid, invalid_size=%llu, correct_size=%llu",
            cnt, dsa_cp_size);
        return -EINVAL;
    }

    addr_info.src_va = pcie_sqe->src;
    addr_info.passid = passid;
    addr_info.sqid = sqid;
    addr_info.sqeid = pos;
    addr_info.offset = 16U;
    addr_info.size = cnt;

    ret = hal_kernel_sqe_update_desc_create(devid, tsid, &addr_info, &dma);
    if (ret != 0) {
        ts_agent_err("pciedma addr create failed, ret=%d, devid=%u, pid=%d, sq_id=%u, passid=%u",
            ret, devid, pid, sqid, passid);
        return ret;
    }
    ts_agent_info("pciedma addr create success");

    /* split the 64-bit descriptor address across the two SQE fields */
    pcie_sqe->sq_addr_low = (uintptr_t)dma.sq_addr & U32_MAX;
    pcie_sqe->sq_addr_high = (u32)((uintptr_t)dma.sq_addr >> 32U);
    pcie_sqe->sq_tail_ptr = (u16)dma.sq_tail;
    ts_agent_info("devid=%u, sq_id=%u, pos=%u, pid=%d, src=%#llx, cnt=%llu, dma sq addr=%#lx, "
        "dma sq tail=%u, type=%u, ssid=%u", devid, sqid, pos, pid, pcie_sqe->src, cnt,
        (uintptr_t)dma.sq_addr, dma.sq_tail, pcie_sqe->header.type, passid);
#endif
    return EOK;
}
#endif

#if (defined CFG_SOC_PLATFORM_CLOUD_V2) && (!defined TS_AGENT_UT)
/*
 * Validate and convert a DSA-updated pcie-dma SQE (cloud v2 host).
 * The high word of dst carries the sq id, which must belong to the
 * submitting process; then the VM and PM conversion steps run in order.
 * Device RC builds reject the SQE outright.  The #else structure avoids
 * the previous unreachable code (and unused locals) after the early
 * return in the CFG_DEVICE_ENV build; the redundant inner
 * TS_AGENT_UT guard was dropped (the enclosing #if already excludes UT).
 */
static int sqe_proc_dsa_pciedma(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
#ifdef CFG_DEVICE_ENV
    /* device rc has no pcie dma sqe task */
    ts_agent_err("task is invalid, devid=%u, stream_id=%u, task_id=%u, pid=%d, tsid=%u, type=%u",
        devid, sqe->stream_id, sqe->task_id, pid, tsid, sqe->type);
    return -EINVAL;
#else
    bool is_match;
    int ret;
    ts_stars_pciedma_sqe_t *pcie_sqe = (ts_stars_pciedma_sqe_t *)sqe;
    u32 sqid = (u32)((pcie_sqe->dst & 0xFFFFFFFF00000000ULL) >> 32U);
    ts_agent_info("sq_id=%u", sqid);

    is_match = trs_is_proc_has_res(devid, tsid, pid, TRS_HW_SQ, sqid);
    if (is_match == false) {
        ts_agent_err("sq_id check failed, devid=%u, pid=%d, sq_id=%u",
            devid, pid, sqid);
        return -EINVAL;
    }

    ret = sqe_proc_dsa_vm_pciedma(devid, tsid, pid, sqid, pcie_sqe);
    if (ret != 0) {
        ts_agent_err("dsa update failed, ret=%d, sqe_type=%u, devid=%u, sq_id=%u, pid=%d.",
            ret, sqe->type, devid, sqid, pid);
        sqe_error_dump(devid, sqe);
        return ret;
    }
    ts_agent_info("dsa pm_pciedma in");
    return sqe_proc_dsa_pm_pciedma(devid, tsid, pid, sqid, pcie_sqe);
#endif
}
#endif

/*
 * Validate/convert a pcie-dma SQE before dispatch.
 * - Device RC builds reject it (no pcie dma task there).
 * - DSA-updated SQEs take the dedicated DSA path (cloud v2 host only).
 * - Unconverted SQEs get an SVM DMA descriptor created for them (with
 *   one destroy-and-retry on a stale leftover descriptor) and the
 *   descriptor address/tail patched into the SQE.
 * - Already-converted SQEs are only re-checked against devdrv.
 * Returns EOK on success, negative errno / devdrv code on failure.
 */
static int sqe_proc_pciedma(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
#ifdef CFG_DEVICE_ENV
    /* device rc has no pcie dma sqe task */
    ts_agent_err("task is invalid, devid=%u, stream_id=%u, task_id=%u, pid=%d, tsid=%u, type=%u",
        devid, sqe->stream_id, sqe->task_id, pid, tsid, sqe->type);
    return -EINVAL;
#else
    int ret;
    ts_stars_pciedma_sqe_t *pcie_sqe = (ts_stars_pciedma_sqe_t *)sqe;
    struct svm_dma_desc_addr_info addr = {
        .src_va = pcie_sqe->src,
        .dst_va = pcie_sqe->dst,
        .size = pcie_sqe->length,
    };
    /* descriptor is keyed by (devid, stream) with task as subkey, so the
       CQE path and stream-release mailbox can find and destroy it later */
    struct svm_dma_desc_handle handle = {
        .pid = pid,
        .key = (devid << 16U) | (u32)sqe->stream_id,
        .subkey = (u32)sqe->task_id,
    };
    struct svm_dma_desc dma = {};

#if (defined CFG_SOC_PLATFORM_CLOUD_V2) && (!defined TS_AGENT_UT)
    if (pcie_sqe->is_dsa_update == 1U) {
        return sqe_proc_dsa_pciedma(devid, tsid, pid, sqe);
    }
#endif

    if (pcie_sqe->is_converted == 0U) {
    #ifndef TS_AGENT_UT
        ret = hal_kernel_svm_dma_desc_create(&addr, &handle, &dma);
        if (ret != 0) {
            /* a leftover descriptor for the same key can block creation:
               destroy it and retry once */
            ts_agent_err("pciedma addr not delete, ret=%d, devid=%u, stream_id=%u, task_id=%u, pid=%d, src=%#llx, "
                "dst=%#llx, len=%#llx.",
                ret, devid, sqe->stream_id, sqe->task_id, pid, pcie_sqe->src, pcie_sqe->dst, pcie_sqe->length);
            hal_kernel_svm_dma_desc_destroy(&handle);
            ret = hal_kernel_svm_dma_desc_create(&addr, &handle, &dma);
        }
        if (ret != 0) {
            ts_agent_err("pciedma addr convert failed, ret=%d, devid=%u, stream_id=%u, task_id=%u, pid=%d, src=%#llx, "
                "dst=%#llx, len=%#llx.",
                ret, devid, sqe->stream_id, sqe->task_id, pid, pcie_sqe->src, pcie_sqe->dst, pcie_sqe->length);
            return ret;
        }
    #endif
        pcie_sqe->sq_addr_low = (uintptr_t)dma.sq_addr & U32_MAX;
        pcie_sqe->sq_addr_high = (u32)((uintptr_t)dma.sq_addr >> 32U);
        pcie_sqe->sq_tail_ptr = (u16)dma.sq_tail;
        pcie_sqe->is_converted = 1UL;
        ts_agent_debug("devid=%u, stream_id=%u, task_id=%u, pid=%d, src=%#llx, dst=%#llx, len=%#llx, dma sq addr=%#lx, "
            "dma sq tail=%u.", devid, sqe->stream_id, sqe->task_id, pid, pcie_sqe->src, pcie_sqe->dst, pcie_sqe->length,
            (uintptr_t)dma.sq_addr, dma.sq_tail);
        return EOK;
    }
    return pcie_dma_desc_check(devid, pcie_sqe);
#endif
}

/*
 * Validate an SDMA (async memcpy) SQE.  In non-pointer mode both the
 * source and destination SSV flags must be set; pointer mode needs no
 * check.  Returns EOK when acceptable, -EINVAL otherwise.
 */
static int sqe_proc_sdma(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
    ts_stars_memcpy_async_sqe_t *sdma_sqe = (ts_stars_memcpy_async_sqe_t *)sqe;

    if (sdma_sqe->ptr_mode != 0U) {
        return EOK;
    }
    if ((sdma_sqe->sssv != 1U) || (sdma_sqe->dssv != 1U)) {
        ts_agent_err("sdma sqe check failed, devid=%u, stream_id=%u, task_id=%u, pid=%d, sssv=%u, dssv=%u",
            devid, sqe->stream_id, sqe->task_id, pid, sdma_sqe->sssv, sdma_sqe->dssv);
        return -EINVAL;
    }
    return EOK;
}

/*
 * Check a write-value target address that should reset an event register.
 * The address must sit in the STARS event register window, be aligned on
 * an event slot, and decode to an event id owned by the submitting
 * process.  Returns EOK on success, -EINVAL on any check failure.
 */
static int sqe_proc_write_value_check_event_addr(u32 devid, u32 tsid, int pid, uint64_t reg_addr)
{
    bool is_match;
    u64 event_table_id;
    u64 event_num;
    u32 event_id;

    /* strip the table/slot bits; what remains must be the event window base */
    if ((reg_addr & TS_STARS_SINGLE_DEV_ADDR_MASK & (~(TS_STARS_EVENT_TABLE_MASK | TS_STARS_EVENT_MASK))) !=
        (TS_STARS_BASE_ADDR + TS_STARS_EVENT_BASE_ADDR)) {
        ts_agent_err("stars addr base check failed, devid=%u, pid=%d, addr=%#llx", devid, pid, reg_addr);
        return -EINVAL;
    }

    if (((reg_addr & TS_STARS_EVENT_MASK) % TS_STARS_EVENT_OFFSET) != 0U) {
        ts_agent_err("event offset check failed, devid=%u, pid=%d, addr=%#llx", devid, pid, reg_addr);
        return -EINVAL;
    }

    /* event id = table index * entries-per-table + slot within table */
    event_num = (reg_addr & TS_STARS_EVENT_MASK) / TS_STARS_EVENT_OFFSET;
    event_table_id = (reg_addr & TS_STARS_EVENT_TABLE_MASK) / TS_STARS_EVENT_TABLE_OFFSET;
    event_id = (u32)((event_table_id * TS_STARS_EVENT_NUM_OF_SINGLE_TABLE) + event_num);

#ifndef TS_AGENT_UT
    is_match = trs_is_proc_has_res(devid, tsid, pid, (int)TRS_EVENT, (int)event_id);
    if (is_match == false) {
        ts_agent_err("write value event check failed, ret=%d, devid=%u, pid=%d, event_id=%u",
            is_match, devid, pid, event_id);
        return -EINVAL;
    }
#endif

    return EOK;
}

/*
 * Check a write-value target address for an IPC notify record that does
 * NOT cross PCIE: the address must sit in the local STARS notify window,
 * be slot-aligned, and decode to a notify id owned by the submitting
 * process (devid here is the remote device carried in the SQE).
 * Returns EOK on success, -EINVAL on any check failure.
 */
static int sqe_proc_write_value_check_notify_addr_no_pcie(u32 devid, u32 tsid, int pid, uint64_t reg_addr)
{
    bool is_match;
    uint64_t notify_table_id;
    uint64_t notify_num;
    uint64_t notify_id;

    /* strip the table/slot bits; what remains must be the notify window base */
    if ((reg_addr & TS_STARS_SINGLE_DEV_ADDR_MASK & (~(TS_STARS_NOTIFY_TABLE_MASK | TS_STARS_NOTIFY_MASK))) !=
        (TS_STARS_BASE_ADDR + TS_STARS_NOTIFY_BASE_ADDR)) {
        ts_agent_err("stars addr base check failed, devid=%u, pid=%d, addr=%#llx", devid, pid, reg_addr);
        return -EINVAL;
    }

    if (((reg_addr & TS_STARS_NOTIFY_MASK) % TS_STARS_NOTIFY_OFFSET) != 0ULL) {
        ts_agent_err("notify offset check failed, devid=%u, pid=%d, addr=%#llx", devid, pid, reg_addr);
        return -EINVAL;
    }

    /* notify id = table index * entries-per-table + slot within table */
    notify_num = (reg_addr & TS_STARS_NOTIFY_MASK) / TS_STARS_NOTIFY_OFFSET;
    notify_table_id = (reg_addr & TS_STARS_NOTIFY_TABLE_MASK) / TS_STARS_NOTIFY_TABLE_OFFSET;
    notify_id = (notify_table_id * TS_STARS_NOTIFY_NUM_OF_SINGLE_TABLE) + notify_num;
#ifndef TS_AGENT_UT
    is_match = trs_is_proc_has_res(devid, tsid, pid, TRS_NOTIFY, (int)notify_id);
    if (is_match == false) {
        ts_agent_err("notifyid check failed,pid=%d,notify_id=%#llx,tsid=%u,addr=%#llx,devid=%u",
            pid, notify_id, tsid, reg_addr, devid);
        return -EINVAL;
    }
#endif

    ts_agent_debug("notify_id=%#llx check success!pid=%d,devid=%u", notify_id, pid, devid);
    return EOK;
}

/*
 * Check a write-value target address for an IPC notify record that
 * crosses PCIE: the address must sit in the PCIE-mapped notify window,
 * be slot-aligned, and decode to a notify id owned by the submitting
 * process (devid here is the remote device carried in the SQE).
 * Returns EOK on success, -EINVAL on any check failure.
 */
static int sqe_proc_write_value_check_notify_addr_pcie(u32 devid, u32 tsid, int pid, uint64_t reg_addr)
{
    bool is_match;
    uint32_t notify_table_id;
    uint32_t notify_num;
    uint64_t notify_id;

    if ((reg_addr & TS_STARS_PCIE_BASE_MASK & (~(TS_STARS_NOTIFY_TABLE_MASK | TS_STARS_NOTIFY_MASK))) !=
        (TS_STARS_PCIE_BASE_ADDR + TS_STARS_NOTIFY_BASE_ADDR)) {
        ts_agent_err("notify pcie addr base check failed, devid=%u, pid=%d, addr=%#llx", devid, pid, reg_addr);
        return -EINVAL;
    }

    if (((reg_addr & TS_STARS_NOTIFY_MASK) % TS_STARS_NOTIFY_OFFSET) != 0ULL) {
        ts_agent_err("notify offset check failed, devid=%u, pid=%d, addr=%#llx", devid, pid, reg_addr);
        return -EINVAL;
    }

    notify_num = (uint32_t)((reg_addr & TS_STARS_NOTIFY_MASK) / TS_STARS_NOTIFY_OFFSET);
    notify_table_id = (uint32_t)((reg_addr & TS_STARS_NOTIFY_TABLE_MASK) / TS_STARS_NOTIFY_TABLE_OFFSET);
    /* widen before multiplying: the product was previously computed in
       32 bits and could wrap before being stored into the u64, unlike
       the no_pcie variant which already uses 64-bit arithmetic */
    notify_id = ((uint64_t)notify_table_id * TS_STARS_NOTIFY_NUM_OF_SINGLE_TABLE) + notify_num;
#ifndef TS_AGENT_UT
    is_match = trs_is_proc_has_res(devid, tsid, pid, TRS_NOTIFY, (int)notify_id);
    if (is_match == false) {
        ts_agent_err("notifyid check failed,pid=%d,notify_id=%#llx,tsid=%u,addr=%#llx,devid=%u",
            pid, notify_id, tsid, reg_addr, devid);
        return -EINVAL;
    }
#endif

    ts_agent_debug("notify_id=%#llx check success!pid=%d,devid=%u", notify_id, pid, devid);
    return EOK;
}

/*
 * Check a write-value target address for an RDMA doorbell send: it must
 * hit exactly the ROCEE VF doorbell config register within the single
 * device window.  Returns EOK on match, -EINVAL otherwise.
 */
static int sqe_proc_write_value_check_rdma_addr(u32 devid, u32 tsid, int pid, uint64_t reg_addr)
{
    const uint64_t expected = TS_ROCEE_BASE_ADDR + TS_ROCEE_VF_DB_CFG0_REG;

    if ((reg_addr & TS_STARS_SINGLE_DEV_ADDR_MASK) == expected) {
        return EOK;
    }
    ts_agent_err("rdma addr base check failed, devid=%u, pid=%d, addr=%#llx", devid, pid, reg_addr);
    return -EINVAL;
}

/*
 * Validate a write-value SQE.  Pointer-mode writes and virtual-address
 * writes are accepted without further checks.  Physical writes must pass
 * awsize and alignment checks and then target a recognized register
 * window dispatched by sub_type (event reset / notify record IPC with or
 * without PCIE / rdma doorbell).  NOTE: ret starts as positive EINVAL so
 * an unhandled sub_type falls through the if/else chain into ERR_PROC.
 * Returns EOK on success, -EINVAL on any failure.
 */
static int sqe_proc_write_value(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
    int ret = EINVAL;
    uint64_t reg_addr;
    uint32_t remote_dev_id;

    ts_stars_write_value_sqe_t *wv_sqe = (ts_stars_write_value_sqe_t *)sqe;
    /* res3 carries the remote device id for IPC notify sub-types */
    remote_dev_id = wv_sqe->res3;
    if (wv_sqe->ptr_mode == 1U) {
        return EOK;
    }

    if (wv_sqe->awsize > 5U) { // 5: awsize max value
        ts_agent_err("awsize check failed, awsize=%u", wv_sqe->awsize);
        goto ERR_PROC;
    }

    /* write address must be aligned to the 2^awsize access width */
    if (!IS_ALIGNED((wv_sqe->write_addr_low), (1U << wv_sqe->awsize))) {
        ts_agent_err("addr aligned check failed, awsize=%u, write_addr_low=%#x",
            wv_sqe->awsize, wv_sqe->write_addr_low);
        goto ERR_PROC;
    }

    // 1: virtual address
    if (wv_sqe->va == 1U) {
        return EOK;
    }

    if (wv_sqe->sub_type >= TS_STARS_WRITE_VALUE_SUB_TYPE_MAX) {
        ts_agent_err("sub type check failed, sub_type=%u", wv_sqe->sub_type);
        goto ERR_PROC;
    }

    reg_addr = ((uint64_t)wv_sqe->write_addr_high << 32U) | wv_sqe->write_addr_low;
    if (wv_sqe->sub_type == TS_STARS_WRITE_VALUE_SUB_TYPE_EVENT_RESET) {
        ret = sqe_proc_write_value_check_event_addr(devid, tsid, pid, reg_addr);
    } else if (wv_sqe->sub_type == TS_STARS_WRITE_VALUE_SUB_TYPE_NOTIFY_RECORD_IPC_NO_PCIE) {
        ret = sqe_proc_write_value_check_notify_addr_no_pcie(remote_dev_id, tsid, pid, reg_addr);
    } else if (wv_sqe->sub_type == TS_STARS_WRITE_VALUE_SUB_TYPE_NOTIFY_RECORD_IPC_PCIE) {
        ret = sqe_proc_write_value_check_notify_addr_pcie(remote_dev_id, tsid, pid, reg_addr);
    } else if (wv_sqe->sub_type == TS_STARS_WRITE_VALUE_SUB_TYPE_RDMA_DB_SEND) {
        ret = sqe_proc_write_value_check_rdma_addr(devid, tsid, pid, reg_addr);
    } else {
        // reserve
    }

    if (ret == EOK) {
        return EOK;
    }

ERR_PROC:
    ts_agent_err("write value check failed, devid=%u, stream_id=%u, task_id=%u, pid=%d, addr_h=%#x, addr_l=%#x, "
        "sub_type=%u", devid, sqe->stream_id, sqe->task_id, pid, wv_sqe->write_addr_high, wv_sqe->write_addr_low,
        wv_sqe->sub_type);
    return -EINVAL;
}

/*
 * Check that the submitting process owns the given CMO id.
 * Returns EOK on success (or in UT builds), -EINVAL on failure.
 */
static int cmo_sqe_check(u32 devid, u32 tsid, int pid, int cmo_id)
{
#ifndef TS_AGENT_UT
    bool is_match = trs_is_proc_has_res(devid, tsid, pid, (int)TRS_CMO, cmo_id);
    if (!is_match) {
        /* %d for cmo_id: it is a signed int (was logged with %u) */
        ts_agent_err("CMO sqe check failed, devid = %u, tsid = %u, pid = %d, cmo_id = %d", devid, tsid, pid, cmo_id);
        return -EINVAL;
    }
#endif
    return EOK;
}

/*
 * Check every CMO descriptor referenced by a barrier SQE: bit i of
 * cmo_bitmap set means cmo_info[i].cmo_id must belong to the submitting
 * process.  Returns EOK when all referenced ids pass, otherwise the
 * first failing check's error code.
 */
static int sqe_proc_barrier(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
    ts_stars_barrier_sqe_t *barrier_sqe = (ts_stars_barrier_sqe_t *)sqe;
    uint32_t cmo_bitmap = (uint32_t)(barrier_sqe->cmo_bitmap);
    uint32_t index;
    for (index = 0U; index < MAX_CMO_INFO_NUM; index++) { // barrier task contains 6 cmoinfo
        if (((cmo_bitmap >> index) & 1U) != 0U) { // get the index bit of com_bitmap
            int res = cmo_sqe_check(devid, tsid, pid, barrier_sqe->cmo_info[index].cmo_id);
            if (res != EOK) { /* propagate any failure, not only -EINVAL */
                return res;
            }
        }
    }
    return EOK;
}

/*
 * Dispatch a CMO SQE: cmo_type 0 denotes a barrier task (multiple cmo
 * ids via bitmap); any other type carries a single cmo_id to verify.
 */
static int sqe_proc_cmo(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
    ts_stars_cmo_sqe_t *cmo_sqe = (ts_stars_cmo_sqe_t *)sqe;

    if (cmo_sqe->cmo_type == 0U) { // 0 is barrier
        return sqe_proc_barrier(devid, tsid, pid, sqe);
    }
    return cmo_sqe_check(devid, tsid, pid, cmo_sqe->cmo_id);
}

/*
 * Validate/patch an FFTS(+) SQE.
 * - CMO SQEs reuse the FFTS type and are routed to the CMO handler.
 * - Non-FFTS-plus variants need no processing.
 * - FFTS-plus context base must be 128-byte aligned.
 * - On virtual devices the host pid is stamped into the SQE.
 * Returns EOK on success, -EINVAL on alignment failure.
 */
static int sqe_proc_ffts(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
    ts_ffts_plus_sqe_t *ffts_sqe = (ts_ffts_plus_sqe_t *)sqe;

    // CMO SQE reuses FFTS SQE, bit3 of word2 identifies whether task type is CMO.
    if (ffts_sqe->cmo == 0x1U) {
        return sqe_proc_cmo(devid, tsid, pid, sqe);
    }

    if (ffts_sqe->ffts_type != TS_FFTS_TYPE_FFTS_PLUS) {
        return EOK; /* was "-EOK": a negated success code; EOK is the intent */
    }

    if (!IS_ALIGNED((ffts_sqe->context_address_base_l), 128U)) { // 128 aligned
        ts_agent_err("addr aligned check failed, devid=%u, stream_id=%u, task_id=%u, pid=%d, address_base_l=%#x",
            devid, sqe->stream_id, sqe->task_id, pid, ffts_sqe->context_address_base_l);
        return -EINVAL;
    }
#ifndef TS_AGENT_UT
    if (uda_is_phy_dev(devid) == false) {
        ts_agent_debug("sqe pid update success, devid=%u, stream_id=%u, task_id=%u, pid=%d.",
            devid, sqe->stream_id, sqe->task_id, pid);
        ffts_sqe->pid = (u32)pid;
    }
#endif
    return EOK;
}

/*
 * Validate/patch a topic (aicpu/callback) SQE.
 * On MINIV3 EP/RC chips the unsupported host-side topic types are not
 * rejected here: the SQE is flagged TS_STARS_SQE_TYPE_INVALID and still
 * forwarded, so STARS reports the error back to runtime via a CQE.
 * On virtual devices the host pid is stamped into the SQE so the device
 * side can attribute the task.  Always returns EOK.
 */
static int sqe_proc_topic(u32 devid, u32 tsid, int pid, ts_stars_sqe_t *sqe)
{
    ts_stars_aicpu_sqe_t *topic_sqe = (ts_stars_aicpu_sqe_t *)sqe;

    #ifdef CFG_SOC_PLATFORM_MINIV3_EP
    if ((topic_sqe->topic_type == TOPIC_TYPE_HOST_AICPU_ONLY) ||
        (topic_sqe->topic_type == TOPIC_TYPE_HOST_AICPU_FIRST)) {
        ts_agent_err("Chip type not support, return error, sqe_type=%u, devid=%u, stream_id=%u, task_id=%u, pid=%d.",
            topic_sqe->topic_type, devid, sqe->stream_id, sqe->task_id, pid);
        sqe_error_dump(devid, sqe);
        topic_sqe->type = TS_STARS_SQE_TYPE_INVALID; /* set invalid flag */
        /* 1911 ep do not support host aicpu topic message, ts_agent will still sends this sqe to stars,
           when stars finds invalid flag, it will report error info to runtime by cqe.
        */
        return EOK;
    }
    #endif

    #ifdef CFG_SOC_PLATFORM_MINIV3_RC
    if ((topic_sqe->topic_type == TOPIC_TYPE_HOST_AICPU_ONLY) ||
        (topic_sqe->topic_type == TOPIC_TYPE_HOST_AICPU_FIRST) ||
        (topic_sqe->topic_type == TOPIC_TYPE_HOST_CTRL_CPU)) {
        ts_agent_err("Chip type not support, return error, sqe_type=%u, devid=%u, stream_id=%u, task_id=%u, pid=%d.",
            topic_sqe->topic_type, devid, sqe->stream_id, sqe->task_id, pid);
        sqe_error_dump(devid, sqe);
        topic_sqe->type = TS_STARS_SQE_TYPE_INVALID; /* set invalid flag */
        /* 1911 rc do not support host aicpu and host ctrl topic message, ts_agent will still sends this sqe to stars,
           when stars finds invalid flag, it will report error info to runtime by cqe.
        */
        return EOK;
    }
    #endif

    if (uda_is_phy_dev(devid) == false) {
        ts_stars_callback_sqe_t *callback_sqe = (ts_stars_callback_sqe_t *)sqe;

        /* NOTE(review): 26U appears to be the callback topic id, which keeps
           its pid in res5 instead of p_l2ctrl_low — confirm against topic table */
        if (callback_sqe->topic_id == 26U) {
            callback_sqe->res5 = (u32)pid;
            ts_agent_debug("sqe pid update success, topic_type=%u, devid=%u, stream_id=%u, task_id=%u, pid=%d.",
                callback_sqe->topic_type, devid, sqe->stream_id, sqe->task_id, pid);
        } else if (topic_sqe->topic_type <= TOPIC_TYPE_HOST_AICPU_FIRST) {
            topic_sqe->p_l2ctrl_low = (u32)pid;
            ts_agent_debug("sqe pid update success, topic_type=%u, devid=%u, stream_id=%u, task_id=%u, pid=%d.",
                topic_sqe->topic_type, devid, sqe->stream_id, sqe->task_id, pid);
        } else {
            // reserve
        }
    }

    return EOK;
}

/*
 * Entry point: inspect/patch one SQE before it is forwarded to STARS.
 * Per-type handlers are looked up in g_sqe_proc_fn (see
 * init_task_convert_func); types without a handler pass through.
 * Invalid or failing SQEs are generally NOT rejected here: they are
 * flagged TS_STARS_SQE_TYPE_INVALID and still sent, so STARS reports
 * the error to runtime via a CQE.  Only a bad kernel_credit returns
 * -EINVAL; all other paths return EOK.
 */
int tsagent_sqe_update(u32 devid, u32 tsid, int pid, void *sqe_buf)
{
    int ret = EOK;
    ts_stars_sqe_t *sqe = (ts_stars_sqe_t *)sqe_buf;
    sqe_hook_proc_t proc_fn = NULL;

    /* bound check before indexing g_sqe_proc_fn with sqe->type */
    if (sqe->type >= TS_STARS_SQE_TYPE_END) {
        ts_agent_err("sqe type is invalid, sqe_type=%u, devid=%u, stream_id=%u, task_id=%u, pid=%d.",
            sqe->type, devid, sqe->stream_id, sqe->task_id, pid);
        sqe_error_dump(devid, sqe);
        sqe->type = TS_STARS_SQE_TYPE_INVALID; /* set invalid flag */
        /* if sqe is invalid, ts_agent will still sends this sqe to stars,
           when stars finds invalid flag, it will report error info to runtime by cqe.
        */
        return EOK;
    }

#ifndef TS_AGENT_UT
    /* 0xFF credit is only legal for wait-type SQEs */
    if ((sqe->kernel_credit == 0xFFU) &&
        (sqe->type != (uint8_t)TS_STARS_SQE_TYPE_EVENT_WAIT) &&
        (sqe->type != (uint8_t)TS_STARS_SQE_TYPE_NOTIFY_WAIT)) {
        ts_agent_err("kernel credit can't be 0xFF, sqe_type=%u, devid=%u, stream_id=%u, task_id=%u, pid=%d.",
            sqe->type, devid, sqe->stream_id, sqe->task_id, pid);
        sqe_error_dump(devid, sqe);
        sqe->type = TS_STARS_SQE_TYPE_INVALID;
        return -EINVAL;
    }
#endif

    proc_fn = g_sqe_proc_fn[sqe->type];
    if (proc_fn == NULL) {
        ts_agent_debug("sqe no need update, sqe_type=%u, devid=%u, stream_id=%u, task_id=%u, pid=%d.",
            sqe->type, devid, sqe->stream_id, sqe->task_id, pid);
        return EOK;
    }
    ret = proc_fn(devid, tsid, pid, sqe);
    if (ret != 0) {
        ts_agent_err("sqe update failed, ret=%d, sqe_type=%u, devid=%u, stream_id=%u, task_id=%u, pid=%d.",
            ret, sqe->type, devid, sqe->stream_id, sqe->task_id, pid);
        sqe_error_dump(devid, sqe);
        sqe->type = TS_STARS_SQE_TYPE_INVALID; /* set invalid flag */
        /* if sqe is invalid, ts_agent will still sends this sqe to stars,
           when stars finds invalid flag, it will report error info to runtime by cqe.
        */
        return EOK;
    }
    ts_agent_debug("sqe update success, sqe_type=%u, devid=%u, stream_id=%u, task_id=%u, pid=%d.",
        sqe->type, devid, sqe->stream_id, sqe->task_id, pid);
    return EOK;
}

/*
 * Populate the per-SQE-type handler table used by tsagent_sqe_update.
 * Types not listed here keep a NULL entry and pass through unmodified.
 */
void init_task_convert_func(void)
{
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_EVENT_RECORD] = sqe_proc_event;
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_NOTIFY_RECORD] = sqe_proc_notify;
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_CDQM] = sqe_proc_cdqm;
    // the three DVPP engines (VPC/JPEGE/JPEGD) share one hook-based handler
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_VPC] = sqe_proc_dvpp;
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_JPEGE] = sqe_proc_dvpp;
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_JPEGD] = sqe_proc_dvpp;
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_PCIE_DMA] = sqe_proc_pciedma;
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_WRITE_VALUE] = sqe_proc_write_value;
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_SDMA] = sqe_proc_sdma;
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_FFTS] = sqe_proc_ffts;
    g_sqe_proc_fn[TS_STARS_SQE_TYPE_AICPU] = sqe_proc_topic;
}

#if defined(CFG_SOC_PLATFORM_CLOUD_V2)
/*
 * Set the warning bit for one stream in the device's SRAM warning bitmap
 * (one bit per stream, 32 bits per unit).  Uses a read-modify-write over
 * PCIE mapped memory, then reads back for a debug-level verification.
 * Invalid stream/device ids or an unmapped bitmap are logged and ignored.
 */
static void tsagent_set_stream_warning(u32 devid, u16 stream_id)
{
    u32 bit_offset = stream_id % 32U;  /* 32U: Bits/unit */
    u32 reg_offset = stream_id / 32U;  /* 32U: Bits/unit */
    u32 unit = TSAGENT_WARNING_BIT_UNIT;
    u32 value = 0U;
    u32 read_value = 0U;

    if (stream_id >= TS_AGENT_MAX_SQ_NUM) {
        ts_agent_err("invalid stream, stream_id=%u.", stream_id);
        return;
    }

    if (devid >= TSAGENT_MAX_DEV_ID) {
        ts_agent_err("device id check failed, devid=%u.", devid);
        return;
    }
#ifndef TS_AGENT_UT
    if (g_warning_bit_addr[devid] != NULL) {
        /* read sram addr + offset */
        memcpy_fromio_pcie(&value, (g_warning_bit_addr[devid] + reg_offset * unit), unit);
        rmb();

        value |= 1U << bit_offset;
        /* write value to addr */
        memcpy_toio_pcie(g_warning_bit_addr[devid] + reg_offset * unit, &value, unit);
        wmb();

        /* check value is write success */
        memcpy_fromio_pcie(&read_value, (g_warning_bit_addr[devid] + reg_offset * unit), unit);
        rmb();
        ts_agent_debug("set_warning_bit, stream_id=%u, writeValue=0x%x, readValue=0x%x.", stream_id, value, read_value);
    } else {
        ts_agent_err("g_warning_bit_addr[%u] is null.", devid);
    }
#endif
    return;
}

/*
 * Map the device's warning-bit SRAM region (reserved area + fixed offset)
 * so tsagent_set_stream_warning can flag streams later.
 * An out-of-range devid is tolerated (warn + EOK); an ioremap failure
 * returns -ENOMEM.  Paired with tsagent_device_uninit.
 */
int tsagent_device_init(u32 devid, u32 tsid, struct trs_sqcq_agent_para *para)
{
    u64 ts_sram_rsv;
    ts_agent_info("tsagent device init, devid=%u, tsid=%u, phy_addr=0x%llx, size=0x%llx.", devid, tsid,
        para->rsv_phy_addr, (u64)(para->rsv_size));
    if (devid >= TSAGENT_MAX_DEV_ID) {
        ts_agent_warn("device id check failed, devid=%u.", devid);
        return EOK;
    }
    ts_sram_rsv = para->rsv_phy_addr + TSAGENT_WARNING_BIT_SRAM_OFFSET;
#ifndef TS_AGENT_UT
    g_warning_bit_addr[devid] = ioremap(ts_sram_rsv, TSAGENT_WARNING_BIT_SIZE);
    if (g_warning_bit_addr[devid] == NULL) {
        ts_agent_err("ioremap sram fail, sram addr=0x%llx, size=%u.", ts_sram_rsv, TSAGENT_WARNING_BIT_SIZE);
        return -ENOMEM;
    }
#endif
    ts_agent_info("tsagent device init, ioremap_addr=0x%llx", (u64)g_warning_bit_addr[devid]);
    return EOK;
}

/*
 * Unmap the device's warning-bit SRAM region (if mapped) and clear the
 * cached pointer.  Safe to call repeatedly; always returns EOK.
 */
int tsagent_device_uninit(u32 devid, u32 tsid)
{
    if (devid >= TSAGENT_MAX_DEV_ID) {
        ts_agent_err("device id check failed, devid=%u.", devid);
        return EOK;
    }
#ifndef TS_AGENT_UT
    if (g_warning_bit_addr[devid] != NULL) {
        iounmap(g_warning_bit_addr[devid]);
        g_warning_bit_addr[devid] = NULL;
    }
#endif
    ts_agent_info("tsagent device uninit success, devid=%u.", devid);
    return EOK;
}
#endif

#if defined(CFG_SOC_PLATFORM_CLOUD_V2) || defined(CFG_SOC_PLATFORM_MINIV3_EP)
/*
 * A CQE is a "syscnt" status report when the event or place-hold flag is
 * set and no error is reported; such CQEs always go to the logic CQ.
 */
static bool is_cqe_status_syscnt(ts_stars_cqe_t *cqe)
{
    bool has_status_flag = (cqe->evt != 0U) || (cqe->place_hold != 0U);

    return has_status_flag && (cqe->error_bit == 0U);
}

/*
 * Decide whether a CQE may be dropped before reaching runtime.
 * Kept: syscnt CQEs (untouched), error CQEs, and all dvpp/aicpu
 * completions.  Dropped: warning CQEs and pcie-dma completions, which
 * ts_agent has already fully processed.  Everything else is kept.
 */
static void cqe_set_drop_flag(ts_stars_cqe_t *cqe)
{
    u32 type = cqe->sqe_type;
    bool runtime_needed;

    if (is_cqe_status_syscnt(cqe)) {
        /* syscnt cqe need to always dispatch to logic cq */
        return;
    }

    /* runtime must see errors and every dvpp / soft-dvpp / aicpu completion */
    runtime_needed = (cqe->error_bit != 0U) ||
        (type == TS_STARS_SQE_TYPE_VPC) || (type == TS_STARS_SQE_TYPE_JPEGE) ||
        (type == TS_STARS_SQE_TYPE_JPEGD) || (type == TS_STARS_SQE_TYPE_AICPU);
    if (runtime_needed) {
        cqe->drop_flag = 0U;
        return;
    }

    if ((cqe->warn != 0U) || (type == TS_STARS_SQE_TYPE_PCIE_DMA)) {
        /* cqe has been processed in ts_agent, no need to send to runtime */
        cqe->drop_flag = 1U;
        return;
    }
    cqe->drop_flag = 0U;
}

/*
 * Post-process one CQE coming back from STARS: decide its drop flag,
 * record stream warnings in SRAM (cloud v2), and for completed pcie-dma
 * tasks destroy the SVM DMA descriptor created in sqe_proc_pciedma
 * (keyed by devid/stream with task as subkey).  Always returns EOK.
 */
int tsagent_cqe_update(u32 devid, u32 tsid, int pid, void *cqe)
{
    ts_stars_cqe_t *stars_cqe = (ts_stars_cqe_t *)cqe;
    cqe_set_drop_flag(stars_cqe);

#ifndef CFG_SOC_PLATFORM_MINIV3_EP
    if (stars_cqe->warn != 0U) {
        ts_agent_debug("stars warning happened, devid=%u, pid=%d, stream_id=%u, task_id=%u, sqid=%u.", devid, pid,
            stars_cqe->stream_id, stars_cqe->task_id, stars_cqe->sq_id);
        tsagent_set_stream_warning(devid, stars_cqe->stream_id);
    }
#endif
#if defined(CFG_SOC_PLATFORM_CLOUD_V2)
    ts_agent_debug("cqe debug, devid=%u, pid=%d, stream_id=%u, task_id=%u, sqid=%u, sqetype=%u,sq_head=%u.", devid,
        pid, stars_cqe->stream_id, stars_cqe->task_id, stars_cqe->sq_id, stars_cqe->sqe_type, stars_cqe->sq_head);
#endif

    /* status-only CQEs carry no completed task: nothing to clean up */
    if (stars_cqe->place_hold || stars_cqe->evt) {
        return EOK;
    }

    if (stars_cqe->sqe_type == TS_STARS_SQE_TYPE_PCIE_DMA) {
        /* same key construction as sqe_proc_pciedma's descriptor create */
        struct svm_dma_desc_handle handle = {
            .pid = pid,
            .key = (devid << 16U) | (u32)stars_cqe->stream_id,
            .subkey = (u32)stars_cqe->task_id,
        };
#ifndef TS_AGENT_UT
        hal_kernel_svm_dma_desc_destroy(&handle);
#endif
        ts_agent_debug("destroy dma desc, devid=%u, pid=%d, stream_id=%u, task_id=%u, sqid=%u.", devid, pid,
            stars_cqe->stream_id, stars_cqe->task_id, stars_cqe->sq_id);
    }
    return EOK;
}

/*
 * Inspect a mailbox message on its way down; on a stream sq/cq release
 * command, destroy every remaining pcie-dma descriptor of that stream
 * (subkey U32_MAX = all tasks).  VM builds use the SVM handle path, the
 * cloud v2 physical-machine build the sq-indexed DSA path.  The size
 * parameter is unused here (interface kept for the hook signature).
 * Always returns EOK.
 */
int tsagent_mailbox_update(u32 devid, u32 tsid, int pid, void *data, u32 size)
{
    ts_mailbox_t *mb = (ts_mailbox_t *)data;
    ts_agent_debug("mb_valid=%u, mb_cmd_type=%u", mb->valid, mb->cmd_type);
    if ((mb->valid == TS_DRV_MAILBOX_VALID_VALUE) && (mb->cmd_type == RELEASE_TASK_CMD_SQCQ)) {
        u32 stream_id = mb->u.cmd_sqcq_info.info[0];
        /* same key construction as sqe_proc_pciedma; U32_MAX matches all subkeys */
        struct svm_dma_desc_handle handle = {
            .pid = pid,
            .key = (devid << 16U) | stream_id,
            .subkey = U32_MAX,
        };
#if (!defined TS_AGENT_UT) && (defined CFG_HOST_VIRTUAL_MACHINES)
        ts_agent_info("vm destroy pcie dma handle devid=%u, tsid=%u, pid=%d, sqid=%hu",
            devid, tsid, pid, mb->u.cmd_sqcq_info.sq_idx);
        hal_kernel_svm_dma_desc_destroy(&handle);
#endif
#if (!defined TS_AGENT_UT) && (defined CFG_SOC_PLATFORM_CLOUD_V2) && (!defined CFG_HOST_VIRTUAL_MACHINES)
        ts_agent_info("phy destroy pcie dma handle devid=%u, tsid=%u, pid=%d, sqid=%hu",
            devid, tsid, pid, mb->u.cmd_sqcq_info.sq_idx);
        hal_kernel_sqe_update_desc_destroy(devid, tsid, mb->u.cmd_sqcq_info.sq_idx);
#endif
        ts_agent_debug("try to destroy stream all dma desc, devid=%u, pid=%d, stream_id=%u, sqid=%u, key=%u",
            devid, pid, stream_id, mb->u.cmd_sqcq_info.sq_idx, handle.key);
    }
    return EOK;
}
#endif

