/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <mutex>
#include "taskdown_common.h"
#include "cce/kernel_params.hpp"
#include "cce/cce_opiddef.hpp"
#include "comm_log.h"

using namespace std;

namespace fe {
// Per-stream kernel-op bookkeeping record; one instance lives in kernelOpMap_
// for each stream registered via ccSetKernelOpMap(). Guarded by taskdownMtx_.
struct tagKernelOp
{
  uint32_t     opId;          // operator id (e.g. CCE_DNN_OP_POOLING), set by ccSetKernelContext()
  uint32_t     kernelFuncId;  // selected kernel function variant for opId
  ccKernelType kernelType;    // kernel type reported back through ccGetKernelContext()
  bool         isFlowtable;   // whether this op uses a flow table
  bool         available;     // true between ccSetKernelContext() and the next ccGetKernelContext()
  uint32_t     opIndex;       // op index copied from the handle at context-set time
  bool         isRcSet;       // true when tensorRc holds a pending read-count set
  std::map <uint64_t, uint32_t>  tensorRc;  // tensor address -> read count, consumed by ccGetKernelReadCount()
};

using kKernelOp = struct tagKernelOp;
using kernelOpMap_t = std::map<rtStream_t, kKernelOp *>;               // stream -> owned kernel-op record
using streaml2Map_t = std::map<rtStream_t, fusion::TaskL2InfoMap_t>;   // stream -> per-opIndex L2 info

// File-global registries; both are protected by taskdownMtx_ below.
kernelOpMap_t kernelOpMap_;
streaml2Map_t streaml2Map_;

// Single lock serializing all access to kernelOpMap_ and streaml2Map_.
std::mutex taskdownMtx_;

/**
 * @brief  Extract the runtime stream id bound to a handle.
 * @param  handle    [in]  context handle; must not be null.
 * @param  streamId  [out] receives handle->streamId on success.
 * @return CC_STATUS_SUCCESS, or CC_STATUS_NOT_INITIALIZED when handle is null.
 */
ccStatus_t GetStream(ccHandle_t handle, rtStream_t *streamId)
{
  if (handle == nullptr)
  {
    CM_LOGE("handle is NULL!");
    return CC_STATUS_NOT_INITIALIZED;
  }
  *streamId = handle->streamId;
  return CC_STATUS_SUCCESS;
}  // (stray trailing ';' after the function body removed)

/**
 * @brief  Remove and free the kernel-op record bound to the handle's stream.
 * @param  handle  [in] context handle; a null/invalid handle is tolerated.
 * @return CC_STATUS_SUCCESS always (clearing a non-registered stream is a no-op).
 */
ccStatus_t ccClearOpMap(ccHandle_t handle)
{
  rtStream_t streamId;

  ccStatus_t ret = GetStream(handle, &streamId);
  if (ret != CC_STATUS_SUCCESS) {
    CM_LOGE("ccClearOpMap Fail.");
    return CC_STATUS_SUCCESS;  // deliberately best-effort: nothing to clear
  }

  // RAII lock: released on every return path.
  std::lock_guard<std::mutex> lock(taskdownMtx_);
  kernelOpMap_t::iterator it = kernelOpMap_.find(streamId);
  if (it == kernelOpMap_.end()) {
    return CC_STATUS_SUCCESS;
  }

  delete it->second;       // delete of nullptr is safe, no extra check needed
  kernelOpMap_.erase(it);  // erase by iterator avoids a second lookup

  return CC_STATUS_SUCCESS;
}


/*lint -e429*/
/**
 * @brief  Register the handle's stream for taskdown mode by installing a
 *         kernel-op record in kernelOpMap_ (or resetting an existing one).
 * @param  handle  [in] context handle; must resolve to a stream.
 * @return CC_STATUS_SUCCESS, GetStream's error, or the status produced by
 *         CHECK_KERNEL_OP_CONTEXT_NULL on allocation failure.
 */
ccStatus_t ccSetKernelOpMap(ccHandle_t handle)
{
  rtStream_t streamId;
  ccStatus_t ret = GetStream(handle, &streamId);
  if (ret != CC_STATUS_SUCCESS) {
    CM_LOGE("ccSetKernelOpMap Fail.");
    return ret;
  }

  // RAII lock: the original code held a manual lock across
  // CHECK_KERNEL_OP_CONTEXT_NULL, whose early return leaked the mutex.
  std::lock_guard<std::mutex> lock(taskdownMtx_);
  kernelOpMap_t::iterator it = kernelOpMap_.find(streamId);
  if (it != kernelOpMap_.end()) {
    kKernelOp *kernelOpCnt = it->second;
    kernelOpCnt->available = false;
    return CC_STATUS_SUCCESS;
  }

  auto kernelOpContext = new (std::nothrow) kKernelOp;
  CHECK_KERNEL_OP_CONTEXT_NULL(kernelOpContext);

  // Initialize every field; the original left kernelFuncId/kernelType/
  // isFlowtable indeterminate until ccSetKernelContext() ran.
  kernelOpContext->available = false;
  kernelOpContext->opId = 0;
  kernelOpContext->kernelFuncId = 0;
  kernelOpContext->kernelType = INVALID;
  kernelOpContext->isFlowtable = false;
  kernelOpContext->opIndex = 0;
  kernelOpContext->isRcSet = false;
  kernelOpContext->tensorRc.clear();

  kernelOpMap_[streamId] = kernelOpContext;
  CM_LOGI("ccSetKernelOpMap streamId is %lu.", (uint64_t)(uintptr_t)streamId);

  return CC_STATUS_SUCCESS;
}
/*lint +e429*/
/**
 * @brief  Choose the softmax kernel function id from the op's attributes:
 *         the dedicated "lowest axis" kernel for ND tensors reduced over the
 *         last dimension, otherwise the axis index itself.
 * @param  kernelOpContext  [in,out] record whose kernelFuncId is set.
 * @param  attrList         [in]     attribute list carrying param and input tensor.
 * @return CC_STATUS_SUCCESS, or CC_STATUS_BAD_PARAM on missing/empty attributes.
 */
ccStatus_t ccSetKernelContextSoftmax(kKernelOp *kernelOpContext, AttrList &attrList)
{
  kernelOpContext->kernelFuncId = (uint32_t)cce::CC_SOFTMAX_MODE_RESERVED;

  uint32_t len = 0;
  const void *value = nullptr;

  attrList.Get(SOFTMAX_KERNEL_PARAM, len, value);
  CHECK_NULL_WITH_RET(value, CC_STATUS_BAD_PARAM);
  JUDGE_IF_ZERO_RETURN_VALUE(len, CC_STATUS_BAD_PARAM);
  const cce::kernel::kSoftmaxParam_t *softmaxParam = (const cce::kernel::kSoftmaxParam_t *)value;

  (void) attrList.Get(SOFTMAX_KERNEL_TENSOR_INPUT, len, value);
  CHECK_NULL_WITH_RET(value, CC_STATUS_BAD_PARAM);
  JUDGE_IF_ZERO_RETURN_VALUE(len, CC_STATUS_BAD_PARAM);
  const cce::kernel::kTensor_t *input = (const cce::kernel::kTensor_t *)value;

  // "Lowest" kernel applies only to ND layout with softmax over the last axis.
  const bool lastAxisNd = (input->format == cce::kernel::KERNEL_TENSOR_FORMAT_ND) &&
                          (softmaxParam->softmaxAxis + 1 == input->dimCnt);
  kernelOpContext->kernelFuncId = lastAxisNd ? (uint32_t)CC_TASKDOWN_SOFTMAX_LOWEST
                                             : (uint32_t)(softmaxParam->softmaxAxis);
  return CC_STATUS_SUCCESS;
}

/**
 * @brief  Choose the resize-nearest-neighbor kernel: the specialized template
 *         when both output dims are integer multiples of the input dims and
 *         alignCorners is off, otherwise the common (generic) template.
 * @param  kernelOpContext  [in,out] record whose kernelFuncId is set.
 * @param  attrList         [in]     attribute list carrying tensors and alignCorners flag.
 * @return CC_STATUS_SUCCESS (also when optional attrs are absent), or
 *         CC_STATUS_BAD_PARAM on malformed attributes.
 */
ccStatus_t ccSetKernelContextNeighbor(kKernelOp *kernelOpContext, AttrList &attrList)
{
  uint32_t attrLen = 0;
  const void *attrValue = nullptr;

  attrList.Get(RESIZE_NEAREST_NEIGHBOR_COMMON_KERNEL_TENSOR_INPUT, attrLen, attrValue);
  CHECK_ONLY_RET(attrValue, CC_STATUS_SUCCESS);
  const cce::kernel::kTensor_t *inputTensor = (const cce::kernel::kTensor_t *)attrValue;

  attrList.Get(RESIZE_NEAREST_NEIGHBOR_COMMON_KERNEL_TENSOR_OUTPUT, attrLen, attrValue);
  CHECK_ONLY_RET(attrValue, CC_STATUS_SUCCESS);
  const cce::kernel::kTensor_t *outputTensor = (const cce::kernel::kTensor_t *)attrValue;

  attrList.Get(RESIZE_NEAREST_NEIGHBOR_COMMON_KERNEL_DATA_PARAM, attrLen, attrValue);
  // Fix: the original dereferenced this attribute without any null check.
  CHECK_NULL_WITH_RET(attrValue, CC_STATUS_BAD_PARAM);
  bool alignCorners = *static_cast<const bool *>(attrValue);

  uint32_t input_w = inputTensor->width;
  uint32_t input_h = inputTensor->height;
  uint32_t output_w = outputTensor->width;
  uint32_t output_h = outputTensor->height;

  // Fix: guard the modulo below against zero-sized input dimensions.
  JUDGE_IF_ZERO_RETURN_VALUE(input_w, CC_STATUS_BAD_PARAM);
  JUDGE_IF_ZERO_RETURN_VALUE(input_h, CC_STATUS_BAD_PARAM);

  kernelOpContext->kernelFuncId = (uint32_t)CC_TASKDOWN_RESIZE_NEAREST_NEIGHBOR;  // specialized template
  if ((output_w % input_w) || (output_h % input_h))                               // generic template
  {
    kernelOpContext->kernelFuncId = (uint32_t)CC_TASKDOWN_RESIZE_NEAREST_NEIGHBOR_COMMON;
  }

  // alignCorners always forces the generic template.
  if (alignCorners == true) {
    kernelOpContext->kernelFuncId = (uint32_t)CC_TASKDOWN_RESIZE_NEAREST_NEIGHBOR_COMMON;
  }

  return CC_STATUS_SUCCESS;
}

/**
 * @brief  Populate the stream's kernel-op record for the given operator and
 *         select a per-op kernelFuncId from the op's attributes.
 * @param  handle       [in] context handle providing stream and opIndex.
 * @param  opId         [in] operator id (CCE_DNN_OP_*).
 * @param  attrList     [in] operator attribute list.
 * @param  isFlowtable  [in] whether the op uses a flow table.
 * @param  kernelType   [in] kernel type to record.
 * @param  pgraph       [in] unused.
 * @return CC_STATUS_SUCCESS (including when the stream is not in taskdown
 *         mode), CC_STATUS_INTERNAL_ERROR on handle errors, or
 *         CC_STATUS_BAD_PARAM on malformed attributes.
 */
ccStatus_t ccSetKernelContext(ccHandle_t handle, uint32_t opId, AttrList &attrList, bool isFlowtable,
                              ccKernelType kernelType, void* pgraph)
{
  UNUSED(pgraph);
  rtStream_t streamId;
  ccStatus_t ret = GetStream(handle, &streamId);
  CM_CHECK(ret != CC_STATUS_SUCCESS, CM_LOGE("ccSetKernelContext GetStream Fail."), return CC_STATUS_INTERNAL_ERROR);
  uint32_t opIndex;
  ret = ccGetOpIndex(handle, opIndex);
  CM_CHECK(ret != CC_STATUS_SUCCESS, CM_LOGE("ccSetKernelContext GetOpIndex Fail."), return CC_STATUS_INTERNAL_ERROR);

  // Fix: hold the lock across the whole record update. The original released
  // it right after find() and then dereferenced the iterator, racing with
  // ccClearOpMap(), which deletes the record. The RAII guard also unlocks on
  // every macro early-return below.
  std::lock_guard<std::mutex> lockGuard(taskdownMtx_);
  kernelOpMap_t::iterator it = kernelOpMap_.find(streamId);
  if (it == kernelOpMap_.end()) {
    CM_LOGI("ccSetKernelContext kernelOpMap_ Get Fail, not taskdown mode.");
    return CC_STATUS_SUCCESS;
  }

  kKernelOp *kernelOpContext = it->second;
  kernelOpContext->opId = opId;
  kernelOpContext->opIndex = opIndex;
  kernelOpContext->isFlowtable = isFlowtable;
  kernelOpContext->kernelType = kernelType;
  kernelOpContext->available = true;
  kernelOpContext->kernelFuncId = (uint32_t)CC_TASKDOWN_RESERVED;
  CM_LOGI("in ccSetKernelContext get opId=%u, opIndex = %u.", opId, opIndex);

  if (opId == CCE_DNN_OP_POOLING) {
    // Pooling: kernelFuncId is the pooling mode.
    kernelOpContext->kernelFuncId = (uint32_t)cce::CC_POOLING_RESERVED;
    uint32_t attrLen = 0;
    const void *attrValue = nullptr;
    attrList.Get(POOLING_KERNEL_PARAM, attrLen, attrValue);
    CHECK_NULL_WITH_RET(attrValue, CC_STATUS_BAD_PARAM);
    JUDGE_IF_ZERO_RETURN_VALUE(attrLen, CC_STATUS_BAD_PARAM);
    cce::kernel::kPoolingParam_t *param = (cce::kernel::kPoolingParam_t *)attrValue;
    kernelOpContext->kernelFuncId = (uint32_t)(param->poolingMode);
    CM_LOGI("in ccSetKernelContext kernelOpContext->kernelFuncId is %d", kernelOpContext->kernelFuncId);
  } else if (opId == CCE_DNN_OP_SOFTMAX) {
    (void)ccSetKernelContextSoftmax(kernelOpContext, attrList);
  } else if (opId == CCE_DNN_OP_ELTWISE) {
    // Eltwise: kernelFuncId is the output tensor's data type.
    uint32_t attrLen = 0;
    const void *attrValue = nullptr;
    attrList.Get(ELTWISE_KERNEL_TENSOR_OUTPUT, attrLen, attrValue);
    CHECK_NULL_WITH_RET(attrValue, CC_STATUS_BAD_PARAM);
    JUDGE_IF_ZERO_RETURN_VALUE(attrLen, CC_STATUS_BAD_PARAM);
    cce::kernel::kTensor_t *outputTensor = (cce::kernel::kTensor_t *)attrValue;
    kernelOpContext->kernelFuncId = (uint32_t)(outputTensor->dataType);
  } else if (opId == CCE_DNN_OP_FC) {
    // FC: compressed variant when weights are unzipped.
    kernelOpContext->kernelFuncId = (uint32_t)CC_TASKDOWN_FC;
    uint32_t attrLen = 0;
    const void *attrValue = nullptr;
    attrList.Get(FC_KERNEL_UNZIP_PARAM, attrLen, attrValue);
    CHECK_ONLY_RET(attrValue, CC_STATUS_SUCCESS);
    JUDGE_IF_ZERO_RETURN_VALUE(attrLen, CC_STATUS_BAD_PARAM);
    cce::kernel::kFcUnzipParam_t *param = (cce::kernel::kFcUnzipParam_t *)attrValue;
    if (param->isUnzip == true) {
      kernelOpContext->kernelFuncId = (uint32_t)CC_TASKDOWN_FC_COMPRESS;
    }
  } else if (opId == CCE_DNN_OP_CONV) {
    // Conv: dedicated kernel only for depthwise mode.
    kernelOpContext->kernelFuncId = (uint32_t)cce::CC_CONV_MODE_RESERVED;
    uint32_t attrLen = 0;
    const void *attrValue = nullptr;
    attrList.Get(CONV_KERNEL_PARAM, attrLen, attrValue);
    CHECK_ONLY_RET(attrValue, CC_STATUS_SUCCESS);
    cce::kernel::kConvParam_t *param = (cce::kernel::kConvParam_t *)attrValue;
    if (cce::CC_CONV_MODE_DEPTHWISE == param->mode) {
      kernelOpContext->kernelFuncId = (uint32_t)cce::CC_CONV_MODE_DEPTHWISE;
      CM_LOGI("in ccSetKernelContext kernelOpContext->kernelFuncId is %d", kernelOpContext->kernelFuncId);
    }
  }

  return CC_STATUS_SUCCESS;
}

/**
 * @brief  Consume the stream's kernel-op context into opContext. The record is
 *         one-shot: a successful read clears its 'available' flag.
 * @param  streamId   [in]  runtime stream id.
 * @param  opContext  [out] receives the op context, or kernelType=INVALID when
 *                          the stream is unknown or the record is not available.
 * @return CC_STATUS_SUCCESS always.
 */
ccStatus_t ccGetKernelContext(rtStream_t streamId, ccOpContext &opContext)
{
  // RAII lock: released on every return path.
  std::lock_guard<std::mutex> lock(taskdownMtx_);
  kernelOpMap_t::const_iterator it = kernelOpMap_.find(streamId);
  if (it == kernelOpMap_.end()) {
    opContext.kernelType = INVALID;
    return CC_STATUS_SUCCESS;
  }

  kKernelOp *kernelOpContext = it->second;

  if (kernelOpContext != nullptr && kernelOpContext->available == true) {
    opContext.opId = kernelOpContext->opId;
    opContext.kernelFuncId = kernelOpContext->kernelFuncId;
    opContext.isFlowtable = kernelOpContext->isFlowtable;
    opContext.kernelType = kernelOpContext->kernelType;
    kernelOpContext->available = false;  // consumed
  } else {
    opContext.kernelType = INVALID;
  }

  return CC_STATUS_SUCCESS;
}

/**
 * @brief  Look up the opIndex recorded for a stream.
 * @param  streamId  [in]  runtime stream id; must not be null.
 * @param  opIndex   [out] receives the recorded op index.
 * @return CC_STATUS_SUCCESS, or CC_STATUS_INTERNAL_ERROR when the stream is
 *         null, unregistered, or has a null record.
 */
ccStatus_t ccGetOpIndexByStream(rtStream_t streamId, uint32_t &opIndex)
{
  if (streamId == nullptr) {
    CM_LOGE("ccGetOpIndexByStream handle is NULL!");
    return CC_STATUS_INTERNAL_ERROR;
  }

  {
    // RAII lock replaces the three manual unlock paths of the original.
    std::lock_guard<std::mutex> lock(taskdownMtx_);
    kernelOpMap_t::const_iterator it = kernelOpMap_.find(streamId);
    if (it == kernelOpMap_.end()) {
      CM_LOGI("ccGetOpIndexByStream can not find streamId!");
      return CC_STATUS_INTERNAL_ERROR;
    }

    kKernelOp* kernelOp = it->second;
    if (kernelOp == nullptr) {
      CM_LOGI("ccGetOpIndexByStream kernelOp is NULL!");
      return CC_STATUS_INTERNAL_ERROR;
    }

    opIndex = kernelOp->opIndex;
  }

  CM_LOGI("ccGetOpIndexByStream streamId is %lu, opIndex is %u.", (uint64_t)(uintptr_t)streamId, opIndex);
  return CC_STATUS_SUCCESS;
}

/**
 * @brief  Read the opIndex stored on a handle.
 * @param  handle   [in]  context handle; must not be null.
 * @param  opIndex  [out] receives handle->opIndex.
 * @return CC_STATUS_SUCCESS, or CC_STATUS_INTERNAL_ERROR when handle is null.
 */
ccStatus_t ccGetOpIndex(ccHandle_t handle, uint32_t &opIndex)
{
  if (handle != nullptr) {
    opIndex = handle->opIndex;
    return CC_STATUS_SUCCESS;
  }

  CM_LOGE("ccGetOpIndex handle is NULL!");
  return CC_STATUS_INTERNAL_ERROR;
}

/**
 * @brief  Store an opIndex on a handle.
 * @param  handle   [in] context handle; must not be null.
 * @param  opIndex  [in] op index to record.
 * @return CC_STATUS_SUCCESS, or CC_STATUS_INTERNAL_ERROR when handle is null.
 */
ccStatus_t ccSetOpIndex(ccHandle_t handle, uint32_t opIndex)
{
  if (handle == nullptr) {
    // Fix: the original log wrongly said "ccGetOpIndex".
    CM_LOGE("ccSetOpIndex handle is NULL!");
    return CC_STATUS_INTERNAL_ERROR;
  }

  handle->opIndex = opIndex;
  return CC_STATUS_SUCCESS;
}

/**
 * @brief  Look up the L2 task info for (streamId, opIndex).
 * @param  streamId  [in]  runtime stream id.
 * @param  opIndex   [in]  op index within the stream.
 * @param  l2Data    [out] set to point at the entry stored in streaml2Map_.
 *                         NOTE(review): the pointer aims into the global map and
 *                         outlives the lock — it is only safe while no other
 *                         thread erases this stream's entry; confirm callers.
 * @return CC_STATUS_SUCCESS, or CC_STATUS_INTERNAL_ERROR when either key is missing.
 */
ccStatus_t ccGetStreamL2Map(rtStream_t streamId, uint32_t opIndex, fusion::TaskL2Info_t *&l2Data)
{
  // RAII lock: released on every return path.
  std::lock_guard<std::mutex> lock(taskdownMtx_);
  streaml2Map_t::iterator streamiter = streaml2Map_.find(streamId);
  if (streamiter == streaml2Map_.end()) {
    CM_LOGD("streamiter find Fail, streamId %lu, opIndex %u.", (uint64_t)(uintptr_t)streamId, opIndex);
    return CC_STATUS_INTERNAL_ERROR;
  }

  fusion::TaskL2InfoMap_t &l2infomap = streamiter->second;
  fusion::TaskL2InfoMap_t::iterator opiter = l2infomap.find(opIndex);
  if (opiter == l2infomap.end()) {
    CM_LOGE("opiter find Fail opIndex is %u.", opIndex);
    return CC_STATUS_INTERNAL_ERROR;
  }

  l2Data = &(opiter->second);

  return CC_STATUS_SUCCESS;
}

/**
 * @brief  Drop the L2 allocation map for the handle's stream.
 * @param  handle  [in] context handle; a null/invalid handle is tolerated.
 * @return CC_STATUS_SUCCESS always (clearing a non-registered stream is a no-op).
 */
ccStatus_t ccClearStreamL2Map(ccHandle_t handle)
{
  rtStream_t streamId;

  ccStatus_t ret = GetStream(handle, &streamId);
  if (ret != CC_STATUS_SUCCESS) {
    // Fix: the original log wrongly said "ccClearOpMap Fail.".
    CM_LOGE("ccClearStreamL2Map Fail.");
    return CC_STATUS_SUCCESS;  // deliberately best-effort: nothing to clear
  }

  std::lock_guard<std::mutex> lock(taskdownMtx_);
  streaml2Map_.erase(streamId);
  return CC_STATUS_SUCCESS;
}

/*lint -e429*/
/**
 * @brief  Install (or overwrite) the L2 allocation map for the handle's
 *         stream; each task's isUsed flag is reset before first install.
 * @param  handle      [in] context handle; must resolve to a stream.
 * @param  l2AllocRes  [in] per-opIndex L2 allocation results (copied in).
 * @return CC_STATUS_SUCCESS, or GetStream's error.
 */
ccStatus_t ccSetStreamL2Map(ccHandle_t handle, fusion::TaskL2InfoMap_t &l2AllocRes)
{
  rtStream_t streamId;
  ccStatus_t ret = GetStream(handle, &streamId);
  if (ret != CC_STATUS_SUCCESS) {
    CM_LOGE("ccSetStreamL2Map Fail.");
    return ret;
  }

  // RAII lock: released on every return path.
  std::lock_guard<std::mutex> lock(taskdownMtx_);
  streaml2Map_t::iterator it = streaml2Map_.find(streamId);
  if (it != streaml2Map_.end()) {
    CM_LOGE("ccSetStreamL2Map steamMap has been set.");
    it->second = l2AllocRes;  // overwrite without resetting isUsed (matches original behavior)
    return CC_STATUS_SUCCESS;
  }

  // Fresh install: mark every task unused before publishing the map.
  for (fusion::TaskL2InfoMap_t::iterator iter = l2AllocRes.begin(); iter != l2AllocRes.end(); iter++) {
    fusion::TaskL2Info_t *l2task = &(iter->second);
    l2task->isUsed = 0;
  }

  streaml2Map_[streamId] = l2AllocRes;
  CM_LOGD("ccSetStreamL2Map streamId is %lu.", (uint64_t)(uintptr_t)streamId);

  return CC_STATUS_SUCCESS;
}

/**
 * @brief  Attach tensor read counts to the stream's kernel-op record; the
 *         counts are moved (swapped) out of the caller's map.
 * @param  streamId  [in]     runtime stream id.
 * @param  tensorRc  [in,out] tensor address -> read count; emptied on success.
 * @return CC_STATUS_SUCCESS (also for an empty input), or
 *         CC_STATUS_INTERNAL_ERROR when the stream/record is unusable.
 */
ccStatus_t ccSetKernelReadCount(rtStream_t streamId, map <uint64_t, uint32_t>  &tensorRc)
{
  if (tensorRc.empty()) {
    return CC_STATUS_SUCCESS;
  }

  // Fix: hold the lock across the record update. The original released it
  // right after find() and then dereferenced the iterator, racing with
  // ccClearOpMap(), which deletes the record.
  std::lock_guard<std::mutex> lock(taskdownMtx_);
  kernelOpMap_t::iterator it = kernelOpMap_.find(streamId);
  if (it == kernelOpMap_.end()) {
    CM_LOGE("ccSetReadCount kernelOpMap Get Fail.");
    return CC_STATUS_INTERNAL_ERROR;
  }

  kKernelOp *kernelOpContext = it->second;
  if (!kernelOpContext || kernelOpContext->available != true) {
    CM_LOGE("ccSetReadCount KernelContext isn't available.");
    return CC_STATUS_INTERNAL_ERROR;
  }

  kernelOpContext->isRcSet = true;
  kernelOpContext->tensorRc.clear();
  kernelOpContext->tensorRc.swap(tensorRc);  // O(1) transfer; caller's map ends up empty

  CM_LOGI("ccSetReadCount opId=%u, rc count = %lu.", kernelOpContext->opId, kernelOpContext->tensorRc.size());
  return CC_STATUS_SUCCESS;
}


/**
 * @brief  Consume pending tensor read counts for a stream. One-shot: a
 *         successful read clears isRcSet and moves the counts into rc.
 * @param  streamId  [in]  runtime stream id.
 * @param  rc        [out] rc.isEnable set true only when counts were pending;
 *                         rc.tensorRc receives them by swap.
 * @return CC_STATUS_SUCCESS always.
 */
ccStatus_t ccGetKernelReadCount(rtStream_t streamId, ccOpReadCount &rc)
{
  rc.isEnable = false;

  // RAII lock: released on every return path.
  std::lock_guard<std::mutex> lock(taskdownMtx_);
  kernelOpMap_t::const_iterator it = kernelOpMap_.find(streamId);
  if (it == kernelOpMap_.end()) {
    return CC_STATUS_SUCCESS;
  }

  auto kernelOpContext = it->second;
  if (kernelOpContext != nullptr && kernelOpContext->isRcSet == true) {
    rc.isEnable = true;
    kernelOpContext->isRcSet = false;              // consumed
    rc.tensorRc.swap(kernelOpContext->tensorRc);   // O(1) transfer of the counts
  }
  return CC_STATUS_SUCCESS;
}

/*lint +e429*/
} // namespace fe
