/*
 * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "MOTEmbedding.h"

#include <cmath>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <new>

#include "opencv2/highgui.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"

#include "Log/Log.h"
#include "FileEx/FileEx.h"
#include "FrameCache/FrameCache.h"

namespace ascendFaceRecognition {
namespace {
// normal_mode config value selecting CPU-side L2-norm computation (see PostData).
const uint32_t CPU_NORMALIZATION_MODE = 0;
} // namespace

// Construct the module in the stopped state with an unassigned instance id;
// real setup happens in Init().
MOTEmbedding::MOTEmbedding()
{
    instanceId_ = -1;
    isStop_ = true;
}

MOTEmbedding::~MOTEmbedding() {}

/*
 * Prepare the model's input/output tensor descriptors.
 * The single input slot is left null here and bound per-inference in
 * ProcessFaceObjectQueue(); output buffers are allocated on the device
 * and released in DeInit().
 * @return APP_ERR_OK on success, an error code otherwise.
 */
APP_ERROR MOTEmbedding::InitResource(void)
{
    LogDebug << "MOTEmbedding[" << instanceId_ << "]: Begin to init device resource.";
    size_t inputNum = modelInfer_->GetModelNumInputs();
    if (inputNum != 1) {
        // This module binds exactly one input tensor per inference.
        LogFatal << "MOTEmbedding[" << instanceId_ << "]: input tensor size is invalid " << inputNum;
        return APP_ERR_COMM_FAILURE;
    }
    LogDebug << "inputNum = " << inputNum;
    for (size_t i = 0; i < inputNum; i++) {
        void* buffer = nullptr; // bound to the frame's embedding memory block at inference time
        size_t size = modelInfer_->GetModelInputSizeByIndex(i);
        inputDataSet_.bufs.push_back(buffer);
        inputDataSet_.sizes.push_back(size);
        LogDebug << "MOTEmbedding[" << instanceId_ << "]: model input tensor i = " << i << ", size = " << size;
    }

    size_t outputNum = modelInfer_->GetModelNumOutputs();
    if (outputNum == 0) {
        LogFatal << "MOTEmbedding[" << instanceId_ << "]: output tensor size is invalid " << outputNum;
        return APP_ERR_COMM_FAILURE;
    }
    LogDebug << "MOTEmbedding[" << instanceId_ << "]: outputNum = " << outputNum;
    for (size_t i = 0; i < outputNum; i++) {
        void* buffer = nullptr;
        size_t size = modelInfer_->GetModelOutputSizeByIndex(i);
        APP_ERROR ret = aclrtMalloc(&buffer, size, ACL_MEM_MALLOC_HUGE_FIRST);
        if (ret != APP_ERR_OK) {
            LogFatal << "MOTEmbedding[" << instanceId_ << "]: create output failed!";
            return ret;
        }
        outputDataSet_.bufs.push_back(buffer);
        outputDataSet_.sizes.push_back(size);
        LogDebug << "MOTEmbedding[" << instanceId_ << "]: model output tensor i = " << i << ", size = " << size;
    }

    return APP_ERR_OK;
}

/*
 * Read this module's configuration values (batch_size, enable, model_path,
 * normal_mode) from the parser, keyed by "<moduleName_>.<item>".
 * @param configParser source of configuration items.
 * @return APP_ERR_OK on success, the first lookup error otherwise.
 */
APP_ERROR MOTEmbedding::ParseConfig(ConfigParser &configParser)
{
    LogDebug << "MOTEmbedding[" << instanceId_ << "]: begin to parse config values.";
    std::string itemCfgStr = moduleName_ + std::string(".batch_size");
    APP_ERROR ret = configParser.GetUnsignedIntValue(itemCfgStr, batchSize_);
    if (ret != APP_ERR_OK) {
        return ret;
    }

    itemCfgStr = moduleName_ + std::string(".enable");
    int enable = 0;
    ret = configParser.GetIntValue(itemCfgStr, enable);
    if (ret != APP_ERR_OK) {
        return ret;
    }
    enable_ = enable;

    itemCfgStr = moduleName_ + std::string(".model_path");
    ret = configParser.GetStringValue(itemCfgStr, modelPath_);
    if (ret != APP_ERR_OK) {
        return ret;
    }

    itemCfgStr = moduleName_ + std::string(".normal_mode");
    ret = configParser.GetUnsignedIntValue(itemCfgStr, normalMode_);
    if (ret != APP_ERR_OK) {
        // Fixed: previous message reported the wrong module name ("FaceFeature").
        LogFatal << "MOTEmbedding[" << instanceId_ << "]: Fail to get config variable named " << itemCfgStr << ".";
        return ret;
    }

    LogDebug << "MOTEmbedding[" << instanceId_ << "]" << " batchSize_:" << batchSize_ <<
             " modelPath_:" << modelPath_.c_str();

    return ret;
}

/*
 * Initialize the module: create the device stream, parse configuration,
 * acquire the shared model process, and set up tensor resources.
 * @param configParser configuration source.
 * @param initArgs instance arguments applied via AssignInitArgs().
 * @return APP_ERR_OK on success, an error code otherwise.
 */
APP_ERROR MOTEmbedding::Init(ConfigParser &configParser, ModuleInitArgs &initArgs)
{
    LogDebug << "MOTEmbedding[" << instanceId_ << "]: Begin to init MOT feature extraction instance" <<
             initArgs.instanceId;

    APP_ERROR ret = aclrtCreateStream(&mdlStream_);
    if (ret != APP_ERR_OK) {
        // Fixed: previous message reported the wrong module name ("VideoResize").
        LogError << "MOTEmbedding[" << instanceId_ << "]: aclrtCreateStream failed, ret=" << ret << ".";
        return ret;
    }

    AssignInitArgs(initArgs);

    isStop_ = false;

    // initialize config params
    ret = ParseConfig(configParser);
    if (ret != APP_ERR_OK) {
        LogFatal << "MOTEmbedding[" << instanceId_ << "]: Fail to parse config params, ret=" << ret << "(" <<
                 GetAppErrCodeInfo(ret) << ").";
        return ret;
    }

    // init model (shared per model path/instance via ModelResource)
    modelInfer_ = ModelResource::GetInstance().GetModelProcess(modelPath_, instanceId_);
    if (modelInfer_ == nullptr) {
        // Fixed: previous message reported the wrong module name ("FaceDetection").
        LogFatal << "MOTEmbedding[" << instanceId_ << "]::init model failed";
        return APP_ERR_COMM_FAILURE;
    }

    // init input/output tensor resources
    ret = InitResource();
    if (ret != APP_ERR_OK) {
        LogFatal << "MOTEmbedding[" << instanceId_ << "]: init resource failed";
        return ret;
    }

    return APP_ERR_OK;
}

/*
 * Release device resources: destroy the inference stream and free the
 * output buffers allocated in InitResource(). Input slots are merely
 * unbound — they never own memory (see ProcessFaceObjectQueue).
 * @return APP_ERR_OK always; stream-destroy failures are only logged.
 */
APP_ERROR MOTEmbedding::DeInit(void)
{
    LogDebug << "MOTEmbedding[" << instanceId_ << "]::begin to deinit.";

    if (mdlStream_ != nullptr) {
        const APP_ERROR streamRet = aclrtDestroyStream(mdlStream_);
        if (streamRet != APP_ERR_OK) {
            LogError << "Failed to destroy stream, ret = " << streamRet;
        }
        mdlStream_ = nullptr;
    }

    for (auto &outBuf : outputDataSet_.bufs) {
        aclrtFree(outBuf);
        outBuf = nullptr;
    }
    for (auto &inBuf : inputDataSet_.bufs) {
        inBuf = nullptr;
    }

    return APP_ERR_OK;
}

/*
 * Compute the Euclidean (L2) norm of a feature vector.
 * @param feature pointer to featureSize float components.
 * @param featureSize number of components in the vector.
 * @return sqrt of the sum of squared components.
 */
float MOTEmbedding::FeatureNorm(const float* feature, const uint32_t &featureSize)
{
    float squaredSum = 0.f;
    uint32_t idx = 0;
    while (idx < featureSize) {
        const float component = feature[idx];
        squaredSum += component * component;
        ++idx;
    }
    return sqrt(squaredSum);
}

/*
 * Copy each face's embedding vector out of the batched model output buffer
 * and attach it (plus its norm, when CPU normalization is configured) to
 * the corresponding face record.
 * Assumes outputDataSet_.bufs[0] holds batchSize_ consecutive vectors
 * produced by the preceding inference.
 * @param frameMemoryInfo batch descriptor whose face entries are filled in.
 * @return APP_ERR_OK on success, an error code otherwise.
 */
APP_ERROR MOTEmbedding::PostData(std::shared_ptr<FrameMemoryInfo> &frameMemoryInfo)
{
    LogDebug << "MOTEmbedding[" << instanceId_ << "]:post process model outputs.";
    int featureVecterSize =
        outputDataSet_.sizes[0] / batchSize_; // byte size of one embedding within the batched output
    uint8_t* resPtr = (uint8_t*) outputDataSet_.bufs[0]; // walks the batched output, one vector per face
    APP_ERROR ret = APP_ERR_OK;

    for (size_t i = 0; i < frameMemoryInfo->face.size(); i++) {
        // nothrow new so allocation failure is reported via the error path instead of throwing
        // (the previous plain new made the null check below dead code).
        std::shared_ptr<uint8_t> outBuffer(new (std::nothrow) uint8_t[featureVecterSize],
            std::default_delete<uint8_t[]>());
        if (outBuffer == nullptr) {
            LogFatal << "MOTEmbedding[" << instanceId_ << "]: embedding buffer allocation failed";
            return APP_ERR_COMM_FAILURE;
        }

        if (runMode_ == ACL_HOST) { // under ACL_HOST mode, copy the vector from device to host
            ret = aclrtMemcpy(outBuffer.get(), featureVecterSize, resPtr, featureVecterSize, ACL_MEMCPY_DEVICE_TO_HOST);
        } else if (runMode_ == ACL_DEVICE) { // under ACL_DEVICE mode, the output buffer is directly addressable
            std::copy(resPtr, resPtr + featureVecterSize, outBuffer.get());
            ret = APP_ERR_OK;
        }
        if (ret != APP_ERR_OK) {
            LogError << "MOTEmbedding[" << instanceId_ << "]: aclrtMemcpy error!";
            return ret;
        }

        float norm = 1.f; // neutral norm when normalization happens elsewhere
        if (normalMode_ == CPU_NORMALIZATION_MODE) {
            norm = FeatureNorm((float*) outBuffer.get(), featureVecterSize / sizeof(float));
        }

        if (runMode_ == ACL_HOST) { // store on the host-side slot
            frameMemoryInfo->face[i].embedding.hostData = outBuffer;
        } else if (runMode_ == ACL_DEVICE) { // store on the device-side slot
            frameMemoryInfo->face[i].embedding.deviceData = outBuffer;
        }
        frameMemoryInfo->face[i].embedding.dataSize = featureVecterSize;
        frameMemoryInfo->face[i].embeddingNorm = norm;
        resPtr += featureVecterSize;
    }

    return APP_ERR_OK;
}

/*
 * Run the embedding model over a batch of faces, post-process the outputs,
 * and forward each cached frame downstream once all of its faces carry an
 * embedding.
 * @param frameMemoryInfo batch descriptor holding the input memory block.
 * @return APP_ERR_OK on success, an error code otherwise.
 */
APP_ERROR MOTEmbedding::ProcessFaceObjectQueue(std::shared_ptr<FrameMemoryInfo> &frameMemoryInfo)
{
    // inference: the input block size must match the model's expected input size
    if (inputDataSet_.sizes[0] != frameMemoryInfo->embeddingMemoryBlockSize) {
        // Fixed: previously logged the buffer pointer instead of the size.
        LogError << "embeddingMemoryBlockSize is not equal to the inputSizes," << "inputSizes = "
                 << inputDataSet_.sizes[0] << " embeddingMemoryBlockSize = "
                 << frameMemoryInfo->embeddingMemoryBlockSize;
        return APP_ERR_COMM_INVALID_PARAM;
    }

    inputDataSet_.bufs[0] = frameMemoryInfo->embeddingMemoryBlock.get();
    APP_ERROR ret = modelInfer_->ModelInference(inputDataSet_, outputDataSet_, mdlStream_);
    // The input slot only borrows the frame's memory block; unbind it regardless of outcome.
    inputDataSet_.bufs[0] = nullptr;
    frameMemoryInfo->embeddingMemoryBlock.reset();
    if (ret != APP_ERR_OK) {
        LogError << "MOTEmbedding[" << instanceId_ << "]: infer error!";
        return ret;
    }
    // post process outputs
    ret = PostData(frameMemoryInfo);
    if (ret != APP_ERR_OK) {
        // Fixed: previously mislabeled as an inference error.
        LogError << "MOTEmbedding[" << instanceId_ << "]: post process error!";
        return ret;
    }
    for (size_t i = 0; i < frameMemoryInfo->face.size(); ++i) {
        std::shared_ptr<FrameAiInfo> frameAiInfo =
            FrameCache::GetInstance(frameMemoryInfo->face[i].frameInfo.channelId)->GetFrame(
                frameMemoryInfo->face[i].frameInfo.frameId);
        if (frameAiInfo == nullptr) {
            // frame already evicted from the cache; drop this face
            continue;
        }
        frameAiInfo->face[frameAiInfo->embeddingCount] = frameMemoryInfo->face[i];
        frameAiInfo->embeddingCount++;
        // forward the frame once every one of its faces has an embedding
        if ((uint32_t) frameAiInfo->embeddingCount == frameAiInfo->face.size()) {
            LogDebug << "[Cache] " << frameAiInfo->embeddingCount << " Faces Recv Send Ch:" <<
                     frameAiInfo->info.channelId << " frame:" << frameAiInfo->info.frameId;
            SendToNextModule(MT_MOT_CONNECTION, frameAiInfo, frameAiInfo->info.channelId);
            FrameCache::GetInstance(frameAiInfo->info.channelId)->ClearFrame(frameAiInfo->info.frameId);
        }
    }

    return APP_ERR_OK;
}

/*
 * A frame with no detected faces needs no embedding work: forward it
 * downstream immediately and drop it from the frame cache.
 * @param frameAiInfo the cached frame to pass through.
 * @return APP_ERR_OK always.
 */
APP_ERROR MOTEmbedding::ProcessEmptyFaceFrame(std::shared_ptr<FrameAiInfo> &frameAiInfo)
{
    auto &info = frameAiInfo->info;
    SendToNextModule(MT_MOT_CONNECTION, frameAiInfo, info.channelId);
    FrameCache::GetInstance(info.channelId)->ClearFrame(info.frameId);
    return APP_ERR_OK;
}

/*
 * Handle a batch that contains faces. With embedding enabled the batch is
 * run through the model; otherwise faces are attached to their cached
 * frames as-is, and each frame is forwarded downstream once all of its
 * faces have been accounted for.
 * @param frameMemoryInfo batch descriptor for the faces to process.
 * @return APP_ERR_OK on success, an error code otherwise.
 */
APP_ERROR MOTEmbedding::ProcessMulitFaceFrame(std::shared_ptr<FrameMemoryInfo> &frameMemoryInfo)
{
    if (enable_) {
        return ProcessFaceObjectQueue(frameMemoryInfo);
    }

    for (size_t idx = 0; idx < frameMemoryInfo->face.size(); ++idx) {
        auto &faceObj = frameMemoryInfo->face[idx];
        std::shared_ptr<FrameAiInfo> aiInfo =
            FrameCache::GetInstance(faceObj.frameInfo.channelId)->GetFrame(faceObj.frameInfo.frameId);
        if (aiInfo == nullptr) {
            // frame already evicted from the cache; drop this face
            continue;
        }
        aiInfo->face[aiInfo->embeddingCount] = faceObj;
        aiInfo->embeddingCount++;
        LogDebug << "FrameCache MOTEmbedding Recv " << faceObj.frameInfo.channelId << "_" <<
                 faceObj.frameInfo.frameId << "Count " << aiInfo->embeddingCount;
        // Read Cache: forward the frame once every face has been received
        if ((uint32_t) aiInfo->embeddingCount == aiInfo->face.size()) {
            LogDebug << "[Cache] " << aiInfo->embeddingCount << " Faces Recv Send Ch:" <<
                     aiInfo->info.channelId << " frame:" << aiInfo->info.frameId;
            SendToNextModule(MT_MOT_CONNECTION, aiInfo, aiInfo->info.channelId);
            FrameCache::GetInstance(aiInfo->info.channelId)->ClearFrame(aiInfo->info.frameId);
        }
    }
    return APP_ERR_OK;
}

/*
 * Module entry point: interpret the incoming payload as a FrameMemoryInfo,
 * look up its cached frame, and route it to the empty-face or multi-face
 * handling path.
 * @param inputData expected to hold a FrameMemoryInfo.
 * @return APP_ERR_OK on success, an error code otherwise.
 */
APP_ERROR MOTEmbedding::Process(std::shared_ptr<void> inputData)
{
    std::shared_ptr<FrameMemoryInfo> frameMemoryInfo = std::static_pointer_cast<FrameMemoryInfo>(inputData);
    if (frameMemoryInfo == nullptr) {
        LogError << "FrameCache MOTEmbedding[" << instanceId_ << "]: Cann't Found frameMemoryInfo!";
        return APP_ERR_COMM_FAILURE;
    }
    if (frameMemoryInfo->infoVector.empty()) {
        // Guard the infoVector[0] accesses below: a batch with no frame info is malformed.
        LogError << "FrameCache MOTEmbedding[" << instanceId_ << "]: empty infoVector!";
        return APP_ERR_COMM_INVALID_PARAM;
    }
    LogDebug << "MOTEmbedding Ch #" << frameMemoryInfo->infoVector[0].channelId;
    std::shared_ptr<FrameAiInfo> frameAiInfo =
        FrameCache::GetInstance(frameMemoryInfo->infoVector[0].channelId)->GetFrame(
            frameMemoryInfo->infoVector[0].frameId);
    if (frameAiInfo == nullptr) {
        LogError << "FrameCache MOTEmbedding[" << instanceId_ << "]: Cann't Found FrameAiInfo!";
        return APP_ERR_COMM_FAILURE;
    }

    APP_ERROR ret;
    if (frameAiInfo->face.empty()) {
        ret = ProcessEmptyFaceFrame(frameAiInfo);
    } else {
        ret = ProcessMulitFaceFrame(frameMemoryInfo);
    }
    return ret;
}
} // namespace ascendFaceRecognition
