/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at

* http://www.apache.org/licenses/LICENSE-2.0

* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.

* File extractor.cpp
* Description: DeepSORT appearance feature extractor based on ACL resources
*/
#include "deepsort/extractor.h"
#include <iostream>
#include "acl/acl.h"
#include "utils.h"
#include <numeric>
#include <fstream>

using namespace std;

namespace {
// Index of the model output dataset buffer that holds the extracted feature
// vectors (used by Postprocess/GetInferenceOutputItem).
// NOTE(review): the previous comment said "detection box information data",
// which belongs to the detector, not this extractor -- confirm against the
// .om model's output layout.
const uint32_t kDataBufId = 0;
// Maximum dynamic batch size: number of detection crops per inference and
// the sizing factor for the batched device input buffer.
const uint32_t kMaxBatchSize = 32;
}

// Construct an extractor for the given offline model path and the model's
// expected input width/height. No ACL resources are acquired here; all
// real setup is deferred to Init().
Extractor::Extractor(const char* modelPath, uint32_t modelWidth, uint32_t modelHeight)
    : deviceId_(0),
      context_(nullptr),
      stream_(nullptr),
      modelWidth_(modelWidth),
      modelHeight_(modelHeight),
      isInited_(false) {
    modelPath_ = modelPath;
}

// Release every resource acquired in Init() (model, datasets, DVPP).
Extractor::~Extractor() {
    DestroyResource();
}


// Load the offline (.om) model and prepare its description and output
// dataset. Returns FAILED as soon as any step fails, SUCCESS otherwise.
Result Extractor::InitModel(const char* omModelPath) {
    if (model_.LoadModel(omModelPath) != SUCCESS) {
        ERROR_LOG("execute LoadModelFromFileWithMem failed");
        return FAILED;
    }

    if (model_.CreateDesc() != SUCCESS) {
        ERROR_LOG("execute CreateDesc failed");
        return FAILED;
    }

    if (model_.CreateOutput() != SUCCESS) {
        ERROR_LOG("execute CreateOutput failed");
        return FAILED;
    }

    return SUCCESS;
}

// Bind the extractor to ACL resources owned by the caller (device id,
// context, stream, run mode), load the model, init DVPP and allocate the
// reusable batched input buffer. Idempotent: a second call is a no-op.
// Returns SUCCESS on success, FAILED on any setup error.
Result Extractor::Init(int32_t& deviceId, aclrtContext& context, aclrtStream& stream, aclrtRunMode& runMode) {
    if (isInited_) {
        INFO_LOG("Classify instance is initied already!");
        return SUCCESS;
    }

    deviceId_ = deviceId;
    context_ = context;
    stream_ = stream;
    runMode_ = runMode;

    // Fix: InitModel returns Result, not aclError -- use the matching type
    // instead of funneling a Result through an aclError variable.
    Result ret = InitModel(modelPath_);
    if (ret != SUCCESS) {
        ERROR_LOG("Init model failed");
        return FAILED;
    }

    ret = dvpp_.InitResource(stream_);
    if (ret != SUCCESS) {
        ERROR_LOG("Init dvpp failed");
        return FAILED;
    }

    ret = CreateExtractorInputdDataset();
    if (ret != SUCCESS) {
        ERROR_LOG("Create image info buf failed");
        return FAILED;
    }

    isInited_ = true;
    return SUCCESS;
}

// Allocate one device buffer big enough for a full batch of resized RGB
// crops and wrap it in the model input dataset exactly once. Subsequent
// frames only overwrite the buffer contents (see Preprocess) instead of
// recreating the dataset per inference.
Result Extractor::CreateExtractorInputdDataset()
{
    imageDataSize_ = RGBU8_IMAGE_SIZE(modelWidth_, modelHeight_) * kMaxBatchSize;

    aclError aclRet = aclrtMalloc(&imageDataBuf_, imageDataSize_, ACL_MEM_MALLOC_HUGE_FIRST);
    if (aclRet != ACL_ERROR_NONE) {
        ERROR_LOG("malloc device data buffer failed, aclRet is %d", aclRet);
        return FAILED;
    }

    if (model_.CreateInput(imageDataBuf_, imageDataSize_) != SUCCESS) {
        ERROR_LOG("Create model input dataset failed");
        return FAILED;
    }

    return SUCCESS;
}

// Run the full feature-extraction pipeline (preprocess -> inference ->
// postprocess) for the given detections and append one feature vector per
// processed detection to featureMat.
// Note: at most kMaxBatchSize detections are processed per call (Preprocess
// caps the batch), so the batch passed to Postprocess is clamped the same
// way -- previously dets.size() was passed unclamped, which read past the
// inference output when more than kMaxBatchSize detections arrived.
Result Extractor::extract(cv::Mat& srcImage, vector<BBox>& dets, vector<vector<float>>& featureMat) {
    // Nothing to extract is not an error (dets.size() <= 0 on an unsigned
    // size was equivalent to == 0; use empty() for clarity).
    if (dets.empty()) {
        return SUCCESS;
    }

    Result ret = Preprocess(srcImage, dets);
    if (ret != SUCCESS) {
        ERROR_LOG("Preprocess failed");
        return FAILED;
    }

    aclmdlDataset* inferenceOutput = nullptr;
    ret = Inference(inferenceOutput);
    if ((ret != SUCCESS) || (inferenceOutput == nullptr)) {
        ERROR_LOG("Inference model inference output data failed");
        return FAILED;
    }

    // Clamp to the batch size Preprocess actually submitted.
    uint32_t batchSize = std::min(dets.size(), (size_t)kMaxBatchSize);
    ret = Postprocess(inferenceOutput, featureMat, batchSize);
    if (ret != SUCCESS) {
        ERROR_LOG("Process model inference output data failed");
        return FAILED;
    }
    return SUCCESS;
}

// Clamp a (possibly out-of-bounds) float detection rect to the image area
// and return the integer intersection rectangle.
// Fix: the old code clamped x/y up to 0 but kept the full width/height,
// which shifted the crop region and -- when the rect also overflowed the
// right/bottom edge -- produced an ROI larger than the image (width was
// computed from the ORIGINAL negative x), making originImg(rect) throw.
// Truncation (not rounding) is kept deliberately; see the caller's note
// about avoiding cv::Rect2f -> cv::Rect rounding.
cv::Rect regular_rect(cv::Rect2f& rect, cv::Size size) {
    int left = (rect.x < 0) ? 0 : static_cast<int>(rect.x);
    int top = (rect.y < 0) ? 0 : static_cast<int>(rect.y);
    int right = static_cast<int>(rect.x + rect.width);
    int bottom = static_cast<int>(rect.y + rect.height);
    if (right > size.width) {
        right = size.width;
    }
    if (bottom > size.height) {
        bottom = size.height;
    }

    cv::Rect ret;
    ret.x = left;
    ret.y = top;
    // Degenerate (fully outside) rects collapse to zero size instead of
    // going negative.
    ret.width = (right > left) ? (right - left) : 0;
    ret.height = (bottom > top) ? (bottom - top) : 0;
    return ret;
}

// Crop every detection out of the source frame, resize it to the model
// input size and copy the pixels into the pre-allocated batched device
// input buffer (imageDataBuf_). At most kMaxBatchSize detections are
// processed; the model's dynamic batch is then set to the number of
// filled slots.
Result Extractor::Preprocess(cv::Mat& originImg, vector<BBox>& dets) {
    // Host-resident pixel data needs a host->device copy; on device run
    // modes the buffer is already device-accessible.
    aclrtMemcpyKind policy = (runMode_ == ACL_HOST) ?
                             ACL_MEMCPY_HOST_TO_DEVICE : ACL_MEMCPY_DEVICE_TO_DEVICE;

    // Per-crop size must match the per-slot size used when imageDataBuf_
    // was allocated in CreateExtractorInputdDataset (it is sized from
    // modelWidth_/modelHeight_); the old hard-coded 64x128 silently relied
    // on the constructor being called with exactly those values.
    uint32_t resizedImgSize = RGBU8_IMAGE_SIZE(modelWidth_, modelHeight_);
    for (size_t i = 0; i < dets.size(); ++i) {
        // The input buffer only holds kMaxBatchSize crops (was a magic 32).
        if (i == kMaxBatchSize) {
            break;
        }
        // cv::Rect2f -> cv::Rect conversion rounds, which can push the ROI
        // outside originImg; regular_rect clamps it explicitly instead.
        cv::Rect rect = regular_rect(dets[i].rect, originImg.size());
        cv::Mat roiImg = originImg(rect);
        cv::Mat transRoiImg;
        cv::transpose(roiImg, transRoiImg);
        cv::Mat resizedImg;
        cv::resize(transRoiImg, resizedImg, cv::Size(modelWidth_, modelHeight_));

        // aclrtMemcpy returns aclError, not Result -- keep the types apart
        // (the old code cast to Result and compared with ACL_ERROR_NONE).
        aclError aclRet = aclrtMemcpy((void*)((uint8_t*)imageDataBuf_ + i * resizedImgSize),
                                      (kMaxBatchSize - i) * resizedImgSize,
                                      resizedImg.data, resizedImgSize, policy);
        if (aclRet != ACL_ERROR_NONE) {
            ERROR_LOG("Copy resized image data to device failed.");
            return FAILED;
        }
    }

    // Tell the model how many of the kMaxBatchSize slots are filled.
    DynamicInfo dynamicInfo;
    dynamicInfo.dynamicArr[0] = std::min(dets.size(), (size_t)kMaxBatchSize);
    dynamicInfo.dynamicType = DYNAMIC_BATCH;

    Result ret = model_.SetDynamicSize(dynamicInfo);
    if (ret != SUCCESS) {
        ERROR_LOG("execute SetDynamicSize failed.");
        return FAILED;
    }

    return SUCCESS;
}

// Execute the loaded model and, on success, hand back its output dataset
// through inferenceOutput. Returns FAILED if model execution fails.
Result Extractor::Inference(aclmdlDataset*& inferenceOutput) {
    if (model_.Execute() != SUCCESS) {
        ERROR_LOG("Execute model inference failed");
        return FAILED;
    }

    inferenceOutput = model_.GetModelOutputData();
    return SUCCESS;
}

// Copy one feature vector per batch entry out of the model output into
// featureMat. Each entry is kFeatureDim floats laid out contiguously.
// Fix: the buffer size reported by GetInferenceOutputItem is now checked
// against batchSize before reading (previously dataSize was fetched and
// ignored, allowing an out-of-bounds read), and the host copy is freed on
// that error path as well.
Result Extractor::Postprocess(aclmdlDataset* modelOutput, vector<vector<float>>& featureMat, uint32_t batchSize)
{
    // Feature dimension produced by the re-identification model.
    // NOTE(review): 512 matches the original hard-coded stride -- confirm
    // against the .om model's output shape.
    const uint32_t kFeatureDim = 512;

    uint32_t dataSize = 0;
    float* featureData = (float*)GetInferenceOutputItem(dataSize, modelOutput, kDataBufId);
    if (featureData == nullptr) {
        return FAILED;
    }

    // Guard against reading past the end of the output buffer.
    if ((uint64_t)batchSize * kFeatureDim * sizeof(float) > dataSize) {
        ERROR_LOG("Model output size %u too small for batch size %u", dataSize, batchSize);
        if (runMode_ == ACL_HOST) {
            delete[]((uint8_t*)featureData);
        }
        return FAILED;
    }

    for (uint32_t i = 0; i < batchSize; ++i) {
        float* begin = featureData + i * kFeatureDim;
        featureMat.push_back(vector<float>(begin, begin + kFeatureDim));
    }

    // On ACL_HOST, GetInferenceOutputItem returned a host copy that this
    // function owns and must release.
    if (runMode_ == ACL_HOST) {
        delete[]((uint8_t*)featureData);
    }
    return SUCCESS;
}

// Return a readable pointer to output buffer #idx of the inference result
// and report its byte size through itemDataSize.
// Ownership: on ACL_HOST the device data is copied to freshly allocated
// host memory that the CALLER must release with delete[]; on other run
// modes the device pointer is returned directly and must not be freed.
// Returns nullptr (itemDataSize untouched) on any failure.
void* Extractor::GetInferenceOutputItem(uint32_t& itemDataSize,
aclmdlDataset* inferenceOutput,
uint32_t idx) {
    aclDataBuffer* dataBuffer = aclmdlGetDatasetBuffer(inferenceOutput, idx);
    if (dataBuffer == nullptr) {
        // Fix: %u matches the unsigned idx (was %d, a printf format/type
        // mismatch).
        ERROR_LOG("Get the %uth dataset buffer from model "
        "inference output failed", idx);
        return nullptr;
    }

    void* dataBufferDev = aclGetDataBufferAddr(dataBuffer);
    if (dataBufferDev == nullptr) {
        ERROR_LOG("Get the %uth dataset buffer address "
        "from model inference output failed", idx);
        return nullptr;
    }

    size_t bufferSize = aclGetDataBufferSizeV2(dataBuffer);
    if (bufferSize == 0) {
        ERROR_LOG("The %uth dataset buffer size of "
        "model inference output is 0", idx);
        return nullptr;
    }

    void* data = nullptr;
    if (runMode_ == ACL_HOST) {
        // Host process cannot dereference device memory: copy it back.
        data = Utils::CopyDataDeviceToLocal(dataBufferDev, bufferSize);
        if (data == nullptr) {
            ERROR_LOG("Copy inference output to host failed");
            return nullptr;
        }
    }
    else {
        data = dataBufferDev;
    }

    itemDataSize = bufferSize;
    return data;
}



// Tear down everything Init() created: unload the model, free its
// description and input/output datasets, then release the DVPP channel.
// The device, context and stream are owned by the caller and are NOT
// released here. Call order is preserved as-is; ACL teardown ordering may
// matter to the runtime.
void Extractor::DestroyResource()
{
    model_.UnloadModel();
    model_.DestroyDesc();
    model_.DestroyInput();
    model_.DestroyOutput();
    dvpp_.DestroyResource();
}