/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2024. All rights reserved.
 * Description: version definiation for model_run_tool and data_proc_tool.
 */
#include "framework/model/aipp/aipp_tensor_creator.h"

#include <map>
// v2
#include "tensor/image_process_config_types.h"
#include "tensor/image_tensor_buffer.h"
#include "tensor/image_config_tensor_util.h"

#include "framework/infra/log/log.h"
#include "infra/base/securestl.h"
#include "aipp_json_parser.h"

using namespace std;
namespace hiai {
// Maps a "--aipp_format" command-line token to the v2 ImageFormat enum.
// Returns ImageFormat::INVALID for unknown tokens.
static ImageFormat GetAippImageFormat(const string& cliOption)
{
    const static map<string, ImageFormat> strMap = {
        {"YUV420SP_U8", ImageFormat::YUV420SP},
        {"XRGB8888_U8", ImageFormat::XRGB8888},
        {"YUV400_U8", ImageFormat::YUV400},
        {"ARGB8888_U8", ImageFormat::ARGB8888},
        {"YUYV_U8", ImageFormat::YUYV},
        {"YUV422SP_U8", ImageFormat::YUV422SP},
        {"AYUV444_U8", ImageFormat::AYUV444},
        {"RGB888_U8", ImageFormat::RGB888},
        {"BGR888_U8", ImageFormat::BGR888},
        {"YUV444SP_U8", ImageFormat::YUV444SP},
        {"YVU444SP_U8", ImageFormat::YVU444SP}
    };
    // Single lookup instead of find() + at(): avoids a second O(log n) search.
    const auto it = strMap.find(cliOption);
    return it != strMap.end() ? it->second : ImageFormat::INVALID;
}

// Parses the comma-separated "--aipp_format" option into aippFmtVec_ and
// records one {isAipp, isDynamic} flag pair per parsed entry.
Status AippTensorCreator::GetAippFormatVec(const string& aippFormat)
{
    if (!aippFormat.empty()) {
        aippFmtVec_.clear();
        FMK_LOGI("--aipp_format=%s", aippFormat.c_str());
        for (string token : Split(aippFormat, ",")) {
            Trim(token);
            aippFmtVec_.push_back(GetAippImageFormat(token));
        }
    }

    // If no token parsed to a valid format, treat the option as absent.
    bool hasValid = false;
    for (const auto fmt : aippFmtVec_) {
        if (fmt != ImageFormat::INVALID) {
            hasValid = true;
            break;
        }
    }
    if (!hasValid) {
        aippFmtVec_.clear();
    }

    // Static aipp formats are never dynamic: second flag is always false.
    for (const auto fmt : aippFmtVec_) {
        isAippAndDynamic_.push_back({fmt != ImageFormat::INVALID, false});
    }
    return SUCCESS;
}

/*
 * Parses the "--aipp_config" option: inputs are comma-separated; each input may
 * carry several '|'-separated configs, or "NONE" for a non-aipp input.
 * Also appends one {isAipp, isDynamic} flag pair per input.
 */
Status AippTensorCreator::GetAippConfigVec(const string& aippConfigJson)
{
    if (aippConfigJson.empty()) {
        return SUCCESS;
    }
    vector<string> vecConfigs = Split(aippConfigJson, ",");
    for (string configs : vecConfigs) {
        vector<AippConfigV2> vecAippCfg;
        Trim(configs);
        vector<string> vecConfig = Split(configs, "|");
        for (string config : vecConfig) {
            Trim(config); // fix: trim the per-config token (was Trim(configs), a no-op re-trim of the group)
            AippConfigV2 aippCfg;
            if (config == "NONE") {
                aippCfg.config = false;
            } else if (!AippConfigParser::ParseConfig(config, aippCfg)) {
                FMK_LOGE("Parse aipp config %s failed.", config.c_str());
                return FAILURE;
            }
            vecAippCfg.push_back(aippCfg);
        }
        aippConfigVec_.push_back(vecAippCfg);
    }

    // NOTE(review): the flag reflects only the LAST config of each group —
    // presumably groups are homogeneous; confirm for multi-config inputs.
    pair<bool, bool> perIsAippAndDynamic = {false, false};
    for (size_t i = 0; i < aippConfigVec_.size(); i++) {
        for (const auto& eAippConfig : aippConfigVec_[i]) {
            if (eAippConfig.config) {
                perIsAippAndDynamic = {true, true};
            } else {
                perIsAippAndDynamic = {false, false};
            }
        }
        isAippAndDynamic_.push_back(perIsAippAndDynamic);
    }

    return SUCCESS;
}


// Maps a "--aipp_format" token to the legacy AiTensorImage_Format enum.
// Returns AiTensorImage_INVALID for unknown tokens.
static AiTensorImage_Format GetAippImageFormatLegacy(const string& cliOption)
{
    const static map<string, AiTensorImage_Format> strMap = {
        {"YUV420SP_U8", AiTensorImage_YUV420SP_U8},
        {"XRGB8888_U8", AiTensorImage_XRGB8888_U8},
        {"YUV400_U8", AiTensorImage_YUV400_U8},
        {"ARGB8888_U8", AiTensorImage_ARGB8888_U8},
        {"YUYV_U8", AiTensorImage_YUYV_U8},
        {"YUV422SP_U8", AiTensorImage_YUV422SP_U8},
        {"AYUV444_U8", AiTensorImage_AYUV444_U8},
        {"RGB888_U8", AiTensorImage_RGB888_U8}
    };
    // Single lookup instead of find() + at(): avoids a second O(log n) search.
    const auto it = strMap.find(cliOption);
    return it != strMap.end() ? it->second : AiTensorImage_INVALID;
}

// Fills aippFmtVecLegacy_ either from the "--aipp_format" option or, when the
// option is empty, by querying the model's aipp parameters via the legacy
// client. Clears the vector again if no valid format was found.
Status AippTensorCreator::GetAippFormatVecLegacy(shared_ptr<AiModelMngerClient> mmgrClient,
    const string& modelName, const string& aippFormat)
{
    if (mmgrClient.get() == nullptr) {
        return FAILURE;
    }
    if (!aippFormat.empty()) {
        // Explicit option: parse each comma-separated token.
        aippFmtVecLegacy_.clear();
        vector<string> fmtStrs = Split(aippFormat, ",");
        FMK_LOGI("--aipp_format=%s", aippFormat.c_str());
        for (string s : fmtStrs) {
            Trim(s);
            AiTensorImage_Format v = GetAippImageFormatLegacy(s);
            FMK_LOGI("%s -- %u", s.c_str(), v);
            aippFmtVecLegacy_.push_back(v);
        }
    } else {
        // default value, still can be aipp model.
        std::vector<std::shared_ptr<AippPara>> aippParas;
        // get aipp parameter fail is normal(e.g. no aipp para), should return success
        AIStatus status = mmgrClient->GetModelAippPara(modelName, aippParas);
        if (status != AI_SUCCESS) {
            FMK_LOGE("call GetModelAippPara ret = %d", status);
            return SUCCESS;
        }

        // dynamic aipp will return AiTensorImage_INVALID, and aippParas.size() == 1
        for (auto p : aippParas) {
            AiTensorImage_Format v = p->GetInputFormat();
            FMK_LOGI("AiTensorImage_Format v = %u", v);
            aippFmtVecLegacy_.push_back(v);
        }
    }

    // If every collected format is INVALID, the model is not a static-aipp
    // model: drop the vector so IsStaticAippInput() reports false.
    bool clearVec = true;
    for (auto v : aippFmtVecLegacy_) {
        if (v != AiTensorImage_INVALID) {
            clearVec = false;
            break;
        }
    }
    if (clearVec) {
        aippFmtVecLegacy_.clear();
    }

    return SUCCESS;
}

// Validates that the parsed aipp options are consistent with the number of
// model input tensors. Returns FAILURE on mismatch.
Status AippTensorCreator::CheckConfigInvalid(const size_t inputDimsSize)
{
    if (aippFmtVec_.size() > 0 && aippFmtVec_.size() != inputDimsSize) {
        FMK_LOGE("Number of aipp format options: %zu, number of input tensors: %zu",
            aippFmtVec_.size(), inputDimsSize);
        return FAILURE;
    }
    // With dynamic aipp, the model exposes extra func-operator inputs, so the
    // input-desc count must exceed the number of image-data inputs.
    if (aippConfigVec_.size() != 0 && inputDimsSize <= isAippAndDynamic_.size()) {
        // fix: %zu for size_t (was %d, UB on LP64) and typo "shuould"
        FMK_LOGE("inputDims size: %zu should be greater than imageData size: %zu",
            inputDimsSize, isAippAndDynamic_.size());
        return FAILURE;
    }
    return SUCCESS;
}

// Returns the number of extra model inputs reserved for dynamic aipp func
// operators, or 0 when dynamic aipp is not configured / sizes are inconsistent.
size_t AippTensorCreator::GetAippDyFuncCnt(size_t inputDimsSize)
{
    if (aippConfigVec_.empty()) {
        return 0;
    }
    if (inputDimsSize <= isAippAndDynamic_.size()) {
        FMK_LOGW("InputTensorDescs size should greater than inputData number when dynamic aipp, please check");
        return 0;
    }
    return inputDimsSize - isAippAndDynamic_.size();
}

// Legacy counterpart of GetAippConfigVec(): parses "--aipp_config" into
// aippConfigVecLegacy_ (comma-separated inputs, '|'-separated configs,
// "NONE" for a non-aipp input).
Status AippTensorCreator::GetAippConfigVecLegacy(const string& aippConfigJson)
{
    if (aippConfigJson.empty()) {
        return SUCCESS;
    }
    vector<string> vecConfigs = Split(aippConfigJson, ",");
    for (string configs : vecConfigs) {
        vector<AippConfig> vecAippCfg;
        Trim(configs);
        vector<string> vecConfig = Split(configs, "|");
        for (string config : vecConfig) {
            Trim(config); // fix: trim the per-config token (was Trim(configs), a no-op re-trim of the group)
            AippConfig aippCfg;
            if (config == "NONE") {
                aippCfg.config = false;
            } else if (!AippConfigParser::ParseConfigLegacy(config, aippCfg)) {
                FMK_LOGE("Parse aipp config %s failed.", config.c_str());
                return FAILURE;
            }
            vecAippCfg.push_back(aippCfg);
        }
        aippConfigVecLegacy_.push_back(vecAippCfg);
    }
    return SUCCESS;
}

// Initializes the static-aipp format list. Without a legacy client/model name
// the v2 parser is used; otherwise the legacy path (which can also query the
// model's aipp parameters) is taken.
Status AippTensorCreator::InitStaticAipp(const string& aippFormat,
    std::shared_ptr<AiModelMngerClient> mmgrClient, const std::string& modelName)
{
    const bool useV2 = (mmgrClient == nullptr) && modelName.empty();
    const Status ret = useV2 ?
        GetAippFormatVec(aippFormat) : GetAippFormatVecLegacy(mmgrClient, modelName, aippFormat);
    if (ret != SUCCESS) {
        FMK_LOGE("Get aipp format fail.");
        return FAILURE;
    }
    return SUCCESS;
}

// Populates both the v2 and the legacy dynamic-aipp config vectors from the
// same "--aipp_config" option string.
Status AippTensorCreator::InitDynamicAipp(const string& aippConfigJson)
{
    if (GetAippConfigVec(aippConfigJson) != SUCCESS ||
        GetAippConfigVecLegacy(aippConfigJson) != SUCCESS) {
        FMK_LOGE("Get aipp config fail.");
        return FAILURE;
    }
    return SUCCESS;
}

// Creates the image tensor buffer for input idx and appends it to inTensors.
// dimsBHW must hold exactly {batch, height, width}; for dynamic aipp the
// configured source image size overrides any zero-valued model dimension.
Status AippTensorCreator::SetImageBufferToInTensorBuffer(size_t idx, const vector<int32_t>& dimsBHW,
    vector<shared_ptr<INDTensorBuffer>>& inTensors)
{
    // fix: was "idx > size" — idx == size passed the check and indexed
    // isAippAndDynamic_ out of bounds below.
    if (idx >= isAippAndDynamic_.size() || dimsBHW.size() != 3) {
        FMK_LOGE("dataIdx or dimsBHW is invalid.");
        return FAILURE;
    }
    ImageFormat inputFormat = ImageFormat::INVALID;
    int32_t imageSizeB = dimsBHW[0];
    int32_t imageSizeH = dimsBHW[1];
    int32_t imageSizeW = dimsBHW[2];
    if (!isAippAndDynamic_[idx].second) { // static aipp
        inputFormat = aippFmtVec_[idx];
    } else { // dynamic aipp: configured size wins when non-zero
        inputFormat = aippConfigVec_[idx][0].inputPara.format;
        imageSizeB = aippConfigVec_[idx][0].inputPara.shape.srcImageSizeB != 0 ?
            aippConfigVec_[idx][0].inputPara.shape.srcImageSizeB : imageSizeB;

        imageSizeH = aippConfigVec_[idx][0].inputPara.shape.srcImageSizeH != 0 ?
            aippConfigVec_[idx][0].inputPara.shape.srcImageSizeH : imageSizeH;

        imageSizeW = aippConfigVec_[idx][0].inputPara.shape.srcImageSizeW != 0 ?
            aippConfigVec_[idx][0].inputPara.shape.srcImageSizeW : imageSizeW;
    }
    std::shared_ptr<IImageTensorBuffer> imageBuffer = CreateImageTensorBuffer(
        imageSizeB, imageSizeH, imageSizeW, inputFormat, ImageColorSpace::JPEG, 0);
    // fix: CreateImageTensorBuffer can fail; do not push a null buffer.
    if (imageBuffer == nullptr) {
        FMK_LOGE("CreateImageTensorBuffer failed.");
        return FAILURE;
    }
    // TODO: colorSpace and rotation should be configurable, or the interface changed.
    inTensors.push_back(imageBuffer);

    return SUCCESS;
}

// Appends a parameter tensor for one aipp sub-operator, but only when that
// sub-operator is configured as dynamic (isDyfunc == true).
template <typename T>
void CreateAippFuncTensor(bool isDyfunc, T& multiAippPara, vector<std::shared_ptr<INDTensorBuffer>>& perAippTensorVec)
{
    if (!isDyfunc) {
        return;
    }
    perAippTensorVec.push_back(ImageConfigTensorUtil::CreateImageConfigTensor(multiAippPara));
}

// Builds the parameter tensors for every dynamic aipp sub-operator enabled in
// aippConfig and appends them (as one group) to aippTensorVec. Returns SUCCESS
// (appending nothing) when the input is not an aipp input at all.
Status SetAippPara(vector<vector<std::shared_ptr<INDTensorBuffer>>>& aippTensorVec,
    AippConfigV2& aippConfig)
{
    if (!aippConfig.config) {
        FMK_LOGI("aipp config is false.");
        return SUCCESS;
    }
    if (aippConfig.isDynamicFunc.size() != 7) { // 7 = number of dynamic sub-operators per dynamic aipp input
        FMK_LOGE("aipp config dynamic funcs size fail.");
        return FAILURE;
    }
    // One optional tensor per sub-operator, in the fixed order below
    // (crop, channel swap, csc, resize, dtc, rotation, padding).
    vector<std::shared_ptr<INDTensorBuffer>> perAippTensorVec;
    CreateAippFuncTensor(aippConfig.isDynamicFunc[CROP_INDEX], aippConfig.vecCropPara, perAippTensorVec);
    CreateAippFuncTensor(aippConfig.isDynamicFunc[CHANNLE_SWAP_INDEX], aippConfig.channelSwapPara, perAippTensorVec);
    CreateAippFuncTensor(aippConfig.isDynamicFunc[CSC_INDEX], aippConfig.cscPara, perAippTensorVec);
    CreateAippFuncTensor(aippConfig.isDynamicFunc[RESIZE_INDEX], aippConfig.vecResizePara, perAippTensorVec);
    CreateAippFuncTensor(aippConfig.isDynamicFunc[DTC_INDEX], aippConfig.vecDtcConfig.dtcPara, perAippTensorVec);
    CreateAippFuncTensor(aippConfig.isDynamicFunc[ROTATION_INDEX], aippConfig.vecRotatePara, perAippTensorVec);
    CreateAippFuncTensor(aippConfig.isDynamicFunc[PADDING_INDEX], aippConfig.vecPaddingPara, perAippTensorVec);

    aippTensorVec.push_back(perAippTensorVec);

    return SUCCESS;
}

// Appends the dynamic aipp func-operator parameter tensors to inTensors and
// verifies their total count matches aippDyFuncCnt (the number of extra model
// inputs reserved for them).
Status AippTensorCreator::SetAippDyFuncBufferToInTensorBuffer(vector<shared_ptr<INDTensorBuffer>>& inTensors,
    size_t aippDyFuncCnt)
{
    size_t configFuncCnt = 0;
    for (size_t j = 0; j < aippConfigVec_.size(); j++) {
        // Inputs of the dynamic aipp func operators for input j. Only the
        // first config is used (single output edge per input assumed).
        vector<vector<std::shared_ptr<INDTensorBuffer>>> aippTensorVec;
        if (SetAippPara(aippTensorVec, aippConfigVec_[j][0]) != SUCCESS) {
            FMK_LOGE("SetAippPara failed");
            return FAILURE;
        }
        // fix: iterate by const reference — the old by-value loop copied a
        // whole vector of shared_ptrs (refcount churn) on every iteration.
        for (const auto& perAippTensorVec : aippTensorVec) {
            configFuncCnt += perAippTensorVec.size();
            inTensors.insert(inTensors.end(), perAippTensorVec.begin(), perAippTensorVec.end());
        }
    }
    if (aippDyFuncCnt != configFuncCnt) {
        FMK_LOGE("aippDyFuncCnt[%zu] is not equal to configFuncCnt[%zu]",
            aippDyFuncCnt, configFuncCnt);
        return FAILURE;
    }
    return SUCCESS;
}

// Returns whether model input dataIdx is an aipp (image) input.
// Returns false when no aipp information was initialized or the index is
// out of range.
bool AippTensorCreator::InputIsAipp(const uint32_t dataIdx)
{
    if (isAippAndDynamic_.size() == 0) {
        FMK_LOGW("init AippTensorCreator failed or is not aipp model.");
        return false;
    }
    if (dataIdx >= isAippAndDynamic_.size()) {
        // fix: message said "is not invalid" (double negative) on the error path
        FMK_LOGE("dataIdx is invalid.");
        return false;
    }
    return isAippAndDynamic_[dataIdx].first;
}

// Creates all model input tensor buffers (v2 path): image buffers for aipp
// inputs, plain ND buffers for the rest, plus the dynamic aipp func-operator
// parameter tensors at the tail when dynamic aipp is configured.
Status AippTensorCreator::CreateImageTensors(
    const vector<NDTensorDesc>& inputDims, vector<shared_ptr<INDTensorBuffer>>& inTensors)
{
    if (CheckConfigInvalid(inputDims.size()) != SUCCESS) {
        FMK_LOGE("CheckConfigInvalid fail.");
        return FAILURE;
    }
    // The trailing aippDyFuncCnt inputs are func-operator parameters, not data.
    size_t aippDyFuncCnt = GetAippDyFuncCnt(inputDims.size());
    size_t dataCounts = aippDyFuncCnt > 0 ? inputDims.size() - aippDyFuncCnt : inputDims.size();
    for (size_t i = 0; i < dataCounts; i++) {
        if (InputIsAipp(i)) {
            // Build the aipp image input tensor buffer and append it to inTensors.
            // NOTE(review): dims[0]/dims[2]/dims[3] as B/H/W assumes an NCHW
            // 4-D desc — confirm against NDTensorDesc layout.
            vector<int32_t> dimsBHW = {inputDims[i].dims[0], inputDims[i].dims[2], inputDims[i].dims[3]};
            if (SetImageBufferToInTensorBuffer(i, dimsBHW, inTensors) != SUCCESS) {
                FMK_LOGE("SetImageBufferToInTensorBuffer failed");
                return FAILURE;
            }
        } else {
            shared_ptr<INDTensorBuffer> inputTensor = CreateNDTensorBuffer(inputDims[i]);
            if (inputTensor == nullptr) {
                FMK_LOGE("CreateNDTensorBuffer failed");
                return FAILURE;
            }
            inTensors.push_back(inputTensor);
        }
    }
    if (aippDyFuncCnt != 0) {
        // Append the dynamic aipp func-operator tensor buffers to inTensors.
        if (SetAippDyFuncBufferToInTensorBuffer(inTensors, aippDyFuncCnt) != SUCCESS) {
            FMK_LOGE("SetAippDyFuncBufferToInTensorBuffer failed");
            return FAILURE;
        }
    }
    return SUCCESS;
}

// Binds the aipp parameter to model input `index`; a negative outputIndex
// means SetInputAippIndex is intentionally skipped.
static bool SetInputIndex(const shared_ptr<AippPara>& aippPara, const uint32_t& index, const int& outputIndex)
{
    if (aippPara->SetInputIndex(index) != AI_SUCCESS) {
        FMK_LOGE("AippPara setInputIndex failed.");
        return false;
    }
    if (outputIndex < 0) {
        return true;
    }
    if (aippPara->SetInputAippIndex(static_cast<uint32_t>(outputIndex)) != AI_SUCCESS) {
        FMK_LOGE("AippPara setInputAippIndex failed.");
        return false;
    }
    return true;
}

static bool SetInputShape(
    const shared_ptr<AippPara>& aippPara, const AippConfig& aippConfig, const TensorDimension& inputDim)
{
    AippInputShape inputShape = aippConfig.inputPara.shape;
    if (aippConfig.inputPara.shape.srcImageSizeW == 0 || aippConfig.inputPara.shape.srcImageSizeH == 0) {
        inputShape.srcImageSizeW = inputDim.GetWidth();
        inputShape.srcImageSizeH = inputDim.GetHeight();
    }

    if (aippPara->SetInputShape(inputShape) != AI_SUCCESS) {
        FMK_LOGE("AippPara setInputShape failed.");
        return false;
    }
    return true;
}

// Applies colour-space-conversion parameters only when enabled in the config.
static bool SetCscPara(const shared_ptr<AippPara>& aippPara, const AippConfig& aippConfig)
{
    if (!aippConfig.cscPara.switch_) {
        return true;
    }
    if (aippPara->SetCscPara(aippConfig.cscPara) != AI_SUCCESS) {
        FMK_LOGE("AippPara setCscPara failed.");
        return false;
    }
    return true;
}

// Applies channel-swap parameters when either the RB/UV or the alpha swap
// switch is on; a fully-off config is a successful no-op.
static bool SetChannelSwapPara(const shared_ptr<AippPara>& aippPara, const AippConfig& aippConfig)
{
    const bool anySwap = aippConfig.channelSwapPara.rbuvSwapSwitch || aippConfig.channelSwapPara.axSwapSwitch;
    if (!anySwap) {
        return true;
    }
    if (aippPara->SetChannelSwapPara(aippConfig.channelSwapPara) != AI_SUCCESS) {
        FMK_LOGE("AippPara setChannelSwapPara failed.");
        return false;
    }
    return true;
}

// Applies crop parameters: per-batch overload when several entries are
// configured, single global crop for exactly one entry, nothing for zero.
// Entries whose switch_ is off are skipped.
static bool SetCropPara(const shared_ptr<AippPara>& aippPara, const AippConfig& aippConfig)
{
    const size_t cnt = aippConfig.vecCropPara.size();
    if (cnt == 1) {
        const auto& para = aippConfig.vecCropPara[0];
        if (para.switch_ && aippPara->SetCropPara(para) != AI_SUCCESS) {
            FMK_LOGE("AippPara setCropPara failed.");
            return false;
        }
        return true;
    }
    for (uint32_t batchIdx = 0; batchIdx < cnt; ++batchIdx) {
        const auto& para = aippConfig.vecCropPara[batchIdx];
        if (para.switch_ && aippPara->SetCropPara(batchIdx, para) != AI_SUCCESS) {
            FMK_LOGE("Batch index %u AippPara setCropPara failed.", batchIdx);
            return false;
        }
    }
    return true;
}

// Applies resize parameters: per-batch overload when several entries are
// configured, single global resize for exactly one entry, nothing for zero.
// Entries whose switch_ is off are skipped.
static bool SetResizePara(const shared_ptr<AippPara>& aippPara, const AippConfig& aippConfig)
{
    const size_t cnt = aippConfig.vecResizePara.size();
    if (cnt == 1) {
        const auto& para = aippConfig.vecResizePara[0];
        if (para.switch_ && aippPara->SetResizePara(para) != AI_SUCCESS) {
            FMK_LOGE("AippPara SetResizePara failed.");
            return false;
        }
        return true;
    }
    for (uint32_t batchIdx = 0; batchIdx < cnt; ++batchIdx) {
        const auto& para = aippConfig.vecResizePara[batchIdx];
        if (para.switch_ && aippPara->SetResizePara(batchIdx, para) != AI_SUCCESS) {
            FMK_LOGE("Batch index %u AippPara SetResizePara failed.", batchIdx);
            return false;
        }
    }
    return true;
}

// Applies padding parameters: per-batch overload when several entries are
// configured, single global padding for exactly one entry, nothing for zero.
// Entries whose switch_ is off are skipped.
static bool SetPaddingPara(const shared_ptr<AippPara>& aippPara, const AippConfig& aippConfig)
{
    const size_t cnt = aippConfig.vecPaddingPara.size();
    if (cnt == 1) {
        const auto& para = aippConfig.vecPaddingPara[0];
        if (para.switch_ && aippPara->SetPaddingPara(para) != AI_SUCCESS) {
            FMK_LOGE("AippPara SetPaddingPara failed.");
            return false;
        }
        return true;
    }
    for (uint32_t batchIdx = 0; batchIdx < cnt; ++batchIdx) {
        const auto& para = aippConfig.vecPaddingPara[batchIdx];
        if (para.switch_ && aippPara->SetPaddingPara(batchIdx, para) != AI_SUCCESS) {
            FMK_LOGE("Batch index %u AippPara SetPaddingPara failed.", batchIdx);
            return false;
        }
    }
    return true;
}

// Applies data-type-conversion parameters: per-batch overload when several
// entries are configured, single global dtc for exactly one entry, nothing
// for zero. Entries whose switch_ is off are skipped.
static bool SetDtcPara(const shared_ptr<AippPara>& aippPara, const AippConfig& aippConfig)
{
    const size_t cnt = aippConfig.vecDtcConfig.size();
    if (cnt == 1) {
        const auto& cfg = aippConfig.vecDtcConfig[0];
        if (cfg.switch_ && aippPara->SetDtcPara(cfg.dtcPara) != AI_SUCCESS) {
            FMK_LOGE("AippPara SetDtcPara failed.");
            return false;
        }
        return true;
    }
    for (uint32_t batchIdx = 0; batchIdx < cnt; ++batchIdx) {
        const auto& cfg = aippConfig.vecDtcConfig[batchIdx];
        if (cfg.switch_ && aippPara->SetDtcPara(batchIdx, cfg.dtcPara) != AI_SUCCESS) {
            FMK_LOGE("Batch index %u AippPara SetDtcPara failed.", batchIdx);
            return false;
        }
    }
    return true;
}

// Allocates and fully configures one legacy AippPara for model input `index`.
// outputIndex < 0 means SetInputAippIndex is skipped (single-config case).
// Returns false (with aippPara possibly partially configured) on any failure.
bool SetAippParaLegacy(uint32_t index, int outputIndex,
    shared_ptr<AippPara>& aippPara, const TensorDimension& inputDim, const AippConfig& aippConfig)
{
    aippPara = hiai::make_shared_nothrow<AippPara>();
    if (aippPara.get() == nullptr) {
        FMK_LOGE("aippPara is null.");
        return false;
    }

    // Batch count comes from the model input's N dimension.
    if (aippPara->Init(inputDim.GetNumber()) != AI_SUCCESS) {
        FMK_LOGE("AippPara init failed.");
        return false;
    }

    if (!SetInputIndex(aippPara, index, outputIndex)) {
        return false;
    }

    if (aippPara->SetInputFormat(aippConfig.inputPara.format) != AI_SUCCESS) {
        FMK_LOGE("AippPara setInputFormat failed.");
        return false;
    }

    // Apply each optional sub-parameter group; every helper is a no-op when
    // the corresponding config switch is off.
    if (!SetInputShape(aippPara, aippConfig, inputDim) || !SetCscPara(aippPara, aippConfig) ||
        !SetChannelSwapPara(aippPara, aippConfig) || !SetCropPara(aippPara, aippConfig) ||
        !SetResizePara(aippPara, aippConfig) || !SetPaddingPara(aippPara, aippConfig) ||
        !SetDtcPara(aippPara, aippConfig)) {
        return false;
    }

    return true;
}

// Builds one AippPara per config entry for model input `index` and appends
// them to aippParas. Fails if the config vector is empty or any entry cannot
// be applied.
Status CreateAippParas(std::vector<AippConfig>& aippConfigV1, size_t index,
    const TensorDimension& inputDim, vector<shared_ptr<AippPara>>& aippParas)
{
    if (aippConfigV1.empty()) {
        FMK_LOGE("AippConfig vector size is zero.");
        return FAILURE;
    }
    FMK_LOGI("Input enable dynamic AIPP.");
    const bool multiConfigs = aippConfigV1.size() > 1;
    for (size_t cfgIdx = 0; cfgIdx < aippConfigV1.size(); cfgIdx++) {
        // With a single config, SetInputAippIndex is skipped (outIndex = -1).
        const int outIndex = multiConfigs ? static_cast<int>(cfgIdx) : -1;
        shared_ptr<AippPara> para;
        if (!SetAippParaLegacy(static_cast<uint32_t>(index), outIndex, para, inputDim, aippConfigV1[cfgIdx])) {
            FMK_LOGE("AippPara SetAippParaLegacy failed.");
            return FAILURE;
        }
        aippParas.push_back(para);
    }
    return SUCCESS;
}

// Extracts the source image size and format from the first aipp config;
// dimensions left <= 0 in the config fall back to the model input dims.
void GetAippInputParas(std::vector<AippConfig>& aippConfigV1, const TensorDimension& inputDim,
    uint32_t& srcImageSizeW, uint32_t& srcImageSizeH, hiai::AiTensorImage_Format& format)
{
    const auto& inputPara = aippConfigV1[0].inputPara;
    srcImageSizeW = inputDim.GetWidth();
    srcImageSizeH = inputDim.GetHeight();
    if (inputPara.shape.srcImageSizeW > 0) {
        srcImageSizeW = inputPara.shape.srcImageSizeW;
    }
    if (inputPara.shape.srcImageSizeH > 0) {
        srcImageSizeH = inputPara.shape.srcImageSizeH;
    }
    format = inputPara.format;
}

// Returns whether model input inputInx uses static aipp (i.e. was given a
// valid format via --aipp_format). Out-of-range indices return false.
bool AippTensorCreator::IsStaticAippInput(const size_t inputInx)
{
    if (aippFmtVecLegacy_.size() == 0) {
        return false;
    }
    if (inputInx >= aippFmtVecLegacy_.size()) {
        // fix: was "return FAILURE;" — a Status returned from a bool function,
        // which converts to true and led to out-of-range indexing in callers.
        return false;
    }
    return aippFmtVecLegacy_[inputInx] != AiTensorImage_INVALID;
}

// Returns whether model input inputInx has a dynamic aipp config; unconfigured
// or out-of-range inputs report false.
bool AippTensorCreator::IsDynamicAippInput(const size_t inputInx)
{
    if (inputInx >= aippConfigVecLegacy_.size()) {
        return false;
    }
    return aippConfigVecLegacy_[inputInx][0].config;
}

// Initializes one legacy input tensor: as a static-aipp image, a dynamic-aipp
// image (wrapped in an AippTensor carrying its AippParas), or a plain tensor.
// On the dynamic path `input` is replaced with the wrapping AippTensor.
Status AippTensorCreator::InitInputAippTensor(
    const size_t index, shared_ptr<AiTensor>& input, const TensorDimension& inputDim)
{
    uint32_t srcImageSizeB = inputDim.GetNumber();
    uint32_t srcImageSizeW = inputDim.GetWidth();
    uint32_t srcImageSizeH = inputDim.GetHeight();
    hiai::AiTensorImage_Format format = AiTensorImage_INVALID;
    vector<shared_ptr<AippPara>> aippParas;
    if (IsStaticAippInput(index)) {
        // Static aipp: the format comes straight from --aipp_format.
        format = aippFmtVecLegacy_[index];
        if (input->Init(srcImageSizeB, srcImageSizeH, srcImageSizeW, format) != AI_SUCCESS) {
            // fix: %zu for size_t (was %d); message said "dynamic" on the static branch
            FMK_LOGE("Input index %zu static aipp input init failed.", index);
            return FAILURE;
        }
    } else if (IsDynamicAippInput(index)) {
        std::vector<AippConfig> aippConfigV1 = aippConfigVecLegacy_[index];
        if (CreateAippParas(aippConfigV1, index, inputDim, aippParas) != SUCCESS) {
            FMK_LOGE("Input index %zu CreateAippParas failed.", index); // fix: %zu for size_t
            return FAILURE;
        }
        GetAippInputParas(aippConfigV1, inputDim, srcImageSizeW, srcImageSizeH, format);
        if (input->Init(srcImageSizeB, srcImageSizeH, srcImageSizeW, format) != AI_SUCCESS) {
            FMK_LOGE("Input index %zu dynamic aipp input init failed.", index); // fix: %zu for size_t
            return FAILURE;
        }
        shared_ptr<AippTensor> aippTensor = make_shared<AippTensor>(input, aippParas);
        if (aippTensor.get() == nullptr) {
            FMK_LOGE("AippTensor ptr is null.");
            return FAILURE;
        }
        input = aippTensor;
    } else {
        // Not an aipp input: plain tensor with the model's own dimensions.
        if (input->Init(&inputDim) != AI_SUCCESS) {
            FMK_LOGE("Input index %zu init failed.", index); // fix: %zu for size_t
            return FAILURE;
        }
    }
    return SUCCESS;
}

// Validates the legacy aipp configuration against the model input count:
// static and dynamic aipp are mutually exclusive, and whichever is configured
// must supply exactly one entry per model input.
Status AippTensorCreator::CheckSizeMatch(const size_t inputSize)
{
    const size_t staticCnt = aippFmtVecLegacy_.size();
    const size_t dynamicCnt = aippConfigVecLegacy_.size();
    if (staticCnt > 0 && dynamicCnt > 0) {
        FMK_LOGE("aipp static and aipp dynamic can not effect at the same time");
        return FAILURE;
    }
    if (staticCnt > 0 && staticCnt != inputSize) {
        return FAILURE;
    }
    if (dynamicCnt > 0 && dynamicCnt != inputSize) {
        return FAILURE;
    }
    return SUCCESS;
}

// Creates all legacy model input tensors, initializing each one as a static,
// dynamic, or plain input according to the parsed aipp options.
Status AippTensorCreator::CreateImageTensors(
    const vector<TensorDimension>& inputDims, vector<shared_ptr<AiTensor>>& inTensors)
{
    if (CheckSizeMatch(inputDims.size()) != SUCCESS) {
        FMK_LOGE("CheckSizeMatch fail.");
        return FAILURE;
    }
    for (size_t i = 0; i < inputDims.size(); i++) {
        shared_ptr<AiTensor> input = hiai::make_shared_nothrow<AiTensor>();
        // fix: make_shared_nothrow may return nullptr, which was dereferenced
        // unconditionally inside InitInputAippTensor.
        if (input == nullptr) {
            FMK_LOGE("Create AiTensor failed.");
            return FAILURE;
        }
        if (InitInputAippTensor(i, input, inputDims[i]) != SUCCESS) {
            return FAILURE;
        }
        inTensors.push_back(input);
    }
    return SUCCESS;
}
} // namespace hiai
