#include <jni.h>
#include <unistd.h>
#include <android/asset_manager.h>
#include <android/log.h>

#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "HiAiModelManagerService.h"
#include "graph/buffer.h"
#include "graph/model.h"
#include "graph/op/all_ops.h"
#include "hiai_ir_build.h"
#include "ir_build.h"

using namespace std;
using namespace ge;

#define TAG "NativeCodec"
// Logcat helpers: verbose and error level respectively.
#define HIAI_LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
#define HIAI_LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)

// Asset manager handed in by HiaiBuildIRModel; used to read weight .bin files
// from the APK assets.
AAssetManager* g_mgr= nullptr;
// Keep-alive storage for operators and tensor descriptors created while
// building the IR graph, so they outlive the helper functions that make them.
vector<shared_ptr<Operator>> g_ops;
vector<shared_ptr<TensorDesc>> g_tensorDescs;
// NPU model-manager client; nullptr until HiaiBuildIRModel succeeds, and reset
// to nullptr again on any init/load failure.
shared_ptr<hiai::AiModelMngerClient> g_client= nullptr;
// Name the compiled model is registered under in the client.
string modelName="testSR";
// Execution context passed to Process(); carries the "model_name" parameter.
hiai::AiContext context;
// FP16 input/output tensors allocated by InitAiTensor().
vector<shared_ptr<hiai::AiTensor>> itensors;
vector<shared_ptr<hiai::AiTensor>> otensors;

// Returns the element count described by `shape`: the product of all of its
// dimensions (1 for an empty dim list).
int64_t GetShapeSize(const ge::Shape& shape)
{
    const auto dims = shape.GetDims();
    int64_t elemCount = 1;
    for (size_t i = 0; i < dims.size(); ++i) {
        elemCount *= dims[i];
    }
    return elemCount;
}

// Wraps the raw bytes in `data` (dataSize bytes, not owned) in a ge::Tensor
// described by `wDesc` and attaches it as the value of `constOp`.
// The expected byte length for the descriptor's dtype/shape is computed and
// checked against dataSize; a mismatch is logged (previously the computed
// length was silently discarded, which hid shape/buffer mismatches).
void SetConstData(hiai::op::Const& constOp, const TensorDesc& wDesc,
                  uint8_t* data, size_t dataSize)
{
    ge::DataType dt = wDesc.GetDataType();

    // Expected byte length = element count * per-element size.
    int64_t length = GetShapeSize(wDesc.GetShape());
    switch (dt) {
        case DataType::DT_INT32:
        case DataType::DT_FLOAT: {
            length *= sizeof(float); // both are 4-byte element types
            break;
        }
        case DataType::DT_INT8: {
            length *= sizeof(uint8_t);
            break;
        }
        default: {
            // Unhandled dtypes assume 4-byte elements, matching prior behavior.
            length *= sizeof(float);
            break;
        }
    }
    // BUGFIX: 'length' was computed and never used. Surface inconsistent
    // buffers instead of silently feeding a wrong-sized weight to the op.
    if (length != static_cast<int64_t>(dataSize)) {
        HIAI_LOGE("SetConstData: desc expects %lld bytes but got %lld",
                  (long long)length, (long long)dataSize);
    }

    TensorPtr weight = std::make_shared<Tensor>();
    weight->SetTensorDesc(wDesc);
    weight->SetData(data, dataSize);
    constOp.set_attr_value(weight);
}

// Reads the asset at `filePath` into `buffer`. The asset's size must equal
// `bufferSize` exactly; on any failure (missing asset, size mismatch, short
// read) an error is logged and `buffer` is left partially/unmodified —
// callers keep whatever seed values they pre-filled.
void ReadBinFromAsset(string filePath, void* buffer, int64_t bufferSize)
{
    AAsset* asset = AAssetManager_open(g_mgr, filePath.c_str(),AASSET_MODE_UNKNOWN);
    if(asset==NULL)
    {
        HIAI_LOGE(" %s","asset==NULL");
        return ;
    }
    /* Get the asset's file size. */
    off_t fileSize = AAsset_getLength(asset);
    if(bufferSize != (int64_t)fileSize)
    {
        // BUGFIX: %ld is the wrong specifier for int64_t/off_t on 32-bit
        // Android (undefined behavior); cast to long long and use %lld.
        HIAI_LOGE("fileSize: %lld, bufferSize: %lld is not same!",
                  (long long)fileSize, (long long)bufferSize);
        AAsset_close(asset);
        return;
    }
    int numBytesRead = AAsset_read(asset, buffer, fileSize);
    if(numBytesRead != bufferSize)
    {
        HIAI_LOGE("reading Asset error %d:",numBytesRead);
    }
    AAsset_close(asset);
    return;
}


// Builds a Convolution followed by an Activation op named `opName` /
// `opName + "relu"`. Filter and bias weights are loaded from the APK assets
// ("weight/<opName>_weight.bin" and "weight/<opName>_bias.bin").
// filterSize is NCHW {out_ch, in_ch, kH, kW}; bias shape is {1, out_ch, 1, 1}.
// All created ops/descs are pushed into g_ops/g_tensorDescs to keep them
// alive. Returns the Activation op (the node downstream layers connect to).
// NOTE(review): Activation mode is not set explicitly — the "relu" suffix
// suggests the default mode is ReLU; confirm against the HiAI op reference.
shared_ptr<Operator> CreateConvReluOp(string opName, Operator inputOp,
                                  vector<int64_t> filterSize,
                                  vector<int64_t> strides,
                                  vector<int64_t> pads)
{
    // Filter: seeded with 0.1 so a missing asset (which only logs) still
    // leaves a usable buffer, then normally overwritten from the asset file.
    shared_ptr<TensorDesc> filterDesc(new TensorDesc(Shape(filterSize), FORMAT_NCHW, DT_FLOAT));
    vector<float> filterVal(GetShapeSize(filterDesc->GetShape()),0.1);
    ReadBinFromAsset("weight/"+opName+"_weight.bin",filterVal.data(),filterVal.size()*sizeof(float));
    shared_ptr<hiai::op::Const> filter(new hiai::op::Const(opName+"filter"));
    SetConstData(*filter,*filterDesc,(uint8_t*)filterVal.data(),
                 GetShapeSize(filterDesc->GetShape())*sizeof(float));
    g_tensorDescs.push_back(filterDesc);
    g_ops.push_back(filter);

    // Bias: same seed-then-overwrite pattern, one value per output channel.
    shared_ptr<TensorDesc> biasDesc(new TensorDesc(Shape({1,filterSize[0],1,1}),FORMAT_NCHW, DT_FLOAT));
    vector<float> biasVal(GetShapeSize(biasDesc->GetShape()),0.1);
    ReadBinFromAsset("weight/"+opName+"_bias.bin",biasVal.data(),biasVal.size()*sizeof(float));
    shared_ptr<hiai::op::Const> bias(new hiai::op::Const(opName+"bias"));
    SetConstData(*bias,*biasDesc,(uint8_t*)biasVal.data(),
                 GetShapeSize(biasDesc->GetShape())*sizeof(float));
    g_tensorDescs.push_back(biasDesc);
    g_ops.push_back(bias);

    // Wire input -> conv(filter, bias) -> activation.
    shared_ptr<hiai::op::Convolution> convOp(new hiai::op::Convolution(opName));
    (*convOp).set_input_x(inputOp)
             .set_input_filter(*filter)
             .set_input_bias(*bias)
             .set_attr_strides(hiai::AttrValue::LIST_INT(strides))
             .set_attr_pads(hiai::AttrValue::LIST_INT(pads));
    shared_ptr<hiai::op::Activation> relu(new hiai::op::Activation(opName+"relu"));
    (*relu).set_input_x(*convOp);
    g_ops.push_back(convOp);
    g_ops.push_back(relu);

    return relu;
}

// Builds a ConvTranspose (deconvolution) op named `opName`, loading its filter
// from "weight/<opName>_weight.bin" in the APK assets. Unlike CreateConvReluOp
// there is no bias and no activation. filterSize is NCHW; created ops/descs
// are retained via g_ops/g_tensorDescs. Returns the ConvTranspose op.
shared_ptr<Operator> CreateDeconvOp(string opName, Operator inputOp,
                                      vector<int64_t> filterSize,
                                      vector<int64_t> strides,
                                      vector<int64_t> pads)
{
    // Filter: seeded with 0.1 (kept if the asset read fails, which only
    // logs), then normally overwritten from the asset file.
    shared_ptr<TensorDesc> filterDesc(new TensorDesc(Shape(filterSize), FORMAT_NCHW, DT_FLOAT));
    vector<float> filterVal(GetShapeSize(filterDesc->GetShape()),0.1);
    ReadBinFromAsset("weight/"+opName+"_weight.bin",filterVal.data(),filterVal.size()*sizeof(float));
    shared_ptr<hiai::op::Const> filter(new hiai::op::Const(opName+"filter"));
    SetConstData(*filter,*filterDesc,(uint8_t*)filterVal.data(),
                 GetShapeSize(filterDesc->GetShape())*sizeof(float));
    g_tensorDescs.push_back(filterDesc);
    g_ops.push_back(filter);

    shared_ptr<hiai::op::ConvTranspose> deconvOp(new hiai::op::ConvTranspose(opName));
    (*deconvOp).set_input_x(inputOp)
            .set_input_filter(*filter)
            .set_attr_strides(hiai::AttrValue::LIST_INT(strides))
            .set_attr_pads(hiai::AttrValue::LIST_INT(pads));
    g_ops.push_back(deconvOp);

    return deconvOp;
}

// Assembles the super-resolution IR graph into `graph`:
//   data (1x3xHxW, FP16) -> 4x [conv3x3 + relu] -> deconv (stride 2) -> output
// The stride-2 ConvTranspose doubles both spatial dims, so the declared
// output is 1 x 3 x 2H x 2W, FP16.
void build_IR_Graph(Graph& graph, int height, int width)
{
    // input datatype fp16
    shared_ptr<TensorDesc>  dataDesc(new TensorDesc(Shape({1, 3, height, width}), FORMAT_NCHW, DT_FLOAT16));
    auto data = hiai::op::Data("data");
    data.update_input_desc_x(*dataDesc);
    g_tensorDescs.push_back(dataDesc);

    // Feature extraction: 3->16 channels, then three 16->16 layers, all
    // 3x3 kernels with stride 1 and pad 1 (spatial size preserved).
    auto conv1 = CreateConvReluOp("model_conv1",data,{16,3,3,3},{1,1},{1,1,1,1});
    auto conv2 = CreateConvReluOp("model_conv2",*conv1,{16,16,3,3},{1,1},{1,1,1,1});
    auto conv3 = CreateConvReluOp("model_conv3",*conv2,{16,16,3,3},{1,1},{1,1,1,1});
    auto conv4 = CreateConvReluOp("model_conv4",*conv3,{16,16,3,3},{1,1},{1,1,1,1});
    // Upsample: 16->3 channels, 4x4 kernel, stride 2, pad 1.
    auto deconv = CreateDeconvOp("model_deconv",*conv4,{16,3,4,4},{2,2},{1,1,1,1});

    // output datatype fp16
    shared_ptr<hiai::op::NetOutput> outNode(new hiai::op::NetOutput("netoutput_fp16"));
    outNode->set_input_x(*deconv);
    shared_ptr<TensorDesc> outDesc(new TensorDesc(ge::Shape({1,3,height*2,width*2}), ge::FORMAT_ND, ge::DT_FLOAT16));
    // Flag that the output dtype was set explicitly (keeps FP16 output).
    outDesc->SetAttr("is_output_datatype_set",ge::AttrValue::CreateFrom<ge::AttrValue::BOOL>(true));
    outNode->update_input_desc_x(*outDesc);
    outNode->UpdateOutputDesc("y",*outDesc);
    g_tensorDescs.push_back(outDesc);
    g_ops.push_back(outNode);

    // NOTE(review): outputs is deliberately empty — presumably the NetOutput
    // node marks the graph output; confirm against the HiAI IR-build docs.
    std::vector<Operator> inputs{data};
    std::vector<Operator> outputs{};
    graph.SetInputs(inputs).SetOutputs(outputs);
    return;
}

// Writes the serialized model buffer to `path`.
// BUGFIX: previously always returned true; now returns false when the file
// cannot be opened or the write fails.
bool WriteToBufferFile(ge::Buffer& buffer, std::string path)
{
    std::ofstream outFile(path.c_str(), std::ios::out | std::ios::binary);
    if (!outFile.is_open()) {
        HIAI_LOGE("WriteToBufferFile: cannot open %s", path.c_str());
        return false;
    }
    outFile.write((char*)buffer.data(), buffer.size());
    bool ok = outFile.good();
    outFile.close();
    return ok;
}

// Writes the compiled OM model buffer to `path`.
// BUGFIX: previously always returned true; now returns false when the file
// cannot be opened or the write fails.
bool WriteToOmFile(domi::ModelBufferData& buffer, std::string path)
{
    std::ofstream outFile(path.c_str(), std::ios::out | std::ios::binary);
    if (!outFile.is_open()) {
        HIAI_LOGE("WriteToOmFile: cannot open %s", path.c_str());
        return false;
    }
    outFile.write((char*)buffer.data, buffer.length);
    bool ok = outFile.good();
    outFile.close();
    return ok;
}

// Queries the loaded model's I/O dimensions and allocates one FP16 AiTensor
// per input/output into the module-level itensors/otensors vectors.
// Returns 0 on success, -1 on failure (g_client is reset to nullptr).
int InitAiTensor()
{
    // Guard: this is only meaningful after a successful Load.
    if (g_client == nullptr) {
        HIAI_LOGE("InitAiTensor: AI client is null!");
        return -1;
    }

    vector<hiai::TensorDimension> input_dims;
    vector<hiai::TensorDimension> output_dims;
    auto ret = g_client->GetModelIOTensorDim(modelName, input_dims, output_dims);
    if (ret != hiai::AI_SUCCESS) {
        HIAI_LOGE("AI client getModelIO Failed!");
        g_client = nullptr;
        return -1;
    }

    // BUGFIX: loops compared signed int against size() and passed an int to
    // %u; use size_t with %zu.
    for (size_t i = 0; i < input_dims.size(); i++) {
        shared_ptr<hiai::AiTensor> inputTensor = make_shared<hiai::AiTensor>();
        inputTensor->Init(&input_dims[i], hiai::HIAI_DATATYPE_FLOAT16);
        HIAI_LOGV("input tensor %zu:[%d,%d,%d,%d]", i,
                   input_dims[i].GetNumber(),
                   input_dims[i].GetChannel(),
                   input_dims[i].GetHeight(),
                   input_dims[i].GetWidth());
        itensors.push_back(inputTensor);
    }

    for (size_t i = 0; i < output_dims.size(); i++) {
        shared_ptr<hiai::AiTensor> outputTensor = make_shared<hiai::AiTensor>();
        outputTensor->Init(&output_dims[i], hiai::HIAI_DATATYPE_FLOAT16);
        HIAI_LOGV("output tensor %zu:[%d,%d,%d,%d]", i,
                  output_dims[i].GetNumber(),
                  output_dims[i].GetChannel(),
                  output_dims[i].GetHeight(),
                  output_dims[i].GetWidth());
        otensors.push_back(outputTensor);
    }
    return 0;
}

// Builds the IR super-resolution graph for an HxW input, compiles it to an
// in-memory OM model, loads it into a fresh AiModelMngerClient, and allocates
// the FP16 I/O tensors. `mgr` is stashed in g_mgr so graph construction can
// read weight assets. Returns 0 on success, -1 on failure.
int HiaiBuildIRModel(AAssetManager* mgr,int height, int width)
{
    // Reset all module-level state so repeated calls start clean.
    g_ops.clear();
    g_tensorDescs.clear();
    itensors.clear();
    otensors.clear();

    g_mgr = mgr;
    Graph graph("sr");
    build_IR_Graph(graph,height,width);

    ge::Model model("model","model_v00001");
    model.SetGraph(graph);

//    ge::Buffer buffer;
//    model.Save(buffer);
//    WriteToBufferFile(buffer,"/sdcard/1/sr.irpb");

    domi::HiaiIrBuild irBuild;
    domi::ModelBufferData om_model_buff;
    // BUGFIX: CreateModelBuff's result was ignored and a BuildIRModel failure
    // only logged — execution then fell through and tried to load an invalid
    // buffer. Fail fast instead.
    if (!irBuild.CreateModelBuff(model, om_model_buff)) {
        HIAI_LOGE("Create model buffer Failed!");
        return -1;
    }
    if (!irBuild.BuildIRModel(model, om_model_buff)) {
        HIAI_LOGE("Build IR model Failed!");
        irBuild.ReleaseModelBuff(om_model_buff);
        return -1;
    }
//    WriteToOmFile(om_model_buff,"/sdcard/1/sr.om");

    g_client = make_shared<hiai::AiModelMngerClient>();
    if(g_client == nullptr)
    {
        HIAI_LOGE("new AI client Failed!");
        return -1;
    }
    auto ret_code = g_client->Init(nullptr);
    if(ret_code != hiai::AI_SUCCESS) {
        HIAI_LOGE("AI client Init Failed!");
        g_client = nullptr;
        return -1;
    }

    // Description version triple (3,0,0,0) matches the original code.
    auto model_desc = make_shared<hiai::AiModelDescription>(modelName,3,0,0,0);
    model_desc->SetModelBuffer(om_model_buff.data, om_model_buff.length);
    vector<shared_ptr<hiai::AiModelDescription>> model_descs;
    model_descs.push_back(model_desc);
    ret_code = g_client->Load(model_descs);
    if(ret_code != hiai::AI_SUCCESS) {
        HIAI_LOGE("AI client Load Failed!");
        g_client = nullptr;
        return -1;
    }
    // NOTE(review): om_model_buff is never released after a successful Load
    // (one-shot leak per build). The DDK sample releases it post-Load via
    // irBuild.ReleaseModelBuff — confirm the client copies the buffer before
    // adding that here.
    string key = "model_name";
    string value = modelName;
    context.AddPara(key,value);
    // init AI Tensors
    return InitAiTensor();
}

// Accessor for the input tensors allocated by InitAiTensor(); callers fill
// these (FP16) before invoking HiaiRunIRModel(). Empty until a successful
// HiaiBuildIRModel().
vector<shared_ptr<hiai::AiTensor>>& HiaiGetInputTensors()
{
    return  itensors;
}
// Accessor for the output tensors allocated by InitAiTensor(); filled by
// Process() during HiaiRunIRModel(). Empty until a successful
// HiaiBuildIRModel().
vector<shared_ptr<hiai::AiTensor>>& HiaiGetOutputTensors()
{
    return otensors;
}

// Reads the monotonic clock and returns it as a single nanosecond count;
// suitable for measuring elapsed intervals (not wall-clock time).
int64_t nanotime() {
    timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    int64_t nanos = static_cast<int64_t>(ts.tv_sec) * 1000000000LL;
    nanos += ts.tv_nsec;
    return nanos;
}


int HiaiRunIRModel()
{
    int istamp;
    //int64_t start = nanotime();
    int ret = g_client->Process(context,itensors,otensors,1000, istamp);
    //int64_t end = nanotime();
    //float time = (end - start)/1000000.0;
    //HIAI_LOGV("HiaiRunIRModel run:%.2fms ret:%d",time,ret);
    return ret;
}

// Queries the installed HiAI runtime version using a throw-away client.
// Returns the version string, or nullptr when HiAI is unavailable or init
// fails. NOTE(review): the returned char* is obtained from a client that is
// destroyed when this function returns — this assumes GetVersion() returns
// storage that outlives the client (e.g. a static string); confirm against
// the HiAI DDK documentation.
char* HiaiGetVersion()
{
    shared_ptr<hiai::AiModelMngerClient> client = make_shared<hiai::AiModelMngerClient>();
    if(client.get() == nullptr) {
        HIAI_LOGE("[HiaiGetVersion] new AiModelMngerClient failed!");
        return nullptr;
    }

    auto ret = client->Init(nullptr);
    if (ret != hiai::AI_SUCCESS) {
        HIAI_LOGE("[HiaiGetVersion] AiModelMngerClient init failed!");
        return nullptr;
    }

    char* currVersion = client->GetVersion();
    if(currVersion != nullptr) {
        HIAI_LOGV("[HiaiGetVersion] current HIAI version: %s",currVersion);
    } else {
        HIAI_LOGE("[HiaiGetVersion] current HIAI version: NULL");
    }
    return currVersion;
}




















