//
// Created by mind on 10/2/20.
//
#include "model_process.h"
#include "utils.h"
#include "pose_process.h"
#include "acl/acl.h"
#include <bits/stdint-uintn.h>
#include <cstddef>
#include <cmath>
#include <ctime>

#include "eigen3/Eigen/Core"
#include "eigen3/Eigen/Dense"
#include "opencv2/highgui.hpp"
#include "opencv2/core/eigen.hpp"
#include "opencv2/imgproc.hpp"
#include "ascenddk/presenter/agent/presenter_channel.h"
#include<fstream>
#include <string>
using namespace std;
bool g_isDevice = false;

// Default constructor: device 0, no ACL context/stream/presenter channel yet.
// Fix: the base class ModelProcess() must be listed first in the initializer
// list — members are always initialized after the base regardless of listing
// order, so the old ordering was misleading and triggered -Wreorder.
OpenPoseProcess::OpenPoseProcess() : ModelProcess(), deviceId_(0), context_(nullptr),
    stream_(nullptr), poseInputBuf_(nullptr), channel_(nullptr)
{
    // Input buffer holds one float32 RGB image of the model's input size
    // (modelWidth_/modelHeight_ presumably come from ModelProcess — TODO confirm).
    poseInputBufSize_ = RGB_IMAGE_SIZE_F32(modelWidth_, modelHeight_);
}

// Constructor taking the path of the offline model (.om) to load in Init().
// Fix: base class ModelProcess() moved to the front of the initializer list;
// listing it after members was misleading and triggered -Wreorder.
OpenPoseProcess::OpenPoseProcess(string modelPath) : ModelProcess(), deviceId_(0),
    context_(nullptr), stream_(nullptr), poseInputBuf_(nullptr), channel_(nullptr),
    modelPath_(modelPath)
{
    // Input buffer holds one float32 RGB image of the model's input size.
    poseInputBufSize_ = RGB_IMAGE_SIZE_F32(modelWidth_, modelHeight_);
}

// Acquire the ACL runtime resources in the required order:
// aclInit -> set device -> create context -> create stream.
// Returns FAILED on the first error; earlier acquisitions are NOT rolled
// back here (DeInit is expected to release them).
Result OpenPoseProcess::InitResource()
{
    // NOTE(review): path is relative to the process working directory —
    // the binary must be launched from the expected location.
    const char *aclConfigPath = "../src/acl.json";
    aclError ret = aclInit(aclConfigPath);
    if(ret!=ACL_ERROR_NONE) {
        ERROR_LOG("acl init failed");
        return FAILED;
    }
    INFO_LOG("acl init success");

    // Bind this process to the NPU device (deviceId_ is fixed to 0 by the ctor).
    ret = aclrtSetDevice(deviceId_);
    if(ret!=ACL_ERROR_NONE) {
        ERROR_LOG("acl open device %d failed", deviceId_);
        return FAILED;
    }
    INFO_LOG("acl open device %d success",deviceId_);

    // Context owns the device resources used by subsequent ACL calls.
    ret = aclrtCreateContext(&context_,deviceId_);
    if(ret!=ACL_ERROR_NONE) {
        ERROR_LOG("acl create context failed");
        return FAILED;
    }
    INFO_LOG("acl create context success");

    // Stream used to queue asynchronous work on the device.
    ret = aclrtCreateStream(&stream_);
    if(ret!=ACL_ERROR_NONE) {
        ERROR_LOG("acl create stream failed");
        return FAILED;
    }
    INFO_LOG("acl create stream success");

    return SUCCESS;
}

// Load the offline model from modelPath and prepare its input/output
// datasets. Allocates the device-side input buffer (poseInputBuf_) that
// Preprocess() fills before each inference. Returns SUCCESS/FAILED.
Result OpenPoseProcess::InitModel(const string modelPath)
{
    Result ret = LoadModelFromFileWithMem(modelPath.c_str());
    if (ret != SUCCESS) {
        ERROR_LOG("model load failed");
        return FAILED;
    }
    INFO_LOG("model load success");

    ret = CreateDesc();
    if (ret != SUCCESS) {
        ERROR_LOG("model CreateDesc failed");
        return FAILED;
    }
    INFO_LOG("model CreateDesc success");

    // Allocate the model input buffer on the device.
    // Fix: the aclrtMalloc return code was previously ignored and only the
    // pointer was tested; check the status code as the authoritative result.
    aclError aclRet = aclrtMalloc(&poseInputBuf_, (size_t)(poseInputBufSize_), ACL_MEM_MALLOC_HUGE_FIRST);
    if (aclRet != ACL_ERROR_NONE || poseInputBuf_ == nullptr) {
        ERROR_LOG("Acl malloc image buffer failed.");
        return FAILED;
    }

    ret = CreateInput(poseInputBuf_, poseInputBufSize_);
    if (ret != SUCCESS) {
        ERROR_LOG("model CreateInput failed");
        return FAILED;
    }
    INFO_LOG("model CreateInput success");

    ret = CreateOutput();
    if (ret != SUCCESS) {
        ERROR_LOG("model CreateOutPut failed");
        return FAILED;
    }
    INFO_LOG("model CreateOutPut success");

    INFO_LOG("OpenPose Model initial success!");
    return SUCCESS;
}

// One-shot initialization: acquire the ACL runtime resources, then load
// and prepare the model stored at modelPath_.
Result OpenPoseProcess::Init() {
    if (InitResource() != SUCCESS) {
        ERROR_LOG("Init acl resource failed");
        return FAILED;
    }
    if (InitModel(modelPath_) != SUCCESS) {
        ERROR_LOG("Init model failed");
        return FAILED;
    }
    return SUCCESS;
}

// JPEG-encode origImg into encodeImg (quality 95; valid range 0-100).
// Fix: use the namespaced cv::IMWRITE_JPEG_QUALITY flag instead of the
// legacy C macro CV_IMWRITE_JPEG_QUALITY (removed from OpenCV 4's default
// headers), and build the parameter vector with an initializer list.
void OpenPoseProcess::EncodeImage(vector<uint8_t>& encodeImg, cv::Mat& origImg) {
    const vector<int> param = {cv::IMWRITE_JPEG_QUALITY, 95};
    cv::imencode(".jpg", origImg, encodeImg, param);
}


// JPEG-encode the frame and push it to the presenter server over the
// already-opened channel_. Errors are logged but not propagated.
void OpenPoseProcess::SendImage(cv::Mat& image) {
    vector<uint8_t> jpegBytes;
    EncodeImage(jpegBytes, image);

    ascend::presenter::ImageFrame frame;
    frame.format = ascend::presenter::ImageFormat::kJpeg;
    frame.width = image.cols;
    frame.height = image.rows;
    frame.size = jpegBytes.size();
    frame.data = reinterpret_cast<uint8_t*>(jpegBytes.data());
    // No detections are attached; the presenter only displays the frame.
    frame.detection_results = std::vector<ascend::presenter::DetectionResult>();

    ascend::presenter::PresenterErrorCode rc = ascend::presenter::PresentImage(channel_, frame);
    if (rc != ascend::presenter::PresenterErrorCode::kNone) {
        ERROR_LOG("PresentImage failed %d", static_cast<int>(rc));
    }
}

// Open a video channel named "OpenPose" to the presenter server so that
// processed frames can be streamed via SendImage().
Result OpenPoseProcess::OpenPresenterChannel() {
    ascend::presenter::OpenChannelParam param;
    // TODO(review): IP/port are hard-coded — consider reading them from
    // configuration instead of recompiling per deployment.
    param.host_ip = "192.168.1.134";  //IP address of Presenter Server
    param.port = 7008;  //port of present service
    param.channel_name = "OpenPose";
    param.content_type = ascend::presenter::ContentType::kVideo;  //content type is Video
    INFO_LOG("OpenChannel start");
    ascend::presenter::PresenterErrorCode errorCode =ascend::presenter::OpenChannel(channel_, param);
    INFO_LOG("OpenChannel param");
    if (errorCode != ascend::presenter::PresenterErrorCode::kNone) {
        ERROR_LOG("OpenChannel failed %d", static_cast<int>(errorCode));
        return FAILED;
    }
    return SUCCESS;

}



// Read an image from disk, normalize it to [-0.5, 0.5] float32 and pack it
// in CHW (planar) order into the model's device input buffer poseInputBuf_.
Result OpenPoseProcess::Preprocess(string& imageFile)
{
    cv::Mat image = cv::imread(imageFile, CV_LOAD_IMAGE_COLOR); // BGR image
    if (image.empty()) {
        ERROR_LOG("Read image %s failed", imageFile.c_str());
        return FAILED;
    }

    // Fix: the interpolation flag is the 6th argument of cv::resize; it was
    // previously passed as fx, so it was silently ignored and the resize
    // fell back to the default INTER_LINEAR.
    cv::resize(image, image, cv::Size(modelWidth_, modelHeight_), 0, 0, cv::INTER_CUBIC);

    image.convertTo(image, CV_32FC3); // uint8 -> float32
    image = image * (1 / 255.0) - 0.5; // scale to [-0.5, 0.5]

    // Split interleaved BGR into planes and copy them back-to-back (CHW).
    std::vector<cv::Mat> channels;
    cv::split(image, channels);
    uint32_t channelSize = IMAGE_CHAN_SIZE_F32(modelWidth_, modelHeight_);

    uint32_t pos = 0;
    for (int i = 0; i < 3; i++) {
        memcpy(static_cast<uint8_t*>(poseInputBuf_) + pos, channels[i].data, channelSize);
        pos += channelSize;
    }
    return SUCCESS;
}


// Overload of Preprocess: normalize an in-memory frame to [-0.5, 0.5]
// float32 and pack it in CHW order into the device input buffer.
Result OpenPoseProcess::Preprocess(cv::Mat image)
{
    if (image.empty()) {
        ERROR_LOG("Preprocess got an empty frame");
        return FAILED;
    }

    // Fix: interpolation flag must be the 6th argument of cv::resize;
    // previously cv::INTER_CUBIC was passed as fx and ignored.
    cv::resize(image, image, cv::Size(modelWidth_, modelHeight_), 0, 0, cv::INTER_CUBIC);
    image.convertTo(image, CV_32FC3); // uint8 -> float32
    image = image * (1 / 255.0) - 0.5; // scale to [-0.5, 0.5]

    // Split interleaved BGR into planes and copy them back-to-back (CHW).
    std::vector<cv::Mat> channels;
    cv::split(image, channels);
    uint32_t channelSize = IMAGE_CHAN_SIZE_F32(modelWidth_, modelHeight_);

    uint32_t pos = 0;
    for (int i = 0; i < 3; i++) {
        memcpy(static_cast<uint8_t*>(poseInputBuf_) + pos, channels[i].data, channelSize);
        pos += channelSize;
    }
    return SUCCESS;
}

// Run the model on the data currently in poseInputBuf_ and hand back the
// raw output dataset through inferenceOutput.
Result OpenPoseProcess::Inference(aclmdlDataset*& inferenceOutput) {
    if (Execute() != SUCCESS) {
        ERROR_LOG("Execute model inference failed");
        return FAILED;
    }
    inferenceOutput = GetModelOutputData();
    return SUCCESS;
}



// Decode the 18 keypoint heatmaps from the model output and append the
// (normalized) pose to a sliding window of FRAME_LENGTH frames held in
// motion_data, shifting the oldest frame out. The window persists across
// calls via the function-local static motion_data_old.
// Layout: motion_data[1][channel][frame][joint] with channels x/y/score.
Result OpenPoseProcess::Postprocess(aclmdlDataset*& openposeOutput,float motion_data[1][3][FRAME_LENGTH][18]) {

    // History buffer; zeroed once on first call (statics are zero-initialized
    // anyway, so the flag/memset is belt-and-braces).
    static float motion_data_old[1][3][FRAME_LENGTH][18];
    static bool flag_=false;
    if(!flag_)
    {
        memset(motion_data_old,0,sizeof(motion_data_old));
        flag_=true;
    }


    uint32_t heatmapSize = 0;
    float* heatmap_=(float*)GetInferenceOutputItem(heatmapSize,openposeOutput,0);

    vector<key_pointsT> all_keypoints;
    float max_val=0.0;
    float threshold=0.4;

    // Each of the 18 body parts has a 15x20 (=300) heatmap; take its argmax.
    for (int part = 0; part < 18; part++) {
        float *v = heatmap_ + part * 300;
        vector<float> vec(v, v+300);
        vector<float>::iterator biggest = max_element(vec.begin(), vec.end());
        int position=std::distance(vec.begin(), biggest);
        max_val=v[position];
        int maxRow=position / 20;
        int maxCol=position-maxRow*20; //(position-x)/20;
        // Low-confidence joints are recorded as (0,0,0).
        key_pointsT keypoints={0.0,0.0,0.0};
        if(max_val>threshold)
        {
            // Heatmap is 8x downscaled relative to the model input.
            keypoints = {
                (float)maxCol*8,(float)maxRow*8,max_val
            };
        }
        all_keypoints.push_back(keypoints);
    }
    // normalize motion data: x to [-0.5,0.5], y flipped so up is positive.
    float x[18]={0},y[18]={0},s[18]={0};
    for(int k=0;k<18;k++)
    {
        float tmp = all_keypoints[k].score;
        if(tmp<threshold) continue;
        else
        {
            x[k] = all_keypoints[k].x / modelWidth_ - 0.5;
            y[k] = 0.5 - all_keypoints[k].y / modelHeight_;
            s[k] = tmp;
        }
    }

    // dump pose result to motion data from tail to head.
    // ORDER MATTERS: the new frame is written to the tail first, then frames
    // 1..FRAME_LENGTH-1 of the OLD window are copied into slots
    // 0..FRAME_LENGTH-2, and finally the whole window is saved as the new
    // history (sizeof(x)*FRAME_LENGTH*3 equals sizeof(motion_data)).
    memcpy(motion_data[0][0][FRAME_LENGTH-1], x, sizeof(x)); // add new pose data to the tail
    memcpy(motion_data[0][1][FRAME_LENGTH-1], y, sizeof(y));
    memcpy(motion_data[0][2][FRAME_LENGTH-1], s, sizeof(s));
    memcpy(motion_data[0][0][0],motion_data_old[0][0][1],sizeof(x)*(FRAME_LENGTH-1)); // move out the data at the head
    memcpy(motion_data[0][1][0],motion_data_old[0][1][1],sizeof(y)*(FRAME_LENGTH-1));
    memcpy(motion_data[0][2][0],motion_data_old[0][2][1],sizeof(s)*(FRAME_LENGTH-1));
    memcpy(motion_data_old,motion_data,sizeof(x)*FRAME_LENGTH*3); // update old data


    return SUCCESS;
}



// Decode the 18 keypoint heatmaps (15x20 each) from the model output and
// draw the resulting skeleton onto the image stored at imagePath.
Result OpenPoseProcess::Postprocess(aclmdlDataset*& modelOutput,string imagePath) {

    uint32_t heatmapSize = 0;
    float* heatmap_ = (float*)GetInferenceOutputItem(heatmapSize, modelOutput, 0);

    vector<key_pointsT> all_keypoints; // Assume one person only
    const float threshold = 0.4;

    // For each body part take the argmax of its 15x20 (=300) heatmap.
    for (int part = 0; part < 18; part++) {
        float *v = heatmap_ + part * 300;
        float *biggest = max_element(v, v + 300);
        int position = (int)std::distance(v, biggest);
        float max_val = *biggest;
        int maxRow = position / 20;          // heatmap width is 20
        int maxCol = position - maxRow * 20;
        // Low-confidence joints are recorded as (0,0,0).
        key_pointsT keypoints = {0.0, 0.0, 0.0};
        if (max_val > threshold) {
            // Heatmap is 8x downscaled relative to the model input.
            keypoints = {
                (float)maxCol * 8, (float)maxRow * 8, max_val
            };
        }
        all_keypoints.push_back(keypoints);
    }

    // Fix: the original also computed normalized x/y/s arrays here, but the
    // results were never used in this overload (dead code) — removed. The
    // motion_data overload still performs that normalization where needed.
    Utils::DrawHuman(imagePath, all_keypoints);

    return SUCCESS;
}

// Decode the 18 keypoint heatmaps from the model output and draw the
// skeleton directly onto the in-memory frame. Uses an Eigen round-trip
// (cv2eigen + maxCoeff) to locate each heatmap's peak, and logs per-joint
// timing to stdout.
Result OpenPoseProcess::Postprocess(aclmdlDataset*& modelOutput,cv::Mat& frame) {

    uint32_t heatmapSize = 0;//, pafSize=0;
    float* heatmap_=(float*)GetInferenceOutputItem(heatmapSize,modelOutput,0);

    // One 15x20 heatmap per body part.
    Eigen::Matrix <float, 15, 20> resized_matrix;


    Eigen::MatrixXd::Index maxRow, maxCol;
    vector<key_pointsT> all_keypoints;

    float max_val;
    float thre=0.4;

    // find 18 keypoints of a person
    clock_t start_time;
    for (int part = 0; part < 18; part++) {
        start_time = clock();
        float *v = heatmap_ + part * 300;
        // Wrap the raw output plane without copying, then find its peak.
        cv::Mat heatmap(15,20,CV_32FC1,v);
        cv::cv2eigen(heatmap, resized_matrix);
        max_val=resized_matrix.maxCoeff(&maxRow,&maxCol);

        // Low-confidence joints are recorded as (0,0,0).
        key_pointsT keypoints={0.0,0.0,0.0};
        if(max_val>thre)
        {
            // Heatmap coordinates are 8x downscaled relative to model input.
            keypoints = {
                (float)maxCol*8,(float)maxRow*8,max_val
            };
        }

        all_keypoints.push_back(keypoints);
        std::cout << "process keypoints time " << double(clock() - start_time) / CLOCKS_PER_SEC << std::endl;

    }
    Utils::DrawHuman(frame,all_keypoints);

    return SUCCESS;
}

// Replay a saved motion_data.txt file as a rendered skeleton video.
void OpenPoseProcess::txtProcess(string txtPath)
{
    float frames[1][3][FRAME_LENGTH][18];
    // Zero the buffer first: the reader may fill it sparsely.
    memset(frames, 0, sizeof(frames));
    Utils::read_motion_data(txtPath, frames, true); // true: file stores NHWC data
    Utils::DrawHuman(frames);
    cout << "write motion video success!" << endl;
}

// Consume images named <filepath>/<fileid>/<k>.jpg in sequence.
// if_break:   true  -> stop after processing FRAME_LENGTH images (dataset making)
//             false -> loop forever, busy-waiting for new frames (presenter)
// write_data: true  -> accumulate poses into motion_data and write them to disk
//             false -> draw poses back onto the source images
void OpenPoseProcess::fileProcess(string filepath,bool if_break,bool write_data)
{
    float motion_data[1][3][FRAME_LENGTH][18];
    memset(motion_data,0,sizeof(motion_data));
    Result ret;
    static int fileid=1; // start file; directory index persists across calls
    int image_num = 0;
    while (1) {

        std::cout << "file/image: " <<fileid<<"/"<<image_num << std::endl;

        if (image_num >= FRAME_LENGTH) // When pose infer count FRAME_LENGTH, dump/rotate.
        {
            if(write_data)
            {
                Utils::WriteMotionData(motion_data);
            }
            if(if_break) break;
            image_num %= FRAME_LENGTH;
        }

        // file of the image
        string imagePath = filepath + "/" + to_string(fileid) + "/" + to_string(image_num++) + ".jpg";
        cout<<imagePath<<endl;

        // Busy-poll until the image exists. In dataset mode (if_break) a
        // missing image means the sequence has ended, so stop the whole loop.
        bool missing = false;
        while (access(imagePath.data(), 0) == -1)
        {
            if (if_break)
            {
                missing = true;
                break;
            }
            cout<<"waiting..."+imagePath<<endl;
        }
        // Fix: previously only the wait loop was broken and the nonexistent
        // file was still fed into Preprocess/Inference.
        if (missing) break;

        // Fix: Preprocess/Inference return codes were previously ignored.
        ret = Preprocess(imagePath);
        if (ret != SUCCESS) {
            std::cout<<"Openpose Preprocess not success"<<std::endl;
            continue;
        }
        aclmdlDataset * inferenceOutput = nullptr;
        ret = Inference(inferenceOutput);
        if (ret != SUCCESS) {
            std::cout<<"Openpose Inference not success"<<std::endl;
            continue;
        }

        if(!write_data)
            ret = Postprocess(inferenceOutput,imagePath);
        else
            ret = Postprocess(inferenceOutput, motion_data);

        if (ret != SUCCESS) {
            std::cout<<"Openpose Postprocess not success"<<std::endl;
            continue;
        }

        // In presenter mode the consumed frame is deleted to make room.
        if(!if_break)
            unlink(imagePath.c_str());
    }
    fileid++;
}


// Presenter-server loop: busy-wait for frames named <filepath>/<n>.jpg,
// run pose estimation on each, draw the skeleton, stream the result to the
// presenter server, and delete the consumed file. Never returns (infinite
// while(1) with no break), so the trailing fileid++ is unreachable.
void OpenPoseProcess::presenterProcess(string filepath)
{
    Result ret = OpenPresenterChannel();
    if (ret != SUCCESS) {
        // NOTE(review): failure is only logged — the loop below still runs
        // and SendImage will fail each frame. Confirm whether this should
        // return instead.
        ERROR_LOG("Open presenter channel failed");
    };
    static int fileid=1;
    int image_num = 0;

    while (1) {

        if (image_num >= FRAME_LENGTH) // When pose infer count FRAME_LENGTH , start gesture infer process.
        {
            // Wrap around so the producer can reuse the same file names.
            image_num %= FRAME_LENGTH;
        }

        std::cout << "image: "<<image_num << std::endl;

        string imagePath = filepath + "/" + to_string(image_num++) + ".jpg";
        cout<<imagePath<<endl;

        // Busy-poll (spins at 100% CPU) until the producer writes the frame.
        while (access(imagePath.data(), 0) == -1)
        {
            cout << "waiting..."+ imagePath << endl;

        }

        clock_t start_time = clock();
        cv::Mat img=cv::imread(imagePath);

        // Unreadable/corrupt frame: skip it and delete the file.
        if(img.empty())
        {
            image_num++;
            unlink(imagePath.c_str());
            continue;
        }

        // Per-stage wall-clock timings are logged to stdout.
        std::cout << "read time " << double(clock() - start_time) / CLOCKS_PER_SEC << std::endl;
        start_time = clock();
        ret = Preprocess(img);
//        ret = Preprocess(imagePath); // slower
        std::cout << "preprocess time " << double(clock() - start_time) / CLOCKS_PER_SEC << std::endl;
        start_time = clock();
        aclmdlDataset * inferenceOutput;
        ret = Inference(inferenceOutput);
        std::cout << "infer time " << double(clock() - start_time) / CLOCKS_PER_SEC << std::endl;
        start_time = clock();
        ret = Postprocess(inferenceOutput,img);
        std::cout << "postprocess time " << double(clock() - start_time) / CLOCKS_PER_SEC << std::endl;
        start_time = clock();
        SendImage(img);
        std::cout << "send time " << double(clock() - start_time) / CLOCKS_PER_SEC << std::endl;

        // Delete the consumed frame so the producer can write the next one.
        unlink(imagePath.c_str());
    }
    fileid++; // unreachable: the loop above never breaks
}


// Run the full pipeline (preprocess -> inference -> draw) on a single image.
// Fix: every stage's return code was previously ignored, so a failed read
// or inference still reported success.
void OpenPoseProcess::imageProcess(string imagePath)
{
    Result ret = Preprocess(imagePath);
    if (ret != SUCCESS) {
        ERROR_LOG("OpenPose Preprocess failed");
        return;
    }
    aclmdlDataset* inferenceOutput = nullptr;
    ret = Inference(inferenceOutput);
    if (ret != SUCCESS) {
        ERROR_LOG("OpenPose Inference failed");
        return;
    }
    ret = Postprocess(inferenceOutput, imagePath);
    if (ret != SUCCESS) {
        ERROR_LOG("OpenPose Postprocess failed");
        return;
    }
    INFO_LOG("OpenPose Image Process Success.");
}

// Run pose estimation on every frame of a video and write the annotated
// frames to ./output/result.mp4 at the source frame rate.
void OpenPoseProcess::videoProcess(string videoPath)
{
    cv::VideoCapture cap(videoPath);
    if (!cap.isOpened()) {
        ERROR_LOG("Open video %s failed", videoPath.c_str());
        return;
    }
    double rate = cap.get(cv::CAP_PROP_FPS);
    cv::Size videoSize(cap.get(cv::CAP_PROP_FRAME_WIDTH), cap.get(cv::CAP_PROP_FRAME_HEIGHT));

    cv::VideoWriter writer;
    writer.open("./output/result.mp4", 0x00000021, rate, videoSize); // 0x21 = H.264 fourcc
    cv::Mat frame;
    // Fix: the loop previously tested cap.isOpened(), which stays true after
    // the last frame, so it spun forever on empty Mats; cap.read() returns
    // false at end-of-stream.
    while (cap.read(frame))
    {
        // Fix: Preprocess was called with videoPath (the file-path string
        // overload) instead of the captured frame, so no frame was ever fed
        // to the model.
        Result ret = Preprocess(frame);
        if (ret != SUCCESS) {
            continue;
        }
        aclmdlDataset* inferenceOutput = nullptr;
        ret = Inference(inferenceOutput);
        if (ret != SUCCESS) {
            continue;
        }
        ret = Postprocess(inferenceOutput, frame);
        writer << frame;
    }
}



// Release ACL resources in reverse order of acquisition:
// stream -> context -> device -> aclFinalize.
// Fix: the context created by InitResource was never destroyed, leaking it
// on every init/deinit cycle.
Result OpenPoseProcess::DeInit()
{
    aclError ret;
    if (stream_ != nullptr) {
        ret = aclrtDestroyStream(stream_);
        if(ret != ACL_ERROR_NONE) {
            ERROR_LOG("destroy stream failed");
        }
        stream_ = nullptr;
    }

    if (context_ != nullptr) {
        ret = aclrtDestroyContext(context_);
        if(ret != ACL_ERROR_NONE) {
            ERROR_LOG("destroy context failed");
        }
        context_ = nullptr;
    }

    ret = aclrtResetDevice(deviceId_);
    if(ret != ACL_ERROR_NONE) {
        ERROR_LOG("reset device failed");
    }

    ret = aclFinalize();
    if(ret != ACL_ERROR_NONE) {
        ERROR_LOG("finalize acl failed");
    }
    INFO_LOG("OpenPose deinit success.");
    return SUCCESS;
}
