/**
 * \brief CenterNet-based 2D object detector (TensorRT inference + GPU decoding)
 * \author pengcheng (pengcheng@yslrpch@126.com)
 * \date 2020-05-30
 * \attention Copyright © ADC Technology(tianjin)Co.Ltd
* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
*/

#include "detection_vision/center_net_detector.h"
#include "detection_vision/utils2.h"
#include "detection_vision/utils.h"
#include <glog/logging.h>
#include <memory>
#include "detection_vision/cuda/center_net_decoder.h"
#include <unistd.h>
#include <tensor_rt.pb.h>
#include <stdlib.h>

namespace detection
{
template<typename Scalar>
/**
 * \brief Constructs the detector: loads the config, prepares the model cache
 *        directory, creates the TensorRT engine, and allocates host/device
 *        buffers for preprocessing and decoding.
 * \param config_file path to a protobuf text-format detector configuration
 * \throws const char* if the configuration cannot be parsed (see InitConfig)
 */
CenterNetDetector<Scalar>::CenterNetDetector(const std::string& config_file)
{
    InitConfig(config_file);

    // Resolve the serialized-model cache dir under $HOME. getenv() may return
    // nullptr (e.g. when run as a daemon); fall back to "." instead of
    // constructing std::string from a null pointer (undefined behavior).
    const char* home = getenv("HOME");
    std::string cache_path = (home != nullptr) ? std::string(home) : std::string(".");
    cache_path += "/.cache/models/";
    if(access(cache_path.c_str(), F_OK) != 0)
    {
        // Path comes from config/env, not external users; keep the original
        // shell-out for simplicity.
        std::string command = "mkdir -p " + cache_path;
        int res = system(command.c_str());
        (void) res;
    }

    // Redirect the serialized TensorRT engine file into the cache directory.
    tensorrt_inference::TensorRTConfig config_tensorrt = config_.tensor_rt_config();
    config_tensorrt.set_serialize_model_name(cache_path + config_tensorrt.serialize_model_name());
    p_trt_inference_ = std::make_shared<tensorrt_inference::TRTInfernce>(config_tensorrt);

    // Binding 1 is the heatmap output; its channel count is the class count.
    auto output_hm = p_trt_inference_->GetTensor(1);
    cnn_output_hm_channel_ = output_hm.channel();
    LOG(INFO)<<"center det numbers of classes: "<<cnn_output_hm_channel_;

    // Binding 0 is the network input tensor.
    auto input_tensor = p_trt_inference_->GetTensor(0);
    cnn_input_channel_ = input_tensor.channel();
    cnn_input_width_ = input_tensor.width();
    cnn_input_height_ = input_tensor.height();
    LOG(INFO)<<"cnn input width: "<<cnn_input_width_<<"  height: "<<cnn_input_height_<<"  channel: "<<cnn_input_channel_;

    batch_size_ =  config_.tensor_rt_config().batch_size();
    down_ratio_ = config_.down_ratio();
    image_width_ = config_.image_width();
    image_height_ = config_.image_height();
    score_threshold_ = config_.score_threshold();
    CUDA_CHECK(cudaStreamCreate(&stream_));

    // Per-channel normalization constants applied in PreProcess.
    mean_.push_back(config_.mean_1());
    mean_.push_back(config_.mean_2());
    mean_.push_back(config_.mean_3());

    stdd_.push_back(config_.std_1());
    stdd_.push_back(config_.std_2());
    stdd_.push_back(config_.std_3());

    // Host staging buffer for the normalized CHW input batch.
    input_buffer_ = new float[batch_size_ * cnn_input_width_ * cnn_input_height_ * cnn_input_channel_];

    // Decoder output layout (see Decode): per batch, a record of
    // cells * (6 floats + 1 unsigned int). Compute the per-batch size with the
    // exact same expression Decode uses, so allocation can never be smaller
    // than what Decode copies (the original formula divided in a different
    // order and could under-allocate when dims are not multiples of the ratio).
    size_t one_batch_size = cnn_input_width_ / down_ratio_ * cnn_input_height_ / down_ratio_  * (sizeof(float)*6 + sizeof(unsigned int));
    size_t buffer_size = one_batch_size * batch_size_;
    output_buffer_cpu_ = new char[buffer_size];
    output_buffer_gpu_ = tensorrt_inference::SafeCudaMalloc(buffer_size);

    // Letterbox parameters: isotropic scale to fit the network input, then
    // center the scaled image inside a zero-padded canvas.
    scale_ = std::min(float(cnn_input_width_)/ image_width_, float(cnn_input_height_)/image_height_);
    sacle_size_ = cv::Size(image_width_ * scale_, image_height_ * scale_);
    crop_mat_ = cv::Mat::zeros(cnn_input_height_, cnn_input_width_, CV_8UC3);
    input_feature_map_size_ = cnn_input_height_ * cnn_input_width_;
    input_batch_map_size_ = input_feature_map_size_ * cnn_input_channel_;
    rect_crop_ = cv::Rect((cnn_input_width_ - sacle_size_.width)/2, (cnn_input_height_ - sacle_size_.height)/2, sacle_size_.width, sacle_size_.height);
}

template<typename Scalar>
/**
 * \brief Releases all resources acquired in the constructor: host staging
 *        buffers, the device decode buffer, and the CUDA stream.
 */
CenterNetDetector<Scalar>::~CenterNetDetector()
{
    delete[] input_buffer_;
    delete[] output_buffer_cpu_;
    tensorrt_inference::SafeCudaFree(output_buffer_gpu_);
    // The stream created by cudaStreamCreate in the constructor was
    // previously leaked; destroy it explicitly.
    cudaStreamDestroy(stream_);
}

template<typename Scalar>
/**
 * \brief Letterboxes and normalizes a batch of images into the planar (CHW)
 *        host input buffer consumed by Inference().
 * \param images input images; at most batch_size_ entries
 * \throws const char* when more images than the configured batch size arrive
 */
void CenterNetDetector<Scalar>::PreProcess(const std::vector<cv::Mat>& images)
{
    // The staging buffer was sized for batch_size_ images; reject overflow.
    if(images.size() > batch_size_)
    {
        LOG(ERROR)<<"center net input images size more than max batch size ";
        throw "center net input images size more than max batch size ";
    }

    real_batch_size_ = images.size();

    for(size_t B = 0; B < real_batch_size_; B++)
    {
        // Start of batch B's plane data inside the CHW staging buffer.
        float* data_ptr = ((float*)input_buffer_) + B * input_batch_map_size_;

        // Letterbox: isotropic resize, then paste centered into the
        // zero-padded network-input canvas.
        cv::resize(images[B], resize_mat_, sacle_size_);
        resize_mat_.copyTo(crop_mat_(rect_crop_));

        // uint8 [0,255] -> float32 [0,1].
        crop_mat_.convertTo(float_mat_, CV_32FC3, 1./255);

        // HWC -> CHW: split channels and write each plane contiguously.
        std::vector<cv::Mat> input_channels(3);
        cv::split(float_mat_, input_channels);
        for(size_t C = 0; C < 3; C++)
        {
            // NOTE(review): (x / mean) / std is unusual — normalization is
            // typically (x - mean) / std. Kept as-is to preserve behavior;
            // confirm against the training-time preprocessing.
            cv::Mat normal_channel = (input_channels[C] / mean_[C]) / stdd_[C];
            memcpy(data_ptr, normal_channel.data, input_feature_map_size_ * sizeof(float));
            data_ptr +=  input_feature_map_size_;
        }
    }
}

template<typename Scalar>
/**
 * \brief Runs the full detection pipeline on a batch of images:
 *        preprocess -> TensorRT inference -> decode into 2D boxes.
 * \param imgs input images (at most the configured batch size; more throws
 *        inside PreProcess)
 * \param objs per-image detection results, filled by Decode
 * \return always true; failures surface as exceptions or CUDA_CHECK aborts
 */
bool CenterNetDetector<Scalar>::Detect(const std::vector<cv::Mat>& imgs, std::vector<std::vector<ImageBBox2dType>>& objs)
{
    PreProcess(imgs);
    Inference();
    Decode(objs);
    return true;
}

template<typename Scalar>
/**
 * \brief Forwards the preprocessed host input buffer through the TensorRT
 *        engine; outputs stay on the GPU and are fetched by Decode().
 */
void CenterNetDetector<Scalar>::Inference()
{
    p_trt_inference_->Inferrence(input_buffer_);
}

template<typename Scalar>
/**
 * \brief Decodes the raw network outputs (heatmap / offset / size) on the GPU
 *        into per-image 2D bounding boxes in source-image coordinates.
 * \param objects output: one vector of boxes per input image
 *        (real_batch_size_ entries)
 *
 * Device buffer layout, per batch slot: a leading unsigned int detection
 * count followed by count * 6 floats (class, x, y, w, h, score) — see the
 * parsing loop below.
 */
void CenterNetDetector<Scalar>::Decode(std::vector<std::vector<ImageBBox2dType> >& objects)
{
    // Bindings: 1 = heatmap, 2 = offset regression, 3 = width/height.
    void* hm = nullptr;
    void* wh = nullptr;
    void* reg = nullptr;
    p_trt_inference_->GetBuffDataGPU(1, &hm);
    p_trt_inference_->GetBuffDataGPU(2, &reg);
    p_trt_inference_->GetBuffDataGPU(3, &wh);
    objects.clear();

    int one_batch_size = cnn_input_width_ / down_ratio_ * cnn_input_height_ / down_ratio_  * (sizeof(float)*6 + sizeof(unsigned int));

    // Zero the detection counter of EVERY batch slot before the kernel runs.
    // The original code cleared only the first sizeof(float) bytes, leaving
    // stale counts in slots B >= 1 for multi-image batches.
    for(size_t B = 0; B < real_batch_size_; B++)
    {
        CUDA_CHECK(cudaMemset((char*)output_buffer_gpu_ + B * one_batch_size, 0, sizeof(unsigned int)));
    }

    CenterNetDetDecoderGPU((float*)hm, (float*)reg, (float*)wh, real_batch_size_, cnn_input_width_,
                            cnn_input_height_, cnn_output_hm_channel_, 3, down_ratio_, score_threshold_,
                            (float*)output_buffer_gpu_);

    // Copy back only the slots that were actually decoded, not the full
    // max-batch buffer.
    size_t copy_size = (size_t)one_batch_size * real_batch_size_;
    CUDA_CHECK(cudaMemcpyAsync(output_buffer_cpu_, output_buffer_gpu_,
                            copy_size, cudaMemcpyDeviceToHost, stream_));
    CUDA_CHECK(cudaStreamSynchronize(stream_));

    // Iterate real_batch_size_ (the original used batch_size_, reading
    // undefined data for slots beyond the submitted images).
    for(size_t B = 0; B < real_batch_size_; B++)
    {
        const char* p_res = output_buffer_cpu_ + B * one_batch_size;
        const unsigned int num_dets = *(const unsigned int*)p_res;
        const float* p_bbox = (const float*)(p_res + sizeof(unsigned int));

        std::vector<ImageBBox2dType> bboxs;
        bboxs.reserve(num_dets);
        for(unsigned int i = 0; i < num_dets; i++)
        {
            const float* cur_bbox = p_bbox + 6 * i;
            ImageBBox2dType bbox_t;
            bbox_t.class_id = cur_bbox[0];
            // Undo the letterbox: subtract the centering offset, then rescale
            // from network-input coordinates back to source-image coordinates.
            bbox_t.box2d.x1 = (cur_bbox[1] - rect_crop_.x) / scale_;
            bbox_t.box2d.y1 = (cur_bbox[2] - rect_crop_.y) / scale_;
            bbox_t.box2d.width = cur_bbox[3] / scale_;
            bbox_t.box2d.height = cur_bbox[4] / scale_;
            bbox_t.confidence_ = cur_bbox[5];
            bboxs.push_back(bbox_t);
        }
        objects.push_back(bboxs);
    }
}

template<typename Scalar>
/**
 * \brief Parses the protobuf text-format detector configuration into config_.
 * \param config_file path of the configuration file to read
 * \throws const char* sentinel after logging when parsing fails
 */
void CenterNetDetector<Scalar>::InitConfig(const std::string& config_file)
{
    try
    {
        LOG(INFO)<<"Read config file: "<<config_file;
        ProtoHelpFun::ReadProtoFromTextFile(config_file, &config_);
        LOG(INFO)<<"parse config file done...";
    }
    catch(const std::exception& ex)
    {
        // Log the failure with context, then rethrow the project's
        // string-literal sentinel for upstream handling.
        LOG(ERROR)<<"Init config "<<config_file<<", failure"<<ex.what();
        throw "init config file failed";
    }
}
template<typename Scalar>
/**
 * \brief Placeholder for class-id-to-name lookup; not implemented yet.
 * \param index class index (currently ignored)
 * \return a single-space string for every index
 */
std::string CenterNetDetector<Scalar>::GetClassName(const unsigned int index)
{
    static_cast<void>(index);  // silence unused-parameter warning
    return std::string(" ");
}
template class CenterNetDetector<float>;
}