#include <sys/stat.h>
#include <fstream>
#include <glog/logging.h>

#include "SampleDetector.h"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "ji_utils.h"

#define USE_ASPECT_RATIO 1
#define DUMP_FILE 0
#define USE_MULTICLASS_NMS 1

// Returns true when the given path can be stat()ed, i.e. a file or
// directory with that name exists and is reachable.
static bool if_file_exists(const char *filename)
{
    struct stat st;
    return stat(filename, &st) == 0;
}

// Default constructor: intentionally empty — all real setup (model load,
// device buffers) happens in Init(), and teardown in the destructor.
SampleDetector::SampleDetector()
{
}
/**
 * @brief Load the class-name file and set up the bmodel network plus the
 *        pre-allocated bm_image buffers used during pre-processing.
 * @param model_name path of the .bmodel file handed to BMNNContext
 * @param name_file  text file with one class name per line
 * @param thresh     detection confidence threshold stored in m_thresh
 * @return true on success, false when the name file does not exist
 */
bool SampleDetector::Init(const std::string &model_name, const std::string &name_file, float thresh)
{
    m_thresh = thresh;
    if(!if_file_exists(name_file.c_str()))
    {
        SDKLOG(INFO)<<"file not exists:"<<name_file;
        return false;
    }
    std::ifstream ifs(name_file);
    if (ifs.is_open()) 
    {
        std::string line;
        while(std::getline(ifs, line)) 
        {
            // Strip a trailing '\r' so name files saved with CRLF (Windows)
            // line endings do not yield class names carrying a stray
            // carriage return, which would corrupt every reported label.
            if(!line.empty() && line.back() == '\r')
            {
                line.pop_back();
            }
            m_class_names.push_back(line);
        }
    }
    ifs.close();

    // 1. load the bmodel on device m_dev_id and grab network #0
    BMNNHandlePtr handle = std::make_shared<BMNNHandle>(m_dev_id);
    m_bmContext =  std::make_shared<BMNNContext>(handle, model_name.c_str());
    m_bmNetwork = m_bmContext->network(0);
    // 2. input geometry: shape is NCHW, so dims[2] = height, dims[3] = width
    m_max_batch = m_bmNetwork->maxBatch();
    auto tensor = m_bmNetwork->inputTensor(0);
    m_net_h = tensor->get_shape()->dims[2];
    m_net_w = tensor->get_shape()->dims[3];
    // 3. outputs: only 1 (already decoded) or 3 (per-scale feature maps)
    //    output tensors are supported by Postprocess()
    output_num = m_bmNetwork->outputTensorNum();
    assert(output_num == 1 || output_num == 3);
    min_dim = m_bmNetwork->outputTensor(0)->get_shape()->num_dims;

    // 4. allocate the per-batch resize buffers; row stride aligned to 64
    //    bytes as required by the VPP hardware
    int aligned_net_w = FFALIGN(m_net_w, 64);
    SDKLOG(INFO) << "input h:" << m_net_h << "input w:" << m_net_w << " aligned_net_w:" << aligned_net_w;
    int strides[3] = {aligned_net_w, aligned_net_w, aligned_net_w};
    SDKLOG(INFO) << "max_batch:" << m_max_batch;
    m_resized_imgs.resize(m_max_batch);
    m_converto_imgs.resize(m_max_batch);
    for(int i=0; i<m_max_batch; i++)
    {
        auto ret = bm_image_create(m_bmContext->handle(), m_net_h, m_net_w,
                                   FORMAT_RGB_PLANAR,
                                   DATA_TYPE_EXT_1N_BYTE,
                                   &m_resized_imgs[i], strides);
        assert(BM_SUCCESS == ret);
        SDKLOG(INFO) << "end bm image create" << ret;
    }

    // Back the resized images with one contiguous device allocation so the
    // batch can be handed to the network as a single memory block.
    bm_image_alloc_contiguous_mem(m_max_batch, m_resized_imgs.data());

    // The convert-to buffers match the network input dtype: signed int8 for
    // quantized models, float32 otherwise.
    bm_image_data_format_ext img_dtype = DATA_TYPE_EXT_FLOAT32;
    if (tensor->get_dtype() == BM_INT8)
    {
        img_dtype = DATA_TYPE_EXT_1N_BYTE_SIGNED;
    }
    auto ret = bm_image_create_batch(m_bmContext->handle(), m_net_h, m_net_w,
                                     FORMAT_RGB_PLANAR,
                                     img_dtype,
                                     m_converto_imgs.data(), m_max_batch);

    SDKLOG(INFO) << "end bm image batch" << ret;

    assert(BM_SUCCESS == ret);
    // 5. linear transform applied per channel during convert-to:
    //    out = in * (scale / 255) + 0, i.e. normalize to [0,1] folded
    //    together with the model's quantization input scale.
    float input_scale = tensor->get_scale();
    input_scale = input_scale * 1.0 / 255.f;
    converto_attr.alpha_0 = input_scale;
    converto_attr.beta_0 = 0;
    converto_attr.alpha_1 = input_scale;
    converto_attr.beta_1 = 0;
    converto_attr.alpha_2 = input_scale;
    converto_attr.beta_2 = 0;
    
    SDKLOG(INFO)<< "init done";
    return true;
}
// Computes the uniform scale that fits (src_w, src_h) inside
// (dst_w, dst_h) while preserving aspect ratio.  On return,
// *pIsAligWidth is true when the width is the tight (fully used) edge
// and false when the height is.
float SampleDetector::get_aspect_scaled_ratio(int src_w, int src_h, int dst_w, int dst_h, bool *pIsAligWidth)
{
    const float scale_w = (float)dst_w / src_w;
    const float scale_h = (float)dst_h / src_h;
    if (scale_h > scale_w)
    {
        *pIsAligWidth = true;
        return scale_w;
    }
    *pIsAligWidth = false;
    return scale_h;
}

// Returns the index of the largest element in data[0 .. dsize).
// Seeding the running maximum with data[0] (instead of the old hard-coded
// 0.0f) keeps the result correct even when every value is negative, and
// the dsize/null guard avoids reading past an empty buffer.
int SampleDetector::argmax(float* data, int dsize)
{
    if (data == nullptr || dsize <= 0)
    {
        return 0;
    }
    int max_index = 0;
    float max_value = data[0];
    for (int i = 1; i < dsize; ++i)
    {
        if (data[i] > max_value)
        {
            max_value = data[i];
            max_index = i;
        }
    }
    return max_index;
}

// Logistic sigmoid: maps any real x into the open interval (0, 1).
float SampleDetector::sigmoid(float x)
{
    const double e = expf(-x);
    return 1.0 / (1 + e);
}


// In-place non-maximum suppression: removes from `objects` every box whose
// IoU with a higher-scoring box exceeds `thresh`.  Boxes are first sorted
// by ascending score; `index` walks from the best box downward and erases
// lower-scoring overlapping boxes in front of it.  Surviving boxes remain
// in ascending-score order.
void SampleDetector::NMS(std::vector<BoxInfo>& objects, float thresh)
{
    int length = objects.size();
    int index = length - 1;

    std::sort(objects.begin(), objects.end(), [](const BoxInfo& a, const BoxInfo& b) {
      return a.score < b.score;
      });

    // Pre-compute box areas; indices stay in lockstep with `objects`
    // because every erase below removes from both vectors together.
    // NOTE(review): the int() casts truncate fractional box extents — the
    // areas are deliberately(?) integer-valued; confirm before changing.
    std::vector<float> areas(length);
    for (int i=0; i<length; i++)
    {
        areas[i] = int(objects[i].x2 - objects[i].x1) * int(objects[i].y2 - objects[i].y1);
    }
    while (index  > 0)
    {
        int i = 0;
        while (i < index)
        {
            // Intersection rectangle between the current best box
            // (objects[index]) and candidate objects[i].
            float left    = std::max(objects[index].x1, objects[i].x1);
            float top     = std::max(objects[index].y1, objects[i].y1);
            float right   = std::min(objects[index].x2, objects[i].x2);
            float bottom  = std::min(objects[index].y2, objects[i].y2);
            float overlap = std::max(0.0f, right - left) * std::max(0.0f, bottom - top);
            // IoU = intersection / union; suppress the lower-scoring box.
            if (overlap / (areas[index] + areas[i] - overlap) > thresh)
            {
                // Erase shifts everything left, so `index` must follow.
                areas.erase(areas.begin() + i);
                objects.erase(objects.begin() + i);
                index --;
            }
            else
            {
                i++;
            }
        }
        index--;
    }

}

// Letterbox-resizes each input image into m_resized_imgs, applies the
// per-channel linear transform (converto_attr) into m_converto_imgs, and
// attaches the resulting contiguous device memory to the network's input
// tensor.  Always returns 0.
int SampleDetector::Preprocess(const std::vector<bm_image>& images)
{
    std::shared_ptr<BMNNTensor> input_tensor = m_bmNetwork->inputTensor(0);
    int image_n = images.size();
    //1. resize image
    int ret = 0;
    for(int i = 0; i < image_n; ++i) 
    {
        bm_image image1 = images[i];
        bm_image image_aligned;
        // VPP requires a 64-byte-aligned stride; when the source width is
        // not a multiple of 64, copy into a freshly created padded image.
        bool need_copy = image1.width & (64-1);
        if(need_copy)
        {
            int stride1[3], stride2[3];
            bm_image_get_stride(image1, stride1);
            stride2[0] = FFALIGN(stride1[0], 64);
            stride2[1] = FFALIGN(stride1[1], 64);
            stride2[2] = FFALIGN(stride1[2], 64);
            bm_image_create(m_bmContext->handle(), image1.height, image1.width, image1.image_format, image1.data_type, &image_aligned, stride2);
            bm_image_alloc_dev_mem(image_aligned, BMCV_IMAGE_FOR_IN);
            bmcv_copy_to_atrr_t copyToAttr;
            memset(&copyToAttr, 0, sizeof(copyToAttr));
            copyToAttr.start_x = 0;
            copyToAttr.start_y = 0;
            copyToAttr.if_padding = 1;
            bmcv_image_copy_to(m_bmContext->handle(), copyToAttr, image1, image_aligned);
        } 
        else 
        {
            image_aligned = image1;
        }
#if USE_ASPECT_RATIO
        // Letterbox: scale to fit while preserving aspect ratio, center the
        // result, and pad the borders with gray (114,114,114) — the same
        // padding value YOLOv5 uses for letterboxing.
        bool isAlignWidth = false;
        float ratio = get_aspect_scaled_ratio(images[i].width, images[i].height, m_net_w, m_net_h, &isAlignWidth);
        bmcv_padding_atrr_t padding_attr;
        memset(&padding_attr, 0, sizeof(padding_attr));
        padding_attr.dst_crop_sty = 0;
        padding_attr.dst_crop_stx = 0;
        padding_attr.padding_b = 114;
        padding_attr.padding_g = 114;
        padding_attr.padding_r = 114;
        padding_attr.if_memset = 1;
        if (isAlignWidth) 
        {
            // Width fills the net input; pad equally above and below.
            padding_attr.dst_crop_h = images[i].height*ratio;
            padding_attr.dst_crop_w = m_net_w;

            int ty1 = (int)((m_net_h - padding_attr.dst_crop_h) / 2);
            padding_attr.dst_crop_sty = ty1;
            padding_attr.dst_crop_stx = 0;
        }else
        {
            // Height fills the net input; pad equally left and right.
            padding_attr.dst_crop_h = m_net_h;
            padding_attr.dst_crop_w = images[i].width*ratio;

            int tx1 = (int)((m_net_w - padding_attr.dst_crop_w) / 2);
            padding_attr.dst_crop_sty = 0;
            padding_attr.dst_crop_stx = tx1;
        }

        bmcv_rect_t crop_rect{0, 0, image1.width, image1.height};
        // NOTE(review): this `auto ret` shadows the outer `ret`, so the
        // vpp_convert status is computed but never checked — TODO confirm
        // whether failures here should abort preprocessing.
        auto ret = bmcv_image_vpp_convert_padding(m_bmContext->handle(), 1, image_aligned, &m_resized_imgs[i], &padding_attr, &crop_rect);
#else
        auto ret = bmcv_image_vpp_convert(m_bmContext->handle(), 1, images[i], &m_resized_imgs[i]);
#endif
#if DUMP_FILE
        cv::Mat resized_img;
        cv::bmcv::toMAT(&m_resized_imgs[i], resized_img);
        std::string fname = cv::format("resized_img_%d.jpg", i);
        cv::imwrite(fname, resized_img);
#endif
        if(need_copy) bm_image_destroy(image_aligned);
    }
    //2. converto: apply out = in * alpha + beta per channel (set in Init)
    ret = bmcv_image_convert_to(m_bmContext->handle(), image_n, converto_attr, m_resized_imgs.data(), m_converto_imgs.data());
    CV_Assert(ret == 0);

    //3. attach to tensor: round the batch up to a size the model supports
    if(image_n != m_max_batch) image_n = m_bmNetwork->get_nearest_batch(image_n); 
    bm_device_mem_t input_dev_mem;
    bm_image_get_contiguous_device_mem(image_n, m_converto_imgs.data(), &input_dev_mem);
    input_tensor->set_device_mem(&input_dev_mem);
    input_tensor->set_shape_by_dim(0, image_n);  // set real batch number

    return 0;

}

// Decodes the network output(s) for each input image, filters candidates by
// score, applies NMS, and appends one BoxInfo vector per image to `boxes`.
// Supports two model layouts: a single 3-D output that is already decoded,
// or three 5-D per-scale feature maps that are decoded here with the
// standard YOLOv5 anchor formulas.  Always returns 0.
int SampleDetector::Postprocess(const std::vector<bm_image>& images, std::vector<std::vector<BoxInfo>>& boxes)
{
    std::vector<cv::Rect> bbox_vec;
    std::vector<BoxInfo> yolobox_vec;
    std::vector<std::shared_ptr<BMNNTensor>> outputTensors(output_num);
    for(int i=0; i<output_num; i++)
    {
        outputTensors[i] = m_bmNetwork->outputTensor(i);
    }
    for(int batch_idx = 0; batch_idx < images.size(); ++ batch_idx)
    {
        yolobox_vec.clear();
        auto& frame = images[batch_idx];
        int frame_width = frame.width;
        int frame_height = frame.height;

        // Letterbox offsets used to map net-space coords back to the frame.
        // NOTE(review): `ratio` is only defined under USE_ASPECT_RATIO but is
        // used unconditionally further down — the code does not compile with
        // the macro set to 0.
        int tx1 = 0, ty1 = 0;
#if USE_ASPECT_RATIO
        bool isAlignWidth = false;
        float ratio = get_aspect_scaled_ratio(frame.width, frame.height, m_net_w, m_net_h, &isAlignWidth);
        if (isAlignWidth) 
        {
            ty1 = (int)((m_net_h - (int)(frame_height*ratio)) / 2);
        }else
        {
            tx1 = (int)((m_net_w - (int)(frame_width*ratio)) / 2);
        }
#endif

        // Find the output with the fewest dims (the decoded one, if any) and
        // count the total anchor boxes across the 5-D feature-map outputs.
        // NOTE(review): min_dim is a member, so its value persists across
        // batches and across calls — only the first pass can lower it.
        int min_idx = 0;
        int box_num = 0;
        for(int i=0; i<output_num; i++)
        {
            auto output_shape = m_bmNetwork->outputTensor(i)->get_shape();
            auto output_dims = output_shape->num_dims;
            assert(output_dims == 3 || output_dims == 5);
            if(output_dims == 5)
            {
                box_num += output_shape->dims[1] * output_shape->dims[2] * output_shape->dims[3];
            }

            if(min_dim>output_dims)
            {
                min_idx = i;
                min_dim = output_dims;
            }
        }
        auto out_tensor = outputTensors[min_idx];
        // nout = 4 box coords + 1 objectness + class scores
        int nout = out_tensor->get_shape()->dims[min_dim-1];
        m_class_num = nout - 5;

        float* output_data = nullptr;
        std::vector<float> decoded_data;

        if(min_dim ==3 && output_num !=1)
        {
            SDKLOG(INFO)<<"--> WARNING: the current bmodel has redundant outputs";
            SDKLOG(INFO)<<"you can remove the redundant outputs to improve performance";
        }
        if(min_dim == 5)
        {
            // Decode the three raw feature maps into (cx, cy, w, h, obj,
            // cls...) rows using the fixed COCO YOLOv5 anchor set below.
            SDKLOG(INFO)<<"post 1: get output and decode";
            const std::vector<std::vector<std::vector<int>>> anchors{
                    {{10, 13}, {16, 30}, {33, 23}},
                    {{30, 61}, {62, 45}, {59, 119}},
                    {{116, 90}, {156, 198}, {373, 326}}};
            const int anchor_num = anchors[0].size();
            assert(output_num == (int)anchors.size());
            assert(box_num>0);
            if((int)decoded_data.size() != box_num*nout)
            {
                decoded_data.resize(box_num*nout);
            }
            float *dst = decoded_data.data();
            for(int tidx = 0; tidx < output_num; ++tidx) 
            {
                auto output_tensor = outputTensors[tidx];
                int feat_c = output_tensor->get_shape()->dims[1];
                int feat_h = output_tensor->get_shape()->dims[2];
                int feat_w = output_tensor->get_shape()->dims[3];
                int area = feat_h * feat_w;
                assert(feat_c == anchor_num);
                int feature_size = feat_h*feat_w*nout;
                // Skip to this batch element within the tensor's CPU copy.
                float *tensor_data = (float*)output_tensor->get_cpu_data() + batch_idx*feat_c*area*nout;
                for (int anchor_idx = 0; anchor_idx < anchor_num; anchor_idx++)
                {
                    float *ptr = tensor_data + anchor_idx*feature_size;
                    for (int i = 0; i < area; i++) 
                    {
                        // YOLOv5 decode: center offset in cells -> net pixels,
                        // size as (2*sigmoid)^2 times the anchor dimensions.
                        dst[0] = (sigmoid(ptr[0]) * 2 - 0.5 + i % feat_w) / feat_w * m_net_w;
                        dst[1] = (sigmoid(ptr[1]) * 2 - 0.5 + i / feat_w) / feat_h * m_net_h;
                        dst[2] = pow((sigmoid(ptr[2]) * 2), 2) * anchors[tidx][anchor_idx][0];
                        dst[3] = pow((sigmoid(ptr[3]) * 2), 2) * anchors[tidx][anchor_idx][1];
                        dst[4] = sigmoid(ptr[4]);
                        float score = dst[4];
                        // Class scores are only decoded for candidates that
                        // already pass the objectness threshold (cheap skip);
                        // the rest keep raw logits but are filtered out below.
                        if (score > m_thresh) 
                        {
                            for(int d=5; d<nout; d++)
                            {
                                dst[d] = sigmoid(ptr[d]);
                            }
                        }
                        dst += nout;
                        ptr += nout;
                    }
                }
            }
            output_data = decoded_data.data();
        } 
        else 
        {
            // Already-decoded single output: rows are (cx, cy, w, h, obj, cls...).
            SDKLOG(INFO)<<"post 1: get output";
            assert(box_num == 0 || box_num == out_tensor->get_shape()->dims[1]);
            box_num = out_tensor->get_shape()->dims[1];
            output_data = (float*)out_tensor->get_cpu_data() + batch_idx*box_num*nout;
        }
        SDKLOG(INFO)<<"post 2: filter boxes";
        for (int i = 0; i < box_num; i++) 
        {
            float* ptr = output_data+i*nout;
            float score = ptr[4];
            int class_id = argmax(&ptr[5], m_class_num);
            float confidence = ptr[class_id + 5];
            // Final score = objectness * best class probability.
            if (confidence * score > m_thresh)
            {
                // Undo the letterbox: subtract the pad offset, divide by the
                // scale.  The +1/-1 and +0.5 nudges match the original
                // implementation's rounding behavior.
                float centerX = (ptr[0]+1 - tx1)/ratio - 1;
                float centerY = (ptr[1]+1 - ty1)/ratio - 1;
                float width = (ptr[2]+0.5) / ratio;
                float height = (ptr[3]+0.5) / ratio;

                BoxInfo box;
                box.x1 = int(centerX - width / 2);
                if (box.x1 < 0) box.x1 = 0;
                box.y1 = int(centerY - height / 2);
                if (box.y1 < 0) box.y1 = 0;
                box.x2 = box.x1 + width;
                box.y2 = box.y1 + height;
                box.label = class_id;
                // NOTE(review): signed/unsigned comparison — class_id is int,
                // size() is size_t; safe here since argmax never returns < 0.
                if(class_id < m_class_names.size())
                {
                    box.name = m_class_names[class_id];
                }
                else 
                {
                    box.name = "";
                    SDKLOG(INFO)<<"class_id wrong, greater than class_names";
                }
                box.score = confidence * score;
                yolobox_vec.push_back(box);
            }
        }

        SDKLOG(INFO)<<"post 3: nms";
#if USE_MULTICLASS_NMS
    // Per-class NMS: boxes of different classes never suppress each other.
    std::vector<std::vector<BoxInfo>> class_vec(m_class_num);
    for (auto& box : yolobox_vec)
    {
        class_vec[box.label].push_back(box);
    }
    for (auto& cls_box : class_vec)
    {
        NMS(cls_box, m_nms_thresh);
    }
    yolobox_vec.clear();
    for (auto& cls_box : class_vec)
    {
        yolobox_vec.insert(yolobox_vec.end(), cls_box.begin(), cls_box.end());
    }
#else
    NMS(yolobox_vec, m_nms_thresh);
#endif

    boxes.push_back(yolobox_vec);
  }

  return 0;

}



// Teardown hook: intentionally a no-op — the device-side resources are
// released in the destructor instead.  Always reports success.
bool SampleDetector::UnInit()
{
    return true;
}

// Releases the bm_image buffers created in Init(), in reverse order of
// allocation.  The emptiness guard fixes undefined behavior in the original:
// when Init() failed early (e.g. missing name file) or was never called,
// the vectors are empty yet contiguous memory was still "freed" and
// m_max_batch images "destroyed" through invalid data.
SampleDetector::~SampleDetector()
{
    UnInit();
    if (m_resized_imgs.empty() || m_converto_imgs.empty())
    {
        return;  // nothing was allocated; nothing to release
    }
    bm_image_free_contiguous_mem(m_max_batch, m_resized_imgs.data());
    bm_image_free_contiguous_mem(m_max_batch, m_converto_imgs.data());
    for(int i=0; i<m_max_batch; i++)
    {
        bm_image_destroy(m_converto_imgs[i]);
        bm_image_destroy(m_resized_imgs[i]);
        SDKLOG(INFO)<<"release..";
    }
}

bool SampleDetector::ProcessImage(const std::vector<bm_image>& input_images, std::vector<std::vector<BoxInfo>> &detected_objs)
{
    int ret = 0;
    //preprocess
    ret = Preprocess(input_images);
    //forward
    ret = m_bmNetwork->forward();
    //postprocesss
    ret = Postprocess(input_images, detected_objs);

    return true;
}


