//
// Created by lidongwei on 2023/6/25.
//

#include "post_processing.h"

//////////////////////////// Semantic segmentation result parsing ///////////////////////////////////////

// Per-pixel argmax over class score planes for semantic segmentation output.
//
// Expected launch: 2D blocks tiling (feature_map_width, feature_map_height),
// with grid.z = batch (one grid slice per image). Input layout per image is
// planar CHW: segment_data[class][y][x]; output layout per image is [y][x].
//
// NOTE(review): the name keeps the original spelling ("kenel") so any
// external declaration of this kernel stays valid.
__global__ void
segment_processing_kenel(float *segment_data, uint8_t *output_data, int feature_map_width, int feature_map_height,
                         int batch, int classes, uint8_t *labels_color, float threshold) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int b = blockIdx.z;

    // Guard the grid tail: the ceil-div grid may over-cover the feature map.
    if (x >= feature_map_width || y >= feature_map_height || b >= batch)
        return;

    const size_t plane = (size_t) feature_map_width * feature_map_height;
    const float *image = segment_data + (size_t) b * classes * plane;
    const int pixel = x + y * feature_map_width;

    // Argmax across the class planes for this pixel.
    float best_score = image[pixel];
    int best_class = 0;
    for (int classes_i = 1; classes_i < classes; classes_i++) {
        float score = image[classes_i * plane + pixel];
        if (score > best_score) {
            best_class = classes_i;
            best_score = score;
        }
    }

    // Map the winning class to its palette color, or to a fallback color
    // when the best score is below the confidence threshold.
    uint8_t *pst = output_data + (size_t) b * plane + pixel;
    if (best_score >= threshold) {
        *pst = labels_color[best_class];
    } else {
        // BUG FIX: the original read labels_color[-1], which is out of
        // bounds in C (presumably Python-style "last element" intent).
        // Use the last valid palette entry as the below-threshold color.
        // TODO(review): confirm the intended background color index.
        *pst = labels_color[classes - 1];
    }
}

// Run the segmentation argmax on the GPU and return the per-pixel label
// colors for every image in the batch.
//
// segment_data:     host-side scores, layout [batch][classes][height][width]
// classes:          number of class planes per image
// feature_map_size: width/height of each score plane
// object_threshold: minimum winning score for a pixel to receive its class
//                   color (below it the fallback color is used)
// labels_color:     host palette; the kernel indexes entries 0..classes-1,
//                   so it must hold at least `classes` bytes
//
// Returns a host vector of batch * width * height color bytes.
std::vector<uint8_t>
handle::post_processing::segment_processing(std::vector<float> segment_data, int classes, cv::Size feature_map_size,
                                            float object_threshold, uint8_t *labels_color) {
    int batch = segment_data.size() / classes / feature_map_size.width / feature_map_size.height;
    int feature_map_width = feature_map_size.width;
    int feature_map_height = feature_map_size.height;

    std::vector<uint8_t> output_data_host(batch * feature_map_width * feature_map_height);  // parsed result buffer

    uint8_t *output_data_device = nullptr;
    float *segment_data_device = nullptr;
    uint8_t *labels_color_device = nullptr;

    cudaStream_t stream = nullptr;
    checkRuntime(cudaStreamCreate(&stream));

    checkRuntime(cudaMalloc(&output_data_device, output_data_host.size() * sizeof(uint8_t)));
    checkRuntime(cudaMalloc(&segment_data_device, segment_data.size() * sizeof(float)));
    // BUG FIX: the original allocated/copied sizeof(labels_color), i.e. the
    // size of a pointer (8 bytes), not the palette itself. The kernel reads
    // entries 0..classes-1, so transfer `classes` bytes.
    // TODO(review): confirm the palette really holds exactly `classes` entries.
    checkRuntime(cudaMalloc(&labels_color_device, classes * sizeof(uint8_t)));

    checkRuntime(cudaMemcpy(segment_data_device, segment_data.data(), segment_data.size() * sizeof(float),
                            cudaMemcpyHostToDevice));
    checkRuntime(cudaMemcpy(labels_color_device, labels_color, classes * sizeof(uint8_t),
                            cudaMemcpyHostToDevice));

    // BUG FIX: the original used truncating division (width / 32), silently
    // skipping the right/bottom tail when the feature map is not a multiple
    // of 32, and never used `batch` (only image 0 was processed). Use
    // ceil-div plus grid.z = batch; the kernel bounds-guards the overhang.
    dim3 block_size(32, 32);
    dim3 grid_size((feature_map_width + 31) / 32, (feature_map_height + 31) / 32, batch);

    segment_processing_kenel<<<grid_size, block_size, 0, stream>>>(segment_data_device, output_data_device,
                                                                    feature_map_width, feature_map_height, batch,
                                                                    classes, labels_color_device, object_threshold);
    checkRuntime(cudaGetLastError());  // catch launch-configuration errors

    checkRuntime(cudaStreamSynchronize(stream));
    checkRuntime(cudaMemcpy(output_data_host.data(), output_data_device,
                            output_data_host.size() * sizeof(uint8_t), cudaMemcpyDeviceToHost));

    checkRuntime(cudaFree(output_data_device));
    checkRuntime(cudaFree(segment_data_device));
    checkRuntime(cudaFree(labels_color_device));
    // BUG FIX: the original leaked the stream on every call.
    checkRuntime(cudaStreamDestroy(stream));

    return output_data_host;
}

// Overlay the segmentation label map onto the source image.
//
// `result` holds one byte per pixel of `result_size`. It is replicated into
// three planar channels (CHW), converted to interleaved HWC via
// handle::pretreatment::CHW2HWC, resized to match `img`, and alpha-blended
// 50/50 with `img`. Returns the blended BGR image.
cv::Mat handle::post_processing::drow(std::vector<uint8_t> result, cv::Mat img, cv::Size result_size) {
    const int plane = result_size.height * result_size.width;

    // Duplicate the single-plane result into all three planar channels.
    std::vector<uint8_t> planar(3 * plane);
    for (int channel = 0; channel < 3; ++channel) {
        std::copy(result.begin(), result.end(), planar.begin() + channel * plane);
    }

    // Move the planar bytes into a Mat, then reorder to interleaved layout.
    cv::Mat overlay(result_size.height, result_size.width, CV_8UC3);
    std::copy(planar.begin(), planar.end(), overlay.data);
    overlay = handle::pretreatment::CHW2HWC(overlay);

    // Match the overlay to the source image dimensions before blending.
    cv::resize(overlay, overlay, img.size());

    // Equal-weight blend with no brightness offset.
    const double alpha = 0.5;  // weight of the source image
    const double beta = 0.5;   // weight of the overlay
    const double gamma = 0.0;  // scalar added to each sum

    cv::Mat blended(img.rows, img.cols, CV_8UC3);
    cv::addWeighted(img, alpha, overlay, beta, gamma, blended);

    return blended;
}

