/**
 * @file ArmorClassifierBase.cpp
 * @brief
 * @author LiuZhihao (2488288779@qq.com)
 * @date 2023-12-10
 *
 * @copyright Copyright (C) 2023, HITCRT_VISION, all rights reserved.
 *
 * @par 修改日志:
 * <table>ArmorClassifierBase
 * <tr><th>Date       <th>Author  <th>Description
 * <tr><td>2023-12-10 <td>LiuZhihao  <td>
 * </table>
 */
#include "ArmorClassifierBase.h"

namespace hitcrt {
float sigmoid(float x) { return (1 / (1 + exp(-x))); }
/**
 * 构造函数
 */
ArmorClassifierBase::ArmorClassifierBase(
    ClassifierChannelType classifierChannelType,
    ClassifierIfConf classifierIfConf)
    : m_classifierChannelType(classifierChannelType),
      m_classifierIfConf(classifierIfConf) {};
ArmorClassifierBase::~ArmorClassifierBase() {};
// 初始化
/**
 *
 * @param xml_path
 * @param max_conf
 * @param class_number
 * @param classifierDevice
 * @return
 */
bool ArmorClassifierBase::init(const std::string xml_path, const float max_conf,
                               const int class_number,
                               const ClassifierDevice classifierDevice) {
    std::cout << "load model:" << xml_path << std::endl;
    m_xmlPath = xml_path;
    m_maxConf = max_conf;
    m_classNumber = class_number;
    m_classifierDevice = classifierDevice;
    // 读取xml文件
    std::shared_ptr<ov::Model> model = m_core.read_model(m_xmlPath);
    // 编译模型
    m_compiledModel = m_core.compile_model(
        model, toString(m_classifierDevice),
        ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
    // 创建推理请求
    m_inferRequest = m_compiledModel.create_infer_request();
    std::cout << "数字识别分类器初始化成功" << std::endl;

    return true;
}

/**
 * @brief
 * @param[in] srcPoints     My Param doc
 * @param[in] srcImg        My Param doc
 * @return Pattern
 * @author LiuZhihao (2488288779@qq.com)
 */
Pattern ArmorClassifierBase::apply(const std::vector<cv::Point2f>& srcPoints,
                                   const cv::Mat& srcImg) {
    if (srcImg.empty()) {
        return Pattern::UNKNOWN;
    }
    setSrcImg(srcImg);  // 浅拷贝
    if (srcPoints.size() != 4) {
        std::cerr << "Wrong srcPoints Num!" << std::endl;
        return Pattern::UNKNOWN;
    }

    cv::Mat numPic;
    getNumPic(srcPoints, numPic);
    if (m_classifierChannelType == ClassifierChannelType::OneChannel) {
        cvtColorImpl(numPic);
    }
   // cv::imshow("numpic", numPic);
    //cv::waitKey(10);
    int classID = inference(numPic);
    //std::cout << "classID = " << classID << std::endl;
    return getPattern(classID);
}

/**
 * @brief
 * @param[in] srcPoints     My Param doc
 * @param[in] inputSample   My Param doc
 * @param[in] outputSample  My Param doc
 * @author LiuZhihao (2488288779@qq.com)
 */
void ArmorClassifierBase::perspectiveTransImpl(
    const std::vector<cv::Point2f>& srcPoints, const cv::Mat& inputSample,
    cv::Mat& outputSample) {
    std::vector<cv::Point2f> dstPoints;
    dstPoints.reserve(4);
    dstPoints.push_back(cv::Point2f(0, 35));
    dstPoints.push_back(cv::Point2f(135, 35));
    dstPoints.push_back(cv::Point2f(0, 90));
    dstPoints.push_back(cv::Point2f(135, 90));
    cv::Size dstSize = cv::Size(135, 125);

    cv::Mat transMtx;
    // std::vector<cv::Point2f> srcPoints;
    // srcPoints.reserve(4);
    // srcPoints.push_back(leftBar.m_top /*.m_upper_left*/);
    // srcPoints.push_back(rightBar.m_top /*.m_upper_right*/);
    // srcPoints.push_back(leftBar.m_bottom /*.m_bottom_left*/);
    // srcPoints.push_back(rightBar.m_bottom /*.m_bottom_right*/);
    transMtx = cv::getPerspectiveTransform(srcPoints, dstPoints);

    cv::warpPerspective(inputSample, outputSample, transMtx, dstSize);
}

/**
 * @brief
 * @param[in] srcPoints     My Param doc
 * @param[in] outputSample  My Param doc
 * @author LiuZhihao (2488288779@qq.com)
 */
void ArmorClassifierBase::getNumPic(const std::vector<cv::Point2f>& srcPoints,
                                    cv::Mat& outputSample) {
    // 仿射变换
    perspectiveTransImpl(srcPoints, m_srcImg, outputSample);
    cv::Rect imgRect = cv::Rect(25, 5, 85, 115);
    outputSample = outputSample(imgRect);
    cv::resize(outputSample, outputSample, cv::Size(52, 52));
}

/**
 * @brief
 * @param[in] sample        My Param doc
 * @author LiuZhihao (2488288779@qq.com)
 */
void ArmorClassifierBase::cvtColorImpl(cv::Mat& sample) {
    // TODO较耗时，可以尝试加速

    // std::vector<cv::Mat> channels;
    // cv::split(sample, channels);  // channel: b-g-r
    // channels.at(Param::ENEMY == Color::RED ? 2 : 0) =
    //     channels.at(Param::ENEMY == Color::RED ? 0 : 2);  // 减小灯条影响
    // cv::merge(channels, sample);
    cv::cvtColor(sample, sample, cv::COLOR_BGR2GRAY);

    // 为减小灯条影响取绿色通道
    // std::vector<cv::Mat> channels;
    // cv::split(sample, channels);
    // sample = channels.at(1);

    // 自适应阈值在以下参数下效果和OTSU差不多,但是在装甲板有遮挡的情况下会上下边缘出现一个白色条纹
    //  cv::adaptiveThreshold(tagnum,tagnum,255,cv::ADAPTIVE_THRESH_MEAN_C,cv::THRESH_BINARY,27,0);

    sample.convertTo(sample, -1, 3, 0);  // 增强对比度减小噪点和红点影响
    cv::Mat redPoint;
    int redPointThresh =
        200;  // 需要根据曝光调对比度和这个，小心正常数字也会被滤掉
    cv::threshold(sample, redPoint, redPointThresh, 255, cv::THRESH_BINARY);
    cv::threshold(sample, sample, redPointThresh, 255,
                  cv::THRESH_TOZERO_INV);  // 去除红点
    cv::GaussianBlur(sample, sample, cv::Size(5, 5), 0);
    // opencvtutorial里的操作，可以解决上一步去除红点后图像没法过OTSU，还可以细调调，思路是高斯滤波保证图像双峰，目前虽然没法保证100%过阈值但比之前0%好多了
    // 或者找到更好去除红点方法
    cv::threshold(sample, sample, 0, 255, cv::THRESH_OTSU);
    cv::bitwise_or(sample, redPoint, sample);
    cv::erode(sample, sample,
              cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)));
    cv::dilate(sample, sample,
               cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)));
}

Pattern ArmorClassifierBase::getPattern(const int classID) {
    switch (classID) {  // 这个奇怪的顺序是神经网络的锅！
        case 0:
            return Pattern::HERO;
        case 1:
            return Pattern::INFANTRY_4_BALANCE;
        case 2:
            return Pattern::INFANTRY_5_BALANCE;
        case 3:
            return Pattern::ENGINEER;
        case 4:
            return Pattern::INFANTRY_3;
        case 5:
            return Pattern::INFANTRY_4;
        case 6:
            return Pattern::INFANTRY_5;
        case 7:
            return Pattern::OUTPOST;
        case 8:
            return Pattern::SENTRY;
        case 9:
            return Pattern::BASE;
        case 10:
            return Pattern::INFANTRY_3_BALANCE;
        case 11:
            return Pattern::UNKNOWN;
            std::cout << "----------------------UNKNOWN!---------------------"
                      << std::endl;
        default:
            return Pattern::UNKNOWN;
    }
}

/**
 * @brief
 * @param[in] inframe       My Param doc
 * @return int
 * @author LiuZhihao (2488288779@qq.com)
 */
// 处理图像获取结果
int ArmorClassifierBase::inference(const cv::Mat& inframe) {
    if (inframe.empty()) {
        return -2;
    }
    // 获取输入节点及其形状
    m_inputNode = m_inferRequest.get_input_tensor();
    m_tensorShape = m_inputNode.get_shape();
    const size_t input_h = m_tensorShape[2];  //(NCHW)
    const size_t input_w = m_tensorShape[3];
    const size_t input_channels = m_tensorShape[1];
    const size_t img_size = input_h * input_w;
    // 把图片写入内存
    auto* input_tensor_data = m_inputNode.data<float>();
    // 写内存
    if (m_classifierChannelType == ClassifierChannelType::ThreeChannel) {
        for (size_t row = 0; row < input_h; row++) {
            for (size_t col = 0; col < input_w; col++) {
                for (size_t ch = 0; ch < 3; ch++) {
                    input_tensor_data[img_size * ch + input_w * row + col] =
                        float(inframe.at<cv::Vec3b>(row, col)[ch]) / 255.0f;
                }
            }
        }
    } else if (m_classifierChannelType == ClassifierChannelType::OneChannel) {
        for (size_t row = 0; row < 52; row++) {
            for (size_t col = 0; col < 52; col++) {
                input_tensor_data[52 * row + col] =
                    float(inframe.at<uint8_t>(row, col)) / 255.0f;
            }
        }
    } else {
        std::cerr << "写内存, 通道输入不是三通道或单通道二值化";
    }

    // 执行预测
    // int64 time_before_infer;
    // time_before_infer = cv::getTickCount();
    m_inferRequest.infer();
    // std::cout << "----------infer time:" << ": " << (cv::getTickCount() -
    // time_before_infer) / static_cast<float>(cv::getTickFrequency()) * 1000 <<
    // "ms"
    //           << std::endl;7
    // 后处理
    std::vector<float> softmax_values;
    const int num_classes = m_classNumber + 1;
    auto output = m_inferRequest.get_output_tensor();
    int outputSize = output.get_size();
    const auto* output_buffer = output.data<const float>();

    float softmax_sum = 0;
    float softmax_single;
    // std::cout<<"output_buff = ";
    for (size_t i = 0; i < num_classes; i++) {
        softmax_single = std::exp(output_buffer[i]);
        softmax_values.emplace_back(softmax_single);  // softmax
        softmax_sum += softmax_single;
        // std::cout << output_buffer[i] << " ";
    }
    // std::cout<<std::endl;
    // std::cout << "Softmax values:";
    // for (size_t i = 0; i < softmax_values.size(); i++) {
    //     std::cout << " " << softmax_values[i] / softmax_sum;
    // }
    // std::cout << std::endl;
    auto max_iter =
        std::max_element(softmax_values.begin(), softmax_values.end());
    if (*max_iter / softmax_sum > 0.5) {  // 取迭代器max中的内容
        int class_id = std::distance(std::begin(softmax_values), max_iter);
        return class_id;
    } else {
        return 11;  // false output
    }
}
}  // namespace hitcrt