#include "Detector.h"

#include <chrono>
#include <cmath>

using namespace detect;

// Euclidean (straight-line) distance between the points (x1, y1) and (x2, y2).
double calc_distance(double x1, double y1, double x2, double y2) {
    // std::hypot is both faster and numerically safer than
    // sqrt(pow(dx, 2) + pow(dy, 2)) — it avoids overflow/underflow of the squares.
    return std::hypot(x2 - x1, y2 - y1);
}

/**
 * Map both the image center and the first detected face's landmark[2]
 * (presumably the nose tip — TODO confirm against the retina model) into a
 * normalized 224x224 coordinate space and compare their distance.
 *
 * @param image           source image (only its dimensions are used)
 * @param face_objects    detected faces; only the first entry is inspected
 * @param distanceThresh  max allowed center-to-face distance (224-space units)
 * @return 1 when the face is within the threshold, 0 otherwise
 *         (also 0 when face_objects is empty).
 */
int equal_point(cv::Mat &image, vector<retina::FaceObject> &face_objects, float distanceThresh) {
    // Guard: the original indexed face_objects[0] unconditionally, which is
    // undefined behavior on an empty vector.
    if (face_objects.empty()) {
        return 0;
    }
    int width = image.cols;
    int height = image.rows;
    float w_ratio = 224 / (float) width;
    float h_ratio = 224 / (float) height;
    const FaceObject &face_object = face_objects[0]; // no copy needed
    cv::Point2f image_center = {(width * w_ratio) / 2, (height * h_ratio) / 2};
    cv::Point2f face_center = face_object.landmark[2];
    face_center.x = face_center.x * w_ratio;
    face_center.y = face_center.y * h_ratio;
    // Straight-line distance between the two normalized points.
    double distance = calc_distance(image_center.x, image_center.y, face_center.x, face_center.y);
    return distance > distanceThresh ? 0 : 1;
}


// Standard IoU (intersection over union) of two axis-aligned rectangles.
// Returns 0 when the union is empty.
double calculateNormalIoU(const cv::Rect& rect1, const cv::Rect& rect2) {
    const cv::Rect overlap = rect1 & rect2; // cv::Rect operator& is the intersection
    float interArea = overlap.area();
    float unionArea = rect1.area() + rect2.area() - interArea;
    if (unionArea <= 0) {
        return 0.0;
    }
    return static_cast<double>(interArea) / unionArea;
}

// Containment ratio: when rect2 lies fully inside rect1, return
// area(intersection) / area(rect1); otherwise 0.
// Fix: the original divided by rect2.area() without a zero guard, producing
// a NaN comparison when rect2 was empty (it happened to evaluate to 0, but
// only by accident of NaN >= 1 being false).
double calculateInterIoU(const cv::Rect& rect1, const cv::Rect& rect2) {
    const double area1 = rect1.area();
    const double area2 = rect2.area();
    if (area1 <= 0 || area2 <= 0) {
        return 0.0; // degenerate rectangle: nothing can be contained
    }
    const double interArea = (rect1 & rect2).area();
    // rect2 fully inside rect1 <=> the intersection covers all of rect2.
    if (interArea / area2 >= 1.0) {
        return interArea / area1;
    }
    return 0.0;
}

/**
 * Build the expected face region from the given ratios of the image size,
 * compute both IoU metrics against the first detected face's rectangle, and
 * pack the rectangles, ratios and a human-readable debug log into a
 * FaceResult. Thresholds are only echoed into the log here; the accept/reject
 * decision is made by the caller (model_predict).
 *
 * Precondition: face_objects is non-empty (model_predict guarantees this).
 */
FaceResult equal_iou(cv::Mat &image, vector<retina::FaceObject> &face_objects, float iouNormal, float iouInter, float TLXRatio,float TLYRatio,float RBXRatio,float RBYRatio) {
    FaceResult result;
    string logStr = "enter equal_iou function---";
    int width = image.cols;
    int height = image.rows;

    // Expected on-screen face region, converted from ratios to pixels.
    cv::Rect faceShowRec;
    faceShowRec = cv::Rect(width * TLXRatio, height * TLYRatio, width * (RBXRatio - TLXRatio), height * (RBYRatio - TLYRatio));

    const FaceObject &face_object = face_objects[0]; // no copy needed
    cv::Rect faceDetectRect = face_object.rect;

    double iouInterRatio = calculateInterIoU(faceShowRec, face_object.rect);
    double iouNormalRatio = calculateNormalIoU(faceShowRec, face_object.rect);
    logStr += "w:" + to_string(width) + ", h:" + to_string(height) + "\nx1:" + to_string(faceShowRec.x) + ",y1:" + to_string(faceShowRec.y)
    + ",w1:" + to_string(faceShowRec.width) + ",h1:" + to_string(faceShowRec.height) + "\nx2:" + to_string(faceDetectRect.x) + ",y2:" +
    to_string(faceDetectRect.y) + ", w2:" + to_string(faceDetectRect.width) + ",h2:" + to_string(faceDetectRect.height);
    // BUG FIX: the original used "\ iouInterThresh" — "\ " is an invalid
    // escape sequence. "\n" matches the "\niouNormalThresh" entry above.
    logStr += " iouNormalResult:" + to_string(iouNormalRatio) + " iouInterResult:" + to_string(iouInterRatio) + "\niouNormalThresh:" +
        to_string(iouNormal) + "\niouInterThresh:" + to_string(iouInter);
    result.logStr = logStr;
    result.faceRect = faceShowRec;
    result.detRect = faceDetectRect;
    result.iouInterRatio = iouInterRatio;
    result.iouNormalRatio = iouNormalRatio;
    return result;
}

/**
 * Model layer: run retinaface detection and decide whether exactly one,
 * acceptably positioned face is present.
 *
 * result.flag is 1 when a single face passes either IoU threshold, 0
 * otherwise; result.logStr carries the diagnostic trail for every exit path.
 */
detect::FaceResult Detector::model_predict(Mat &image, float iouNormal, float iouInter, float TLXRatio, float TLYRatio,float RBXRatio,float RBYRatio) {
    FaceResult result;
    if (image.empty()) {
        result.logStr = "the image is empty>>>";
        return result;
    }
    vector<retina::FaceObject> face_object;

    retina_face_predict(image, face_object); // inference layer
    // Exactly one face is required: zero or multiple faces are rejected.
    if (face_object.empty()) {
        result.logStr = "have no face>>>";
        return result;
    }
    if (face_object.size() > 1) {
        result.logStr = "have many face>>>";
        return result;
    }
    // Single face: compare its rect against the expected region. Accept when
    // either the normal IoU or the containment IoU reaches its threshold.
    FaceResult result1 = equal_iou(image, face_object, iouNormal, iouInter,
        TLXRatio, TLYRatio, RBXRatio, RBYRatio);
    if (result1.iouNormalRatio >= iouNormal || result1.iouInterRatio >= iouInter) {
        result.flag = 1;
    } else {
        result.flag = 0;
    }
    result.logStr = result1.logStr;
    result.faceRect = result1.faceRect;
    result.detRect = result1.detRect;
    result.object = face_object[0];
    return result;
}

Detector *Detector::g_pDetector = new Detector();

// Brightness check. Returns 0 (image unusable) when the image is empty or its
// mean brightness falls outside [luminanceMiniThresh, luminanceMaxThresh];
// returns 1 (usable) otherwise.
int Detector::ComputerPixelMean(Mat detectImage, float luminanceMiniThresh, float luminanceMaxThresh) const {
    if (detectImage.empty()) {
        return 0;
    }
    // Per-channel means, collapsed into a single average brightness value.
    const Scalar channelMeans = mean(detectImage);
    const double brightness = (channelMeans.val[0] + channelMeans.val[1] + channelMeans.val[2]) / 3;
    return (brightness < luminanceMiniThresh || brightness > luminanceMaxThresh) ? 0 : 1;
}

inline double calculateVariance(cv::Mat &src) {
    cv::Scalar mean, stddev;
    cv::meanStdDev(src, mean, stddev);
    double variance = stddev.val[0] * stddev.val[0];
    return variance;
}

// Sharpness check via Laplacian variance: crop the face, normalize to
// 224x224 grayscale, and compare the Laplacian variance against blueThresh
// (larger variance => sharper image). Returns 1 when sharp enough, else 0.
int Detector::ComputerBlurry(Mat &image, FaceObject face_obj, float blueThresh) const {
    if (image.empty()) {
        return 0;
    }
    // Clamp the detected rect to the image bounds: the original passed
    // face_obj.rect straight to image(rect), which throws cv::Exception when
    // the detector returns a rect partially outside the frame.
    cv::Rect roi = face_obj.rect & cv::Rect(0, 0, image.cols, image.rows);
    if (roi.area() <= 0) {
        return 0; // face rect entirely outside the image — nothing to measure
    }
    Mat standard_img;
    cv::resize(image(roi), standard_img, cv::Size(224, 224)); // normalize size

    Mat gray_img;
    cv::cvtColor(standard_img, gray_img, cv::COLOR_BGR2GRAY); // to grayscale

    Mat dst;
    Laplacian(gray_img, dst, CV_64F);
    double variance = calculateVariance(dst);
    return variance > blueThresh ? 1 : 0;
}

// Full detection pipeline. Stages run in order — face detection/position
// (IoU), then brightness, then sharpness — and each stage writes its flag,
// cumulative elapsed time and log text into `image_status`. The pipeline
// short-circuits: once a stage fails, later stages are skipped and their
// flags keep their default values.
detect::Status detect::main_func(cv::Mat &image, Status &image_status, float luminanceMiniThresh, float luminanceMaxThresh,
float blueThresh, float iouNormal, float iouInter, float TLXRatio,float TLYRatio,float RBXRatio,float RBYRatio) {
    auto start = std::chrono::high_resolution_clock::now();

    Detector *detector = Detector::GetInstance();

    FaceResult face_result;
    face_result = detector->model_predict(image, iouNormal, iouInter,
        TLXRatio, TLYRatio, RBXRatio, RBYRatio); // model layer: detection + IoU position check
    image_status.FaceStatus = face_result.flag;
    image_status.object = face_result.object;
    image_status.logStr = face_result.logStr;
    image_status.faceRect = face_result.faceRect;
    image_status.detRect = face_result.detRect;
    // NOTE(review): these presumably record the size fed to the model —
    // confirm FaceObject.width/height are set by retina_face_predict.
    image_status.inputModelHeight = face_result.object.height;
    image_status.inputModelWidth = face_result.object.width;
    auto end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
    image_status.costTime = duration.count();
    // object.costTime is concatenated directly, so it is presumably already a
    // string — TODO confirm its type in the retina headers.
    image_status.logStr = image_status.logStr + "\n" + face_result.object.costTime + "\n model cost time:" + std::to_string(duration.count()) + " ms";

    // Stage 1 gate: the model checked face count and the distance/IoU between
    // the detected face and the expected region.
    if (face_result.flag == 0) {
        return image_status; // face non-compliant: all remaining flags stay 0
    }

    int flag;
    // Crop to the detected face for the brightness check. NOTE(review): the
    // rect is used unclamped here — an out-of-bounds rect would make
    // image(rect) throw; confirm the detector clamps its output.
    cv::Mat detectImage = image(face_result.object.rect);
    flag = detector->ComputerPixelMean(detectImage, luminanceMiniThresh, luminanceMaxThresh); // overall brightness check
    // Stage 2: brightness must lie within [luminanceMiniThresh, luminanceMaxThresh].
    image_status.Luminance = flag;
    end = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
    image_status.costTime = duration.count();
    image_status.logStr = image_status.logStr + "\n cost time1:" + std::to_string(duration.count()) + " ms";
    if (flag == 0) {
        return image_status;
    }

    // Stage 3: sharpness via the Laplacian variance of the face crop — the
    // larger the variance, the sharper the image; it must exceed blueThresh.
    flag = detector->ComputerBlurry(image, face_result.object, blueThresh);
    image_status.Blurry = flag;
    end = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
    image_status.costTime = duration.count();
    image_status.logStr = image_status.logStr + "\n cost time2:" + std::to_string(duration.count()) + " ms";

    return image_status;
}

// Entry point for an already-decoded image: validates the input, then runs
// the full detection pipeline and returns the aggregated status.
detect::Status detect::predict(cv::Mat &image, float luminanceMiniThresh, float luminanceMaxThresh,
float blueThresh, float iouNormal, float iouInter, float TLXRatio,float TLYRatio,float RBXRatio,float RBYRatio) {
    Status image_status{};
    if (!image.empty()) {
        detect::main_func(image, image_status, luminanceMiniThresh, luminanceMaxThresh, blueThresh,
            iouNormal, iouInter, TLXRatio, TLYRatio, RBXRatio, RBYRatio);
    } else {
        image_status.logStr = "the image Mat is empty>>>";
    }
    return image_status;
}

// Entry point for raw encoded bytes (jpg/png/...): decode, run the pipeline,
// and flatten the face landmarks into plain float pairs for the caller.
detect::Status detect::predict(char *buffer, long file_size, float luminanceMiniThresh, float luminanceMaxThresh,
float blueThresh, float iouNormal, float iouInter, float TLXRatio,float TLYRatio,float RBXRatio,float RBYRatio) {
    Status image_status{};
    cv::Mat image = cv::imdecode(cv::Mat(1, file_size, CV_8UC1, buffer), cv::IMREAD_COLOR);
    // Consistency fix: the cv::Mat overload rejects an empty image up front,
    // but this overload ran the pipeline on a failed decode. Bail out early
    // with the same message instead.
    if (image.empty()) {
        image_status.logStr = "the image Mat is empty>>>";
        return image_status;
    }
    detect::main_func(image, image_status, luminanceMiniThresh, luminanceMaxThresh, blueThresh, iouNormal, iouInter,
        TLXRatio, TLYRatio, RBXRatio, RBYRatio);
    // Expose landmarks as vector<vector<float>> (x, y pairs).
    for (const auto &obj: image_status.object.landmark) {
        vector<float> point = {obj.x, obj.y};
        image_status.landmark.push_back(point);
    }
    return image_status;
}