#include "benchmark.h"
#include "cpu.h"
#include "datareader.h"
#include "net.h"
#include "gpu.h"

#include <iostream>
#include <cmath>
#include <stdio.h>
#include <vector>
#include <algorithm>
#include <thread>  // for std::this_thread
#include <chrono>  // for std::chrono


#define USE_NCNN_SIMPLEOCV
#define CV_PI 3.14159265358979323846


#if defined(USE_NCNN_SIMPLEOCV)
#include "simpleocv.h"
#else
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif

// A single detected body joint: its position in the original image plus the
// network's confidence (heatmap peak value) for that joint.
struct KeyPoint
{
    cv::Point2f p;  // pixel coordinates in the full (un-cropped) image
    float prob;     // confidence score; callers treat < 0.2f as "not detected"
};

// Returns the displacement vector pointing from p1 to p2.
cv::Point2f calculateVector(const cv::Point2f& p1, const cv::Point2f& p2) {
    const float dx = p2.x - p1.x;
    const float dy = p2.y - p1.y;
    return cv::Point2f(dx, dy);
}

// Returns the angle between vec1 and vec2 in degrees ([0, 180]).
// Returns 0 when either vector has (near-)zero length, since the angle is
// undefined there and the division would otherwise produce NaN/inf.
float calculateAngle(const cv::Point2f& vec1, const cv::Point2f& vec2) {
    const float dot = vec1.x * vec2.x + vec1.y * vec2.y;
    const float magnitudeVec1 = std::sqrt(vec1.x * vec1.x + vec1.y * vec1.y);
    const float magnitudeVec2 = std::sqrt(vec2.x * vec2.x + vec2.y * vec2.y);
    const float denom = magnitudeVec1 * magnitudeVec2;
    if (denom <= 1e-12f)
        return 0.0f; // degenerate (zero-length) vector: angle undefined
    // Clamp into acos's domain: floating-point rounding can push the cosine
    // slightly outside [-1, 1], which would make acos return NaN.
    float cosAngle = dot / denom;
    cosAngle = std::max(-1.0f, std::min(1.0f, cosAngle));
    return std::acos(cosAngle) * (180.0 / CV_PI); // radians -> degrees
}

// 判断是否低头
bool isLookingDown(const std::vector<KeyPoint>& keypoints) {
    // 检查关键点是否有效
    if (keypoints[0].prob > 0.2f && keypoints[5].prob > 0.2f && keypoints[6].prob > 0.2f) {
        // 计算肩膀中点
        cv::Point2f shoulderMidpoint = cv::Point2f((keypoints[5].p.x + keypoints[6].p.x) / 2, 
                                                   (keypoints[5].p.y + keypoints[6].p.y) / 2);
        // 计算鼻子到肩膀中点的向量
        cv::Point2f noseToShoulderVector = calculateVector(keypoints[0].p, shoulderMidpoint);
        // 水平线向量
        cv::Point2f horizontalVector(1, 0); // 水平向右的单位向量

        // 计算夹角
        float angle = calculateAngle(noseToShoulderVector, horizontalVector);

        // 判断是否低于30度
        return angle < 30.0;
    }
    return false;
}


std::string checkBodyTilt(const std::vector<KeyPoint>& keypoints) {
    // 确保关键点 6 (右肩) 和 12 (右臀) 是可靠的
    // if (keypoints[6].prob > 0.2f && keypoints[12].prob > 0.2f) {
        // 计算右肩到右臀的向量
        cv::Point2f shoulderToHipVector = cv::Point2f(
            keypoints[12].p.x - keypoints[6].p.x, 
            keypoints[12].p.y - keypoints[6].p.y
        );

        // 计算向量与垂直方向的夹角
        float angle = atan2(shoulderToHipVector.y, shoulderToHipVector.x) * (180.0 / CV_PI);
        // 调整角度范围为 -90 到 90
        angle = angle > 90 ? angle - 180 : angle;
        
        std::cout << "angle = " << angle << std::endl; // 输出倾斜的角度

        // 根据角度判断身体状态
        if (angle < 84 && angle > 0) {
            return "Forward"; // 前倾
        } else if (angle < 0 && angle > -80) {
            return "Backward"; // 后仰
        } else {
            return "Normal"; // 正常
        }
    // }
    return "Normal";
}

// Draws the detected skeleton onto `image`: bones between selected joint
// pairs, a synthetic "neck" point (midpoint of the shoulders) with a line
// from the nose, and circles on the head/shoulder/elbow/hip joints.
// Joints with prob < 0.2 are treated as undetected and skipped.
static void draw_pose(cv::Mat& image, const std::vector<KeyPoint>& keypoints)
{
    // The bone table below indexes up to keypoint 12 (right hip).
    if (keypoints.size() < 13)
        return;

    // Midpoint of keypoints 5 and 6 (the shoulders), used as a neck joint.
    // BUGFIX: previously the midpoint was drawn whenever the nose was
    // detected, even if the shoulders were not — using an uninitialized
    // point. Track validity explicitly.
    cv::Point2f midpoint;
    bool haveMidpoint = false;
    if (keypoints[5].prob > 0.2f && keypoints[6].prob > 0.2f) {
        midpoint = cv::Point2f((keypoints[5].p.x + keypoints[6].p.x) / 2,
                               (keypoints[5].p.y + keypoints[6].p.y) / 2);
        haveMidpoint = true;
    }

    // Draw bones. ({0, 0} is a degenerate zero-length pair kept for parity
    // with the original table.)
    static const int joint_pairs[][2] = {
        {0, 0}, {5, 6}, {5, 7}, {5, 11}, {6, 8}, {6, 12}
    };
    for (size_t i = 0; i < sizeof(joint_pairs) / sizeof(joint_pairs[0]); i++)
    {
        const KeyPoint& p1 = keypoints[joint_pairs[i][0]];
        const KeyPoint& p2 = keypoints[joint_pairs[i][1]];
        if (p1.prob < 0.2f || p2.prob < 0.2f)
            continue;
        cv::line(image, p1.p, p2.p, cv::Scalar(255, 0, 0), 2);
    }

    // Draw the neck point and the nose-to-neck line (only when both ends
    // are valid).
    if (haveMidpoint && keypoints[0].prob > 0.2f) {
        cv::line(image, keypoints[0].p, midpoint, cv::Scalar(255, 0, 0), 2);
        cv::circle(image, midpoint, 3, cv::Scalar(0, 0, 255), -1);
    }

    // Draw joints of interest: nose (0), shoulders (5, 6), elbows (7, 8)
    // and hips (11, 12).
    for (size_t i = 0; i < keypoints.size(); i++)
    {
        const KeyPoint& keypoint = keypoints[i];
        if (keypoint.prob < 0.2f)
            continue;
        if (i == 0 || i == 5 || i == 6 || i == 7 || i == 8 || i == 11 || i == 12) {
            cv::circle(image, keypoint.p, 3, cv::Scalar(0, 255, 0), -1);
        }
    }
}

// Runs the pose network on a cropped person ROI and appends one KeyPoint per
// output heatmap channel: the per-channel argmax, mapped from heatmap
// coordinates back to ROI scale and offset by the ROI origin (x1, y1) so the
// result is in full-image coordinates. Always returns 0.
int runpose(cv::Mat& roi, ncnn::Net &posenet, int pose_size_width, int pose_size_height, std::vector<KeyPoint>& keypoints,float x1, float y1)
{
    const int roi_w = roi.cols;
    const int roi_h = roi.rows;
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(roi.data, ncnn::Mat::PIXEL_BGR2RGB,
                                                 roi_w, roi_h, pose_size_width, pose_size_height);

    // ImageNet mean/std normalization, scaled to the 0-255 pixel range.
    const float mean_vals[3] = {0.485f * 255.f, 0.456f * 255.f, 0.406f * 255.f};
    const float norm_vals[3] = {1 / 0.229f / 255.f, 1 / 0.224f / 255.f, 1 / 0.225f / 255.f};
    in.substract_mean_normalize(mean_vals, norm_vals);

    ncnn::Extractor ex = posenet.create_extractor();
    ex.set_num_threads(4);
    ex.input("data", in);
    ncnn::Mat out;
    ex.extract("hybridsequential0_conv7_fwd", out);

    keypoints.clear();
    // One heatmap channel per joint: take the heatmap peak as the location.
    for (int c = 0; c < out.c; c++)
    {
        const ncnn::Mat heatmap = out.channel(c);

        float best_prob = 0.f;
        int best_x = 0;
        int best_y = 0;
        for (int y = 0; y < out.h; y++)
        {
            const float* row = heatmap.row(y);
            for (int x = 0; x < out.w; x++)
            {
                if (row[x] > best_prob)
                {
                    best_prob = row[x];
                    best_x = x;
                    best_y = y;
                }
            }
        }

        KeyPoint kp;
        // Heatmap coords -> ROI coords -> full-image coords.
        kp.p = cv::Point2f(best_x * roi_w / (float)out.w + x1,
                           best_y * roi_h / (float)out.h + y1);
        kp.prob = best_prob;
        keypoints.push_back(kp);
    }
    return 0;
}


// Runs the person detector on `image`, then for each detection: expands and
// clamps the box, crops the person ROI, runs pose estimation, draws the
// skeleton and the detection box onto `image`, and prints head (up/down) and
// torso (Forward/Backward/Normal) posture classifications to stdout.
// Always returns 0.
int demo(cv::Mat& image, ncnn::Net &detectornet, int detector_size_width, int detector_size_height,
         ncnn::Net &posenet, int pose_size_width, int pose_size_height)
{
    cv::Mat bgr = image.clone();
    int img_w = bgr.cols;
    int img_h = bgr.rows;

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB,
                                                 bgr.cols, bgr.rows, detector_size_width, detector_size_height);

    // Scale pixels to [0, 1]; the detector uses no mean subtraction.
    const float mean_vals[3] = {0.f, 0.f, 0.f};
    const float norm_vals[3] = {1/255.f, 1/255.f, 1/255.f};
    in.substract_mean_normalize(mean_vals, norm_vals);

    ncnn::Extractor ex = detectornet.create_extractor();
    ex.set_num_threads(4);
    ex.input("data", in);
    ncnn::Mat out;
    ex.extract("output", out);

    // One detection per output row: [label, score, x1, y1, x2, y2], with
    // coordinates normalized to [0, 1].
    for (int i = 0; i < out.h; i++)
    {
        printf("==================================\n");
        float x1, y1, x2, y2, score, label;
        float pw, ph, cx, cy;
        const float* values = out.row(i);

        x1 = values[2] * img_w;
        y1 = values[3] * img_h;
        x2 = values[4] * img_w;
        y2 = values[5] * img_h;

        // Expand the box around its center (70% wider, 60% taller) so the
        // pose crop keeps some context around the person.
        pw = x2 - x1;
        ph = y2 - y1;
        cx = x1 + 0.5 * pw;
        cy = y1 + 0.5 * ph;

        x1 = cx - 0.7 * pw;
        y1 = cy - 0.6 * ph;
        x2 = cx + 0.7 * pw;
        y2 = cy + 0.6 * ph;

        score = values[1]; // NOTE(review): detection score is never checked
        label = values[0]; // NOTE(review): class label is never checked
        (void)score;
        (void)label;

        // Clamp the expanded box to the image bounds.
        if (x1 < 0) x1 = 0;
        if (y1 < 0) y1 = 0;
        if (x2 < 0) x2 = 0;
        if (y2 < 0) y2 = 0;

        if (x1 > img_w) x1 = img_w;
        if (y1 > img_h) y1 = img_h;
        if (x2 > img_w) x2 = img_w;
        if (y2 > img_h) y2 = img_h;

        // BUGFIX: a fully-clamped box can collapse to zero width/height,
        // which would make the ROI crop and resize below fail.
        if (x2 - x1 < 1.f || y2 - y1 < 1.f)
            continue;

        // Crop the person ROI and estimate the pose inside it.
        cv::Mat roi = bgr(cv::Rect(x1, y1, x2 - x1, y2 - y1)).clone();
        std::vector<KeyPoint> keypoints;
        runpose(roi, posenet, pose_size_width, pose_size_height, keypoints, x1, y1);
        draw_pose(image, keypoints);

        if (isLookingDown(keypoints)) {
            std::cout << "head down." << std::endl;
        } else {
            std::cout << "head up." << std::endl;
        }

        // checkBodyTilt only ever returns "Forward", "Backward" or "Normal",
        // so the original redundant if/else chain collapses to one print.
        std::string tiltStatus = checkBodyTilt(keypoints);
        std::cout << tiltStatus << std::endl;

        cv::rectangle(image, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(255, 0, 255));
    }
    return 0;
}



// Loads the person-detector and pose networks, then loops forever over the
// test images 0.jpg .. 6.jpg, running the posture demo on each and showing
// the annotated result for ~3 seconds. Returns -1 if a model fails to load.
int test_img() {
    // Person detector network.
    // BUGFIX: load_param/load_model return codes were ignored; a missing
    // model file would silently produce garbage inference.
    ncnn::Net detectornet;
    if (detectornet.load_param("ncnnmodel/person_detector.param") != 0 ||
        detectornet.load_model("ncnnmodel/person_detector.bin") != 0) {
        fprintf(stderr, "Error: failed to load person detector model.\n");
        return -1;
    }
    int detector_size_width  = 320;
    int detector_size_height = 320;

    // Human pose keypoint network.
    ncnn::Net posenet;
    if (posenet.load_param("ncnnmodel/Ultralight-Nano-SimplePose.param") != 0 ||
        posenet.load_model("ncnnmodel/Ultralight-Nano-SimplePose.bin") != 0) {
        fprintf(stderr, "Error: failed to load pose model.\n");
        return -1;
    }
    int pose_size_width  = 192;
    int pose_size_height = 256;

    while (true) {  // cycle through the test images forever
        for (int i = 0; i <= 6; ++i) {
            std::string filename = std::to_string(i) + ".jpg";  // build file name
            cv::Mat img = cv::imread(filename);

            if (img.empty()) {
                printf("Error: Image not loaded.\n");
                continue;  // missing image: move on to the next one
            }

            // Run detection + pose estimation (annotates img in place).
            demo(img, detectornet, detector_size_width, detector_size_height, posenet, pose_size_width, pose_size_height);

            std::string saveFilename = std::to_string(i) + "_processed";
            // Show the processed image; waitKey(1) lets the window refresh.
            cv::imshow(saveFilename, img);
            cv::waitKey(1);
            // Hold each result on screen for 3 seconds.
            std::this_thread::sleep_for(std::chrono::seconds(3));
        }
    }

    return 0;
}

// Program entry point: runs the image-folder posture demo.
int main()
{
    test_img();
    return 0;
}
