#include "tools.h"

bool P3P_HERO(const std::vector<cv::Point3f> &objectPoints, const std::vector<cv::Point2f> &imagePoints, const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs, cv::Mat &rvec, cv::Mat &tvec)
{
    // Custom P3P pose solver for an armor target.
    //   objectPoints - 3 reference points in the armor frame; points 0 and 1
    //                  differ only in y (light-bar endpoints A and B), point 2
    //                  is offset in x (point C) — TODO confirm against caller
    //   imagePoints  - the matching pixel coordinates
    //   cameraMatrix - 3x3 intrinsic matrix (CV_64F)
    //   distCoeffs   - 5x1 CV_64F distortion coefficients, read by the lambda
    //                  below in the order k1 k2 k3 p1 p2
    //   rvec, tvec   - outputs: rotation vector and translation (3x1 CV_64F)
    // Returns true on success, false when no geometrically valid root exists.

    // Initialize the outputs to zero vectors.
    tvec.release();
    rvec.release();
    tvec = (cv::Mat_<double>(3, 1) << 0.0, 0.0, 0.0);
    rvec = (cv::Mat_<double>(3, 1) << 0.0, 0.0, 0.0);

    std::vector<cv::Point2f> imgPointsUndistort = imagePoints;

    // Distortion-correction helper written as a lambda.
    // NOTE(review): this normalizes the pixel, applies the *forward* radial +
    // tangential distortion model, then reprojects — i.e. it distorts rather
    // than undistorts; confirm the intended direction before re-enabling the
    // calls below.
    auto undistort = [](const cv::Mat &in, const cv::Mat &dis, cv::Point2f &point) -> void
    {
        double x = (point.x - in.at<double>(0, 2)) / in.at<double>(0, 0);
        double y = (point.y - in.at<double>(1, 2)) / in.at<double>(1, 1);
        double r_2 = pow(x, 2) + pow(y, 2);

        double k = 1;
        double r = 1;
        for (int i = 0; i < 3; ++i)
        {
            r *= r_2;
            k += dis.at<double>(i, 0) * r;
        }
        double p_1 = 2 * dis.at<double>(3, 0) * x * y + dis.at<double>(4, 0) * (r_2 + 2 * x * x);
        double p_2 = 2 * dis.at<double>(4, 0) * x * y + dis.at<double>(3, 0) * (r_2 + 2 * y * y);

        x = x * k + p_1;
        y = y * k + p_2;

        point.x = x * in.at<double>(0, 0) + in.at<double>(0, 2);
        point.y = y * in.at<double>(1, 1) + in.at<double>(1, 2);
    };

    // undistort(cameraMatrix, distCoeffs, imgPointsUndistort[0]);
    // undistort(cameraMatrix, distCoeffs, imgPointsUndistort[1]);
    // undistort(cameraMatrix, distCoeffs, imgPointsUndistort[2]); // distortion correction currently disabled

    // Side lengths of the object-space triangle: AB along y, and AC = BC by
    // construction (C lies on the perpendicular bisector of AB).
    double AB = fabs(objectPoints[0].y - objectPoints[1].y);
    double AC = sqrt(pow(AB / 2, 2) + pow(objectPoints[2].x - objectPoints[0].x, 2));
    double BC = AC;

    // u = (AC/AB)^2 expressed directly from the object coordinates.
    double u = 0.25 + pow((objectPoints[2].x - objectPoints[0].x) / (objectPoints[1].y - objectPoints[0].y), 2);

    // Normalized image-plane rays (camera frame) through the three pixels.
    Eigen::Vector3d Oa = {(imgPointsUndistort[0].x - cameraMatrix.at<double>(0, 2)) / cameraMatrix.at<double>(0, 0), (imgPointsUndistort[0].y - cameraMatrix.at<double>(1, 2)) / cameraMatrix.at<double>(1, 1), 1};
    Eigen::Vector3d Ob = {(imgPointsUndistort[1].x - cameraMatrix.at<double>(0, 2)) / cameraMatrix.at<double>(0, 0), (imgPointsUndistort[1].y - cameraMatrix.at<double>(1, 2)) / cameraMatrix.at<double>(1, 1), 1};
    Eigen::Vector3d Oc = {(imgPointsUndistort[2].x - cameraMatrix.at<double>(0, 2)) / cameraMatrix.at<double>(0, 0), (imgPointsUndistort[2].y - cameraMatrix.at<double>(1, 2)) / cameraMatrix.at<double>(1, 1), 1};

    // Cosines of the pairwise angles between the viewing rays.
    double cos_a_b = Oa.dot(Ob) / (Oa.norm() * Ob.norm());
    double cos_a_c = Oa.dot(Oc) / (Oa.norm() * Oc.norm());
    double cos_b_c = Ob.dot(Oc) / (Ob.norm() * Oc.norm());

    // Coefficients of the monic quartic in x = |OA|/|OC|, each divided by the
    // leading term tmp_4. NOTE(review): tmp_4 can approach zero for degenerate
    // viewing geometry, which would blow these up — consider guarding.
    double coefficients[4];
    double tmp_4 = pow(1 - 2 * u, 2) - pow(2 * u * cos_a_b, 2);                                                                                                                                                                                          // quartic term
    coefficients[3] = (8 * pow(u, 2) * (pow(cos_a_b, 2) * cos_a_c - cos_a_c) + 4 * u * (cos_a_b * cos_b_c + cos_a_c + 2 * cos_a_c) - 4 * cos_a_c) / tmp_4;                                                                                               // cubic term
    coefficients[2] = (2 * (1 - 2 * u) * (1 - 2 * u * pow(cos_b_c, 2)) + pow(2 * (u - 1) * cos_a_c + 2 * u * cos_a_b * cos_b_c, 2) - pow(2 * u * cos_a_b * cos_b_c, 2) - pow(4 * u, 2) * cos_a_b * cos_a_c * cos_b_c - pow(2 * u * cos_b_c, 2)) / tmp_4; // quadratic term
    coefficients[1] = (2 * (2 * (u - 1) * cos_a_c + 2 * u * cos_a_b * cos_b_c) * (1 - 2 * u * pow(cos_b_c, 2)) + 8 * pow(u, 2) * cos_a_b * pow(cos_b_c, 3) + 8 * cos_a_c * pow(u * cos_b_c, 2)) / tmp_4;                                                 // linear term
    coefficients[0] = (pow(1 - 2 * u * pow(cos_b_c, 2), 2) - pow(2 * u * pow(cos_b_c, 2), 2)) / tmp_4;                                                                                                                                                   // constant term

    // Solve the quartic as the eigenvalues of its companion matrix.
    Eigen::Matrix<double, 4, 4> coeff_mat;
    coeff_mat << 0, 0, 0, -coefficients[0],
        1, 0, 0, -coefficients[1],
        0, 1, 0, -coefficients[2],
        0, 0, 1, -coefficients[3];

    Eigen::Matrix<std::complex<double>, 4, 1> roots;
    roots = coeff_mat.eigenvalues();

    std::vector<cv::Point2d> roots_real;

    // Keep only the real roots; for each, recover the two candidate values of
    // y = |OB|/|OC| (NaN from a negative discriminant fails the > 0 test).
    // NOTE(review): the exact imag() == 0 comparison may reject real roots
    // that carry a tiny numerical imaginary part — consider a tolerance.
    for (int i = 0; i < 4; ++i)
    {
        if (0 == roots(i, 0).imag())
        {
            double y_0 = cos_b_c - sqrt(pow(cos_b_c, 2) - 2 * cos_a_c * roots(i, 0).real() + pow(roots(i, 0).real(), 2));
            double y_1 = cos_b_c + sqrt(pow(cos_b_c, 2) - 2 * cos_a_c * roots(i, 0).real() + pow(roots(i, 0).real(), 2));
            if (y_0 > 0)
            {
                roots_real.push_back({roots(i, 0).real(), y_0});
            }
            if (y_1 > 0)
            {
                roots_real.push_back({roots(i, 0).real(), y_1});
            }
        }
    }

    if (roots_real.empty())
    {
        std::cerr << "error! no real roots! (from P3P_HERO)" << std::endl;
        return false;
    }

    // Discard roots that do not reproduce the physical side lengths.
    std::vector<cv::Point2d> roots_true;
    for (const auto &root : roots_real)
    {
        double OC_norm = sqrt(pow(AB, 2) / (pow(root.x, 2) + pow(root.y, 2) - 2 * root.x * root.y * cos_a_b));
        double OA_norm = root.x * OC_norm;
        double OB_norm = root.y * OC_norm;
        double AB_H = sqrt(pow(OA_norm, 2) + pow(OB_norm, 2) - 2 * OA_norm * OB_norm * cos_a_b);
        double AC_H = sqrt(pow(OA_norm, 2) + pow(OC_norm, 2) - 2 * OA_norm * OC_norm * cos_a_c);
        double BC_H = sqrt(pow(OB_norm, 2) + pow(OC_norm, 2) - 2 * OB_norm * OC_norm * cos_b_c);

        if (fabs(AB - AB_H) < 0.1 && fabs(AC - AC_H) < 0.1 && fabs(BC - BC_H) < 0.1) // must match reality within tolerance
        {
            roots_true.push_back(root);
        }
    }
    if (roots_true.empty())
    {
        std::cerr << "error! no physically valid roots! (from P3P_HERO)" << std::endl;
        return false;
    }

    // Among the surviving candidates, pick the one maximizing OC.z - OD.z
    // (depth of point C relative to the light-bar midpoint D).
    Eigen::Vector3d OA, OB, OC, OD, OO;
    double delta = 0.0;
    bool have_candidate = false;
    for (const auto &root : roots_true)
    {
        double OC_norm = sqrt(pow(AB, 2) / (pow(root.x, 2) + pow(root.y, 2) - 2 * root.x * root.y * cos_a_b));
        double OA_norm = root.x * OC_norm;
        double OB_norm = root.y * OC_norm; // recover the ray lengths

        Eigen::Vector3d OC_tmp = Oc / Oc.norm() * OC_norm;                                    // camera center -> point C
        Eigen::Vector3d OD_tmp = 0.5 * (Oa / Oa.norm() * OA_norm + Ob / Ob.norm() * OB_norm); // camera center -> light-bar midpoint

        double delta_tmp = OC_tmp(2) - OD_tmp(2);
        // FIX: the original seeded delta with __DBL_MIN__ (the smallest
        // *positive* double), so when every candidate had delta_tmp <= 0 none
        // was ever selected and OA..OO stayed uninitialized. Always accept the
        // first candidate, then keep the maximum.
        if (!have_candidate || delta_tmp > delta)
        {
            have_candidate = true;
            delta = delta_tmp;
            OA = Oa / Oa.norm() * OA_norm;
            OB = Ob / Ob.norm() * OB_norm;
            OC = OC_tmp;
            OD = OD_tmp;
            OO = 0.5 * (OC + OD);
        }
    }

    // Translation: midpoint between C and the light-bar midpoint D.
    tvec.at<double>(0, 0) = OO(0);
    tvec.at<double>(1, 0) = OO(1);
    tvec.at<double>(2, 0) = OO(2);

    // Build the armor frame axes in camera coordinates.
    Eigen::Vector3d OX;
    if (OC.x() > OD.x()) // C is on the right
    {
        OX = OO - OD;
    }
    else
    {
        OX = OD - OO;
    }
    OX = OX / OX.norm();

    Eigen::Vector3d OY_;
    OY_ = OA - OB; // rough y direction along the light bar

    Eigen::Vector3d OZ;
    OZ = OX.cross(OY_);
    OZ = OZ / OZ.norm();

    Eigen::Vector3d OY; // re-orthogonalized y axis
    OY = OZ.cross(OX);
    OY = OY / OY.norm();

    // Rotation matrix with the axes as columns (armor -> camera).
    cv::Mat P = (cv::Mat_<double>(3, 3) << OX.x(), OY.x(), OZ.x(),
                 OX.y(), OY.y(), OZ.y(),
                 OX.z(), OY.z(), OZ.z());

    cv::Rodrigues(P, rvec); // rotation matrix -> rotation vector

    return true;
}

void camera_to_pixel(const cv::Mat &X_camera, cv::Mat &X_pixel, const cv::Mat &cameraMatrix) // projection: camera frame -> pixel coordinates
{
    // Perspective projection: divide by depth, then apply the intrinsics.
    const double inv_depth = 1 / X_camera.at<double>(2, 0);
    X_pixel = inv_depth * cameraMatrix * X_camera;
}

void armor_to_camera(const cv::Mat &X_armor, cv::Mat &X_camera, const cv::Mat &rvec, const cv::Mat &tvec)
{
    // Rigid transform armor frame -> camera frame: X_c = R * X_a + t.
    cv::Mat rotation;
    cv::Rodrigues(rvec, rotation); // rotation vector -> 3x3 rotation matrix
    X_camera = rotation * X_armor + tvec;
}

void camera_to_armor(const cv::Mat &X_camera, cv::Mat &X_armor, const cv::Mat &rvec, const cv::Mat &tvec)
{
    // Inverse of armor_to_camera: X_a = R^-1 * (X_c - t).
    cv::Mat P_a_c;
    cv::Rodrigues(rvec, P_a_c);
    // Rotation matrices are orthogonal, so the inverse is the transpose —
    // cheaper and numerically exact compared to the original general inv().
    X_armor = P_a_c.t() * (X_camera - tvec);
}

void camera_to_ptz(const cv::Mat &X_camera, cv::Mat &X_ptz, const cv::Mat &offset)
{
    // Fixed axis permutation from the camera frame to the gimbal (PTZ) frame,
    // followed by the camera-to-gimbal translation offset.
    const cv::Mat axis_swap = (cv::Mat_<double>(3, 3) << 1, 0, 0,
                               0, 0, 1,
                               0, -1, 0);
    X_ptz = axis_swap * X_camera + offset;
}

void ptz_to_camera(const cv::Mat &X_ptz, cv::Mat &X_camera, const cv::Mat &offset)
{
    // Inverse of camera_to_ptz: remove the offset, then apply the inverse
    // axis permutation back into the camera frame.
    const cv::Mat axis_swap_inv = (cv::Mat_<double>(3, 3) << 1, 0, 0,
                                   0, 0, -1,
                                   0, 1, 0);
    X_camera = axis_swap_inv * (X_ptz - offset);
}

void ptz_to_land(const cv::Mat &X_ptz, cv::Mat &X_land, const Pose &pose)
{
    // Rotate gimbal-frame coordinates into the land (world) frame by applying
    // roll, then pitch, then yaw.
    const double cy = cos(pose.ptz_yaw), sy = sin(pose.ptz_yaw);
    const double cp = cos(pose.ptz_pitch), sp = sin(pose.ptz_pitch);
    const double cr = cos(pose.ptz_roll), sr = sin(pose.ptz_roll);
    const cv::Mat P_yaw = (cv::Mat_<double>(3, 3) << cy, -sy, 0, sy, cy, 0, 0, 0, 1);
    const cv::Mat P_pitch = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, cp, -sp, 0, sp, cp);
    const cv::Mat P_roll = (cv::Mat_<double>(3, 3) << cr, 0, -sr, 0, 1, 0, sr, 0, cr);
    X_land = P_yaw * P_pitch * P_roll * X_ptz;
}

void land_to_ptz(const cv::Mat &X_land, cv::Mat &X_ptz, const Pose &pose)
{
    // Inverse of ptz_to_land: X_ptz = R_roll^-1 * R_pitch^-1 * R_yaw^-1 * X_land.
    cv::Mat P_yaw = (cv::Mat_<double>(3, 3) << cos(pose.ptz_yaw), -sin(pose.ptz_yaw), 0, sin(pose.ptz_yaw), cos(pose.ptz_yaw), 0, 0, 0, 1);
    cv::Mat P_pitch = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, cos(pose.ptz_pitch), -sin(pose.ptz_pitch), 0, sin(pose.ptz_pitch), cos(pose.ptz_pitch));
    cv::Mat P_roll = (cv::Mat_<double>(3, 3) << cos(pose.ptz_roll), 0, -sin(pose.ptz_roll), 0, 1, 0, sin(pose.ptz_roll), 0, cos(pose.ptz_roll));
    // Rotation matrices are orthogonal, so use the transpose instead of the
    // original general inv() — cheaper and numerically exact.
    X_ptz = P_roll.t() * P_pitch.t() * P_yaw.t() * X_land;
}

// Debug-only canvases referenced by the commented-out visualization code in
// estimate_o_yaw; unused in normal operation.
cv::Mat img, img_fx;

double estimate_o_yaw(const cv::Mat &tvec, const cv::Mat &offset, const Pose &pose, const double &half_w, const double &half_h, const double &armor_pitch, const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs, const cv::Mat *originalPoints)
{
    // Estimate the armor plate's yaw in the land (world) frame by sweeping a
    // candidate yaw and minimizing a reprojection cost against the observed
    // corner pixels.
    //   tvec           - armor center in the camera frame (3x1 CV_64F)
    //   offset         - camera-to-gimbal translation used by camera_to_ptz
    //   pose           - gimbal yaw/pitch/roll angles
    //   half_w, half_h - armor half width / half height (armor frame)
    //   armor_pitch    - fixed pitch tilt of the armor plate
    //   cameraMatrix   - 3x3 intrinsics; distCoeffs - distortion (k1 k2 k3 p1 p2)
    //   originalPoints - 4 observed corner pixels, each a CV_64F column Mat
    // Returns the estimated yaw angle, normalized into (-pi, pi].

    // Armor center expressed in the land frame.
    cv::Mat X_O = (cv::Mat_<double>(3, 1) << tvec.at<double>(0, 0), tvec.at<double>(1, 0), tvec.at<double>(2, 0));
    camera_to_ptz(X_O, X_O, offset);
    ptz_to_land(X_O, X_O, pose);

    // Armor center in pixel coordinates (normalizes the corner distances in
    // the cost function below).
    cv::Mat X_O_pixel;
    camera_to_pixel(tvec, X_O_pixel, cameraMatrix);

    // Reference direction for the yaw sweep: the camera's -x axis rotated into
    // the land frame and projected onto the ground plane.
    // FIX: the original passed cv::Mat(0, 0, 0) — an *empty* 0x0 CV_8U matrix,
    // not a zero translation — as the offset, which is an invalid operand for
    // the Mat addition inside camera_to_ptz. Use a proper 3x1 zero vector so
    // only the rotational part of the transform is applied.
    const cv::Mat zero_offset = (cv::Mat_<double>(3, 1) << 0.0, 0.0, 0.0);
    cv::Mat v_start = (cv::Mat_<double>(3, 1) << -1, 0, 0);
    camera_to_ptz(v_start, v_start, zero_offset);
    ptz_to_land(v_start, v_start, pose);
    v_start.at<double>(2, 0) = 0;          // project onto the xOy plane
    v_start = v_start / cv::norm(v_start); // normalize

    // Armor corner offsets in the armor frame (right-top, left-top,
    // left-bottom, right-bottom).
    cv::Mat X_0[4];
    X_0[0] = (cv::Mat_<double>(3, 1) << half_w, 0, half_h);
    X_0[1] = (cv::Mat_<double>(3, 1) << -half_w, 0, half_h);
    X_0[2] = (cv::Mat_<double>(3, 1) << -half_w, 0, -half_h);
    X_0[3] = (cv::Mat_<double>(3, 1) << half_w, 0, -half_h);

    // Tilt the corners by the fixed armor pitch, then rotate them so the
    // plate initially faces along v_start.
    cv::Mat P_0 = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, cos(armor_pitch), -sin(armor_pitch), 0, sin(armor_pitch), cos(armor_pitch));
    cv::Mat P_1 = (cv::Mat_<double>(3, 3) << v_start.at<double>(1, 0), v_start.at<double>(0, 0), 0, -v_start.at<double>(0, 0), v_start.at<double>(1, 0), 0, 0, 0, 1);

    for (int i = 0; i < 4; ++i)
    {
        X_0[i] = P_1 * P_0 * X_0[i];
    }

    // Distortion-correction lambda (same model as in P3P_HERO).
    // NOTE(review): like its twin in P3P_HERO, this applies the *forward*
    // distortion model to the pixel — confirm the intended direction.
    std::vector<cv::Point2d> Points_tmp(4);
    auto undistort = [](const cv::Mat &in, const cv::Mat &dis, cv::Point2d &point) -> void
    {
        double x = (point.x - in.at<double>(0, 2)) / in.at<double>(0, 0);
        double y = (point.y - in.at<double>(1, 2)) / in.at<double>(1, 1);
        double r_2 = pow(x, 2) + pow(y, 2);

        double k = 1;
        double r = 1;
        for (int i = 0; i < 3; ++i)
        {
            r *= r_2;
            k += dis.at<double>(i, 0) * r;
        }
        double p_1 = 2 * dis.at<double>(3, 0) * x * y + dis.at<double>(4, 0) * (r_2 + 2 * x * x);
        double p_2 = 2 * dis.at<double>(4, 0) * x * y + dis.at<double>(3, 0) * (r_2 + 2 * y * y);

        x = x * k + p_1;
        y = y * k + p_2;

        point.x = x * in.at<double>(0, 0) + in.at<double>(0, 2);
        point.y = y * in.at<double>(1, 1) + in.at<double>(1, 2);
    };

    for (int i = 0; i < 4; ++i)
    {
        Points_tmp[i].x = originalPoints[i].at<double>(0, 0);
        Points_tmp[i].y = originalPoints[i].at<double>(1, 0);
        undistort(cameraMatrix, distCoeffs, Points_tmp[i]); // correct the observed corners
    }

    // Reprojection cost for a candidate yaw offset theta: rotate the model
    // corners, project them into pixels, and compare (a) the squared
    // center-distance ratios and (b) the left/right edge slopes against the
    // observed corners.
    auto f = [&](double theta) -> double
    {
        cv::Mat P = (cv::Mat_<double>(3, 3) << cos(theta), -sin(theta), 0, sin(theta), cos(theta), 0, 0, 0, 1);
        cv::Mat Points[4];

        for (int i = 0; i < 4; ++i)
        {
            Points[i] = P * X_0[i] + X_O;
            land_to_ptz(Points[i], Points[i], pose);
            ptz_to_camera(Points[i], Points[i], offset);
            camera_to_pixel(Points[i], Points[i], cameraMatrix); // project to pixel coordinates
        }

        double dis = 0;
        for (int i = 0; i < 4; ++i)
        {
            double dis_point_original = pow(Points_tmp[i].x - X_O_pixel.at<double>(0, 0), 2) + pow(Points_tmp[i].y - X_O_pixel.at<double>(1, 0), 2);
            double dis_point = pow(Points[i].at<double>(0, 0) - X_O_pixel.at<double>(0, 0), 2) + pow(Points[i].at<double>(1, 0) - X_O_pixel.at<double>(1, 0), 2);
            dis += pow(dis_point / dis_point_original - 1, 2);
        }
        // Edge-orientation terms, weighted by 300.
        // NOTE(review): the angle *ratios* below blow up when an observed edge
        // angle is near zero — consider angle differences instead.
        double theta_left_original = atan2(Points_tmp[3].y - Points_tmp[0].y, Points_tmp[3].x - Points_tmp[0].x);
        double theta_right_original = atan2(Points_tmp[2].y - Points_tmp[1].y, Points_tmp[2].x - Points_tmp[1].x);
        double theta_left = atan2(Points[3].at<double>(1, 0) - Points[0].at<double>(1, 0), Points[3].at<double>(0, 0) - Points[0].at<double>(0, 0));
        double theta_right = atan2(Points[2].at<double>(1, 0) - Points[1].at<double>(1, 0), Points[2].at<double>(0, 0) - Points[1].at<double>(0, 0));
        dis += 300 * (pow(theta_left / theta_left_original - 1, 2) + pow(theta_right / theta_right_original - 1, 2));

        return dis;
    };

    // Ternary search for a local minimum in each of three sub-intervals of
    // [0, pi], then keep the best.
    // FIX: the original never reset l and r between sub-intervals — after
    // interval 0 both endpoints had collapsed onto its minimum, so intervals
    // 1 and 2 searched a degenerate (even inverted) range. Each sub-interval
    // now gets fresh endpoints. num_of_interval is also constexpr so the
    // array below is standard C++ (the original was a GCC VLA extension).
    constexpr int num_of_interval = 3;           // number of sub-intervals
    const double length_of_interval = M_PI;      // total search range
    const double sub_len = length_of_interval / num_of_interval;
    cv::Point2d local_minimum_val[num_of_interval]; // (arg, value) per sub-interval
    for (int k = 0; k < num_of_interval; ++k)
    {
        double l = k * sub_len;
        double r = l + sub_len;
        for (int i = 0; i < 12; ++i) // iterations per sub-interval
        {
            double mid_1 = l + (r - l) / 3;
            double mid_2 = r - (r - l) / 3;
            if (f(mid_1) > f(mid_2))
            {
                l = mid_1;
            }
            else
            {
                r = mid_2;
            }
        }
        local_minimum_val[k].x = (l + r) / 2;
        local_minimum_val[k].y = f((l + r) / 2);
    }
    std::sort(local_minimum_val, local_minimum_val + num_of_interval,
              [](const cv::Point2d &a, const cv::Point2d &b) { return a.y < b.y; });

    // Empirical correction stretching the best offset away from pi/2.
    // NOTE(review): the 1.5 gain looks like a tuned magic number — confirm.
    local_minimum_val[0].x = 1.5 * (local_minimum_val[0].x - M_PI / 2) + M_PI / 2;
    // Final yaw: direction of v_start plus the optimized offset.
    double theta = atan2(-v_start.at<double>(0, 0), v_start.at<double>(1, 0)) + local_minimum_val[0].x;

    // Normalize into (-pi, pi].
    while (theta > M_PI)
    {
        theta -= 2 * M_PI;
    }
    while (theta <= -M_PI)
    {
        theta += 2 * M_PI;
    }

    return theta;
}
