#include "ImageHandler.h"

#include <iostream>

using namespace std;

#pragma region Image
void ImageHandler::GetShoreLinePoints(std::string image_path)
{
    auto start = std::chrono::high_resolution_clock::now();
    cv::Mat image = cv::imread(image_path + "6.png");

    // Lab分割测试
    cv::Mat region_mask;
    GetLabImage(image_path, image, region_mask);

    // // 转换为灰度图并获取直方图
    // cv::Mat image_gray;
    // GetGrayImageHist(image_path, image, image_gray);
    // cv::imwrite(image_path + "gray.png", image_gray);

    // // 按照灰度值分类
    // cv::Mat image_thre;
    // TriThreshold(image_gray, image_thre, 125.150);
    // cv::imwrite(image_path + "image_thre.png", image_thre);

    // cv::resize(image, image, cv::Size(300, 300));

    // // 区域生长
    // cv::Mat region_mask;
    // cv::Point seed_point(image.cols / 6 * 5, image.rows / 2);
    // RegionGrowing(image, region_mask, seed_point, 30.0f);
    // cv::imwrite(image_path + "region_mask.png", region_mask);

    // 边缘检测
    cv::Mat edge_image;
    GetCanny(region_mask, edge_image);
    cv::imwrite(image_path + "edge.png", edge_image);

    // 提取岸线
    std::vector<Point> edge_points;
    std::vector<cv::Point> shore_line;
    for (int i = 0; i < edge_image.rows; i++)
    {
        for (int j = 0; j < edge_image.cols; j++)
        {
            if (edge_image.at<uchar>(i, j) == 255)
            {
                Point point;
                point.x = j;
                point.y = i;
                point.index = i;
                edge_points.push_back(point);
                break;
            }
        }
    }
    std::vector<Segment> segments = Utils::RansacPoly(edge_points, 10, 40);
    std::cout << "Segments: " << segments.size() << std::endl;
    for (int i = 0; i < segments.size(); i++)
    {
        std::cout << "Segment " << i << ": " << segments[i].start.x << ", " << segments[i].start.y << " -> " << segments[i].end.x << ", " << segments[i].end.y << std::endl;
        if (i == 0)
        {
            cv::Point point{(int)((segments[i].start.y - segments[i].line.intercept) / segments[i].line.slope), (int)(segments[i].start.y)};
            shore_line.push_back(point);
        }
        else
        {
            double mindis = 20; // 判断交点与原点距离
            Point intersection = Utils::CalculateIntersection(segments[i - 1].line, segments[i].line);
            Point prev_start{(segments[i - 1].start.y - segments[i - 1].line.intercept) / segments[i - 1].line.slope, segments[i - 1].start.y, 0};
            Point prev_end{(segments[i - 1].end.y - segments[i - 1].line.intercept) / segments[i - 1].line.slope, segments[i - 1].end.y, 0};
            Point now_start{(segments[i].start.y - segments[i].line.intercept) / segments[i].line.slope, segments[i].start.y, 0};
            Point now_end{(segments[i].end.x - segments[i].line.intercept) / segments[i].line.slope, segments[i].end.y, 0};
            double prev_dis = std::sqrt(std::pow(prev_end.x - intersection.x, 2) + std::pow(prev_end.y - intersection.y, 2));
            double now_dis = std::sqrt(std::pow(now_start.x - intersection.x, 2) + std::pow(now_start.y - intersection.y, 2));
            if (Utils::IsPointOnRay(intersection, prev_start, prev_end) && Utils::IsPointOnRay(intersection, now_end, now_start) && (prev_dis < mindis || now_dis < mindis))
            {
                cv::Point point{(int)intersection.x, (int)intersection.y};
                shore_line.push_back(point);
            }
            else
            {
                cv::Point prev_point{(int)((segments[i - 1].end.y - segments[i - 1].line.intercept) / segments[i - 1].line.slope), (int)(segments[i - 1].end.y)};
                cv::Point now_point{(int)((segments[i].start.y - segments[i].line.intercept) / segments[i].line.slope), (int)(segments[i].start.y)};
                shore_line.push_back(prev_point);
                shore_line.push_back(now_point);
            }
        }

        if (i == segments.size() - 1)
        {
            cv::Point point{(int)((segments[i].end.y - segments[i].line.intercept) / segments[i].line.slope), (int)(segments[i].end.y)};
            shore_line.push_back(point);
        }
    }

    for (int i = 0; i < shore_line.size(); i++)
    {
        std::cout << shore_line[i].x << " " << shore_line[i].y << std::endl;
    }

    cv::Mat img_shore_line = image.clone();
    cv::polylines(
        img_shore_line,
        {shore_line},
        false,
        cv::Scalar(0, 255, 0),
        2,
        cv::LINE_AA);
    cv::imwrite(image_path + "shore_line.png", img_shore_line);
    auto end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
    std::cout << "opencv耗时：" << duration.count() << " 毫秒" << std::endl;
}

void ImageHandler::GetGrayImageHist(std::string image_path, const cv::Mat &image, cv::Mat &image_gray)
{
    // Convert `image` to grayscale (into `image_gray`), compute its 256-bin
    // intensity histogram, render the histogram with axes and tick labels,
    // and save it as "<image_path>histogram.png".
    if (image.channels() == 3)
    {
        cv::cvtColor(image, image_gray, cv::COLOR_BGR2GRAY);
    }
    else
    {
        // Already single-channel: just copy.
        image_gray = image.clone();
    }

    // cv::equalizeHist(image_gray, image_gray);
    // cv::imwrite(image_path + "histGray.png", image_gray);

    if (image_gray.empty())
    {
        cerr << "灰度图无效，无法计算直方图" << endl;
        return;
    }

    // Histogram parameters.
    const int histSize = 256; // number of bins
    float range[] = {0, 256}; // pixel value range (upper bound exclusive)
    const float *histRange = {range};
    bool uniform = true, accumulate = false;

    // Compute the histogram.
    // NOTE: the original comments mislabeled these arguments; corrected below.
    calcHist(&image_gray, // array of source images
             1,           // number of source images (not "single channel")
             0,           // channel list; null selects channel 0 (not the mask)
             cv::Mat(),   // empty mask = use every pixel
             hist,        // (declared just above) output histogram
             1,           // histogram dimensionality
             &histSize,   // bins per dimension
             &histRange,  // value range per dimension
             uniform,
             accumulate);

    // Canvas for the histogram plot.
    int hist_w = 512, hist_h = 400; // plot size in pixels
    int bin_w = cvRound((double)hist_w / histSize);
    cv::Mat histImage(hist_h, hist_w, CV_8UC3, cv::Scalar(0, 0, 0));

    // Scale bin counts to the canvas height.
    normalize(hist, hist, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat());

    // Draw the histogram as a connected polyline.
    for (int i = 1; i < histSize; i++)
    {
        line(histImage,
             cv::Point(bin_w * (i - 1), hist_h - cvRound(hist.at<float>(i - 1))),
             cv::Point(bin_w * (i), hist_h - cvRound(hist.at<float>(i))),
             cv::Scalar(255, 255, 255), // white curve
             2, cv::LINE_AA);
    }

    // Axes (gray arrows).
    arrowedLine(histImage, cv::Point(10, hist_h - 10), cv::Point(hist_w - 10, hist_h - 10), cv::Scalar(200, 200, 200), 1); // X axis
    arrowedLine(histImage, cv::Point(10, hist_h - 10), cv::Point(10, 10), cv::Scalar(200, 200, 200), 1);                   // Y axis

    // X-axis endpoint labels (pixel value range).
    cv::putText(histImage, "0", cv::Point(10, hist_h - 5),
                cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(200, 200, 200), 1);
    cv::putText(histImage, "255", cv::Point(hist_w - 30, hist_h - 5),
                cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(200, 200, 200), 1);

    // Intermediate tick marks, one every 50 intensity levels.
    for (int val = 50; val < 255; val += 50)
    {
        int x_pos = cvRound(val * bin_w); // map intensity -> canvas x
        cv::line(histImage,
                 cv::Point(x_pos, hist_h - 10),
                 cv::Point(x_pos, hist_h - 15), // 5-px tick
                 cv::Scalar(200, 200, 200), 1);
        cv::putText(histImage, std::to_string(val),
                    cv::Point(x_pos - 10, hist_h - 20), // nudge label left of the tick
                    cv::FONT_HERSHEY_SIMPLEX, 0.4,
                    cv::Scalar(200, 200, 200), 1);
    }

    // Save the rendered histogram.
    cv::imwrite(image_path + "histogram.png", histImage);
}

void ImageHandler::TriThreshold(const cv::Mat &image, cv::Mat &image_thresh, int low_thresh, int high_thresh)
{
    // Classify each pixel of a single-channel image into three bands —
    // [0, low_thresh), [low_thresh, high_thresh), [high_thresh, 255] —
    // and render the result as a color-coded 3-channel image.
    if (image.empty() || image.channels() != 1)
    {
        cerr << "需要单通道灰度图输入" << endl;
        return;
    }

    // Output is 3-channel so each band can get its own color.
    image_thresh = cv::Mat::zeros(image.size(), CV_8UC3);

    // One binary mask per band.
    cv::Mat mask_low = (image < low_thresh);
    cv::Mat mask_mid = (image >= low_thresh) & (image < high_thresh);
    cv::Mat mask_high = (image >= high_thresh);

    // Colorize. NOTE: scalars are BGR — the original comments had the
    // red/blue labels swapped; corrected here without touching the values.
    image_thresh.setTo(cv::Scalar(255, 0, 0), mask_low);  // low band  - blue (BGR)
    image_thresh.setTo(cv::Scalar(0, 255, 0), mask_mid);  // mid band  - green
    image_thresh.setTo(cv::Scalar(0, 0, 255), mask_high); // high band - red (BGR)
}

void ImageHandler::RegionGrowing(const cv::Mat &image, cv::Mat &result_mask, cv::Point seed, float threshold, bool use_lab)
{
    // Breadth-first region growing from `seed`: a neighbor joins the region
    // when its color distance to the running mean is within `threshold`.
    // The grown region is written into `result_mask` as 255-valued pixels.
    const bool seed_inside = seed.x >= 0 && seed.y >= 0 &&
                             seed.x < image.cols && seed.y < image.rows;
    if (image.empty() || !seed_inside)
    {
        cerr << "Invalid input parameters" << endl;
        return;
    }

    // Optionally compare colors in Lab space (more perceptually uniform).
    cv::Mat work;
    if (use_lab)
        cv::cvtColor(image, work, cv::COLOR_BGR2Lab);
    else
        work = image.clone();

    // Bookkeeping: visited flags, output mask, and the BFS frontier.
    cv::Mat seen = cv::Mat::zeros(image.size(), CV_8UC1);
    result_mask = cv::Mat::zeros(image.size(), CV_8UC1);
    std::queue<cv::Point> frontier;

    // Seed the region.
    frontier.push(seed);
    seen.at<uchar>(seed) = 1;
    result_mask.at<uchar>(seed) = 255;

    // Running mean color of the accepted region (incrementally updated).
    cv::Vec3f region_mean = work.at<cv::Vec3b>(seed);
    int accepted = 1;

    // 8-connected neighborhood offsets (order preserved).
    const cv::Point offsets[8] = {
        {-1, -1}, {0, -1}, {1, -1}, {-1, 0}, {1, 0}, {-1, 1}, {0, 1}, {1, 1}};

    while (!frontier.empty())
    {
        const cv::Point here = frontier.front();
        frontier.pop();

        for (const cv::Point &off : offsets)
        {
            const cv::Point there = here + off;

            // Skip out-of-bounds neighbors.
            if (there.x < 0 || there.y < 0 ||
                there.x >= image.cols || there.y >= image.rows)
                continue;

            // Each pixel is considered at most once.
            if (seen.at<uchar>(there))
                continue;
            seen.at<uchar>(there) = 1;

            // Distance from the running mean decides membership.
            const cv::Vec3b candidate = work.at<cv::Vec3b>(there);
            const float distance = cv::norm(region_mean - cv::Vec3f(candidate));
            if (distance <= threshold)
            {
                result_mask.at<uchar>(there) = 255;
                frontier.push(there);

                // Incremental mean update over all accepted pixels.
                region_mean = (region_mean * accepted + cv::Vec3f(candidate)) / (accepted + 1);
                ++accepted;
            }
        }
    }
}

void ImageHandler::GetCanny(const cv::Mat &image, cv::Mat &result_image, float threshold1, float threshold2, int apertureSize)
{
    // Gaussian-smooth the (already single-channel) input, then run Canny
    // edge detection into `result_image`.
    cv::Mat image_gray = image.clone();
    // cvtColor(image, image_gray, COLOR_BGR2GRAY);

    // 3x3 Gaussian blur suppresses noise before gradient computation.
    cv::Mat img_blur;
    GaussianBlur(image_gray, img_blur, cv::Size(3, 3), 0);

    // BUG FIX: the thresholds and aperture were hard-coded to 50/150/3,
    // silently ignoring the parameters. Behavior is unchanged for callers
    // relying on defaults only if the header's defaults are 50/150/3 —
    // TODO(review): confirm against ImageHandler.h.
    Canny(img_blur, result_image, threshold1, threshold2, apertureSize);
}

void ImageHandler::GetContours(const cv::Mat &image, cv::Mat &result_image)
{
    // Find the outermost contours of the (binary) input and draw all of
    // them onto `result_image` in green, 2 px thick.
    std::vector<std::vector<cv::Point>> found;
    std::vector<cv::Vec4i> topology;
    cv::findContours(image, found, topology, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    cv::drawContours(result_image, found, -1, cv::Scalar(0, 255, 0), 2);
}

void ImageHandler::OtsuThresholdLAB(const cv::Mat &gray_image, cv::Mat &thres_image)
{
    // Binarize a single-channel image using Otsu's automatically chosen
    // threshold; the result goes into `thres_image` (0/255).
    const bool input_ok = !gray_image.empty() && gray_image.channels() == 1;
    if (!input_ok)
    {
        std::cerr << "需要灰度图像输入" << std::endl;
        return;
    }

    // Threshold value 0 is ignored when THRESH_OTSU is set.
    cv::threshold(gray_image, thres_image, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
}

void ImageHandler::GetLabImage(std::string image_path, const cv::Mat &image, cv::Mat &thres_image)
{
    // Segment the dominant region of `image` via per-channel Otsu in Lab
    // space, write many debug intermediates under "<image_path>Lab/", and
    // return a filled mask of the largest contour in `thres_image`.
    image_path = image_path + "Lab/";

    cv::Mat image_clone = image.clone();

    // Optional hull mask (disabled): blanks out the boat hull region.
    // cv::Rect mask_roi{image_clone.cols / 2 - 65, image_clone.rows / 2 - 366, 126, 732};
    // cv::Mat subImg = image_clone(mask_roi);
    // subImg.setTo(255);
    // cv::imwrite(image_path + "origin.png", image_clone);

    // Convert to Lab color space.
    cv::Mat lab_image;
    cv::cvtColor(image_clone, lab_image, cv::COLOR_BGR2Lab);
    cv::imwrite(image_path + "lab_image.png", lab_image);

    // Split into L / a / b channels.
    std::vector<cv::Mat> lab_channels;
    cv::split(lab_image, lab_channels);

    // Per-channel rescaling before thresholding.
    cv::Mat l_channel, a_channel, b_channel;

    // L channel scaled by 255/100.
    // NOTE(review): for 8-bit input, OpenCV's Lab conversion already maps
    // L to 0-255, so this scale likely saturates most pixels — confirm the
    // intent before reusing this pipeline.
    lab_channels[0].convertTo(l_channel, CV_8UC1, 255.0 / 100.0);

    // a channel stretched to the full 0-255 range.
    cv::normalize(lab_channels[1], a_channel, 0, 255, cv::NORM_MINMAX, CV_8UC1);

    // b channel stretched to the full 0-255 range.
    cv::normalize(lab_channels[2], b_channel, 0, 255, cv::NORM_MINMAX, CV_8UC1);

    // Save the rescaled channels.
    cv::imwrite(image_path + "L_image.png", l_channel);
    cv::imwrite(image_path + "A_image.png", a_channel);
    cv::imwrite(image_path + "B_image.png", b_channel);

    // Otsu-binarize each channel independently.
    cv::Mat L_binary, A_binary, B_binary;
    OtsuThresholdLAB(l_channel, L_binary);
    OtsuThresholdLAB(a_channel, A_binary);
    OtsuThresholdLAB(b_channel, B_binary);

    // Save the binary channels.
    cv::imwrite(image_path + "L_binary.png", L_binary);
    cv::imwrite(image_path + "A_binary.png", A_binary);
    cv::imwrite(image_path + "B_binary.png", B_binary);

    // Fuse the binary maps: a OR b, then AND with L.
    cv::Mat merged_or;
    cv::bitwise_or(A_binary, B_binary, merged_or);
    cv::imwrite(image_path + "A2B.png", merged_or);
    cv::Mat merged_and;
    cv::bitwise_and(L_binary, merged_or, merged_and);
    cv::imwrite(image_path + "L2A2B.png", merged_and);

    // Contour extraction operates on the OR fusion only.
    // NOTE(review): merged_and is computed and saved but never used below —
    // confirm whether the AND fusion was meant to feed findContours.
    cv::Mat merged = merged_or;
    // Find external contours and keep the largest one.
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(merged, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    if (!contours.empty())
    {
        // Largest contour by area.
        auto max_contour = std::max_element(contours.begin(), contours.end(),
                                            [](const std::vector<cv::Point> &a, const std::vector<cv::Point> &b)
                                            {
                                                return cv::contourArea(a) < cv::contourArea(b);
                                            });

        // Filled mask of the largest contour — this is the returned result.
        cv::Mat largest_contour_mask = cv::Mat::zeros(merged.size(), CV_8UC1);
        cv::drawContours(largest_contour_mask, std::vector<std::vector<cv::Point>>{*max_contour},
                         -1, cv::Scalar(255), cv::FILLED);
        thres_image = largest_contour_mask.clone();

        // Save the mask.
        cv::imwrite(image_path + "largest_contour.png", largest_contour_mask);

        // Polygon approximation of the largest contour (debug only).
        std::vector<cv::Point> approx_curve;
        double epsilon = 0.01 * cv::arcLength(*max_contour, true); // tolerance: 1% of perimeter
        cv::approxPolyDP(*max_contour, approx_curve, epsilon, true);

        // Draw the fitted polygon over (a shared reference to) the clone.
        cv::Mat poly_image = image_clone;
        std::vector<std::vector<cv::Point>> contours_to_draw{approx_curve};
        cv::drawContours(poly_image, contours_to_draw, -1, cv::Scalar(0, 255, 0), 2);

        // Annotate each polygon vertex with its coordinates.
        for (const auto &pt : approx_curve)
        {
            cv::circle(poly_image, pt, 5, cv::Scalar(0, 0, 255), -1);
            cv::putText(poly_image,
                        "(" + std::to_string(pt.x) + "," + std::to_string(pt.y) + ")",
                        pt + cv::Point(10, -5),
                        cv::FONT_HERSHEY_SIMPLEX,
                        0.4,
                        cv::Scalar(200, 200, 200),
                        1);
        }
        cv::imwrite(image_path + "fitted_polygon.png", poly_image);
    }
    else
    {
        std::cerr << "未检测到有效轮廓" << std::endl;
    }
}
#pragma endregion