#include <cmath>
#include <iostream>
#include <vector>

#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;
// Global state shared between main() and the UI callbacks.
cv::Mat grayFrame, thresholdedFrame;  // scratch buffers — not used in the code visible here
int lH = 45, lS = 17, lV = 16, hH = 149, hS = 123, hV = 61;  // HSV low/high thresholds, bound directly to the trackbars
cv::Point initialClickPoint;          // where the last left-button press happened
cv::Mat frame, hsvFrame, mask;        // current camera frame, its HSV conversion, and the inRange() mask
bool mouseIsDragging;                 // true while the left mouse button is held down

// Forward declarations.
// Deblurs `blurredImage` into `deblurredImage`; `sigma` is the Gaussian spread
// of the assumed blur kernel, `K` the Wiener noise-to-signal constant.
void deblurImage(Mat &blurredImage, Mat &deblurredImage, double sigma = 10.0, double K = 0.01);
Mat getMotionKernel(Size ksize, double theta, double sigma);
// Trackbar callback. Intentionally empty: createTrackbar() binds each slider
// directly to one of the global threshold ints above, so the new value is
// already stored by the time this runs — there is nothing left to do here.
void onThresholdChange(int, void *)
{
}
void onMouse(int event, int x, int y, int flags, void *userdata)
{
    if (event == cv::EVENT_LBUTTONDOWN)
    {
        initialClickPoint = cv::Point(x, y);
        mouseIsDragging = true;
    }
    else if (event == cv::EVENT_LBUTTONUP)
    {
        mouseIsDragging = false;
        cv::Vec3b hsv = hsvFrame.at<cv::Vec3b>(y, x);
        std::cout << "HSV: " << (int)hsv[0] << ", " << (int)hsv[1] << ", " << (int)hsv[2] << std::endl;
    }
}
int main()
{
    // Open the default camera (index 0; use other indices for extra cameras).
    cv::VideoCapture cap(0);

    // Bail out if the camera could not be opened.
    if (!cap.isOpened())
    {
        std::cerr << "无法打开摄像头" << std::endl;
        return -1;
    }
    cap.set(cv::CAP_PROP_FRAME_WIDTH, 640);
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);

    // Windows: raw camera feed, thresholding controls/result, deblur result.
    cv::namedWindow("摄像头");
    cv::namedWindow("阈值处理");
    cv::namedWindow("process Image");

    // HSV range trackbars; the values land directly in the bound globals.
    cv::createTrackbar("lH", "阈值处理", &lH, 255, onThresholdChange);
    cv::createTrackbar("lS", "阈值处理", &lS, 255, onThresholdChange);
    cv::createTrackbar("lV", "阈值处理", &lV, 255, onThresholdChange);
    cv::createTrackbar("hH", "阈值处理", &hH, 255, onThresholdChange);
    cv::createTrackbar("hS", "阈值处理", &hS, 255, onThresholdChange);
    cv::createTrackbar("hV", "阈值处理", &hV, 255, onThresholdChange);
    cv::setMouseCallback("process Image", onMouse);

    // 3x3 rectangular element for morphological opening (speckle removal).
    cv::Mat kernel = getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::Mat openedImage;

    while (true)
    {
        // Grab the next frame from the camera.
        cap >> frame;

        // Stop on capture failure.
        if (frame.empty())
        {
            std::cerr << "无法读取帧" << std::endl;
            break;
        }

        cv::Mat deblurredImage;
        deblurImage(frame, deblurredImage);

        // Convert the frame to HSV for color thresholding.
        cv::cvtColor(frame, hsvFrame, cv::COLOR_BGR2HSV);

        // Threshold range taken from the trackbar-bound globals
        // (defaults roughly target dark/black regions).
        cv::Scalar lowerBlack = cv::Scalar(lH, lS, lV);
        cv::Scalar upperBlack = cv::Scalar(hH, hS, hV);

        // Build the mask and clean it up with a morphological opening.
        cv::inRange(hsvFrame, lowerBlack, upperBlack, mask);
        cv::morphologyEx(mask, openedImage, cv::MORPH_OPEN, kernel);

        cv::imshow("摄像头", frame);
        // BUGFIX: the opened mask was computed but never displayed, so the
        // "阈值处理" window (with all the trackbars) stayed blank and the
        // thresholding result was invisible to the user.
        cv::imshow("阈值处理", openedImage);
        cv::imshow("process Image", deblurredImage);

        // ESC exits the loop.
        if (cv::waitKey(30) == 27)
        {
            break;
        }
    }

    // Release the camera and close all OpenCV windows.
    cap.release();
    cv::destroyAllWindows();

    return 0;
}

// Wiener deconvolution:
//   restored = F^-1( conj(H) * G / (|H|^2 + K) )
// where G is the spectrum of the observed image, H the spectrum of the
// assumed blur kernel, and K a constant noise-to-signal power term.
//
// Fixes over the previous implementation:
//  - dft() was run on the 3-channel BGR frame (convertTo keeps the channel
//    count, and OpenCV's dft only accepts 1- or 2-channel input) — the frame
//    is now split and each channel deconvolved independently;
//  - the 31x31 kernel's spectrum was multiplied against the full-frame
//    spectrum (size mismatch) — the kernel is now zero-padded to frame size;
//  - complex products/conjugation used element-wise mul()/flip(), which is
//    not complex arithmetic — mulSpectrums() with conjB=true is used instead.
void deblurImage(Mat &blurredImage, Mat &deblurredImage, double sigma, double K)
{
    // Build the blur kernel and normalize it to unit sum so overall
    // brightness is preserved.
    Mat kernel = getMotionKernel(Size(31, 31), 0, sigma);
    kernel /= sum(kernel)[0];

    // Pad the kernel to the image size with its center wrapped to (0, 0)
    // so the deconvolution introduces no spatial shift.
    const int rows = blurredImage.rows;
    const int cols = blurredImage.cols;
    Mat paddedKernel = Mat::zeros(rows, cols, CV_32F);
    const Point center(kernel.cols / 2, kernel.rows / 2);
    for (int i = 0; i < kernel.rows; i++)
    {
        for (int j = 0; j < kernel.cols; j++)
        {
            int di = (i - center.y + rows) % rows;
            int dj = (j - center.x + cols) % cols;
            paddedKernel.at<float>(di, dj) = kernel.at<float>(i, j);
        }
    }

    // Kernel spectrum H and the (real-valued) denominator |H|^2 + K.
    Mat kernelSpectrum;
    dft(paddedKernel, kernelSpectrum, DFT_COMPLEX_OUTPUT);
    Mat denomComplex;
    mulSpectrums(kernelSpectrum, kernelSpectrum, denomComplex, 0, true); // H * conj(H)
    Mat denomPlanes[2];
    split(denomComplex, denomPlanes);
    denomPlanes[0] += static_cast<float>(K); // imaginary plane is ~0 and ignored

    // Deconvolve each channel independently so color frames are supported.
    std::vector<Mat> channels;
    split(blurredImage, channels);
    for (size_t c = 0; c < channels.size(); c++)
    {
        Mat channelFloat;
        channels[c].convertTo(channelFloat, CV_32F);

        Mat imageSpectrum;
        dft(channelFloat, imageSpectrum, DFT_COMPLEX_OUTPUT);

        // Numerator G * conj(H) as a proper complex product.
        Mat numerator;
        mulSpectrums(imageSpectrum, kernelSpectrum, numerator, 0, true);

        // The denominator is real, so the complex division reduces to
        // dividing both planes of the numerator by it.
        Mat numPlanes[2];
        split(numerator, numPlanes);
        Mat restoredPlanes[2] = {numPlanes[0] / denomPlanes[0],
                                 numPlanes[1] / denomPlanes[0]};
        Mat restoredSpectrum;
        merge(restoredPlanes, 2, restoredSpectrum);

        Mat restored;
        idft(restoredSpectrum, restored, DFT_SCALE | DFT_REAL_OUTPUT);
        restored.convertTo(channels[c], channels[c].type());
    }
    merge(channels, deblurredImage);
}

// Builds a ksize kernel: an isotropic Gaussian (spread `sigma`) gated to the
// angular half-plane of directions in [theta, theta + pi). The caller is
// expected to normalize the kernel (deblurImage divides by its sum).
Mat getMotionKernel(Size ksize, double theta, double sigma)
{
    Mat kernel(ksize, CV_32F);
    Point center(ksize.width / 2, ksize.height / 2);
    for (int i = 0; i < ksize.height; i++)
    {
        for (int j = 0; j < ksize.width; j++)
        {
            double x = j - center.x;
            double y = i - center.y;
            double r = std::sqrt(x * x + y * y);
            double angle = std::atan2(y, x);
            // BUGFIX: the old gate `angle >= theta && angle < theta + CV_PI`
            // broke when theta + pi left atan2's range (-pi, pi] (e.g. for
            // theta = pi/2 it wrongly excluded directions near -pi). Using a
            // wrapped angular difference handles every theta; for theta = 0
            // (the only value used in this file) the result is unchanged.
            double delta = std::fmod(angle - theta + 2.0 * CV_PI, 2.0 * CV_PI);
            double gate = (delta < CV_PI) ? 1.0 : 0.0;
            kernel.at<float>(i, j) =
                static_cast<float>(std::exp(-r * r / (2.0 * sigma * sigma)) * gate);
        }
    }
    return kernel;
}