#include "cvcamera.h"

// Hue-channel histogram range shared by calcHist()/calcBackProject():
// OpenCV stores H in [0, 180) for 8-bit HSV images. The array is never
// written, so it is declared const; `ranges` stays a non-const pointer
// because the OpenCV APIs take `const float**` (address of this pointer).
const float hue_ranges[] = { 0, 180 };
const float* ranges = hue_ranges;

// Sets up the frame-grab timer, the capture object, and the list of
// available camera descriptions, and wires the per-frame processing
// pipeline (gamma/brightness/contrast/saturation -> optional tracking
// -> QPixmap emission -> optional recording).
CVCamera::CVCamera(int fps)
    :m_VideoFPS(fps),m_IsRecord(CameraStatus::Normal), m_VideoIsTrack(btnStatus::Close)
{
    // QTimer intervals are in *milliseconds*, so `fps` frames per second
    // needs an interval of 1000/fps ms. The previous 60/fps fired far too
    // often (e.g. every 2 ms at 30 fps). Guard against fps <= 0.
    m_GetMat = new QTimer(this);                        // parented: freed together with this object
    m_GetMat->setInterval(1000 / (fps > 0 ? fps : 30));
    m_apture = new VideoCapture();

    // Collect human-readable names of all cameras on this host.
    // (deviceName() is the internal id; description() is the model /
    // identification string meant for display.)
    const QList<QCameraInfo> cameras = QCameraInfo::availableCameras();
    for (const QCameraInfo &info : cameras)
    {
        m_CameraS.append(info.description());
    }

    // Per-tick pipeline: grab a frame, adjust it, publish it.
    connect(m_GetMat, &QTimer::timeout, this, [=](){
    (*m_apture) >> m_CurrentImg;                        // pull the next frame from the stream
    if (m_CurrentImg.empty())                           // camera unplugged / no frame yet:
        return;                                         // skip this tick instead of crashing below

    // Pixel-level adjustments. setMatGamma() modifies the Mat in place and
    // returns bool; assigning that bool back to m_CurrentImg (as the old
    // code did) invoked Mat::operator=(const Scalar&) and wiped every
    // pixel to 1 — so the return value must NOT be assigned to the image.
    if(this->m_Gamma != 1.0)
        setMatGamma(m_CurrentImg, m_Gamma);             // gamma correction
    setLum    (m_CurrentImg, m_Lum);                    // brightness
    setComp   (m_CurrentImg, m_Comp);                   // contrast
    setChroma (m_CurrentImg, m_Chroma);                 // saturation
    // Optional CamShift tracking overlay.
    if (m_VideoIsTrack == btnStatus::Open)
    {
        MyCamShift();
    }
    // Convert BGR Mat -> QImage -> QPixmap. Pass the row stride (step)
    // explicitly: cv::Mat rows may be padded, and the stride-less QImage
    // constructor assumes cols*3, which garbles frames of "odd" widths.
    QImage tmpImg = QImage((const unsigned char*)m_CurrentImg.data,
                           m_CurrentImg.cols, m_CurrentImg.rows,
                           static_cast<int>(m_CurrentImg.step),
                           QImage::Format_RGB888).rgbSwapped();
    m_CurrentPixmap = QPixmap::fromImage(tmpImg);

    // Recording state machine: Record -> append frame; Save -> close once.
    if(m_IsRecord == CameraStatus::Record) {
        m_Video.write(m_CurrentImg);
    }else if(m_IsRecord == CameraStatus::Save){
        m_Video.release();
        m_IsRecord = CameraStatus::Normal;
    }
    emit(senderPixmap(m_CurrentPixmap));
    });
}

// Updates the image-tuning values used by the grab-timer pipeline.
// A value of -1 is a sentinel meaning "keep the current setting".
// Always returns true.
bool CVCamera::setImgSetting(const double gamma, const double lum, const double comp, const double chroma, const int fps)
{
    DLOG("修改数值");
    // Hold the mutex while touching the shared tuning members (strictly a
    // precaution — the original author noted contention is negligible).
    QMutexLocker locker(&m_Mutex);
    if (gamma != -1)
        m_Gamma = gamma;
    if (lum != -1)
        m_Lum = lum;
    if (comp != -1)
        m_Comp = comp;
    if (chroma != -1)
        m_Chroma = chroma;
    if (fps != -1)
        m_VideoFPS = fps;
    return true;
}

// Thread entry point: runs a Qt event loop so this worker thread stays
// alive. Per the original author's note, if the thread finished, the
// camera work would fall back to the main thread and cause UI stutter.
void CVCamera::run()
{
    exec();
}

// Opens the camera at `index` (an index into m_CameraS) and starts the
// frame-grab timer. Returns false on a bad index or if the device could
// not be opened.
bool CVCamera::openCamera(int index)
{
    // Valid indices are 0 .. size()-1; the old check used `>` and
    // accepted index == size(), one past the end.
    if(index < 0 || index >= m_CameraS.size())
    {
         ELOG("传入摄像头索引范围错误");
         return false;
    }
    m_CurrentCameraIndex = index;
    // Report open failure instead of returning success unconditionally.
    if(!m_apture->open(m_CurrentCameraIndex))
    {
        ELOG("打开摄像头失败");
        return false;
    }

    if(!m_GetMat->isActive()) m_GetMat->start();
    return true;
}

// Applies gamma correction to `img` in place via a 256-entry lookup
// table: every 8-bit value v maps to 255 * (v/255)^gamma, clamped to
// [0,255]. Returns false (leaving img untouched) for a negative gamma.
bool CVCamera::setMatGamma(Mat &img, const double gamma)
{
    if(gamma < 0)
    {
        ELOG("伽马值范围错误!!!");
        return false;
    }
    // Precompute the mapping for all 256 possible pixel values;
    // saturate_cast clamps any out-of-range result to the uchar bounds.
    Mat table(1, 256, CV_8U);
    uchar* entry = table.ptr();
    for(int v = 0; v < 256; ++v)
    {
        entry[v] = saturate_cast<uchar>(pow(v / 255.0, gamma) * 255.0);
    }
    // Remap every channel of every pixel through the table in one pass.
    Mat mapped;
    LUT(img, table, mapped);
    img = mapped;
    return true;
}

// Adjusts brightness by shifting every channel of every pixel by `lum`
// (cv::Mat + Scalar arithmetic saturates per element). Always returns true.
bool CVCamera::setLum(Mat &img, const double lum)
{
    img += cv::Scalar(lum, lum, lum);
    return true;
}

// Adjusts contrast in place: dst = comp * src + 0, keeping the source
// depth (rtype = -1) with saturation. Always returns true.
bool CVCamera::setComp(Mat &img, const double comp)
{
    const double offset = 0.0;           // no brightness shift here — setLum handles that
    img.convertTo(img, -1, comp, offset);
    return true;
}

// Adjusts saturation: convert BGR -> HSV, shift the S plane by `chroma`,
// convert back. Always returns true.
bool CVCamera::setChroma(Mat &img, const double chroma)
{
    cv::Mat hsvImage;
    cv::cvtColor(img, hsvImage, cv::COLOR_BGR2HSV);

    // Shift the whole saturation plane with one vectorized, saturating
    // add instead of the old per-pixel at<Vec3b>() double loop — the
    // result is identical (cv::add saturates to uchar exactly like
    // saturate_cast) but runs orders of magnitude faster per frame.
    cv::Mat planes[3];
    cv::split(hsvImage, planes);
    cv::add(planes[1], cv::Scalar(chroma), planes[1]);
    cv::merge(planes, 3, hsvImage);

    cv::cvtColor(hsvImage, img, cv::COLOR_HSV2BGR);
    return true;
}

// Stores a new exposure duration. Values below 10 are rejected with an
// error log and false is returned; otherwise the value is cached and
// true is returned.
bool CVCamera::setExposureNum(int value)
{
    // Guard clause: refuse exposure durations that are too short.
    if (value < 10)
    {
        ELOG("曝光时长过低");
        return false;
    }
    // NOTE(review): the value is only cached here; nothing in this file
    // pushes it to the capture device — confirm it is applied elsewhere.
    m_Exposure = value;
    DLOG("曝光时长修改成功");
    return true;
}

// NOTE(review): stub — `value` is ignored and nothing is configured on
// the capture device; only a success message ("gain modified") is
// logged and true is returned unconditionally. The name says "buffer"
// while the log says "gain" — TODO: confirm the intended behavior.
bool CVCamera::setBufferNum(int value)
{
    DLOG("增益修改成功");
    return true;
}

// Starts (btn == Open) or stops (anything else) video recording.
// Starting opens an XVID .avi writer sized to the capture stream; the
// grab-timer lambda then appends frames while m_IsRecord == Record.
// Stopping flips the state to Save so the lambda closes the file once.
void CVCamera::setRecordVideo(btnStatus btn)
{
    if(btn == btnStatus::Open){
        DLOG("开始录制视频");
        // Output path: <configured dir or app dir>/Video_<timestamp>.avi
        QString fileName = "/Video_" + QDateTime::currentDateTime().toString("yyyyMMddhhmmss") + ".avi";
        m_VideoImagePath = XMLFunc::xmlReadKeyValue(XMLFunc::ConfigXmlPath, XMLVideoImagePath);
        if(m_VideoImagePath.isEmpty()) m_VideoImagePath = QCoreApplication::applicationDirPath() + VideoImagePathDefaultPath;   // fall back to the app directory when the XML lookup fails
        m_VideoImagePath += fileName;

        int frame_width = static_cast<int>(m_apture->get(CAP_PROP_FRAME_WIDTH));
        int frame_height = static_cast<int>(m_apture->get(CAP_PROP_FRAME_HEIGHT));
        // Record at the configured grab rate rather than a hard-coded 30:
        // a mismatched writer fps makes the file play back at the wrong
        // speed. Fall back to 30 if m_VideoFPS is not positive.
        m_Video.open(m_VideoImagePath.toStdString(), VideoWriter::fourcc('X', 'V', 'I', 'D'),
                     m_VideoFPS > 0 ? m_VideoFPS : 30, Size(frame_width, frame_height));
        if(!m_Video.isOpened()){
            ELOG("视频文件打开失败");
            return;                      // don't enter Record state with no writer
        }
        m_IsRecord = CameraStatus::Record;
    }else {
        DLOG("结束视频录制");
        m_IsRecord = CameraStatus::Save;   // grab lambda releases the writer
    }
}

void CVCamera::GetImage()
{
    QString fileName = "/Image_" + QDateTime::currentDateTime().toString("yyyyMMddhhmmss") + ".jpg";                        // 设置当前视频文件名称
    m_VideoImagePath = XMLFunc::xmlReadKeyValue(XMLFunc::ConfigXmlPath, XMLVideoImagePath);
    if(m_VideoImagePath.isEmpty()) m_VideoImagePath = QCoreApplication::applicationDirPath() + VideoImagePathDefaultPath;   // 如果从xml文件中获取对应Value失败则选择默认路径进行保存
    m_VideoImagePath += fileName;
    // 这里当获取视频画面时间小于1s时会出现覆盖的情况出现，不过一般应该不会出现。
     if(cv::imwrite(m_VideoImagePath.toStdString(), m_CurrentImg)) // 使用 OpenCV 的 imwrite 函数保存图像
     {
        qDebug() << "保存图片成功";
     }
}

// Prepares CamShift tracking: lets the user select a ROI on a snapshot
// of the current frame, converts the snapshot to HSV, isolates the hue
// plane, and computes the normalized 16-bin hue histogram of the ROI
// (stored in m_Histogram, later consumed by MyCamShift()). Tracking is
// switched on only after every step succeeds; isTrack == false simply
// turns tracking off.
void CVCamera::OpenVideoTrack(bool isTrack)
{
    if (!isTrack)
    {
        m_VideoIsTrack = btnStatus::Close;
        return;
    }
    Mat blur, hsv, mask, hue, tmpPSDImage;
    m_CurrentImg.copyTo(tmpPSDImage);
    // Blocking OpenCV UI: the user drags a rectangle over the snapshot.
    Rect2d first_img = selectROI("CAMShift", tmpPSDImage);
    m_Selection.x = first_img.x;
    m_Selection.y = first_img.y;
    m_Selection.width = first_img.width;
    m_Selection.height = first_img.height;
    destroyWindow("CAMShift");
    qDebug() << "ROI的x值为:		" << m_Selection.x << endl;
    qDebug() << "ROI的y值为:		" << m_Selection.y << endl;
    qDebug() << "ROI的width值为:	" << m_Selection.width << endl;
    qDebug() << "ROI的height值为:	" << m_Selection.height << endl;

    {
        // Gaussian blur to suppress sensor noise (heavy in night scenes).
        GaussianBlur(tmpPSDImage, blur, Size(5, 5), 3, 3);
        // Convert BGR -> HSV, the color space the histogram works in.
        cvtColor(blur, hsv, COLOR_BGR2HSV);
        // Mask out pixels that are too dark or too desaturated.
        inRange(hsv, Scalar(0, 30, 40), Scalar(180, 255, 255), mask);
        // Single-channel image of the same size/depth to hold the H plane.
        hue = Mat(hsv.size(), hsv.depth());
        // from-to index pairs: copy source channel 0 (H) to dest channel 0.
        int channels[] = { 0, 0 };
        // number of from-to pairs in channels[] above
        size_t npairs = 1;
        // Extract the hue plane of hsv into the single-channel image hue.
        mixChannels(&hsv, 1, &hue, 1, channels, npairs);
    }
    // Hue plane and mask restricted to the selected ROI.
    Mat ROI(hue, m_Selection);
    Mat MaskROI(mask, m_Selection);
    // 16-bin hue histogram of the masked ROI, written to m_Histogram.
    int histSize = 16;
    calcHist(&ROI, 1, 0, MaskROI, m_Histogram, 1, &histSize, &ranges);
    if (m_Histogram.empty())
    {
        ELOG("m_Histogram直方图为空");
        return;
    }
    // Normalize bin counts to 0..255 for back-projection.
    normalize(m_Histogram, m_Histogram, 0, 255, NORM_MINMAX);
    // All preparation succeeded — only now enable tracking.
    m_VideoIsTrack = btnStatus::Open;
}

void CVCamera::MyCamShift()
{
    Mat blur, hsv, mask, hue, BackProjection;
    // 启用高斯滤波进行去噪
    GaussianBlur(m_CurrentImg, blur, Size(5, 5), 3, 3);
    // 转换到hsv空间
    cvtColor(blur, hsv, COLOR_BGR2HSV);
    inRange(hsv, Scalar(0, 30, 40), Scalar(180, 255, 255), mask);
    hue = Mat(hsv.size(), hsv.depth());
    int channels[] = { 0, 0 };
    size_t npairs = 1;
    mixChannels(&hsv, 1, &hue, 1, channels, npairs);
    // 将输入模板图像指定颜色直方图的像素位置投影到输出图像中， 从而可以通过输出图像快速找到图像中与特定颜色分布相似的区域
    calcBackProject(&hue, 1, 0, m_Histogram, BackProjection, &ranges);
    BackProjection &= mask;

    // CAMShift处理
    RotatedRect trackBox = CamShift(
        BackProjection,
        m_Selection,
        TermCriteria((TermCriteria::COUNT | TermCriteria::EPS), 10, 1));

    // 绘制图案
    ellipse(m_CurrentImg, trackBox, Scalar(255, 0, 255), 3, 8);
    std::cout << "x: " << trackBox.center.x << " y: " << trackBox.center.y << std::endl;
}

