#include <gst/gst.h>
#include <iostream>
#include <string>
#include <stdexcept>
#include <thread>
#include <chrono>
#include <functional>
#include <queue>
#include <mutex>
#include <condition_variable>



#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/ini_parser.hpp>
#include <string>
// Video capture settings loaded from config.ini by readConfig().
struct VideoConfig {
    std::string device;   // V4L2 device path, e.g. "/dev/video0"
    std::string format;   // raw pixel format string for the caps filter, e.g. "NV12"
    int width;            // frame width in pixels
    int height;           // frame height in pixels
    int framerate;        // frames per second (used as fraction framerate/1)
};

// Global capture configuration, populated once in main() from config.ini.
VideoConfig g_Config;

#include <opencv2/opencv.hpp>
// Non-owning pointer to the AVI writer that lives on main()'s stack; used by
// process_frame() on the capture worker thread.
// NOTE(review): assumes main() keeps the writer alive for the whole capture
// window — confirm shutdown ordering between stop() and release().
cv::VideoWriter *g_VideoWrite = nullptr;


// Captures raw frames from a V4L2 device through a GStreamer pipeline
// (v4l2src -> capsfilter -> appsink) and delivers each frame to a
// user-supplied callback on a dedicated worker thread.
class VideoCapture {
public:
    using FrameCallback = std::function<void(GstSample*)>;

    // Builds the pipeline and starts the frame-processing worker thread.
    // The callback receives each GstSample; this class unrefs the sample
    // after the callback returns, so the callback must not retain it.
    // Throws std::runtime_error if any element cannot be created or linked.
    VideoCapture(const FrameCallback& callback,
                const std::string& device = "/dev/video0",
                const std::string& format = "NV12",
                int width = 1920, int height = 1080, int framerate = 30)
	    : frame_callback_(callback),device_(device),
        format_(format), width_(width), height_(height), framerate_(framerate),
        main_loop_(nullptr), pipeline_(nullptr){

	    gst_init(nullptr, nullptr);
        std::cout << "GStreamer initialized." << std::endl;

        // Build the pipeline. The destructor does not run when a constructor
        // throws, so release the pipeline here on failure instead of leaking it.
        try {
            createPipeline();
            createElements();
            setElementProperties();
            addElementsToPipeline();
            linkElements();
        } catch (...) {
            if (pipeline_) {
                gst_object_unref(pipeline_);
                pipeline_ = nullptr;
            }
            // NOTE(review): elements created but not yet added to the bin can
            // still leak on the rare factory-failure path.
            throw;
        }

        // Worker thread that drains frame_queue_ and invokes the callback.
        processing_thread_ = std::thread([this]() {
            process_frames();
        });
    }

    // This class owns threads and GStreamer objects; copying or moving it
    // would be unsafe ('this' is captured by the worker and the GLib signal).
    VideoCapture(const VideoCapture&) = delete;
    VideoCapture& operator=(const VideoCapture&) = delete;

    // Stops all threads and releases GStreamer resources.
    ~VideoCapture() {
        // Ensure the GMainLoop thread is quit and joined even if the caller
        // forgot to call stop(): destroying a still-joinable std::thread
        // calls std::terminate.
        stop();

        // Tell the worker to exit once the queue is drained. The flag is
        // written under the queue mutex so the condition-variable predicate
        // cannot miss the update (the original wrote it without the lock).
        {
            std::lock_guard<std::mutex> lock(frame_queue_mutex_);
            exit_processing_thread_ = true;
        }
        frame_queue_cond_.notify_one();
        if (processing_thread_.joinable()) {
            processing_thread_.join();
        }

        if (pipeline_) {
            gst_element_set_state(pipeline_, GST_STATE_NULL);
            gst_object_unref(pipeline_);
        }

        if (main_loop_) {
            g_main_loop_unref(main_loop_);
        }

        std::cout << "Pipeline stopped and resources freed." << std::endl;
    }

    // Starts capturing: sets the pipeline PLAYING and runs a GMainLoop on
    // its own thread.
    void start() {
        if (!main_loop_) {
            main_loop_ = g_main_loop_new(NULL, FALSE);
        }
        gst_element_set_state(pipeline_, GST_STATE_PLAYING);
        std::cout << "Pipeline started." << std::endl;

        // Run the GMainLoop in a separate thread so start() returns.
        main_loop_thread_ = std::thread([this](){
            g_main_loop_run(main_loop_);
        });
    }

    // Stops capturing: quits the GMainLoop, joins its thread, and drops the
    // pipeline back to READY. Safe to call when start() was never called,
    // and idempotent (the destructor calls it as a safety net).
    void stop() {
        if (main_loop_) {
            g_main_loop_quit(main_loop_);

            if (main_loop_thread_.joinable()) {
                main_loop_thread_.join();
            }
        }
        if (pipeline_) {
            gst_element_set_state(pipeline_, GST_STATE_READY);
        }
        std::cout << "Pipeline stopped." << std::endl;
    }

private:
    FrameCallback frame_callback_;   // user callback, invoked on the worker thread
    std::thread main_loop_thread_;   // runs g_main_loop_run()
    std::string device_;             // V4L2 device path
    std::string format_;             // raw caps format string (e.g. "NV12")
    int width_;
    int height_;
    int framerate_;                  // frames per second (caps fraction n/1)
    GMainLoop *main_loop_;
    GstElement *pipeline_, *source_, *capsfilter_, *sink_;

    std::queue<GstSample*> frame_queue_;     // samples pending delivery (owned refs)
    std::mutex frame_queue_mutex_;           // guards frame_queue_ and the exit flag
    std::condition_variable frame_queue_cond_;
    std::thread processing_thread_;
    bool exit_processing_thread_ = false;    // read/written only under frame_queue_mutex_

    // Worker loop: waits for queued samples, delivers each to the callback,
    // then unrefs it. Exits only when the exit flag is set AND the queue is
    // fully drained, so no queued sample is leaked on shutdown.
    void process_frames() {
        while (true) {
            std::unique_lock<std::mutex> lock(frame_queue_mutex_);
            frame_queue_cond_.wait(lock, [this]() {
                return !frame_queue_.empty() || exit_processing_thread_;
            });

            if (exit_processing_thread_ && frame_queue_.empty()) {
                break;
            }

            auto sample = frame_queue_.front();
            frame_queue_.pop();
            // Deliver outside the lock so the producer is never blocked on
            // the user callback.
            lock.unlock();

            frame_callback_(sample);
            gst_sample_unref(sample);
        }
    }

    // appsink "new-sample" signal handler: pulls the sample (we own the ref)
    // and queues it for the worker thread.
    static GstFlowReturn new_sample_handler(GstElement* sink, VideoCapture* vc) {
        GstSample* sample = nullptr;
        g_signal_emit_by_name(sink, "pull-sample", &sample);
        if (sample) {
            std::lock_guard<std::mutex> lock(vc->frame_queue_mutex_);
            vc->frame_queue_.push(sample);
            vc->frame_queue_cond_.notify_one();
        }
        return GST_FLOW_OK;
    }

    // Creates the top-level GStreamer pipeline.
    void createPipeline() {
        pipeline_ = gst_pipeline_new("video-capture-pipeline");
        if (!pipeline_) throw std::runtime_error("Failed to create pipeline.");
    }

    // Creates the source, capsfilter, and appsink elements.
    void createElements() {
        source_ = gst_element_factory_make("v4l2src", "source");
        if (!source_) throw std::runtime_error("Failed to create source element.");

        capsfilter_ = gst_element_factory_make("capsfilter", "filter");
        if (!capsfilter_) throw std::runtime_error("Failed to create capsfilter element.");

        sink_ = gst_element_factory_make("appsink", "sink");
        if (!sink_) throw std::runtime_error("Failed to create sink element.");
    }

    // Configures device/caps/appsink properties and connects the sample signal.
    void setElementProperties() {
        // Fixed white balance so colors are stable across runs.
        // NOTE(review): some v4l2src versions expose V4L2 controls only via
        // "extra-controls"; confirm this property is honored on the target.
        int white_balance_temperature = 4000;
        g_object_set(G_OBJECT(source_), "white-balance-temperature", white_balance_temperature, NULL);

        g_object_set(G_OBJECT(source_), "device", device_.c_str(), NULL);

        // Restrict caps negotiation to the configured format/size/rate.
        GstCaps *caps = gst_caps_new_simple("video/x-raw",
                                            "format", G_TYPE_STRING, format_.c_str(),
                                            "width", G_TYPE_INT, width_,
                                            "height", G_TYPE_INT, height_,
                                            "framerate", GST_TYPE_FRACTION, framerate_, 1,
                                            NULL);
        g_object_set(G_OBJECT(capsfilter_), "caps", caps, NULL);
        gst_caps_unref(caps);

        // emit-signals so "new-sample" fires; sync=FALSE delivers frames as
        // they arrive instead of pacing them against the pipeline clock.
        g_object_set(G_OBJECT(sink_), "emit-signals", TRUE, "sync", FALSE, NULL);
        g_signal_connect(sink_, "new-sample", G_CALLBACK(new_sample_handler), this);
    }

    // The bin takes ownership of the elements added here.
    void addElementsToPipeline() {
        gst_bin_add_many(GST_BIN(pipeline_), source_, capsfilter_, sink_, NULL);
    }

    // Links source -> capsfilter -> sink.
    void linkElements() {
        if (!gst_element_link_many(source_, capsfilter_, sink_, NULL)) {
            throw std::runtime_error("Elements could not be linked.");
        }
    }
};




// Loads the [VideoCapture] section of an INI file, falling back to the same
// defaults the VideoCapture constructor uses for any missing key.
VideoConfig readConfig(const std::string& filename) {
    boost::property_tree::ptree tree;
    boost::property_tree::ini_parser::read_ini(filename, tree);

    return VideoConfig{
        tree.get<std::string>("VideoCapture.Device", "/dev/video0"),
        tree.get<std::string>("VideoCapture.Format", "NV12"),
        tree.get<int>("VideoCapture.Width", 1920),
        tree.get<int>("VideoCapture.Height", 1080),
        tree.get<int>("VideoCapture.Framerate", 30)
    };
}

// Select exactly ONE frame handler below. Enabling more than one defines
// process_frame() multiple times and fails to compile.
// FIX: ENABLE_ONLY_SAVE_STREAM was tested by #if but never defined anywhere
// (an undefined macro silently evaluates to 0); define it explicitly so all
// three modes are switchable from one place.
#define ENABLE_ONLY_SAVE_STREAM 0
#define ENABLE_SIMPLE_READ 0
#define ENABLE_SIMPLE_OPENCV 1

#if ENABLE_ONLY_SAVE_STREAM
#include <fstream>
std::ofstream video_file("output.nv12",std::ios::binary);
// Frame callback: append every raw NV12 buffer to output.nv12.
void process_frame(GstSample* sample)
{
	if(!sample) return;

	GstBuffer* buffer = gst_sample_get_buffer(sample);
	if(!buffer) return;  // a sample without a buffer carries no frame data

	GstMapInfo map;
	if(gst_buffer_map(buffer, &map, GST_MAP_READ)){
		video_file.write(reinterpret_cast<char*>(map.data),map.size);
		gst_buffer_unmap(buffer,&map);
	}
}
#endif

#if ENABLE_SIMPLE_READ
// Frame callback example: only reports the size of every received frame.
// A single-shot snapshot can be built here by saving just one buffer.
void process_frame(GstSample* sample)
{
    if (sample == nullptr) {
        return;
    }

    GstMapInfo map;
    GstBuffer* buffer = gst_sample_get_buffer(sample);
    if (!gst_buffer_map(buffer, &map, GST_MAP_READ)) {
        return;
    }
    // To persist the stream, mirror the ENABLE_ONLY_SAVE_STREAM variant; to
    // grab one still image, add a counter and store a single buffer.
    std::cout << "Got a frame,size = " << map.size << std::endl;
    gst_buffer_unmap(buffer, &map);
}
#endif

#if ENABLE_SIMPLE_OPENCV

// Converts one NV12 frame to BGR and saves it as a JPEG file.
// nv12Data must point to width*height*3/2 bytes: the full-resolution Y plane
// followed by the interleaved UV plane at half vertical resolution.
void ConvertNV12ToJPEG(const uchar* nv12Data, int width, int height, const std::string& jpegFilename) {
    // Wrap the raw NV12 buffer without copying (Y rows + half-height UV rows).
    cv::Mat nv12Img(height + height / 2, width, CV_8UC1, const_cast<uchar*>(nv12Data));

    cv::Mat bgrImg;

    // BUG FIX: was cv::COLOR_YUV2RGB_NV12, which swapped the red and blue
    // channels in the saved file — cv::imwrite expects BGR ordering, matching
    // ConvertFrameToMJPEG below.
    cv::cvtColor(nv12Img, bgrImg, cv::COLOR_YUV2BGR_NV12);

    cv::imwrite(jpegFilename, bgrImg);
}


// Converts one NV12 frame to BGR and appends it to an open cv::VideoWriter.
void ConvertFrameToMJPEG(const uchar* nv12Data, int width, int height, cv::VideoWriter& videoWriter) {
    // Y plane rows plus the half-height interleaved UV plane rows.
    const int nv12Rows = height + height / 2;
    cv::Mat nv12Img(nv12Rows, width, CV_8UC1, const_cast<uchar*>(nv12Data));

    // NV12 -> BGR, then hand the frame to the writer.
    cv::Mat bgrImg;
    cv::cvtColor(nv12Img, bgrImg, cv::COLOR_YUV2BGR_NV12);
    videoWriter.write(bgrImg);
}

// Frame callback: saves the first frame as a JPEG snapshot, then appends
// every frame (including the first) to the global MJPEG writer.
void process_frame(GstSample* sample)
{
    const int width = g_Config.width;
    const int height = g_Config.height;
    static int iNum = 0;  // frame counter so the snapshot is taken exactly once

    if (!sample) return;

    GstBuffer* buffer = gst_sample_get_buffer(sample);
    if (!buffer) return;  // a sample without a buffer carries no frame data

    GstMapInfo map;
    if (gst_buffer_map(buffer, &map, GST_MAP_READ)) {
        unsigned char* nv12_data = reinterpret_cast<unsigned char*>(map.data);

        // One-shot snapshot of the first frame.
        if (iNum == 0)
        {
            iNum++;

            // FIX: was printing "output.jpg" while the file saved below is
            // "output.jpeg" — report the actual filename.
            std::cout << "output.jpeg" << std::endl;

            ConvertNV12ToJPEG(nv12_data, width, height, "output.jpeg");
        }

        // FIX: reuse the shared conversion helper instead of duplicating the
        // NV12->BGR code inline, and guard against a missing writer (the
        // original dereferenced g_VideoWrite unconditionally, crashing if
        // main() had not published it yet or had already cleared it).
        if (g_VideoWrite) {
            ConvertFrameToMJPEG(nv12_data, width, height, *g_VideoWrite);
        }

        gst_buffer_unmap(buffer, &map);
    }
}
#endif


// Entry point: loads config.ini, opens an MJPG/AVI writer, captures frames
// for 10 seconds (saving a JPEG snapshot and output.avi), then shuts down.
int main() {
    try {

        g_Config = readConfig("config.ini");

        // MJPG-encoded AVI writer sized from the configuration.
        // FIX: fps was hard-coded to 30, desynchronizing output.avi whenever
        // the config requested a different rate — use g_Config.framerate.
        cv::VideoWriter videoWriter("output.avi",
                                    cv::VideoWriter::fourcc('M', 'J', 'P', 'G'),
                                    g_Config.framerate,
                                    cv::Size(g_Config.width, g_Config.height));

        if (!videoWriter.isOpened()) {
            std::cerr << "Failed to open video writer" << std::endl;
            return -1;
        }

        // Publish the writer to process_frame() only after it is known good
        // (the original published it before the isOpened() check).
        g_VideoWrite = &videoWriter;

        VideoCapture capture(process_frame, g_Config.device, g_Config.format,
                             g_Config.width, g_Config.height, g_Config.framerate);

        capture.start();

        // Fixed capture window; replace with real shutdown logic as needed.
        std::this_thread::sleep_for(std::chrono::seconds(10));

        capture.stop();

        // Unpublish before releasing so any late callback does not write
        // through a released writer.
        g_VideoWrite = nullptr;
        videoWriter.release();

    } catch (const std::exception& e) {
        // FIX: catch std::exception, not just std::runtime_error — both the
        // Boost INI parser and OpenCV throw exceptions that otherwise escape
        // and terminate the process.
        std::cerr << "Exception: " << e.what() << std::endl;
        return -1;
    }
    return 0;
}

