#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

#include <algorithm>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video.hpp>
#include <opencv2/videoio/videoio.hpp>

#include <X11/Xlib.h> // auto get screen size
#include <gst/app/app.h>
#include <gst/gst.h>
#include <gst/gstbuffer.h>

#include "ini.h"
#include "queue.hpp"

#define HDMI_DEVICE "hw:1,0" // ALSA device used for HDMI audio output


using namespace std;
using namespace cv;

constexpr int max_channel   = 4;  // AHD camera channel
constexpr int max_img_count = 16; // Frame pool count.

/**
 * create memory pool for images gotten from camera.
 * Per-channel producer/consumer queues: capture threads move buffers from
 * the idle pools into the data queues; the display loop moves them back.
 */
Queue<cv::Mat*> _idleimgbuf[max_channel + 1]; // free (recycled) frame buffers per channel
Queue<cv::Mat*> _imgdata[max_channel + 1];    // captured frames awaiting display per channel

Queue<short*> _idleaudbuf[max_channel + 1];  // free (idle) audio buffers per channel

Queue<short*> _auddata[max_channel + 1];     // captured audio chunks awaiting playback per channel


/**
 * Query the display screen size.
 */
void getScreenResolution(int &width, int &height) {
    Display *disp = XOpenDisplay(NULL);
    Screen *scrn = DefaultScreenOfDisplay(disp);
    width = scrn->width;
    height = scrn->height;
}

/**
 * dump configure ini parameters.
 */
/**
 * Dump all sections and their key/value pairs of a parsed ini structure
 * to stdout, in "[section]" / "key=value" form, one blank line per section.
 */
void outputData(mINI::INIStructure const &ini) {
    for (auto const &sectionEntry : ini) {
        std::cout << "[" << sectionEntry.first << "]" << std::endl;
        for (auto const &kv : sectionEntry.second) {
            std::cout << kv.first << "=" << kv.second << std::endl;
        }
        std::cout << std::endl;
    }
}

/**
 * data structure for camera settings.
 */
typedef struct _CAM_CFG {
    int id;          // camera id: /dev/video0...videox
    bool isenabled;  // camera status: enable/disable.
    string aid;      // ALSA audio device suffix; used to build "hw:<aid>" in camera_thread
} CAM_CFG, *PCAM_CFG;


int width = 1920, height = 1080;

/**
 * camera thread, got the camera stream over gstreamer.
 */
static int camera_thread(bool& isrunning, int channel, int cam_id, string cam_aid) {
    //std::cout << __func__ << ", channel = " << channel << ", id = " << cam_id << std::endl;
    /**
     * load image from camera
     */
    // std::string capfmt = "v4l2src device=/dev/video" 
    //                       + std::to_string(cam_id)
    //                       + " ! video/x-raw,format=NV12,width="
    //                       + std::to_string(width) + ",height="
    //                       + std::to_string(height)
    //                       + ",framerate=30/1 ! videoconvert ! video/x-raw,format=BGR ! appsink";

    // cv::VideoCapture cap(capfmt, cv::CAP_GSTREAMER);
    // std::string capfmt = "v4l2src device=/dev/video" 
    //                       + std::to_string(cam_id)   // 使用 cam_id 替代 /dev/videoXX
    //                       + " ! video/x-raw, format=BGR, width=" // 设置 BGR 格式
    //                       + std::to_string(width) + ",height="   // 设置图像宽高
    //                       + std::to_string(height) 
    //                       + ",framerate=30/1 ! videoconvert ! appsink";
    
    //cv::VideoCapture cap(capfmt);//传入字符串格式数据，并指定使用gstreamer后端处理视频数据，即通过opencv-gst来读视频
    std::string capfmt= "v4l2src device=/dev/video" + std::to_string(cam_id) + " ! video/x-raw, format=BGR, width=" + std::to_string(width) + ", height=" + std::to_string(height) + ", framerate=30/1 ! videoconvert ! appsink";
    // cv::VideoCapture cap("v4l2src device=/dev/video20 ! video/x-raw, format=BGR, width=1920, height=1080, framerate=30/1 ! videoconvert ! appsink");
    cv::VideoCapture cap(capfmt);
    if (!cap.isOpened()) {
        std::cout << "please open camera first!" << std::endl;
        return -1;
    }






    int argc = 0; // 如果你不需要命令行参数
    char** argv = nullptr;
 std::cout << "nihao1" << std::endl;
    gst_init(&argc, &argv); // 初始化 GStreamer

    // 创建管道
//    GstElement *pipeline = gst_parse_launch(
//     "alsasrc device=hw"+ std:: to_string(cam_aid) + " ! audioconvert ! audioresample ! appsink name=appsink sync=false", 
//    nullptr);

    std::string pipeline_desc = "alsasrc device=hw:" + cam_aid + " ! audioconvert ! audioresample ! appsink name=appsink sync=false";
    GstElement* pipeline = gst_parse_launch(pipeline_desc.c_str(), nullptr);




        //  printf("cam_aid resolution: %d\n", cam_aid);
    if (!pipeline) {
        std::cerr << "Failed to create pipeline" << std::endl;
        return -1;
    }

   // 获取 appsink 元素
    GstElement *appsink = gst_bin_get_by_name(GST_BIN(pipeline), "appsink");

    gst_element_set_state(pipeline, GST_STATE_PAUSED);         // 设置管道为 READY 状态，表示管道准备好但不开始播放
    //gst_element_set_state(pipeline, GST_STATE_PLAYING);
    std::cout << "Pipeline state set to READY (not playing yet)" << std::endl; 

    // 用来存储音频数据的缓冲区
    std::vector<uint8_t> audio_data;

    // 读取音频数据
    GstSample *sample = nullptr;
    while (isrunning) {
    // 从 appsink 拉取一个样本
    sample = gst_app_sink_pull_sample(GST_APP_SINK(appsink));
    if (sample) {
        GstBuffer *buffer = gst_sample_get_buffer(sample); // 获取音频数据缓冲区
        if (buffer) {
            // 获取缓冲区的数据和大小
            GstMapInfo map_info;
            if (gst_buffer_map(buffer, &map_info, GST_MAP_READ)) {
                // 在映射成功后，获取数据和大小
                gpointer data = map_info.data;
                gsize size = map_info.size;
                
                // 插入音频数据到 audio_data
                audio_data.insert(audio_data.end(), (uint8_t*)data, (uint8_t*)data + size);

                // 输出存储的音频数据大小
                std::cout << "Stored " << size << " bytes of audio data" << std::endl;

                // 完成操作后，取消映射
                gst_buffer_unmap(buffer, &map_info);
            } else {
                // 映射失败
                std::cerr << "Failed to map GstBuffer" << std::endl;
            }

            // 释放样本
            gst_sample_unref(sample);
        } else {
            std::cerr << "Failed to get buffer from sample" << std::endl;
        }
    } else {
        std::cerr << "Failed to pull sample from appsink" << std::endl;
    }
 std::cout << "nihao2" << std::endl;
    // 你可以添加条件来停止循环，或者让它一直运行
    // 例如，检测播放时间或停止信号
}













    cv::Mat first;
    cap >> first; // skip the first one.
    cap >> first;

    for (int i = 0; i < max_img_count; i++) {
        _idleimgbuf[channel].push(new cv::Mat(first));
    }

    for (int i = 0; i < max_img_count; i++) {
    // 将 audio_data 的内容转换为 short*，音频数据通常是 16-bit（2 字节）的短整型
    short* audio_buffer = new short[audio_data.size() / sizeof(short)];
    
    // 将 audio_data 复制到 audio_buffer 中
    std::memcpy(audio_buffer, audio_data.data(), audio_data.size());
    
    // 将转换后的音频缓冲区压入队列
    _idleaudbuf[channel].push(audio_buffer);
   }
    int index = 0;
    while (isrunning) {
        cv::Mat *pimg = NULL;
        short *paud = NULL;
        _idleimgbuf[channel].pop(pimg);
        if (NULL != pimg) {
            cap >> *pimg;
            if (pimg->empty()) {
                _idleimgbuf[channel].push(pimg);
                std::cout << "capture image is empty, channel = " << channel << std::endl;
            } else {
                _imgdata[channel].push(pimg);
            }
        } else {
            /**
             * wait a moment to avoid consume high CPU performance.
             */
            std::cout << "no buffer" << std::endl;
            usleep(100);
            continue;
        }
    // 处理音频数据   
        _idleaudbuf[channel].pop(paud);  // 从空闲音频缓冲区中获取指针
        if (paud != NULL) {
             paud = reinterpret_cast<short*>(audio_data.data());
            if (audio_data.empty()) {
                _idleaudbuf[channel].push(paud);
                std::cout << "capture audio is empty, channel = " << channel << std::endl;
            } else {
                _auddata[channel].push(paud);
            }
        } else {
            std::cout << "no audio buffer" << std::endl;
            usleep(100);  
            continue;
        }










    } // end while loop
    //std::cout << "it's going to out, channel = " << channel << std::endl;
    cap.release();
  std::cout << "nihao3" << std::endl;
    std::cout << "thead exit, camer id = " << cam_id << ", channel id = " << channel << std::endl;
    return 0;
   
}





void play_audio_from_queue_through_hdmi() {
    // 初始化 GStreamer
    gst_init(nullptr, nullptr);

    // 创建一个将音频数据播放到 HDMI 的管道
    std::string pipe_desc = "appsrc ! audioconvert ! audioresample ! alsasink device=" + std::string(HDMI_DEVICE);

    // 创建管道
    GstElement *pipeline = gst_parse_launch(pipe_desc.c_str(), nullptr);
    if (!pipeline) {
        std::cerr << "Failed to create pipeline" << std::endl;
        return;
    }

    // 获取 appsrc 元素，用于将音频数据输入管道
    GstElement *appsrc = gst_bin_get_by_name(GST_BIN(pipeline), "appsrc");
    if (!appsrc) {
        std::cerr << "Failed to get appsrc element" << std::endl;
        return;
    }

    // 设置 appsrc 元素的属性：设置为非阻塞模式
    g_object_set(appsrc, "is-live", TRUE, nullptr);
    g_object_set(appsrc, "format", GST_FORMAT_TIME, nullptr);

    // 启动管道
    gst_element_set_state(pipeline, GST_STATE_PLAYING);

    // 临时音频数据存储
    std::vector<uint8_t> audioplay_data;
    
// 创建 GstBuffer，填充音频数据
    GstBuffer* buffer = gst_buffer_new_and_alloc(audioplay_data.size());
    gst_buffer_fill(buffer, 0, audioplay_data.data(), audioplay_data.size());

    // 推送 GstBuffer 到 appsrc
    GstFlowReturn ret = gst_app_src_push_buffer(GST_APP_SRC(appsrc), buffer);
    if (ret != GST_FLOW_OK) {
        std::cerr << "Failed to push buffer into appsrc" << std::endl;
    }
      std::cout << "nihao4" << std::endl;
    usleep(100000); // 暂停 100ms，模拟数据处理过程
    // 停止管道并清理资源
    gst_element_set_state(pipeline, GST_STATE_NULL);
    gst_object_unref(GST_OBJECT(pipeline));
}

/**
 * Entry point. Reads the camera configuration from the ini file named on
 * the command line, probes that at least one enabled camera opens, spawns
 * one capture thread per enabled channel, then runs the display loop that
 * composites the channels into a 2x2 grid until a key is pressed or the
 * window is closed.
 *
 * Fixes over the original:
 *  - `if (paud = nullptr)` assignment-instead-of-comparison bug;
 *  - consumed audio buffers are returned to _idleaudbuf (the pool used to
 *    starve permanently after max_img_count frames);
 *  - std::stoi() guarded against a missing/empty "id" ini value;
 *  - audio cleanup uses the same pop-by-reference Queue API as everywhere
 *    else (the original mixed in a value-returning pop());
 *  - removed unused locals and leftover debug prints.
 *
 * @return 0 on normal exit, -1 on bad usage / missing config / no camera.
 */
int main(int argc, char **argv) {

    if (argc != 2) {
        printf("Usage:%s ahdcam configfile, for example: ahdcam sample.ini\n", argv[0]);
        return -1;
    }

    /**
     * query screen size (logged only; capture size stays at width x height).
     */
    int scrn_width, scrn_height;
    getScreenResolution(scrn_width, scrn_height);
    printf("Screen resolution: %dx%d\n", scrn_width, scrn_height);

    /**
     * read camera configuration from ini file.
     */
    auto const &filename = argv[1];
    mINI::INIFile file(filename);
    mINI::INIStructure ini;
    if (!file.read(ini)) {
        std::cout << "read camera configuration file failed!" << std::endl;
        return -1;
    }

    CAM_CFG cam_cfg[max_channel + 1] = {};
    for (int i = 0; i < max_channel; i++) {
        std::string section = "camera" + std::to_string(i + 1);
        cam_cfg[i].isenabled = (ini[section]["enable"] == "1");

        // std::stoi throws on an empty string; treat a channel without an
        // "id" entry as disabled instead of crashing.
        const std::string idstr = ini[section]["id"];
        if (idstr.empty()) {
            cam_cfg[i].id = -1;
            cam_cfg[i].isenabled = false;
        } else {
            cam_cfg[i].id = std::stoi(idstr);
        }
        cam_cfg[i].aid = ini[section]["aid"];
    }

    /**
     * make sure there is at least one working AHD camera.
     */
    cv::Mat cfg;
    for (int i = 0; i < max_channel; i++) {
        if (!cam_cfg[i].isenabled) {
            continue;
        }
        std::string capfmt = "v4l2src device=/dev/video" + std::to_string(cam_cfg[i].id)
                           + " ! video/x-raw, format=BGR, width=" + std::to_string(width)
                           + ", height=" + std::to_string(height)
                           + ", framerate=30/1 ! videoconvert ! appsink";
        std::cout << capfmt << std::endl;
        cv::VideoCapture cap(capfmt);
        if (!cap.isOpened()) {
            std::cout << "open camera device=/dev/video"
                      << std::to_string(cam_cfg[i].id) << " failed!"
                      << std::endl;
        } else {
            cap >> cfg; // skip the first frame
            cap >> cfg;
            cap.release();
            break;
        }
    }

    if (cfg.empty()) {
        std::cout << "Query camera parameters failed!" << std::endl;
        return -1;
    }

    /**
     * create one dedicated capture thread per enabled camera channel.
     */
    bool isrunning = true;
    std::thread *pthrd[max_channel] = { nullptr };
    for (int i = 0; i < max_channel; i++) {
        if (cam_cfg[i].isenabled) {
            pthrd[i] = new std::thread(&camera_thread, std::ref(isrunning), i, cam_cfg[i].id, cam_cfg[i].aid);
        }
    }

    /**
     * combine each channel's image into one 2x2 composite window.
     */
    std::string title = "camera demoe";
    namedWindow(title);
    cv::moveWindow(title, 0, 0);
    Mat blankimg(height / 2, width, CV_8UC3, Scalar(255, 255, 255)); // placeholder for disabled channels

    Mat showimgs[9];
    Mat extrow[3];
    vector<Mat> rowimgs;
    while (true) {
        rowimgs.clear();
        for (int row = 0; row < 2; row++) {
            vector<Mat> colimgs;
            for (int col = 0; col < 2; col++) {
                int channel = col + row * 2;
                if (cam_cfg[channel].isenabled) {
                    cv::Mat *pimg = nullptr;
                    _imgdata[channel].pop(pimg);
                    if (nullptr == pimg) {
                        usleep(100);
                        continue;
                    }
                    showimgs[channel] = pimg->clone();
                    _idleimgbuf[channel].push(pimg); // recycle the frame buffer
                    cv::resize(showimgs[channel], showimgs[channel], cv::Size(width, height / 2));
                    colimgs.push_back(showimgs[channel]); // this row's column image

                    // Drain one audio chunk for this channel.
                    short* paud = nullptr;
                    _auddata[channel].pop(paud);
                    if (paud == nullptr) { // was `paud = nullptr` — assignment bug
                        usleep(100);
                        continue;
                    }

                    // Serialize 1024 16-bit samples to little-endian bytes.
                    std::vector<uint8_t> audioplay_data;
                    audioplay_data.reserve(sizeof(short) * 1024);
                    for (size_t k = 0; k < 1024; ++k) {
                        audioplay_data.push_back((uint8_t)(paud[k] & 0xFF));        // low byte
                        audioplay_data.push_back((uint8_t)((paud[k] >> 8) & 0xFF)); // high byte
                    }

                    // Return the buffer so the capture thread's pool does not
                    // run dry after its max_img_count buffers are consumed.
                    _idleaudbuf[channel].push(paud);

                    // TODO(review): audioplay_data is built but discarded; it
                    // should be routed into the HDMI playback call below.
                } else {
                    colimgs.push_back(blankimg);
                }
            }
            hconcat(colimgs, extrow[row]);
            rowimgs.push_back(extrow[row]);
        }

        Mat result;
        vconcat(rowimgs, result);
        imshow(title, result);
        play_audio_from_queue_through_hdmi();

        /**
         * exit when the user closes the window or presses any key.
         */
        if (waitKey(30) >= 0) {
            isrunning = false;
            break;
        }

        if (cv::getWindowProperty(title, cv::WND_PROP_AUTOSIZE) == -1) {
            std::cout << "user close this window" << std::endl;
            isrunning = false;
            break;
        }
    }

    /**
     * wait a moment, then join the capture threads.
     */
    usleep(3000);
    for (int i = 0; i < max_channel; i++) {
        if (cam_cfg[i].isenabled && pthrd[i] != nullptr) {
            pthrd[i]->join();
            delete pthrd[i];
        }
    }

    /**
     * free the queued image buffers.
     */
    for (int i = 0; i < max_channel; i++) {
        while (_idleimgbuf[i].size() > 0) {
            cv::Mat *pmat = nullptr;
            _idleimgbuf[i].pop(pmat);
            if (pmat != nullptr) {
                delete pmat;
            }
        }

        while (_imgdata[i].size() > 0) {
            cv::Mat *pmat = nullptr;
            _imgdata[i].pop(pmat);
            if (pmat != nullptr) {
                delete pmat;
            }
        }
    }

    /**
     * free the queued audio buffers (pop-by-reference, matching the
     * Queue API used everywhere else in this file).
     */
    for (int i = 0; i < max_channel; i++) {
        while (_idleaudbuf[i].size() > 0) {
            short* paud = nullptr;
            _idleaudbuf[i].pop(paud);
            if (paud != nullptr) {
                delete[] paud; // buffers were allocated with new[]
            }
        }

        while (_auddata[i].size() > 0) {
            short* paud = nullptr;
            _auddata[i].pop(paud);
            if (paud != nullptr) {
                delete[] paud;
            }
        }
    }

    std::cout << "Bye!" << std::endl;
    return 0;
}



