#include "v4l2_video.h"

#include <cstring>
#include <fcntl.h>
#include <iostream>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <sys/mman.h>

/*
 * Capture workflow:
 * 1. Open the camera device.
 * 2. Query the device's capabilities.
 * 3. Set device parameters: pixel format, frame size, frame rate.
 * 4. Request frame buffers and mmap them.
 * 5. Queue the frame buffers.
 * 6. Start video capture.
 * 7. Dequeue a frame buffer and process the captured data.
 * 8. Re-queue the buffer after processing; repeat.
 * 9. Stop capture.
 */
// Remember the device path (e.g. "/dev/video0"); the device is not opened
// until open_video() is called, so the fd starts out invalid.
CV4L2Video::CV4L2Video(const char* device_name)
    : device_name_(device_name),
      dev_fd_(-1)
{
}

// NOTE(review): defaulted — the device fd and the mmap'd frame buffers are
// NOT released here; callers must invoke close_video() themselves before the
// object is destroyed. Consider calling close_video() from the destructor
// once close_video() invalidates dev_fd_ after closing it.
CV4L2Video::~CV4L2Video()
= default;

// Open the capture device read/write.
// Returns true on success; on failure dev_fd_ stays -1 and false is returned.
bool CV4L2Video::open_video()
{
    const int fd = ::open(device_name_.c_str(), O_RDWR);
    dev_fd_ = fd;
    return fd >= 0;
}

// Close the device and release the mmap'd frame buffers.
void CV4L2Video::close_video()
{
    // "> 2" keeps stdin/stdout/stderr safe from a bogus fd value.
    if (dev_fd_ > 2)
    {
        close(dev_fd_);
        // BUG FIX: invalidate the fd after closing; the old code left the
        // stale value behind, so a second close_video() (or any later ioctl)
        // would act on a descriptor that may have been reused elsewhere.
        dev_fd_ = -1;
    }
    v4l2_free_buffer();
}

// Returns true when the device advertises the video-capture capability.
bool CV4L2Video::is_capture() const
{
    struct v4l2_capability cap{};
    // BUG FIX: the ioctl result used to be ignored; on failure the zero-
    // initialized struct silently produced "not a capture device". Report
    // the error explicitly instead.
    if (0 > ioctl(dev_fd_, VIDIOC_QUERYCAP, &cap))
    {
        fprintf(stderr, "ioctl error: VIDIOC_QUERYCAP: %s\n", strerror(errno));
        return false;
    }
    return (V4L2_CAP_VIDEO_CAPTURE & cap.capabilities) != 0;
}

// Enumerate every capture pixel format the device offers and append each
// format's human-readable description to `fmt`.
void CV4L2Video::enum_video_pixel_format(std::vector<std::string>& fmt) const
{
    struct v4l2_fmtdesc desc{};
    desc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    for (desc.index = 0; 0 == ioctl(dev_fd_, VIDIOC_ENUM_FMT, &desc); ++desc.index)
    {
        fmt.emplace_back(reinterpret_cast<char*>(desc.description));
    }
}

// Enumerate the discrete frame sizes supported by every pixel format and
// append them (width, height) to `fm_size`.
void CV4L2Video::enum_video_frame_size(std::vector<Resolution>& fm_size) const
{
    std::vector<uint32_t> pixel;
    get_video_pixel_format(pixel);
    struct v4l2_frmsizeenum frm_size{};
    frm_size.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    for (unsigned int pix : pixel)
    {
        frm_size.pixel_format = pix;
        // BUG FIX: the index must restart at 0 for every pixel format; the
        // old code set it to 0 only once, so VIDIOC_ENUM_FRAMESIZES failed
        // immediately for every format after the first and their sizes were
        // never reported.
        frm_size.index = 0;
        while (0 == ioctl(dev_fd_, VIDIOC_ENUM_FRAMESIZES, &frm_size))
        {
            // width/height are __u32 — print with %u, not %d.
            printf("frame_size<%u*%u>\n", frm_size.discrete.width, frm_size.discrete.height);
            fm_size.emplace_back(frm_size.discrete.width, frm_size.discrete.height);
            frm_size.index++;
        }
    }
}

// For every (pixel format, resolution) pair the device supports, collect the
// available frame rates (denominator/numerator of each discrete interval).
void CV4L2Video::enum_video_frame_interval(std::vector<uint32_t>& fps) const
{
    std::map<uint32_t, std::vector<Resolution>> sizes_by_format;
    get_video_pixel_frame_size(sizes_by_format);
    v4l2_frmivalenum ival{};
    ival.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    for (const auto& [format, sizes] : sizes_by_format)
    {
        ival.pixel_format = format;
        for (const auto& [width, height] : sizes)
        {
            ival.width = width;
            ival.height = height;
            for (ival.index = 0; 0 == ioctl(dev_fd_, VIDIOC_ENUM_FRAMEINTERVALS, &ival); ++ival.index)
            {
                fps.emplace_back(ival.discrete.denominator / ival.discrete.numerator);
            }
        }
    }
}

/*
 * When setting the format with VIDIOC_S_FMT, the parameters actually applied
 * are not necessarily the ones we requested. For example, we might request an
 * 800x480 frame, but the camera may not support that frame size, or may not
 * support the V4L2_PIX_FMT_RGB565 pixel format. In such cases the driver does
 * not fail the call; it silently adjusts the parameters (e.g. 800*480 may be
 * changed to 640*480 if the camera supports that resolution). So after
 * ioctl() returns, the returned struct v4l2_format must be re-checked to
 * confirm the requested parameters actually took effect.
 */
// Configure pixel format, frame size and frame rate on the capture device.
// Returns false when the driver rejects — or silently substitutes — the
// requested format/size, or when setting a supported frame rate fails.
bool CV4L2Video::set_video_parameter(uint32_t pixel_format, Resolution resolution, uint32_t fps) const
{
    v4l2_format fmt{};
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = resolution.first;
    fmt.fmt.pix.height = resolution.second;
    fmt.fmt.pix.pixelformat = pixel_format;
    if (0 > ioctl(dev_fd_, VIDIOC_S_FMT, &fmt))
    {
        // BUG FIX: this failure path used to be silent.
        std::cerr << "ioctl error: VIDIOC_S_FMT: " << strerror(errno) << std::endl;
        return false;
    }
    // The driver may have substituted a supported pixel format (see the note
    // above this function); re-check what actually took effect.
    if (pixel_format != fmt.fmt.pix.pixelformat)
    {
        return false;
    }
    // Likewise the driver may have substituted a supported resolution.
    if (fmt.fmt.pix.width != resolution.first || fmt.fmt.pix.height != resolution.second)
    {
        return false;
    }

    v4l2_streamparm streamparm{};
    streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    // BUG FIX: report a VIDIOC_G_PARM failure instead of ignoring it. We still
    // fall through: capability stays zero-initialized, so the "unsupported"
    // branch below is taken, matching the old behavior for such drivers.
    if (0 > ioctl(dev_fd_, VIDIOC_G_PARM, &streamparm))
    {
        std::cerr << "ioctl error: VIDIOC_G_PARM: " << strerror(errno) << std::endl;
    }
    // Only attempt to set the frame rate when the driver supports it.
    if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME)
    {
        // BUG FIX: guard fps == 0, which would request a 1/0 frame interval.
        if (fps == 0)
        {
            std::cerr << "set_video_parameter: fps must be non-zero" << std::endl;
            return false;
        }
        streamparm.parm.capture.timeperframe.numerator = 1;
        streamparm.parm.capture.timeperframe.denominator = fps;
        if (0 > ioctl(dev_fd_, VIDIOC_S_PARM, &streamparm))
        {
            std::cerr << "ioctl error: VIDIOC_S_PARM: \n" << strerror(errno) << std::endl;
            return false;
        }
    }
    else
    {
        std::cerr << "不支持帧率设置" << std::endl;
    }

    return true;
}

// Start streaming on the capture queue; returns false when the ioctl fails.
bool CV4L2Video::start_capture() const
{
    v4l2_buf_type buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(dev_fd_, VIDIOC_STREAMON, &buf_type) < 0)
    {
        fprintf(stderr, "ioctl error: VIDIOC_STREAMON: %s\n", strerror(errno));
        return false;
    }
    return true;
}

// Stop streaming on the capture queue; returns false when the ioctl fails.
// (The original comment here said "start capture" — a copy-paste slip.)
bool CV4L2Video::stop_capture() const
{
    v4l2_buf_type buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(dev_fd_, VIDIOC_STREAMOFF, &buf_type) < 0)
    {
        fprintf(stderr, "ioctl error: VIDIOC_STREAMOFF: %s\n", strerror(errno));
        return false;
    }
    return true;
}

// Continuously dequeue filled frame buffers, copy each frame into `img` and
// hand it to `func`, then re-queue the buffer. Runs until an ioctl fails.
// NOTE(review): `img` only ever holds the most recent frame; callers that
// need every frame should rely on the callback.
void CV4L2Video::get_frame_image(std::string& img, const ImageProcessCallback& func) const
{
    v4l2_buffer buf{};
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    while (true)
    {
        for (buf.index = 0; buf.index < DEFAULT_FRAME_NUM_; ++buf.index)
        {
            // Dequeue a filled buffer. VIDIOC_DQBUF overwrites buf.index with
            // the index of the buffer the driver actually returned, so the
            // frame_buffer_ lookup below always matches the dequeued buffer.
            if (0 > ioctl(dev_fd_, VIDIOC_DQBUF, &buf))
            {
                fprintf(stderr, "ioctl error: VIDIOC_DQBUF: %s\n", strerror(errno));
                return;
            }

            // Copy one frame out of the mmap'd region for this buffer.
            img.assign(reinterpret_cast<char*>(frame_buffer_[buf.index].start), buf.bytesused);
            //img = to_image(frame_buffer_[buf.index].start, buf.bytesused);
            if (nullptr != func)
            {
                func(img);
            }
            // Hand the buffer back to the driver for reuse.
            if (0 > ioctl(dev_fd_, VIDIOC_QBUF, &buf))
            {
                fprintf(stderr, "ioctl error: VIDIOC_QBUF: %s\n", strerror(errno));
                return;
            }
        }
    }
}

// Continuously dequeue filled frame buffers and hand each frame (pointer into
// the mmap'd region + byte count) to `func`, then re-queue the buffer.
// Runs until an ioctl fails.
// NOTE(review): `img_buf` and `len` are never used — frames are delivered
// only through `func`. Either copy into img_buf or drop the parameters.
void CV4L2Video::get_frame_image(unsigned char* img_buf, int len, const ImgProcessCallback& func) const
{
    v4l2_buffer buf{};
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    while (true)
    {
        for (buf.index = 0; buf.index < DEFAULT_FRAME_NUM_; ++buf.index)
        {
            // Dequeue a filled buffer; VIDIOC_DQBUF overwrites buf.index with
            // the index the driver actually returned.
            if (0 > ioctl(dev_fd_, VIDIOC_DQBUF, &buf))
            {
                fprintf(stderr, "ioctl error: VIDIOC_DQBUF: %s\n", strerror(errno));
                return;
            }

            // Pass the frame's mapped memory straight to the callback.
            if (nullptr != func)
            {
                func(reinterpret_cast<unsigned char*>(frame_buffer_[buf.index].start), static_cast<int>(buf.bytesused));
            }
            // Hand the buffer back to the driver for reuse.
            if (0 > ioctl(dev_fd_, VIDIOC_QBUF, &buf))
            {
                fprintf(stderr, "ioctl error: VIDIOC_QBUF: %s\n", strerror(errno));
                return;
            }
        }
    }
}

// Dequeue one frame into `img`, then immediately re-queue the buffer.
// On ioctl failure `img` is left untouched.
void CV4L2Video::read(std::string& img)
{
    if (!init_flag_)
    {
        v4l2_buf_.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        v4l2_buf_.memory = V4L2_MEMORY_MMAP;
        v4l2_buf_.index = 0;
        init_flag_ = true;
    }
    // BUG FIX: the old code's else-branch reset the index and returned
    // WITHOUT dequeuing, so every (DEFAULT_FRAME_NUM_+1)-th call produced no
    // frame. Wrap the index first, then always dequeue.
    if (v4l2_buf_.index >= DEFAULT_FRAME_NUM_)
    {
        v4l2_buf_.index = 0;
    }
    if (0 > ioctl(dev_fd_, VIDIOC_DQBUF, &v4l2_buf_))
    {
        fprintf(stderr, "ioctl error: VIDIOC_DQBUF: %s\n", strerror(errno));
        return;
    }

    // VIDIOC_DQBUF overwrites the index with the dequeued buffer's index, so
    // this lookup always matches the buffer the driver just filled.
    img.assign(reinterpret_cast<char*>(frame_buffer_[v4l2_buf_.index].start), v4l2_buf_.bytesused);
    // Re-queue the buffer so the driver can reuse it.
    if (0 > ioctl(dev_fd_, VIDIOC_QBUF, &v4l2_buf_))
    {
        fprintf(stderr, "ioctl error: VIDIOC_QBUF: %s\n", strerror(errno));
    }
    ++v4l2_buf_.index;
}

// Collect the fourcc code of every capture pixel format the device offers.
void CV4L2Video::get_video_pixel_format(std::vector<uint32_t>& pixel_fmt) const
{
    struct v4l2_fmtdesc desc{};
    desc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    for (desc.index = 0; 0 == ioctl(dev_fd_, VIDIOC_ENUM_FMT, &desc); ++desc.index)
    {
        pixel_fmt.emplace_back(desc.pixelformat);
    }
}

void CV4L2Video::get_video_pixel_frame_size(std::map<uint32_t, std::vector<Resolution>>& fm_size) const
{
    std::vector<uint32_t> pixel;
    get_video_pixel_format(pixel);
    v4l2_frmsizeenum frm_size {};
    frm_size.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    for (unsigned int pix : pixel)
    {
        frm_size.pixel_format = pix;
        frm_size.index = 0;
        std::vector<Resolution> res;
        while (0 == ioctl(dev_fd_, VIDIOC_ENUM_FRAMESIZES, &frm_size))
        {
            res.emplace_back(frm_size.discrete.width, frm_size.discrete.height);
            frm_size.index++;
        }
        fm_size[pix] = res;
    }
}

int CV4L2Video::v4l2_malloc_buffer(int frame_num /*=3*/)
{
    v4l2_requestbuffers req_buffer{};
    //申请帧缓冲
    req_buffer.count = frame_num;
    req_buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req_buffer.memory = V4L2_MEMORY_MMAP;
    if (0 > ioctl(dev_fd_, VIDIOC_REQBUFS, &req_buffer))
    {
        return -1;
    }

    //建立内存映射
    v4l2_buffer buffer{};
    buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buffer.memory = V4L2_MEMORY_MMAP;
    for (buffer.index = 0; buffer.index < frame_num; buffer.index++)
    {
        ioctl(dev_fd_, VIDIOC_QUERYBUF, &buffer);
        frame_buffer_[buffer.index].length = buffer.length;
        frame_buffer_[buffer.index].start = static_cast<unsigned short*>(mmap(nullptr, buffer.length, PROT_READ | PROT_WRITE, MAP_SHARED, dev_fd_, buffer.m.offset));
        if (MAP_FAILED == frame_buffer_[buffer.index].start)
        {
            return -1;
        }
    }

    //入队
    for (buffer.index = 0; buffer.index < frame_num; buffer.index++)
    {
        if (0 > ioctl(dev_fd_, VIDIOC_QBUF, &buffer))
        {
            fprintf(stderr, "ioctl error: VIDIOC_QBUF: %s\n", strerror(errno));
            return -1;
        }
    }
    return 0;
}

// Unmap every mapped frame buffer and reset the bookkeeping. Safe to call
// repeatedly: already-released entries (start == nullptr) are skipped.
int CV4L2Video::v4l2_free_buffer()
{
    for (int i = 0; i < DEFAULT_FRAME_NUM_; ++i)
    {
        if (frame_buffer_[i].start != nullptr)
        {
            // BUG FIX: munmap must receive the original mapping length. The
            // old code zeroed `length` BEFORE calling munmap, so every call
            // was munmap(addr, 0) — which fails with EINVAL — and all
            // mappings were leaked.
            munmap(frame_buffer_[i].start, frame_buffer_[i].length);
            frame_buffer_[i].start = nullptr;
        }
        frame_buffer_[i].length = 0;
    }
    return 0;
}

/*
 * If the camera produces 'MJPG' (Motion-JPEG, compressed) frames, each frame
 * read from the driver is already a complete JPEG image, so no decoding is
 * needed — which is why this helper is compiled out.
 */
#if 0
// Decode a JPEG frame held in `data`/`size` into raw scanline bytes.
// NOTE(review): the early return below makes the entire libjpeg path
// unreachable — the raw buffer is handed back as-is. Remove the return (and
// the #if 0) to re-enable actual decoding.
std::string CV4L2Video::to_image(void* data, size_t size)
{
    return {static_cast<char*>(data), size};
    struct jpeg_decompress_struct cinfo{};
    struct jpeg_error_mgr jerr{};
    // Bind the default libjpeg error handler.
    cinfo.err = jpeg_std_error(&jerr);
    // Create the JPEG decompression object.
    jpeg_create_decompress(&cinfo);
    // Use the in-memory JPEG buffer as the data source.
    jpeg_mem_src(&cinfo, static_cast<unsigned char*>(data), size);
    // Read the JPEG header.
    jpeg_read_header(&cinfo, TRUE);
    printf("jpeg 图像大小: %d*%d\n", cinfo.image_width, cinfo.image_height);

    // Start decoding.
    jpeg_start_decompress(&cinfo);

    std::string img_data;
    unsigned int row_stride = cinfo.output_width * cinfo.output_components;
    // Decode one scanline at a time and append it to img_data.
    auto buffer = new unsigned char[row_stride];
    while (cinfo.output_scanline < cinfo.output_height)
    {
        jpeg_read_scanlines(&cinfo, &buffer, 1);
        img_data.append(reinterpret_cast<char*>(buffer), row_stride);
    }
    // Decoding finished.
    jpeg_finish_decompress(&cinfo); // finish decompression
    jpeg_destroy_decompress(&cinfo); // destroy the decoder object
    delete[] buffer;
    return img_data;
}
#endif
