/*
**
** Copyright (C) 2010 Moko365 Inc
** Copyright 2008, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

#define LOG_TAG "V4LCAMERA"
//#include <utils/Log.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/videodev2.h>

//#include <ui/PixelFormat.h>

#include "V4L2Camera.h"

#include <android/log.h>

#include <algorithm>
#include <cstdint>
#include <mutex>
#include <vector>

// Logging macros that forward to the Android log facility.
#define ALOGI(...)  __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
#define ALOGE(...)  __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
#define ALOGW(...)  __android_log_print(ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__)
#define ALOGD(...)  __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)

#ifndef MAX
#define MAX(a, b) ({__typeof__(a) _a = (a); __typeof__(b) _b = (b); _a > _b ? _a : _b; })
#define MIN(a, b) ({__typeof__(a) _a = (a); __typeof__(b) _b = (b); _a < _b ? _a : _b; })
#endif

#define CLEAR(x) memset(&(x), 0, sizeof(x))

#define CAPABILITY_NUM 29
// Table mapping V4L2 capability flag bits (from VIDIOC_QUERYCAP) to printable
// names; used by V4L2Camera::Open() to log what the driver supports.
// NOTE: `name` must be `const char *` — binding a string literal to a plain
// `char *` is ill-formed in standard C++.
static const struct {
    unsigned int type;  // V4L2_CAP_* flag bit
    const char *name;   // human-readable flag name for logging
} enum_capability[] = {
        {V4L2_CAP_VIDEO_CAPTURE, "V4L2_CAP_VIDEO_CAPTURE"},               // single-planar video capture interface
        {V4L2_CAP_VIDEO_OUTPUT, "V4L2_CAP_VIDEO_OUTPUT"},                 // video output interface
        {V4L2_CAP_VIDEO_OVERLAY, "V4L2_CAP_VIDEO_OVERLAY"},               // video overlay: frames stored directly in video memory, clipping supported
        {V4L2_CAP_VBI_CAPTURE, "V4L2_CAP_VBI_CAPTURE"},                   // raw VBI capture interface
        {V4L2_CAP_VBI_OUTPUT, "V4L2_CAP_VBI_OUTPUT"},                     // raw VBI output interface
        {V4L2_CAP_SLICED_VBI_CAPTURE, "V4L2_CAP_SLICED_VBI_CAPTURE"},     // sliced VBI capture device
        {V4L2_CAP_SLICED_VBI_OUTPUT, "V4L2_CAP_SLICED_VBI_OUTPUT"},       // sliced VBI output device
        {V4L2_CAP_RDS_CAPTURE, "V4L2_CAP_RDS_CAPTURE"},                   // RDS data capture
        {V4L2_CAP_VIDEO_OUTPUT_OVERLAY, "V4L2_CAP_VIDEO_OUTPUT_OVERLAY"}, // video output overlay (OSD)
        {V4L2_CAP_HW_FREQ_SEEK, "V4L2_CAP_HW_FREQ_SEEK"},                 // hardware frequency seeking
        {V4L2_CAP_RDS_OUTPUT, "V4L2_CAP_RDS_OUTPUT"},                     // RDS encoder
        {V4L2_CAP_VIDEO_CAPTURE_MPLANE, "V4L2_CAP_VIDEO_CAPTURE_MPLANE"}, // multi-planar video capture device
        {V4L2_CAP_VIDEO_OUTPUT_MPLANE, "V4L2_CAP_VIDEO_OUTPUT_MPLANE"},   // multi-planar video output device
        {V4L2_CAP_VIDEO_M2M_MPLANE, "V4L2_CAP_VIDEO_M2M_MPLANE"},         // multi-planar mem-to-mem device
        {V4L2_CAP_VIDEO_M2M, "V4L2_CAP_VIDEO_M2M"},                       // mem-to-mem device
        {V4L2_CAP_TUNER, "V4L2_CAP_TUNER"},                               // has a tuner
        {V4L2_CAP_AUDIO, "V4L2_CAP_AUDIO"},                               // audio support
        {V4L2_CAP_RADIO, "V4L2_CAP_RADIO"},                               // radio device
        {V4L2_CAP_MODULATOR, "V4L2_CAP_MODULATOR"},                       // has a modulator
        {V4L2_CAP_SDR_CAPTURE, "V4L2_CAP_SDR_CAPTURE"},                   // SDR capture device
        {V4L2_CAP_EXT_PIX_FORMAT, "V4L2_CAP_EXT_PIX_FORMAT"},             // extended pixel format support
        {V4L2_CAP_SDR_OUTPUT, "V4L2_CAP_SDR_OUTPUT"},                     // SDR output device
        {V4L2_CAP_META_CAPTURE, "V4L2_CAP_META_CAPTURE"},                 // can capture metadata to memory
        {V4L2_CAP_READWRITE, "V4L2_CAP_READWRITE"},                       // read()/write() system calls
        {V4L2_CAP_ASYNCIO, "V4L2_CAP_ASYNCIO"},                           // asynchronous I/O
        {V4L2_CAP_STREAMING, "V4L2_CAP_STREAMING"},                       // streaming (mmap/userptr) I/O
        {V4L2_CAP_META_OUTPUT, "V4L2_CAP_META_OUTPUT"},                   // can read metadata from memory
        {V4L2_CAP_TOUCH, "V4L2_CAP_TOUCH"},                               // touch-interface device
        {V4L2_CAP_DEVICE_CAPS, "V4L2_CAP_DEVICE_CAPS"}                    // device_caps field is valid
};
// Keep the macro used by Open()'s loop in sync with the table size.
static_assert(sizeof(enum_capability) / sizeof(enum_capability[0]) == CAPABILITY_NUM,
              "CAPABILITY_NUM must match the enum_capability table size");

// This value is 2^18 - 1; the fixed-point RGB channel sums are clamped to
// this range before being normalized down to eight bits (>> 10).
static const int kMaxChannelValue = 262143;

/**
 * Convert one YUV pixel (BT.601 video range, as produced by YUYV capture)
 * to a packed 0xAARRGGBB value with alpha forced to 0xFF.
 *
 * The math is the integer fixed-point equivalent of:
 *   R = 1.164*(Y-16) + 1.596*(V-128)
 *   G = 1.164*(Y-16) - 0.813*(V-128) - 0.391*(U-128)
 *   B = 1.164*(Y-16) + 2.018*(U-128)
 * done in integers because some Android devices lack hardware floating point.
 *
 * @param nY luma   byte value (0-255)
 * @param nU chroma U byte value (0-255)
 * @param nV chroma V byte value (0-255)
 * @return packed ARGB pixel, alpha always 0xFF
 */
static inline uint32_t YUV2RGB(int nY, int nU, int nV) {
    nY -= 16;
    nU -= 128;
    nV -= 128;
    if (nY < 0) nY = 0;

    // Fixed-point channel sums (scaled by 1024).
    int nR = 1192 * nY + 1634 * nV;
    int nG = 1192 * nY - 833 * nV - 400 * nU;
    int nB = 1192 * nY + 2066 * nU;

    // Clamp to [0, kMaxChannelValue] with the standard library instead of the
    // file-local MIN/MAX macros (same result, no macro double-evaluation risk).
    nR = std::min(kMaxChannelValue, std::max(0, nR));
    nG = std::min(kMaxChannelValue, std::max(0, nG));
    nB = std::min(kMaxChannelValue, std::max(0, nB));

    // Normalize back to 8 bits per channel.
    nR = (nR >> 10) & 0xff;
    nG = (nG >> 10) & 0xff;
    nB = (nB >> 10) & 0xff;

    return 0xff000000 | (nR << 16) | (nG << 8) | nB;
}

// Thread entry point for the preview/render loop: `args` is the V4L2Camera
// instance that spawned this thread from StartStreaming().
void *render_task_start(void *args) {
    ALOGD("enter: %s", __PRETTY_FUNCTION__);
    auto *camera = static_cast<V4L2Camera *>(args);
    camera->_start();
    // A pthread start routine must return a value; nullptr is the
    // conventional "no result".
    return nullptr;
}


// Construct an idle camera; streaming stays off until StartStreaming().
V4L2Camera::V4L2Camera()
        : start(false)
{
}

// Tear down: release the preview surface (if still attached) and delete the
// Java callback listener.
V4L2Camera::~V4L2Camera()
{
    std::lock_guard<std::mutex> guard(windowLock);
    if (window != nullptr) {
        ANativeWindow_release(window);
        window = nullptr;
    }

    // Passing null deletes the current listener and clears the pointer.
    setListener(nullptr);
}

/**
 * Open the V4L2 device node, query its capabilities, log them, and pick the
 * buffer type (single- vs multi-planar capture) used by all later ioctls.
 *
 * @param filename  device path, e.g. "/dev/video0"; cached for later logging
 * @return 0 on success; -1 if the node cannot be opened, VIDIOC_QUERYCAP
 *         fails, or the device supports neither capture nor streaming
 */
int V4L2Camera::Open(const char *filename)
{
    this->filename = filename;
    fd = open(filename, O_RDWR | O_NONBLOCK, 0);
    if (fd < 0) {
        ALOGE("[%s][%d] Error opening device: %s", __FUNCTION__, __LINE__, filename);
        return -1;
    }

    /* Query the device capabilities to learn what it can do. */
    int rev = ioctl(fd, VIDIOC_QUERYCAP, &cap);
    if (rev < 0) {
        ALOGE("[%s][%d] [%s] Error VIDIOC_QUERYCAP", __FUNCTION__, __LINE__, filename);
        close(fd);  // BUGFIX: the descriptor was previously leaked on this path
        fd = -1;
        return -1;
    }
    ALOGI("[native_camera]      File: %s", filename);
    ALOGI("[native_camera]      Driver: %s", cap.driver);
    ALOGI("[native_camera]      Card: %s", cap.card);
    ALOGI("[native_camera]      Version: %u.%u.%u", (cap.version>>16)&0XFF, (cap.version>>8)&0XFF, cap.version&0XFF);
    ALOGI("[native_camera]      All Caps: 0x%08X", cap.capabilities);
    ALOGI("[native_camera]      Dev Caps: 0x%08X", cap.device_caps);

    // Log every reported capability flag and, for the capture interfaces,
    // enumerate the supported pixel formats.
    struct v4l2_fmtdesc fmtdesc;
    for (int i = 0; i < CAPABILITY_NUM; i++) {
        if ((cap.capabilities & enum_capability[i].type) != enum_capability[i].type)
            continue;

        ALOGD("capabilities: %s\n", enum_capability[i].name);

        // VIDIOC_ENUM_FMT takes a v4l2_buf_type, not a capability bit.
        // BUGFIX: the original passed the capability flag, which only worked
        // by accident for V4L2_CAP_VIDEO_CAPTURE (both equal 1) and made
        // format enumeration always fail for multi-planar devices.
        enum v4l2_buf_type enumType;
        if (V4L2_CAP_VIDEO_CAPTURE == enum_capability[i].type) {
            V4L2_BUF_TYPE = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            enumType = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        } else if (V4L2_CAP_VIDEO_CAPTURE_MPLANE == enum_capability[i].type) {
            // Multi-planar capture interface.
            V4L2_BUF_TYPE = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
            enumType = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        } else {
            continue;  // not a capture interface: nothing to enumerate
        }

        memset(&fmtdesc, 0, sizeof(fmtdesc));
        fmtdesc.index = 0;
        fmtdesc.type = enumType;

        // List every pixel format this interface supports.
        while (ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc) == 0)
        {
            fmtdesc.index++;
            ALOGD("%s:{pixelformat = %c%c%c%c}, description = '%s', flags = 0x%x\n",
                  enum_capability[i].name,
                  fmtdesc.pixelformat & 0xff,(fmtdesc.pixelformat >> 8)&0xff, (fmtdesc.pixelformat >> 16) & 0xff,(fmtdesc.pixelformat >> 24)&0xff,
                  fmtdesc.description,
                  fmtdesc.flags);
        }
    }

    // Verify we can use this device for video capture.
    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)
        && !(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE)
        && !(cap.capabilities & V4L2_CAP_STREAMING))
    {
        ALOGE("[%s][%d] [%s] Streaming capture not supported", __FUNCTION__, __LINE__, filename);
        close(fd);
        fd = -1;
        return -1;
    }

    return 0;
}

// Close the device node that Open() opened.
void V4L2Camera::Close()
{
    ::close(fd);
}

/**
 * Request, mmap and queue the driver's capture buffers.
 *
 * Multi-planar devices get a ring of CAPTURE_BUF_SIZE buffers (one plane
 * each), recorded in v4l2_buffer_record[]; single-planar devices use one
 * buffer mapped at `mem`.
 *
 * @return 0 on success, -1 on any ioctl or mmap failure
 */
int V4L2Camera::Init()
{
    ALOGD("V4L2Camera::Init()");
    int ret;
    struct v4l2_requestbuffers rb;

    start = false;
    CLEAR(rb);
    rb.type = V4L2_BUF_TYPE;    // buffer/stream type chosen in Open()
    rb.memory = V4L2_MEMORY;

    // Multi-planar capture uses a small buffer ring; single-planar only one.
    rb.count = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == V4L2_BUF_TYPE ? CAPTURE_BUF_SIZE : 1;

    ret = ioctl(fd, VIDIOC_REQBUFS, &rb);
    if (ret < 0) {
        ALOGE("[%s][%d] [%s] VIDIOC_REQBUFS failed! %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
        return -1;
    }

    if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == V4L2_BUF_TYPE) {
        // Query and mmap each multi-planar buffer.
        CLEAR(v4l2_buffer_record);
        for (int i = 0; i < CAPTURE_BUF_SIZE; i++) {
            // Get the information on the buffer that was created for us.
            CLEAR(buffer);
            CLEAR(planes);

            buffer.type     = V4L2_BUF_TYPE;
            buffer.memory   = V4L2_MEMORY;
            buffer.index    = i;
            buffer.length   = 1;          // one plane per buffer
            buffer.m.planes = &planes;

            if (ioctl(fd, VIDIOC_QUERYBUF, &buffer) < 0) {
                ALOGE("[%s][%d] [%s] VIDIOC_QUERYBUF failed! %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
                return -1;
            }
            v4l2_buffer_record[i].mLength =  buffer.m.planes->length;
            v4l2_buffer_record[i].mOffset = (size_t)buffer.m.planes->m.mem_offset;
            v4l2_buffer_record[i].mIndex  = i;

            v4l2_buffer_record[i].mStart = mmap (NULL, v4l2_buffer_record[i].mLength,
                                                 PROT_READ | PROT_WRITE,
                                                 MAP_SHARED, fd, v4l2_buffer_record[i].mOffset);
            if (v4l2_buffer_record[i].mStart == MAP_FAILED) {
                ALOGE("[%s][%d] [%s] mmap failed! %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
                return -1;
            }

            ALOGI("[native_camera] Buffer description:");
            ALOGI("[native_camera]  index : %d", v4l2_buffer_record[i].mIndex);
            ALOGI("[native_camera]  offset: %d", v4l2_buffer_record[i].mOffset);
            ALOGI("[native_camera]  length: %d", v4l2_buffer_record[i].mLength);
            ALOGI("[native_camera]  map start at %p", v4l2_buffer_record[i].mStart);
        }

        // Queue every mapped buffer so the driver can start filling them.
        ALOGI("[native_camera] VIDIOC_QBUF Start\n");
        for (int i = 0; i < CAPTURE_BUF_SIZE; i++)
        {
            memset(&buffer, 0, sizeof(buffer));
            CLEAR(planes);

            buffer.type     = V4L2_BUF_TYPE;
            buffer.memory   = V4L2_MEMORY;
            buffer.m.planes = &planes;
            buffer.index    = i;
            buffer.length   = 1;
            buffer.m.planes->length = v4l2_buffer_record[i].mLength;
            // BUGFIX: for V4L2_MEMORY_MMAP, m.mem_offset is the driver-reported
            // offset from VIDIOC_QUERYBUF.  The original stored the userspace
            // mmap address here, which is not what the field means.
            buffer.m.planes->m.mem_offset = (unsigned int)v4l2_buffer_record[i].mOffset;

            ALOGI("[native_camera] VIDIOC_QBUF index=%d, length=%d \n", buffer.index, buffer.length);
            if (ioctl (fd, VIDIOC_QBUF, &buffer) < 0)
            {
                ALOGE("[%s][%d] [%s] VIDIOC_QBUF failed! %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
                return -1;
            }
        }
    } else {
        /* Single-planar path: query and map the one buffer. */
        CLEAR(buffer);

        buffer.index = 0;
        buffer.type = V4L2_BUF_TYPE;
        buffer.memory = V4L2_MEMORY;

        ret = ioctl(fd, VIDIOC_QUERYBUF, &buffer);
        if (ret < 0) {
            ALOGE("[%s][%d] [%s] VIDIOC_QUERYBUF failed! %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
            return -1;
        }
        ALOGI("[native_camera] Buffer description:");
        ALOGI("[native_camera]  length: %d", buffer.length);
        ALOGI("[native_camera]  offset %p", buffer.m.offset);

        /* Only map one. */
        mem = (unsigned char *)mmap(NULL, buffer.length,
                                    PROT_READ | PROT_WRITE,
                                         MAP_SHARED, fd, buffer.m.offset);
        if (mem == MAP_FAILED) {
            ALOGE("[%s][%d] [%s] Unable map buffer %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
            return -1;
        }

        /* Queue the buffer so the driver can fill it. */
        ret = ioctl(fd, VIDIOC_QBUF, &buffer);
        if (ret < 0) {
            // BUGFIX: this return value was previously ignored, so a failed
            // queue silently reported success.
            ALOGE("[%s][%d] [%s] VIDIOC_QBUF failed! %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
            return -1;
        }
    }
    return 0;
}

// Undo Init(): unmap every capture buffer that was mmap'd.
void V4L2Camera::Uninit()
{
    const bool multiPlanar = (V4L2_BUF_TYPE == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);

    if (multiPlanar) {
        for (int idx = 0; idx < CAPTURE_BUF_SIZE; ++idx) {
            munmap(v4l2_buffer_record[idx].mStart, v4l2_buffer_record[idx].mLength);
        }
    } else {
        munmap(mem, buffer.length);
    }
}

void V4L2Camera::StartStreaming()
{
    enum v4l2_buf_type type;
    int ret;

    if (start) return;

    type = V4L2_BUF_TYPE;

    ret = ioctl(fd, VIDIOC_STREAMON, &type);
    if (ret < 0) {
        ALOGE("[%s][%d] [%s] Unable query buffer: %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
        return;
    }

    if (window != 0) {
        pthread_create(&pid_start, 0, render_task_start, this);
    }

    start = true;
}

// Turn the capture stream off (VIDIOC_STREAMOFF).  The render loop in
// _start() polls `start` and winds down on its own.
// NOTE(review): the render thread (pid_start) is never joined anywhere in
// this file — confirm callers don't tear the object down while it still runs.
void V4L2Camera::StopStreaming()
{
    if (!start) {
        return;
    }

    enum v4l2_buf_type streamType = V4L2_BUF_TYPE;

    const int rc = ioctl(fd, VIDIOC_STREAMOFF, &streamType);
    if (rc < 0) {
        ALOGE("[%s][%d] [%s] Unable query buffer: %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
        return;
    }

    start = false;
}

/**
 * Dequeue one filled capture buffer, copy its payload into raw_base, and
 * immediately re-queue the buffer to the driver.
 *
 * @param raw_base  destination; must hold one full frame (mLength bytes for
 *                  multi-planar, buffer.length for single-planar)
 * @return 0 on success; the negative ioctl result when no frame is ready yet
 *         (expected with O_NONBLOCK) or when re-queueing fails
 */
int V4L2Camera::GrabRawFrame(void *raw_base)
{
    int ret;

    memset(&buffer, 0, sizeof(buffer));
    buffer.type = V4L2_BUF_TYPE;
    buffer.memory = V4L2_MEMORY;

    if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == V4L2_BUF_TYPE) {
        memset(&planes, 0, sizeof(planes));
        buffer.m.planes = &planes;
        buffer.length   = 1;
    }

    /* Dequeue a filled buffer.  With O_NONBLOCK this fails (EAGAIN) until a
     * frame is ready, so the error is intentionally not logged here. */
    ret = ioctl(fd, VIDIOC_DQBUF, &buffer);
    if (ret < 0) {
        return ret;
    }

    /* Copy the frame to the caller's buffer.
     * NOTE(review): the multi-planar path copies the full mapped length;
     * planes.bytesused may be smaller — confirm against the target driver. */
    if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == V4L2_BUF_TYPE) {
        memcpy(raw_base, v4l2_buffer_record[buffer.index].mStart,  v4l2_buffer_record[buffer.index].mLength);
    } else {
        memcpy(raw_base, mem,  buffer.bytesused);
    }

    /* Hand the buffer back to the driver for the next frame. */
    ret = ioctl(fd, VIDIOC_QBUF, &buffer);
    if (ret < 0) {
        ALOGE("[%s][%d] [%s] Unable query buffer: %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
        return ret;
    }

    return 0;
}

/**
 * Convert one raw frame (r) into an RGBA8888 preview frame (p).
 *
 * Only V4L2_PIX_FMT_YUYV input is handled; any other pixelformat leaves the
 * destination untouched.
 *
 * @param r    raw YUYV 4:2:2 frame, width*height*2 bytes
 * @param p    destination, width*height*4 bytes, byte order R,G,B,A — the
 *             in-memory layout of Android's ARGB_8888 / RGBA_8888 buffers
 * @param ppm  unused; kept for interface compatibility
 */
void V4L2Camera::Convert(void *r, void *p, unsigned int ppm)
{
    (void)ppm;  // unused
    unsigned char *raw = (unsigned char *)r;
    unsigned char *preview = (unsigned char *)p;

    if (pixelformat == V4L2_PIX_FMT_YUYV) {
        // YUYV packs two pixels into four bytes (Y0 U Y1 V); U/V are shared
        // by the pixel pair, so each iteration emits two RGBA pixels.
        int size = width * height * 2;

        for (int in = 0, out = 0; in < size; in += 4, out += 8) {
            unsigned char y1 = raw[in];
            unsigned char u  = raw[in + 1];
            unsigned char y2 = raw[in + 2];
            unsigned char v  = raw[in + 3];

            uint32_t argb = YUV2RGB(y1, u, v);
            preview[out]     = (argb >> 16) & 0xff;  // R
            preview[out + 1] = (argb >> 8) & 0xff;   // G
            preview[out + 2] = argb & 0xff;          // B
            preview[out + 3] = 0xff;                 // A

            argb = YUV2RGB(y2, u, v);
            preview[out + 4] = (argb >> 16) & 0xff;
            preview[out + 5] = (argb >> 8) & 0xff;
            preview[out + 6] = argb & 0xff;
            preview[out + 7] = 0xff;
        }
    }
}


std::list<Parameter> V4L2Camera::getParameters() {
    struct v4l2_fmtdesc fmtd;   //存的是摄像头支持的传输格式
    struct v4l2_frmsizeenum  frmsize;   //存的是摄像头对应的图片格式所支持的分辨率
    struct v4l2_frmivalenum  framival;  //存的是对应的图片格式，分辨率所支持的帧率
    Parameter parameter;
    Frame frame;

    parameters.clear();

    for (int i = 0; ; i++)
    {
        fmtd.index = i;
        fmtd.type = V4L2_BUF_TYPE;
        if (ioctl(fd, VIDIOC_ENUM_FMT, &fmtd) < 0)
            break;
        ALOGD("fmt %d: %s\n", i, fmtd.description);
        parameter.pixFormat = fmtd.pixelformat;
        parameter.frames.clear();
        // 查询这种图像数据格式下支持的分辨率
        for (int j = 0; ; j++)
        {
            frmsize.index = j;
            frmsize.pixel_format = fmtd.pixelformat;
            if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frmsize) < 0)
                break;
            ALOGD("w = %d, h = %d \n", frmsize.discrete.width, frmsize.discrete.height);

            frame.width = frmsize.discrete.width;
            frame.height = frmsize.discrete.height;
            frame.frameRate.clear();

            //查询在这种图像数据格式下这种分辨率支持的帧率
            for (int k = 0; ; k++)
            {
                framival.index = k;
                framival.pixel_format = fmtd.pixelformat;
                framival.width = frmsize.discrete.width;
                framival.height = frmsize.discrete.height;
                if (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &framival) < 0)
                    break;
                //下面是帧率的获取
                FrameRate frameRate;
                frameRate.numerator = framival.discrete.numerator;
                frameRate.denominator = framival.discrete.denominator;
                frame.frameRate.push_back(frameRate);
                ALOGD("frame interval: %d, %d\n", framival.discrete.numerator, framival.discrete.denominator);
            }

            parameter.frames.push_back(frame);
        }

        parameters.push_back(parameter);
    }

    return parameters;

}

int V4L2Camera::setPreviewSize(int width, int height, int pixformat) {
    int ret;
    struct v4l2_format format;

    ALOGD("setPreviewSize %d, %d, %d", width, height, pixformat);


    this->width = width;
    this->height = height;
    this->pixelformat = pixformat;

    format.type = V4L2_BUF_TYPE;
    format.fmt.pix.width = width;
    format.fmt.pix.height = height;
    format.fmt.pix.pixelformat = pixelformat;

    // MUST set
    /**
     * V4L2_FIELD_ANY	0	Application 可以请求使用这个参数，如果V4L2_FIELD_NONE, V4L2_FIELD_TOP, V4L2_FIELD_BOTTOM Ｖ4L2_FIELD_INTERLACE 中任何一个格式都支持．驱动选择使用哪一个格式依赖于硬件能力，以及请求的image尺寸，驱动选择一个然后返回这个格式。struct_buffer的field成员不可以为V4L2_FIELD_ANY.
     * V4L2_FIELD_NONE	1	Images是逐行格式，当驱动无法区分V4L2_FIELD_TOP和V4L2_FIELD_BOTTOM，可以使用这种field类型
     * V4L2_FIELD_TOP	2	Images仅仅包含top field
     * V4L2_FIELD_BOTTOM	3	Images仅仅包含bottom field，应用可能希望防止设备捕获interlaced的图片，因为这种图片会在运动物体周围产生毛状特效
     * V4L2_FIELD_INTERLACED	4	Images包含top和bottom field, 隔行交替，场序依赖于当前video的标准。NTSC首先传输bottom field, PAL则先传输top field。
     * V4L2_FIELD_SEQ_TB	5	Images包含top和bottom field, top field的行首先存放在memory中，然后紧跟着bottom field的行。 Fields一直以瞬间序存储，较老的放在内存前面。Images的尺寸和帧相关，而不是field
     * V4L2_FIELD_SEQ_BT	6	Images包含top和bottom field, bottom field的行首先存放在memory中，然后紧跟着top field的行。 Fields一直以瞬间序存储，较老的放在内存前面。Images的尺寸和帧相关，而不是field
     * V4L2_FIELD_ALTERATE	7	一个帧的两个field分别放在不同的buffer, 按照瞬间序，也就是说老的一个是第一个。driver或者应用指明field的奇偶性（奇偶性：当前的field是top 还是bottom field）. 任何两个连续的field构成一个frame，是否两个field是连续的，不需要drop掉他们，可以通过v4l2_buffer中的sequence 成员判定。Images的尺寸和frame相关而不是fields相关
     * V4L2_FIELD_INTERLACED_TB	8	Images 包含top和bottom field, 每行交替， top field在前面。top field首先传送
     * V4L2_FIELD_INTERLACED_BT	9	Images 包含top和bottom field, 每行交替， bottom field在前面。bottom field首先传送
     */
    format.fmt.pix.field = V4L2_FIELD_ANY;
    format.fmt.pix.priv = 1;

    ret = ioctl(fd, VIDIOC_S_FMT, &format);
    if (ret < 0) {
        ALOGE("[%s][%d] [%s] Unable to set format: %d - %s", __FUNCTION__, __LINE__, filename, errno, strerror(errno));
        return -1;
    }

    return 0;
}

// Attach (or replace) the ANativeWindow used for preview rendering.  The
// previously attached window, if any, is released; this object takes over
// the caller's reference on the new one.
void V4L2Camera::setSurface(ANativeWindow *window) {
    std::lock_guard<std::mutex> guard(windowLock);
    ANativeWindow *previous = this->window;
    this->window = window;
    if (previous != 0) {
        ANativeWindow_release(previous);
    }
}

/**
 * Render-thread main loop (spawned from StartStreaming, runs until `start`
 * goes false): grab a raw frame, forward it to the Java layer, convert it to
 * RGBA8888 and draw it on the preview surface.
 */
void V4L2Camera::_start() {
    // Size of one raw frame: the mapped plane length for multi-planar
    // capture, otherwise the single buffer's length.
    size_t rawSize;
    if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == V4L2_BUF_TYPE) {
        rawSize = v4l2_buffer_record[0].mLength;
    } else {
        rawSize = buffer.length;
    }
    ALOGD("_start raw buf.length %d", (int)rawSize);

    // std::vector gives RAII cleanup; the original raw new[]/delete[] pair
    // would leak both buffers on any early exit from the loop body.
    std::vector<unsigned char> raw(rawSize);
    std::vector<unsigned char> preview((size_t)width * height * 4);  // RGBA8888

    while (start) {
        // Grab one raw frame; with O_NONBLOCK this fails until one is ready.
        if (GrabRawFrame(raw.data()) != 0) {
            usleep(50);  // brief back-off before polling again
            continue;
        }

        // Hand the raw frame to the Java layer...
        sendDataToJava(raw.data());

        // ...convert it to RGBA for display...
        Convert(raw.data(), preview.data(), 0);

        // ...and draw it on the preview surface.
        renderVideo(preview.data());
    }
}

void V4L2Camera::renderVideo(unsigned char *preview) {
    std::lock_guard<std::mutex> lock(windowLock);
    if (window == 0) {
        return;
    }
//    ALOGD("RenderVideoElement width:%d, height:%d", width,height);
    ANativeWindow_setBuffersGeometry(window, width,
                                     height,
                                     WINDOW_FORMAT_RGBA_8888);
    ANativeWindow_Buffer window_buffer;
    if (ANativeWindow_lock(window, &window_buffer, 0)) {
        ANativeWindow_release(window);
        window = 0;
        return;
    }
    //把buffer中的数据进行赋值（修改）
    uint8_t *dst_data = static_cast<uint8_t *>(window_buffer.bits);
    memcpy(dst_data, preview, width*height*4);

    ANativeWindow_unlockAndPost(window);

}

// Replace the Java callback helper; this object owns the listener and
// deletes the previous one.
void V4L2Camera::setListener(JavaCallHelper *listener) {
    std::lock_guard<std::mutex> guard(listenerLock);
    JavaCallHelper *previous = this->listener;
    this->listener = listener;
    delete previous;  // delete on null is a no-op
}

/**
 * Forward one raw frame to the Java layer through the registered listener.
 *
 * Only V4L2_PIX_FMT_YUYV is recognized; for any other pixel format `size`
 * stays 0 and the callback is skipped.
 *
 * @param raw  one frame as produced by GrabRawFrame()
 */
void V4L2Camera::sendDataToJava(unsigned char *raw) {
    std::lock_guard<std::mutex> lock(listenerLock);
    int size = 0;
    int format = -1;

    // Map the V4L2 pixel format to the Java-side constant and compute the
    // frame size.  (The original also pre-assigned `format` in an if-statement
    // that exactly duplicated this switch — redundant, removed.)
    switch (pixelformat) {
        case V4L2_PIX_FMT_YUYV:
            size = width * height * 2;  // YUYV: 2 bytes per pixel
            format = YUYV;
            break;
    }

    if (listener != 0 && size != 0) {
        listener->onDataCallback(raw, size, width, height, format);
    }
}



