#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
#include <SDL2/SDL.h>

#define EGL_EGLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

#include <libdrm/drm_fourcc.h>

#define VIDEO_DEV "/dev/video10"
#define WIDTH 1920
#define HEIGHT 1080
#define BUFFER_COUNT 4

// One memory-mapped V4L2 buffer: userspace address and size of the mapping.
struct buffer {
    void *start;    // address returned by mmap()
    size_t length;  // length of the mapping in bytes
};

// SDL window and GL context shared by initDisplay()/display()/destroyDisplay().
SDL_Window *win;
SDL_GLContext ctx;
// Textures for the two NV12 planes: luma (Y) and interleaved chroma (UV).
GLuint tex_y, tex_uv;

// Fullscreen quad as a triangle strip: x, y, u, v per vertex.
// v is flipped (v=1 at the bottom vertices) so the top-left-origin
// image is displayed upright.
GLfloat verts[] = {
    -1, -1, 0, 1,
     1, -1, 1, 1,
    -1,  1, 0, 0,
     1,  1, 1, 0,
};

// Vertex shader: passes the position through and forwards the texcoord.
// Written as ordinary concatenated string literals so the file compiles as
// standard C — raw string literals R"(...)" are a C++11-only feature.
const char *vs_src =
    "attribute vec2 a_position;\n"
    "attribute vec2 a_texcoord;\n"
    "varying vec2 v_texcoord;\n"
    "void main() {\n"
    "    gl_Position = vec4(a_position, 0.0, 1.0);\n"
    "    v_texcoord = a_texcoord;\n"
    "}\n";

// Fragment shader: samples the NV12 planes (Y uploaded as LUMINANCE, UV as
// LUMINANCE_ALPHA, so U lands in .r and V in .a) and converts YUV to RGB
// with BT.601 coefficients (full-range Y assumed — confirm against decoder).
const char *fs_src =
    "precision mediump float;\n"
    "uniform sampler2D tex_y;\n"
    "uniform sampler2D tex_uv;\n"
    "varying vec2 v_texcoord;\n"
    "void main() {\n"
    "    float y = texture2D(tex_y, v_texcoord).r;\n"
    "    vec2 uv = texture2D(tex_uv, v_texcoord).ra - vec2(0.5, 0.5);\n"
    "    float r = y + 1.402 * uv.y;\n"
    "    float g = y - 0.344 * uv.x - 0.714 * uv.y;\n"
    "    float b = y + 1.772 * uv.x;\n"
    "    gl_FragColor = vec4(r, g, b, 1.0);\n"
    "}\n";

// ioctl wrapper: retries on EINTR (signal interruption) and aborts the
// program on any other error. `request` is unsigned long to match the
// ioctl(2) prototype (VIDIOC_* codes have the top bit set).
static void xioctl(int fd, unsigned long request, void *arg) {
    int r;
    do {
        r = ioctl(fd, request, arg);
    } while (r < 0 && errno == EINTR);
    if (r < 0) {
        perror("ioctl");
        exit(1);
    }
}

// Compile a GLSL shader of the given type from source.
// On compile failure, prints the driver's info log to stderr and exits —
// the original silently returned a broken shader object.
GLuint load_shader(GLenum type, const char *src) {
    GLuint s = glCreateShader(type);
    glShaderSource(s, 1, &src, NULL);
    glCompileShader(s);

    GLint ok = 0;
    glGetShaderiv(s, GL_COMPILE_STATUS, &ok);
    if (!ok) {
        char log[1024];
        glGetShaderInfoLog(s, sizeof log, NULL, log);
        fprintf(stderr, "shader compile failed: %s\n", log);
        exit(1);
    }
    return s;
}

// Load one raw NV12 frame (WIDTH x HEIGHT) from `path` and upload its two
// planes into the given textures: Y as LUMINANCE, interleaved UV at half
// resolution as LUMINANCE_ALPHA. Exits on I/O or allocation failure.
void upload_nv12(const char *path, GLuint tex_y, GLuint tex_uv) {
    FILE *f = fopen(path, "rb");
    if (!f) { perror("fopen"); exit(1); }

    size_t y_size = WIDTH * HEIGHT;
    size_t uv_size = y_size / 2;  // NV12: chroma plane is half the luma size

    uint8_t *y = malloc(y_size);
    uint8_t *uv = malloc(uv_size);
    if (!y || !uv) {
        fprintf(stderr, "upload_nv12: out of memory\n");
        exit(1);
    }
    // A short read means the file is not a full frame — fail loudly rather
    // than upload uninitialized memory.
    if (fread(y, 1, y_size, f) != y_size || fread(uv, 1, uv_size, f) != uv_size) {
        fprintf(stderr, "upload_nv12: short read from %s\n", path);
        exit(1);
    }
    fclose(f);

    glBindTexture(GL_TEXTURE_2D, tex_y);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, WIDTH, HEIGHT, 0,
                 GL_LUMINANCE, GL_UNSIGNED_BYTE, y);

    glBindTexture(GL_TEXTURE_2D, tex_uv);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA, WIDTH / 2, HEIGHT / 2, 0,
                 GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, uv);

    free(y);
    free(uv);
}

int initDisplay() {
    SDL_Init(SDL_INIT_VIDEO);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);

    win = SDL_CreateWindow("NV12 Viewer", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
                                       WIDTH, HEIGHT, SDL_WINDOW_OPENGL);
    ctx = SDL_GL_CreateContext(win);

    GLuint vs = load_shader(GL_VERTEX_SHADER, vs_src);
    GLuint fs = load_shader(GL_FRAGMENT_SHADER, fs_src);
    GLuint prog = glCreateProgram();
    glAttachShader(prog, vs);
    glAttachShader(prog, fs);
    glBindAttribLocation(prog, 0, "a_position");
    glBindAttribLocation(prog, 1, "a_texcoord");
    glLinkProgram(prog);
    glUseProgram(prog);

    glGenTextures(1, &tex_y);
    glGenTextures(1, &tex_uv);

    glBindTexture(GL_TEXTURE_2D, tex_y);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    glBindTexture(GL_TEXTURE_2D, tex_uv);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    glUniform1i(glGetUniformLocation(prog, "tex_y"), 0);
    glUniform1i(glGetUniformLocation(prog, "tex_uv"), 1);

    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), verts);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), verts + 2);
    glEnableVertexAttribArray(1);

//    const char *extensions = eglQueryString(eglDisplay, EGL_EXTENSIONS);
    return 0;
}

// Upload one decoded NV12 frame and draw it to the window.
// `data` points at the decoder's output buffer: a Y plane whose height is
// padded, immediately followed by the interleaved UV plane.
int display(char* data) {
    // NOTE(review): the decoder pads the Y plane to 1088 rows (1080 rounded
    // up to a multiple of 16 for macroblock alignment) — confirm against the
    // plane size reported by VIDIOC_G_FMT on the CAPTURE queue.
    enum { PADDED_HEIGHT = 1088 };
    const size_t uv_offset = (size_t)WIDTH * PADDED_HEIGHT;

    // Upload each plane on its own texture unit (matches the sampler
    // uniforms set in initDisplay: tex_y on unit 0, tex_uv on unit 1).
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, tex_y);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, WIDTH, HEIGHT, 0,
                 GL_LUMINANCE, GL_UNSIGNED_BYTE, data);

    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, tex_uv);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA, WIDTH / 2, HEIGHT / 2, 0,
                 GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, data + uv_offset);

    glClear(GL_COLOR_BUFFER_BIT);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    SDL_GL_SwapWindow(win);
    return 0;
}

int destroyDisplay() {
    // 清理资源
//    glDeleteTextures(1, &texture);
//    eglTerminate(eglDisplay);
    SDL_GL_DeleteContext(ctx);
//    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}

// Copy the next Annex-B NAL unit from "input.h264" into `start` (start code
// included) and return its length in bytes; returns 0 at end of stream or
// if the file cannot be read.
//
// The whole file is slurped into a static buffer on the first call, so this
// is single-stream and not thread-safe by design. Unlike the original, this
// version recognizes both 3-byte (00 00 01) and 4-byte (00 00 00 01) start
// codes, as H.264 Annex B permits either.
//
// NOTE(review): the caller must guarantee `start` can hold the largest NAL
// unit in the stream — the destination size cannot be checked here.
__u32 getNalu(void *start) {
    static char buffer[20 * 1024 * 1024];
    static size_t length = 0;
    static size_t pos = 0;

    if (length == 0) {
        FILE *fp = fopen("input.h264", "rb");
        if (!fp) {
            perror("fopen");
            return 0;
        }
        length = fread(buffer, 1, sizeof buffer, fp);
        fclose(fp);
    }

    if (pos >= length) return 0;

    size_t start_pos = pos;

    // Skip the minimum start-code length so we don't re-match the current
    // start code, then scan for the next 00 00 01 pattern.
    pos += 3;
    while (pos + 2 < length) {
        if (buffer[pos] == 0 && buffer[pos + 1] == 0 && buffer[pos + 2] == 1) {
            // A single zero byte directly before 00 00 01 is the zero_byte
            // of a 4-byte start code — include it with the next NAL unit.
            if (pos > start_pos + 3 && buffer[pos - 1] == 0)
                pos--;
            break;
        }
        pos++;
    }
    if (pos + 2 >= length) pos = length;  // no further start code: last NALU

    memcpy(start, &buffer[start_pos], pos - start_pos);
    return pos - start_pos;
}

// Decode input.h264 via the V4L2 memory-to-memory decoder and render every
// decoded NV12 frame with GLES2.
int main() {
    // Non-blocking so VIDIOC_DQBUF polls instead of blocking.
    int fd = open(VIDEO_DEV, O_RDWR|O_NONBLOCK);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    // Set the OUTPUT (compressed bitstream) format.
    struct v4l2_format fmt = {0};
    fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    fmt.fmt.pix_mp.width = WIDTH;
    fmt.fmt.pix_mp.height = HEIGHT;
    fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
    fmt.fmt.pix_mp.num_planes = 1;
    xioctl(fd, VIDIOC_S_FMT, &fmt);

    // Request one OUTPUT buffer.
    struct v4l2_requestbuffers req = {
        .count = 1,
        .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
        .memory = V4L2_MEMORY_MMAP
    };
    xioctl(fd, VIDIOC_REQBUFS, &req);

    // Query and map the OUTPUT buffer.
    struct v4l2_buffer output_buf = {
        .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
        .memory = V4L2_MEMORY_MMAP,
        .index = 0
    };
    struct v4l2_plane output_planes[1];

    output_buf.length = 1;
    output_buf.m.planes = output_planes;
    xioctl(fd, VIDIOC_QUERYBUF, &output_buf);

    void *output_start = mmap(NULL, output_buf.m.planes[0].length,
                              PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                              output_buf.m.planes[0].m.mem_offset);
    if (output_start == MAP_FAILED) {
        perror("mmap (output)");
        return 1;
    }

    // Start the OUTPUT stream.
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    xioctl(fd, VIDIOC_STREAMON, &type);

    // Set the CAPTURE (decoded frames) format: NV12.
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    fmt.fmt.pix_mp.width = WIDTH;
    fmt.fmt.pix_mp.height = HEIGHT;
    fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12;
    fmt.fmt.pix_mp.num_planes = 1;
    xioctl(fd, VIDIOC_S_FMT, &fmt);

    // Request CAPTURE buffers.
    struct v4l2_requestbuffers req2 = {
        .count = BUFFER_COUNT,
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
        .memory = V4L2_MEMORY_MMAP
    };
    xioctl(fd, VIDIOC_REQBUFS, &req2);

    // Map and queue all CAPTURE buffers.
    struct buffer capture_buffers[BUFFER_COUNT];
    for (int i = 0; i < BUFFER_COUNT; i++) {
        struct v4l2_buffer buf = {
            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
            // BUG FIX: was V4L2_MEMORY_DMABUF, which contradicts the
            // V4L2_MEMORY_MMAP requested via VIDIOC_REQBUFS above.
            .memory = V4L2_MEMORY_MMAP,
            .index = i
        };
        struct v4l2_plane planes[1];
        buf.length = 1;
        buf.m.planes = planes;
        xioctl(fd, VIDIOC_QUERYBUF, &buf);
        capture_buffers[i].length = buf.m.planes[0].length;
        capture_buffers[i].start = mmap(NULL, buf.m.planes[0].length, PROT_READ | PROT_WRITE,
                                            MAP_SHARED, fd, buf.m.planes[0].m.mem_offset);
        if (capture_buffers[i].start == MAP_FAILED) {
            perror("mmap (capture)");
            return 1;
        }
        xioctl(fd, VIDIOC_QBUF, &buf);
    }

    // Start the CAPTURE stream.
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    xioctl(fd, VIDIOC_STREAMON, &type);

    // Set up the SDL2/GLES2 display.
    initDisplay();

    // Feed NAL units to the decoder and display the decoded frames.
    while (1) {
        output_planes[0].bytesused = getNalu(output_start);
        if (output_planes[0].bytesused == 0) break;  // end of stream

        // Submit the H.264 NAL unit to the decoder.
        if (ioctl(fd, VIDIOC_QBUF, &output_buf) < 0) {
            perror("VIDIOC_QBUF (output)");
            continue;
        }

        struct v4l2_buffer cbuf = {0};
        struct v4l2_plane cplanes[1];
        cbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        cbuf.memory = V4L2_MEMORY_MMAP;
        cbuf.m.planes = cplanes;
        cbuf.length = 1;

        // Drain every decoded frame currently available (fd is non-blocking).
        while (ioctl(fd, VIDIOC_DQBUF, &cbuf) == 0) {
            display(capture_buffers[cbuf.index].start);

            // Hand the buffer back to the decoder.
            xioctl(fd, VIDIOC_QBUF, &cbuf);

            SDL_Delay(5); // crude rate limit
        }

        // Wait for the decoder to release the output buffer. EAGAIN just
        // means "not done yet" on a non-blocking fd; any other error is
        // fatal (the original spun forever on real errors here).
        while (ioctl(fd, VIDIOC_DQBUF, &output_buf) < 0) {
            if (errno != EAGAIN && errno != EINTR) {
                perror("VIDIOC_DQBUF (output)");
                goto out;
            }
            SDL_Delay(5); // crude rate limit
        }

        SDL_PollEvent(NULL); // keep the window responsive
        SDL_Delay(5); // crude rate limit
    }

out:
    // Stop both streams and release mappings before closing the device.
    type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    ioctl(fd, VIDIOC_STREAMOFF, &type);
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    ioctl(fd, VIDIOC_STREAMOFF, &type);
    munmap(output_start, output_buf.m.planes[0].length);
    for (int i = 0; i < BUFFER_COUNT; i++)
        munmap(capture_buffers[i].start, capture_buffers[i].length);

    close(fd);
    destroyDisplay();
    return 0;
}
