#include "video_processor.h"
#include <iostream>
#include <iomanip>
#include <cuda_runtime.h>

std::string avErrorString(int errnum) {
    char errbuf[AV_ERROR_MAX_STRING_SIZE];
    av_strerror(errnum, errbuf, sizeof(errbuf));
    return std::string(errbuf);
}

// Construct with all state zeroed and the four reusable AVFrame
// containers pre-allocated (decoded, hardware, staging, RGB). Actual
// pixel buffers are attached lazily during decoding.
VideoProcessor::VideoProcessor() 
    : format_ctx_(nullptr), codec_ctx_(nullptr), av_frame_(nullptr), 
      hw_frame_(nullptr), sw_frame_(nullptr), rgb_frame_(nullptr),  sws_ctx_(nullptr), 
      video_stream_index_(-1), current_frame_(0),
      hw_device_ctx_(nullptr), hw_pix_fmt_(AV_PIX_FMT_NONE) {

    // Allocate the empty frame holders in one pass.
    for (AVFrame** holder : {&av_frame_, &hw_frame_, &sw_frame_, &rgb_frame_}) {
        *holder = av_frame_alloc();
    }

    // Process-wide FFmpeg network setup, performed exactly once.
    static bool ffmpeg_ready = false;
    if (!ffmpeg_ready) {
        avformat_network_init();
        ffmpeg_ready = true;
    }
}

// Destructor: release decoding resources first, then the frame holders
// that were allocated in the constructor.
VideoProcessor::~VideoProcessor() {
    close();

    for (AVFrame** holder : {&av_frame_, &hw_frame_, &sw_frame_, &rgb_frame_}) {
        if (*holder) {
            av_frame_free(holder);
        }
    }
}

bool VideoProcessor::initDecoder() {
    AVStream* video_stream = format_ctx_->streams[video_stream_index_];
    const AVCodec* codec = avcodec_find_decoder(video_stream->codecpar->codec_id);
    if (!codec) {
        std::cerr << "Error: Unsupported codec" << std::endl;
        return false;
    }
    
    codec_ctx_ = avcodec_alloc_context3(codec);
    if (!codec_ctx_) {
        std::cerr << "Error: Cannot allocate codec context" << std::endl;
        return false;
    }
    
    if (avcodec_parameters_to_context(codec_ctx_, video_stream->codecpar) < 0) {
        std::cerr << "Error: Cannot copy codec parameters" << std::endl;
        return false;
    }
    
    if (avcodec_open2(codec_ctx_, codec, nullptr) < 0) {
        std::cerr << "Error: Cannot open codec" << std::endl;
        return false;
    }
    
    std::cout << "Software decoder initialized: " << codec->name << std::endl;
    return true;
}

// Dump the collected stream metadata to stdout in a fixed banner format.
void VideoProcessor::printVideoInfo() const {
    std::ostream& out = std::cout;
    const bool frame_count_known = video_info_.total_frames > 0;

    out << "=== Video Information ===" << std::endl;
    out << "Resolution: " << video_info_.width << "x" << video_info_.height << std::endl;
    out << "FPS: " << video_info_.fps << std::endl;
    out << "Total Frames: "
        << (frame_count_known ? std::to_string(video_info_.total_frames) : "Unknown")
        << std::endl;
    out << "Codec: " << video_info_.codec_name << std::endl;
    out << "Pixel Format: " << av_get_pix_fmt_name(video_info_.pixel_format) << std::endl;
    out << "Pixel Format Code : " << video_info_.pixel_format
        << " yuv420p code " << AV_PIX_FMT_YUV420P << std::endl;
    out << "Hardware Acceleration: " << (video_info_.hardware_acceleration ? "Yes" : "No") << std::endl;
    out << "=========================" << std::endl;
}


// Create a CUDA hardware device context for GPU-accelerated decoding.
// Returns false (with a diagnostic) when no CUDA device is available.
bool VideoProcessor::initHardwareDevice() {
    const int err = av_hwdevice_ctx_create(&hw_device_ctx_, AV_HWDEVICE_TYPE_CUDA, NULL, NULL, 0);
    if (err >= 0) {
        std::cout << "CUDA hardware device context created successfully" << std::endl;
        return true;
    }

    std::cerr << "Failed to create CUDA hardware device context: " << avErrorString(err) << std::endl;
    return false;
}

// Initialize a CUDA-backed hardware decoder for the selected video stream.
// On success sets video_info_.hardware_acceleration = true; on failure the
// caller (open()) falls back to the software decoder and close() releases
// any partially-created state.
bool VideoProcessor::initHardwareDecoder() {
    if (!initHardwareDevice()) {
        return false;
    }
    
    AVStream* video_stream = format_ctx_->streams[video_stream_index_];
    const AVCodec* codec = avcodec_find_decoder(video_stream->codecpar->codec_id);
    if (!codec) {
        std::cerr << "Error: Unsupported codec" << std::endl;
        return false;
    }
    
    // Simplified hardware-format selection: always target CUDA surfaces.
    hw_pix_fmt_ = AV_PIX_FMT_CUDA;
    
    codec_ctx_ = avcodec_alloc_context3(codec);
    if (!codec_ctx_) {
        std::cerr << "Error: Cannot allocate codec context" << std::endl;
        return false;
    }
    
    if (avcodec_parameters_to_context(codec_ctx_, video_stream->codecpar) < 0) {
        std::cerr << "Error: Cannot copy codec parameters" << std::endl;
        return false;
    }
    
    // Attach the CUDA device context; the decoder takes its own reference.
    codec_ctx_->hw_device_ctx = av_buffer_ref(hw_device_ctx_);
    if (!codec_ctx_->hw_device_ctx) {
        std::cerr << "Error: Failed to set hardware device context" << std::endl;
        return false;
    }
    
    // Negotiate the hardware pixel format: prefer AV_PIX_FMT_CUDA.
    codec_ctx_->get_format = [](AVCodecContext* ctx, const enum AVPixelFormat* pix_fmts) {
        std::cout << "Available pixel formats: ";
        for (const enum AVPixelFormat* p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
            if (*p == AV_PIX_FMT_CUDA) {
                std::cout << "\nSelected CUDA hardware pixel format" << std::endl;
                return *p;
            }
        }
        std::cerr << "Failed to get CUDA hardware surface format" << std::endl;
        // NOTE(review): falling back to pix_fmts[0] lets decoding continue
        // in a software format even though hardware_acceleration will be
        // reported as true below -- the commented alternative would force
        // a hard failure instead. Confirm which behavior is intended.
        return pix_fmts[0];
        //return AV_PIX_FMT_NONE;
    };
    
    if (avcodec_open2(codec_ctx_, codec, nullptr) < 0) {
        std::cerr << "Error: Cannot open hardware codec" << std::endl;
        return false;
    }
    
    video_info_.hardware_acceleration = true;
    std::cout << "Hardware decoder initialized successfully" << std::endl;
    
    // Print decoder details for debugging.
    std::cout << "Decoder info: " << codec->name 
              << ", width: " << codec_ctx_->width 
              << ", height: " << codec_ctx_->height 
              << ", pix_fmt: " << av_get_pix_fmt_name(codec_ctx_->pix_fmt) << std::endl;
    
    return true;
} 

// Download a decoded GPU (CUDA) frame into system memory.
//
// The destination frame is lazily allocated as NV12 (the format the CUDA
// transfer path produces here) and re-allocated whenever the source
// geometry changes. The previous version only checked data[0], so a
// mid-stream resolution change would silently reuse a wrongly-sized
// buffer.
//
// @param hw_frame  source frame living in GPU memory (AV_PIX_FMT_CUDA)
// @param sw_frame  destination frame in system memory (allocated here)
// @return true on success, false on allocation or transfer failure
bool VideoProcessor::transferFrameFromGPU(AVFrame* hw_frame, AVFrame* sw_frame) {
    if (!hw_frame || !sw_frame) {
        std::cerr << "Error: Frames not allocated" << std::endl;
        return false;
    }

    // (Re)allocate the CPU-side buffer on first use or geometry change.
    const bool needs_alloc = sw_frame->data[0] == nullptr ||
                             sw_frame->width != hw_frame->width ||
                             sw_frame->height != hw_frame->height;
    if (needs_alloc) {
        // Drop any previously attached buffers before reconfiguring.
        av_frame_unref(sw_frame);

        sw_frame->format = AV_PIX_FMT_NV12; // system-memory target format
        sw_frame->width = hw_frame->width;
        sw_frame->height = hw_frame->height;
        if (av_frame_get_buffer(sw_frame, 32) < 0) {
            std::cerr << "Error: Cannot allocate software frame buffer" << std::endl;
            return false;
        }
        std::cout << "Software frame buffer allocated successfully" << std::endl;
    }

    int ret = av_hwframe_transfer_data(sw_frame, hw_frame, 0);
    if (ret < 0) {
        std::cerr << "Error transferring frame from GPU: " << avErrorString(ret) << std::endl;
        return false;
    }
    return true;
}

// Copy a decoded CUDA frame into the caller's GPU buffers as NV12.
//
// The frame is first downloaded to system memory (sw_frame_). Depending
// on the resulting pixel format, the planes are either uploaded directly
// (NV12) or the planar U/V data is interleaved into NV12 chroma layout
// first (YUV420P/YUVJ420P). y_plane and uv_plane are device pointers:
// every write to them goes through cudaMemcpy.
//
// @param frame      decoded frame in GPU memory (AV_PIX_FMT_CUDA)
// @param y_plane    device pointer receiving the luma plane
// @param uv_plane   device pointer receiving the interleaved chroma plane
// @param stride_y   out: luma row stride in bytes
// @param stride_uv  out: chroma row stride in bytes
// @return true on success
bool VideoProcessor::processCUDATextureFrame(AVFrame* frame, unsigned char* y_plane, unsigned char* uv_plane, int& stride_y, int& stride_uv) {
    if (!frame || !sw_frame_) {
        std::cerr << "Error: Invalid frame pointers in processCUDATextureFrame" << std::endl;
        return false;
    }

    // Download the CUDA frame into system memory first.
    if (!transferFrameFromGPU(frame, sw_frame_)) {
        std::cerr << "Failed to transfer frame from GPU" << std::endl;
        return false;
    }

    AVPixelFormat pixel_format = static_cast<AVPixelFormat>(sw_frame_->format);

    if (!sw_frame_->data[0]) {
        std::cerr << "Error: sw_frame_->data[0] is null pointer" << std::endl;
        return false;
    }

    if (pixel_format == AV_PIX_FMT_NV12) {
        stride_y = sw_frame_->linesize[0];
        stride_uv = sw_frame_->linesize[1];

        // Upload the Y plane.
        if (y_plane) {
            int y_size = sw_frame_->height * stride_y;
            if (y_size > 0) {
                cudaError_t err = cudaMemcpy(y_plane, sw_frame_->data[0], y_size, cudaMemcpyHostToDevice);
                if (err != cudaSuccess) {
                    std::cerr << "Failed to copy Y plane to GPU: " << cudaGetErrorString(err) << std::endl;
                    return false;
                }
            } else {
                std::cerr << "Invalid Y plane size: " << y_size << std::endl;
                return false;
            }
        } else {
            std::cerr << "Warning: y_plane destination pointer is null" << std::endl;
        }

        // Upload the already-interleaved UV plane.
        if (uv_plane && sw_frame_->data[1]) {
            int uv_size = (sw_frame_->height / 2) * stride_uv;
            if (uv_size > 0) {
                cudaError_t err = cudaMemcpy(uv_plane, sw_frame_->data[1], uv_size, cudaMemcpyHostToDevice);
                if (err != cudaSuccess) {
                    std::cerr << "Failed to copy UV plane to GPU: " << cudaGetErrorString(err) << std::endl;
                    return false;
                }
            } else {
                std::cerr << "Invalid UV plane size: " << uv_size << std::endl;
                return false;
            }
        } else {
            std::cerr << "Warning: UV plane destination pointer is null or source data unavailable" << std::endl;
        }

        return true;

    } else if (pixel_format == AV_PIX_FMT_YUV420P || pixel_format == AV_PIX_FMT_YUVJ420P) {
        stride_y = sw_frame_->linesize[0];
        stride_uv = sw_frame_->linesize[1] * 2; // interleaved UV doubles the chroma stride

        // Upload the Y plane.
        if (y_plane) {
            int y_size = sw_frame_->height * stride_y;
            if (y_size > 0) {
                cudaError_t err = cudaMemcpy(y_plane, sw_frame_->data[0], y_size, cudaMemcpyHostToDevice);
                if (err != cudaSuccess) {
                    std::cerr << "Failed to copy Y plane to GPU: " << cudaGetErrorString(err) << std::endl;
                    return false;
                }
            } else {
                std::cerr << "Invalid Y plane size: " << y_size << std::endl;
                return false;
            }
        }

        // Interleave the planar U/V data into NV12 chroma layout.
        if (uv_plane && sw_frame_->data[1] && sw_frame_->data[2]) {
            unsigned char* u_plane = sw_frame_->data[1];
            unsigned char* v_plane = sw_frame_->data[2];
            int uv_height = sw_frame_->height / 2;
            int uv_width = sw_frame_->width / 2;
            int u_stride = sw_frame_->linesize[1];
            int v_stride = sw_frame_->linesize[2];

            // Build the interleaved rows in a host staging buffer
            // (value-initialized, so stride padding uploads as zeros).
            std::vector<unsigned char> interleaved_uv(uv_height * stride_uv);

            for (int i = 0; i < uv_height; i++) {
                for (int j = 0; j < uv_width; j++) {
                    int idx = i * stride_uv + j * 2;
                    // BUG FIX: the original wrote into uv_plane (a device
                    // pointer) from CPU code here, instead of filling the
                    // staging buffer -- undefined behavior, and the buffer
                    // actually uploaded below stayed all zeros.
                    interleaved_uv[idx] = u_plane[i * u_stride + j];
                    interleaved_uv[idx + 1] = v_plane[i * v_stride + j];
                }
            }

            // Upload the interleaved chroma data to the device.
            int uv_size = uv_height * stride_uv;
            cudaError_t err = cudaMemcpy(uv_plane, interleaved_uv.data(), uv_size, cudaMemcpyHostToDevice);
            if (err != cudaSuccess) {
                std::cerr << "Failed to copy interleaved UV plane to GPU: " << cudaGetErrorString(err) << std::endl;
                return false;
            }
        }

        return true;

    } else {
        std::cerr << "Unsupported software pixel format after transfer: " << av_get_pix_fmt_name(pixel_format) << std::endl;
        return false;
    }
} 

bool VideoProcessor::readFrameGPU(unsigned char* y_plane, unsigned char* uv_plane, int& stride_y, int& stride_uv) {
    if (!format_ctx_ || !codec_ctx_) {
        std::cerr << "Error: Video not opened" << std::endl;
        return false;
    }

    AVPacket packet;
    int ret = 0;
    
    while ((ret = av_read_frame(format_ctx_, &packet)) >= 0) {
        if (packet.stream_index == video_stream_index_) {
            // 发送包到解码器
            ret = avcodec_send_packet(codec_ctx_, &packet);
            if (ret < 0) {
                std::cerr << "Error sending packet to decoder: " << avErrorString(ret) << std::endl;
                av_packet_unref(&packet);
                continue;
            }
            
            // 接收解码后的帧
            ret = avcodec_receive_frame(codec_ctx_, av_frame_);
            if (ret == 0) {
                AVPixelFormat pixel_format = static_cast<AVPixelFormat>(av_frame_->format);
                //std::cout << "Decoded frame format: " << av_get_pix_fmt_name(pixel_format) << std::endl;
                
                if (pixel_format == AV_PIX_FMT_CUDA) {
                    // 处理CUDA纹理帧
                    //std::cout << "Processing CUDA frame..." << std::endl;
                    if (processCUDATextureFrame(av_frame_, y_plane, uv_plane, stride_y, stride_uv)) {
                        current_frame_++;
                        av_packet_unref(&packet);
                        
                        if (current_frame_ % 100 == 0) {
                            std::cout << "Processed CUDA frame " << current_frame_ << std::endl;
                        }
                        return true;
                    } else {
                        std::cerr << "Failed to process CUDA texture frame" << std::endl;
                    }
                } else {
                    // 直接处理软件帧
                    //std::cout << "Processing software frame format: " << av_get_pix_fmt_name(pixel_format) << std::endl;
                    
                    if (pixel_format == AV_PIX_FMT_NV12 || pixel_format == AV_PIX_FMT_YUV420P || 
                        pixel_format == AV_PIX_FMT_YUVJ420P) {
                        
                        stride_y = av_frame_->linesize[0];
                        
                        if (pixel_format == AV_PIX_FMT_NV12) {
                            stride_uv = av_frame_->linesize[1];
                            
                            // 复制Y平面
                            if (y_plane) {
                                int y_size = av_frame_->height * stride_y;
                                memcpy(y_plane, av_frame_->data[0], y_size);
                            }
                            
                            // 复制UV平面
                            if (uv_plane && av_frame_->data[1]) {
                                int uv_size = (av_frame_->height / 2) * stride_uv;
                                memcpy(uv_plane, av_frame_->data[1], uv_size);
                            }
                        } else {
                            // YUV420P/YUVJ420P格式：需要转换为NV12格式
                            stride_uv = av_frame_->linesize[1] * 2;
                            
                            // 复制Y平面
                            if (y_plane) {
                                int y_size = av_frame_->height * stride_y;
                                memcpy(y_plane, av_frame_->data[0], y_size);
                            }
                            
                            // 交错UV平面
                            if (uv_plane && av_frame_->data[1] && av_frame_->data[2]) {
                                unsigned char* u_plane = av_frame_->data[1];
                                unsigned char* v_plane = av_frame_->data[2];
                                int uv_height = av_frame_->height / 2;
                                int uv_width = av_frame_->width / 2;
                                int u_stride = av_frame_->linesize[1];
                                int v_stride = av_frame_->linesize[2];
                                
                                for (int i = 0; i < uv_height; i++) {
                                    for (int j = 0; j < uv_width; j++) {
                                        int idx = i * stride_uv + j * 2;
                                        uv_plane[idx] = u_plane[i * u_stride + j];
                                        uv_plane[idx + 1] = v_plane[i * v_stride + j];
                                    }
                                }
                            }
                        }
                        
                        current_frame_++;
                        av_packet_unref(&packet);
                        
                        if (current_frame_ % 100 == 0) {
                            std::cout << "Processed software frame " << current_frame_ 
                                      << ", format: " << av_get_pix_fmt_name(pixel_format)
                                      << std::endl;
                        }
                        return true;
                    } else {
                        std::cerr << "Unsupported pixel format for GPU processing: " 
                                  << av_get_pix_fmt_name(pixel_format) << std::endl;
                        continue;
                    }
                }
            } else if (ret == AVERROR(EAGAIN)) {
                // 需要更多数据
                continue;
            } else {
                std::cerr << "Error receiving frame from decoder: " << avErrorString(ret) << std::endl;
            }
        }
        av_packet_unref(&packet);
    }
    
    // 刷新解码器
    if (ret == AVERROR_EOF) {
        std::cout << "End of file reached, flushing decoder..." << std::endl;
        avcodec_send_packet(codec_ctx_, nullptr);
        
        while (avcodec_receive_frame(codec_ctx_, av_frame_) == 0) {
            AVPixelFormat pixel_format = static_cast<AVPixelFormat>(av_frame_->format);
            
            if (pixel_format == AV_PIX_FMT_CUDA) {
                if (processCUDATextureFrame(av_frame_, y_plane, uv_plane, stride_y, stride_uv)) {
                    current_frame_++;
                    std::cout << "Flushed CUDA frame " << current_frame_ << std::endl;
                    return true;
                }
            } else {
                // 处理软件帧刷新...
                current_frame_++;
                std::cout << "Flushed software frame " << current_frame_ << std::endl;
                return true;
            }
        }
    }
    
    return false;
} 

// Open a video file/URL, select the best video stream, initialize a
// decoder (CUDA hardware preferred, software fallback) and populate
// video_info_.
//
// @param video_path  path or URL understood by FFmpeg
// @return true on success; on failure all partial state is released
bool VideoProcessor::open(const std::string& video_path) {
    if (avformat_open_input(&format_ctx_, video_path.c_str(), nullptr, nullptr) != 0) {
        std::cerr << "Error: Cannot open video file: " << video_path << std::endl;
        return false;
    }

    if (avformat_find_stream_info(format_ctx_, nullptr) < 0) {
        std::cerr << "Error: Cannot find stream information" << std::endl;
        close(); // previously leaked format_ctx_ on this path
        return false;
    }

    video_stream_index_ = av_find_best_stream(format_ctx_, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
    if (video_stream_index_ < 0) {
        std::cerr << "Error: Cannot find video stream" << std::endl;
        close();
        return false;
    }

    // Prefer the CUDA hardware decoder; fall back to software decoding.
    if (initHardwareDecoder()) {
        std::cout << "Using CUDA hardware decoder" << std::endl;
    } else {
        std::cout << "Falling back to software decoder" << std::endl;
        if (!initDecoder()) {
            close();
            return false;
        }
        video_info_.hardware_acceleration = false;
    }

    // Collect stream metadata.
    AVStream* video_stream = format_ctx_->streams[video_stream_index_];
    video_info_.width = codec_ctx_->width;
    video_info_.height = codec_ctx_->height;
    // NOTE: truncates fractional rates, e.g. 29.97 -> 29.
    video_info_.fps = static_cast<int>(av_q2d(video_stream->avg_frame_rate));

    if (video_stream->nb_frames > 0) {
        video_info_.total_frames = static_cast<int>(video_stream->nb_frames);
    } else if (video_stream->duration != AV_NOPTS_VALUE && video_info_.fps > 0) {
        // No explicit frame count: estimate from duration and frame rate.
        double duration_seconds = video_stream->duration * av_q2d(video_stream->time_base);
        video_info_.total_frames = static_cast<int>(duration_seconds * video_info_.fps);
    } else {
        // Unknown; 0 means "count dynamically while reading".
        video_info_.total_frames = 0;
    }

    video_info_.pixel_format = static_cast<AVPixelFormat>(video_stream->codecpar->format);
    video_info_.codec_name = avcodec_get_name(codec_ctx_->codec_id);

    if (video_info_.fps <= 0) {
        video_info_.fps = 30; // sane default when the container reports none
    }

    printVideoInfo();
    return true;
}

// Decode the next video frame and convert it to a BGR24 cv::Mat.
//
// Hardware frames are first downloaded to system memory. The returned
// Mat wraps rgb_frame_'s buffer WITHOUT copying, so its contents are
// only valid until the next readFrame() call (or close()) -- callers
// that keep the image must clone() it.
//
// @param frame  out: BGR image backed by the internal RGB buffer
// @return true when a frame was produced, false on EOF or fatal error
bool VideoProcessor::readFrame(cv::Mat& frame) {
    if (!format_ctx_ || !codec_ctx_) {
        std::cerr << "Error: Video not opened" << std::endl;
        return false;
    }

    AVPacket packet;
    int ret = 0;

    while ((ret = av_read_frame(format_ctx_, &packet)) >= 0) {
        if (packet.stream_index != video_stream_index_) {
            av_packet_unref(&packet);
            continue;
        }

        // The decoder keeps its own packet reference, so release ours
        // right away -- the previous version leaked the packet when
        // avcodec_receive_frame() returned EAGAIN and `continue` skipped
        // the unref at the loop tail.
        ret = avcodec_send_packet(codec_ctx_, &packet);
        av_packet_unref(&packet);
        if (ret < 0) {
            continue;
        }

        AVFrame* decoded_frame = video_info_.hardware_acceleration ? hw_frame_ : av_frame_;
        ret = avcodec_receive_frame(codec_ctx_, decoded_frame);
        if (ret != 0) {
            continue; // EAGAIN (need more input) or decode error: next packet
        }

        // Hardware frames must be downloaded before sws_scale can read them.
        if (video_info_.hardware_acceleration) {
            if (!transferFrameFromGPU(decoded_frame, sw_frame_)) {
                continue;
            }
            decoded_frame = sw_frame_;
        }

        // Lazily create the YUV->BGR converter on the first decoded frame.
        if (!sws_ctx_) {
            sws_ctx_ = sws_getContext(decoded_frame->width, decoded_frame->height, 
                                    static_cast<AVPixelFormat>(decoded_frame->format),
                                    decoded_frame->width, decoded_frame->height,
                                    AV_PIX_FMT_BGR24, SWS_BILINEAR,
                                    nullptr, nullptr, nullptr);
            if (!sws_ctx_) {
                std::cerr << "Error: Cannot create scaler" << std::endl;
                return false;
            }
        }

        // Lazily allocate the BGR destination buffer.
        if (!rgb_frame_->data[0]) {
            rgb_frame_->format = AV_PIX_FMT_BGR24;
            rgb_frame_->width = decoded_frame->width;
            rgb_frame_->height = decoded_frame->height;
            if (av_frame_get_buffer(rgb_frame_, 32) < 0) {
                std::cerr << "Error: Cannot allocate RGB frame" << std::endl;
                return false;
            }
        }

        // Convert YUV to BGR.
        sws_scale(sws_ctx_, decoded_frame->data, decoded_frame->linesize, 0,
                 decoded_frame->height, rgb_frame_->data, rgb_frame_->linesize);

        // Wrap (not copy) the RGB buffer in an OpenCV Mat header.
        frame = cv::Mat(rgb_frame_->height, rgb_frame_->width, CV_8UC3, 
                       rgb_frame_->data[0], rgb_frame_->linesize[0]);

        current_frame_++;
        return true;
    }

    // EOF: flush the decoder and emit any buffered frames.
    if (ret == AVERROR_EOF) {
        std::cout << "End of file reached, flushing decoder..." << std::endl;
        avcodec_send_packet(codec_ctx_, nullptr); // null packet enters drain mode

        while (avcodec_receive_frame(codec_ctx_, video_info_.hardware_acceleration ? hw_frame_ : av_frame_) == 0) {
            AVFrame* decoded_frame = video_info_.hardware_acceleration ? hw_frame_ : av_frame_;

            if (video_info_.hardware_acceleration) {
                if (!transferFrameFromGPU(decoded_frame, sw_frame_)) {
                    continue;
                }
                decoded_frame = sw_frame_;
            }

            if (!sws_ctx_) {
                sws_ctx_ = sws_getContext(
                    decoded_frame->width, decoded_frame->height, 
                    static_cast<AVPixelFormat>(decoded_frame->format),
                    decoded_frame->width, decoded_frame->height,
                    AV_PIX_FMT_BGR24, 
                    SWS_BILINEAR,
                    nullptr, nullptr, nullptr
                );
                if (!sws_ctx_) {
                    std::cerr << "Error: Cannot create scaler context" << std::endl;
                    return false;
                }
            }

            // FIX: this allocation used to be nested inside the !sws_ctx_
            // branch above and could be skipped; check it independently.
            if (!rgb_frame_->data[0]) {
                rgb_frame_->format = AV_PIX_FMT_BGR24;
                rgb_frame_->width = decoded_frame->width;
                rgb_frame_->height = decoded_frame->height;
                if (av_frame_get_buffer(rgb_frame_, 32) < 0) {
                    std::cerr << "Error: Cannot allocate RGB frame buffer" << std::endl;
                    return false;
                }
            }

            sws_scale(sws_ctx_, 
                     decoded_frame->data, decoded_frame->linesize, 0,
                     decoded_frame->height, 
                     rgb_frame_->data, rgb_frame_->linesize);

            frame = cv::Mat(rgb_frame_->height, rgb_frame_->width, CV_8UC3, 
                           rgb_frame_->data[0], rgb_frame_->linesize[0]);

            current_frame_++;
            std::cout << "Flushed frame " << current_frame_ << std::endl;
            return true;
        }
    }

    return false;
}

// Decode the next frame and copy its planes into caller-provided vectors
// as planar YUV420 (tightly packed, width x height Y + half-res U and V).
//
// Fixes over the previous version:
//  - plane copies honour linesize (rows can be padded beyond width, so a
//    single contiguous memcpy read the wrong bytes);
//  - NV12 frames -- the format the hardware-transfer path produces, where
//    data[2] is null -- are de-interleaved instead of dereferencing a
//    null pointer;
//  - packets are released immediately after avcodec_send_packet so no
//    path can leak them.
//
// @param y_plane  out: luma plane, width*height bytes
// @param u_plane  out: U plane, (width/2)*(height/2) bytes
// @param v_plane  out: V plane, (width/2)*(height/2) bytes
// @return true when a frame was produced, false on EOF or error
bool VideoProcessor::readFrameYUV(std::vector<uint8_t>& y_plane, std::vector<uint8_t>& u_plane, std::vector<uint8_t>& v_plane) {
    if (!format_ctx_ || !codec_ctx_) {
        std::cerr << "Error: Video not opened" << std::endl;
        return false;
    }

    AVPacket packet;
    int ret = 0;

    while ((ret = av_read_frame(format_ctx_, &packet)) >= 0) {
        if (packet.stream_index != video_stream_index_) {
            av_packet_unref(&packet);
            continue;
        }

        ret = avcodec_send_packet(codec_ctx_, &packet);
        av_packet_unref(&packet); // decoder holds its own reference
        if (ret < 0) {
            continue;
        }

        AVFrame* decoded_frame = video_info_.hardware_acceleration ? hw_frame_ : av_frame_;
        if (avcodec_receive_frame(codec_ctx_, decoded_frame) != 0) {
            continue;
        }

        if (video_info_.hardware_acceleration) {
            if (!transferFrameFromGPU(decoded_frame, sw_frame_)) {
                continue;
            }
            decoded_frame = sw_frame_;
        }

        const int width = decoded_frame->width;
        const int height = decoded_frame->height;

        // Copy Y row by row: linesize may exceed width due to padding.
        y_plane.resize(static_cast<size_t>(width) * height);
        for (int row = 0; row < height; ++row) {
            memcpy(y_plane.data() + static_cast<size_t>(row) * width,
                   decoded_frame->data[0] + static_cast<size_t>(row) * decoded_frame->linesize[0],
                   width);
        }

        const int cw = width / 2;
        const int ch = height / 2;
        u_plane.resize(static_cast<size_t>(cw) * ch);
        v_plane.resize(static_cast<size_t>(cw) * ch);

        if (decoded_frame->data[2]) {
            // Planar YUV420P: separate U and V planes, copied row by row.
            for (int row = 0; row < ch; ++row) {
                memcpy(u_plane.data() + static_cast<size_t>(row) * cw,
                       decoded_frame->data[1] + static_cast<size_t>(row) * decoded_frame->linesize[1],
                       cw);
                memcpy(v_plane.data() + static_cast<size_t>(row) * cw,
                       decoded_frame->data[2] + static_cast<size_t>(row) * decoded_frame->linesize[2],
                       cw);
            }
        } else if (decoded_frame->data[1]) {
            // NV12: chroma is interleaved UVUV... in data[1]; split it.
            for (int row = 0; row < ch; ++row) {
                const uint8_t* uv = decoded_frame->data[1] + static_cast<size_t>(row) * decoded_frame->linesize[1];
                for (int col = 0; col < cw; ++col) {
                    u_plane[static_cast<size_t>(row) * cw + col] = uv[col * 2];
                    v_plane[static_cast<size_t>(row) * cw + col] = uv[col * 2 + 1];
                }
            }
        }

        current_frame_++;
        return true;
    }

    return false;
}

// Release all decoding resources and reset playback state. Safe to call
// repeatedly; the pre-allocated AVFrame holders are kept and freed by
// the destructor.
void VideoProcessor::close() {
    if (sws_ctx_) {
        sws_freeContext(sws_ctx_);
        sws_ctx_ = nullptr;
    }

    // avcodec_free_context() also closes the codec; the extra
    // avcodec_close() call was deprecated and has been removed in
    // FFmpeg 7, so it is dropped here. The FFmpeg *_free/_unref helpers
    // below all null the pointer they are given.
    if (codec_ctx_) {
        avcodec_free_context(&codec_ctx_);
    }

    if (format_ctx_) {
        avformat_close_input(&format_ctx_);
    }

    if (hw_device_ctx_) {
        av_buffer_unref(&hw_device_ctx_);
    }

    current_frame_ = 0;
    video_stream_index_ = -1;
}
