#ifndef VIDEO_PROCESSOR_H
#define VIDEO_PROCESSOR_H

#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_cuda.h>
}

/// Basic metadata describing an opened video stream.
/// Filled in by VideoProcessor::open(); default-initialized to safe
/// "empty" values so an unopened instance never holds indeterminate data.
struct VideoInfo {
    int width{0};                                 ///< Frame width in pixels.
    int height{0};                                ///< Frame height in pixels.
    int fps{0};                                   ///< Frame rate (integer; fractional rates like 29.97 are truncated -- TODO confirm rounding in the .cpp).
    int total_frames{0};                          ///< Total frame count reported by the container.
    AVPixelFormat pixel_format{AV_PIX_FMT_NONE};  ///< Decoded pixel format.
    std::string codec_name;                       ///< Codec name (e.g. "h264").
    bool hardware_acceleration{false};            ///< True when a hardware decoder is in use.
};

class VideoProcessor {
public:
    VideoProcessor();
    ~VideoProcessor();
    
    bool open(const std::string& video_path);
    void close();
    
    bool readFrame(cv::Mat& frame);
    bool readFrameYUV(std::vector<uint8_t>& y_plane, std::vector<uint8_t>& u_plane, std::vector<uint8_t>& v_plane);
    
    // GPU解码相关方法
    bool readFrameGPU(unsigned char* y_plane, unsigned char* uv_plane, int& stride_y, int& stride_uv);
    bool initHardwareDecoder();
    
    const VideoInfo& getVideoInfo() const { return video_info_; }
    int getCurrentFrame() const { return current_frame_; }
    void printVideoInfo() const;
    
private:
    AVFormatContext* format_ctx_;
    AVCodecContext* codec_ctx_;
    AVFrame* av_frame_;
    AVFrame* hw_frame_;
    AVFrame* sw_frame_;
    AVFrame* rgb_frame_;
    struct SwsContext* sws_ctx_;
    int video_stream_index_;
    VideoInfo video_info_;
    int current_frame_;
    
    // 硬件解码相关
    AVBufferRef* hw_device_ctx_;
    enum AVPixelFormat hw_pix_fmt_;
    
    bool initDecoder();
    bool initHardwareDevice();
    bool transferFrameFromGPU(AVFrame* hw_frame, AVFrame* sw_frame);
    
    // 新增：处理CUDA帧的方法
    bool processCUDATextureFrame(AVFrame* frame, unsigned char* y_plane, unsigned char* uv_plane, int& stride_y, int& stride_uv);
};

#endif // VIDEO_PROCESSOR_H
