#![allow(unused_assignments)]

use sdl2::keyboard::Keycode::S;
use std::ffi::c_void;
use std::ptr::{null, null_mut};
use std::slice;
use std::slice::from_mut;
use std::time::Instant;

/// Output of one decode step: decoded plane/sample buffers plus the metadata
/// a consumer needs to interpret them.
pub struct DecoderData {
    // One Vec per plane (video: Y/U/V or NV12 planes; audio: resampled PCM
    // in data[0]). A plane whose source pointer was null becomes an empty Vec.
    pub data: Vec<Vec<u8>>,
    // Per-plane line sizes (bytes per row), copied from AVFrame.linesize.
    pub line_size: Vec<i32>,
    // Presentation timestamp copied from the decoded frame.
    pub pts: i64,
    // End-of-stream marker. NOTE(review): never written in this file —
    // confirm which caller sets it.
    pub end: bool,
    // Pixel format of the decoded video data; defaults to YUV420P.
    pub format: ffmpeg_sys_next::AVPixelFormat,
    // Stream index the data belongs to; -1 when unset.
    pub stream_index: i32,
}

impl DecoderData {
    /// Creates an empty `DecoderData`: no planes, zero pts, not at end of
    /// stream, YUV420P as the default pixel format, and an unset (-1)
    /// stream index.
    pub fn new() -> Self {
        Self {
            data: Vec::new(),
            line_size: Vec::new(),
            pts: 0,
            end: false,
            format: ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV420P,
            stream_index: -1,
        }
    }
}

// SAFETY: DecoderData owns all of its data (Vecs and plain values, no raw
// pointers), so moving it across threads is sound. NOTE(review): for that
// same reason the compiler would derive Send automatically — this manual
// impl appears redundant; confirm before removing.
unsafe impl Send for DecoderData {}

/// An owned, heap-copied snapshot of an `AVPacket`, so packet contents can be
/// moved between threads without keeping FFmpeg-owned memory alive.
/// Field names mirror the corresponding `AVPacket` fields.
pub struct FramePacket {
    // Presentation timestamp (stream time_base units).
    pub pts: i64,
    // Decompression timestamp (stream time_base units).
    pub dts: i64,
    // Copy of AVPacket.data (size bytes).
    pub data: Vec<u8>,
    // Byte length of `data`, as reported by AVPacket.size.
    pub size: i32,
    // Index of the stream this packet belongs to.
    pub stream_index: i32,
    // AVPacket.flags bitmask (e.g. keyframe flag).
    pub flags: i32,
    // Copy of the packet's reference-counted buffer (AVPacket.buf.data).
    pub buffer: Vec<u8>,
    // Byte length of `buffer` (AVPacket.buf.size).
    pub buffer_size: i32,
    // Side data bytes. NOTE(review): never filled in this file — confirm use.
    pub side_data: Vec<u8>,
    // Number of side-data elements (AVPacket.side_data_elems).
    pub side_data_elems: i32,
    // Packet duration (stream time_base units).
    pub duration: i64,
    // Byte position in the source file, -1 if unknown.
    pub pos: i64,
}

impl Clone for FramePacket {
    /// Deep-copies the packet: the `Vec` buffers are cloned; every scalar
    /// field is `Copy` and is copied directly (calling `.clone()` on them,
    /// as the previous version did, was redundant).
    fn clone(&self) -> Self {
        Self {
            pts: self.pts,
            dts: self.dts,
            data: self.data.clone(),
            size: self.size,
            stream_index: self.stream_index,
            flags: self.flags,
            buffer: self.buffer.clone(),
            buffer_size: self.buffer_size,
            side_data: self.side_data.clone(),
            side_data_elems: self.side_data_elems,
            duration: self.duration,
            pos: self.pos,
        }
    }
}

impl FramePacket {
    /// Constructs a zeroed packet: empty data/buffer/side-data, zero
    /// timestamps, and an unset (-1) stream index.
    pub fn new() -> Self {
        Self {
            pts: 0,
            dts: 0,
            data: vec![],
            size: 0,
            stream_index: -1,
            flags: 0,
            buffer: vec![],
            buffer_size: 0,
            side_data: Vec::new(),
            side_data_elems: 0,
            duration: 0,
            pos: 0,
        }
    }
}

/// All FFmpeg state needed to demux and decode the video stream of one file.
/// Populated by `load_video`; released by `free_video_data`.
/// NOTE(review): the non-snake-case field names (`isCuda`, `isNvidiaDecoder`)
/// are part of the public interface and are kept as-is.
#[derive(Debug)]
pub struct VideoContext {
    // Frame width in pixels (from the codec parameters).
    pub width: i32,
    // Frame height in pixels (from the codec parameters).
    pub height: i32,
    // Average frame rate, truncated to a whole number via av_q2d.
    pub fps: i32,
    // Time base of the video stream.
    pub time_base: ffmpeg_sys_next::AVRational,
    // Optional software-scaler context; freed in free_video_data.
    pub sws_scale_ctx: *mut ffmpeg_sys_next::SwsContext,
    // Demuxer context for the opened file (shared with AudioContext).
    pub format_context: *mut ffmpeg_sys_next::AVFormatContext,
    // Codec parameters of the selected video stream.
    pub video_codec_parameters: *mut ffmpeg_sys_next::AVCodecParameters,
    // Decoder found for the video stream.
    pub video_codec: *mut ffmpeg_sys_next::AVCodec,
    // Index of the selected video stream within the format context.
    pub video_stream_index: i32,
    // Opened decoder context.
    pub codec_context: *mut ffmpeg_sys_next::AVCodecContext,
    // Scratch frame for decoding.
    pub frame: *mut ffmpeg_sys_next::AVFrame,
    // Destination frame for GPU->CPU transfers when hardware decoding.
    pub hw_frame: *mut ffmpeg_sys_next::AVFrame,
    // Scratch packet.
    pub packet: *mut ffmpeg_sys_next::AVPacket,
    // CUDA hardware configuration selected in load_video (null if none).
    pub hw_config: *const ffmpeg_sys_next::AVCodecHWConfig,
    // True when CUDA hardware decoding is active for this stream.
    pub isCuda: bool,
    // Pixel format of the stream (probed via get_video_format if unknown).
    pub format: ffmpeg_sys_next::AVPixelFormat,
    // Caller's request to attempt NVIDIA hardware decoding.
    pub isNvidiaDecoder: bool,
}

impl VideoContext {
    pub fn new() -> Self {
        return Self {
            width: 0,
            height: 0,
            fps: 0,
            time_base: ffmpeg_sys_next::AVRational { num: 0, den: 0 },
            sws_scale_ctx: std::ptr::null_mut(),
            format_context: std::ptr::null_mut(),
            video_codec_parameters: std::ptr::null_mut(),
            video_codec: std::ptr::null_mut(),
            video_stream_index: 0,
            codec_context: std::ptr::null_mut(),
            frame: std::ptr::null_mut(),
            hw_frame: std::ptr::null_mut(),
            packet: std::ptr::null_mut(),
            hw_config: std::ptr::null_mut(),
            isCuda: false,
            format: ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_NONE,
            isNvidiaDecoder: false,
        };
    }
}

// SAFETY: NOTE(review): this asserts it is sound to move the raw FFmpeg
// pointers to another thread. That holds only if exactly one thread uses the
// context at a time (FFmpeg contexts are not internally synchronized) —
// confirm the threading model of the callers.
unsafe impl Send for VideoContext {}

impl Clone for VideoContext {
    /// Shallow copy: every field is `Copy` (scalars, `AVRational`, and raw
    /// pointers), so the clone ALIASES the same FFmpeg objects as the
    /// original. Calling `.clone()` on these fields, as the previous version
    /// did, was redundant. Freeing one copy invalidates the other.
    fn clone(&self) -> Self {
        Self {
            width: self.width,
            fps: self.fps,
            height: self.height,
            time_base: self.time_base,
            sws_scale_ctx: self.sws_scale_ctx,
            format_context: self.format_context,
            video_codec_parameters: self.video_codec_parameters,
            video_codec: self.video_codec,
            video_stream_index: self.video_stream_index,
            codec_context: self.codec_context,
            frame: self.frame,
            hw_frame: self.hw_frame,
            packet: self.packet,
            hw_config: self.hw_config,
            isCuda: self.isCuda,
            format: self.format,
            isNvidiaDecoder: self.isNvidiaDecoder,
        }
    }
}

/// All FFmpeg state needed to decode and resample the audio stream of one
/// file. Populated by `load_video`; the `out_*` fields describe the
/// resampler's output format (interleaved S16 by default).
#[derive(Debug)]
pub struct AudioContext {
    // Channel count of the source stream.
    pub channels: i32,
    // Sample rate of the source stream (Hz).
    pub sample_rate: i32,
    // Output channel layout bitmask for swresample.
    pub out_channel_layout: i64,
    // Samples per output frame (defaults to 1024).
    pub out_nb_samples: i32,
    // Output sample format (defaults to S16).
    pub out_sample_fmt: ffmpeg_sys_next::AVSampleFormat,
    // Output sample rate (Hz); set equal to the source rate in load_video.
    pub out_sample_rate: i32,
    // Time base of the audio stream.
    pub time_base: ffmpeg_sys_next::AVRational,
    // Byte size of one resampled output buffer (av_samples_get_buffer_size).
    pub out_buffer_size: i32,
    // Demuxer context (shared with VideoContext — same pointer).
    pub format_context: *mut ffmpeg_sys_next::AVFormatContext,
    // Codec parameters of the selected audio stream.
    pub audio_codec_parameters: *mut ffmpeg_sys_next::AVCodecParameters,
    // Decoder found for the audio stream.
    pub audio_codec: *mut ffmpeg_sys_next::AVCodec,
    // Index of the selected audio stream; -1 when absent.
    pub audio_stream_index: i32,
    // Opened decoder context.
    pub codec_context: *mut ffmpeg_sys_next::AVCodecContext,
    // Scratch frame for decoding.
    pub frame: *mut ffmpeg_sys_next::AVFrame,
    // Scratch packet.
    pub packet: *mut ffmpeg_sys_next::AVPacket,
    // swresample context converting decoded frames to the out_* format.
    pub au_convert_ctx: *mut ffmpeg_sys_next::SwrContext,
}

// SAFETY: NOTE(review): as with VideoContext, this is sound only if a single
// thread uses the raw FFmpeg pointers at a time — confirm with callers.
unsafe impl Send for AudioContext {}

impl AudioContext {
    pub fn new() -> Self {
        return Self {
            channels: 2,
            sample_rate: 44100,
            out_channel_layout: 0,
            out_nb_samples: 1024,
            out_sample_fmt: ffmpeg_sys_next::AVSampleFormat::AV_SAMPLE_FMT_S16,
            out_sample_rate: 44100,
            time_base: ffmpeg_sys_next::AVRational { num: 0, den: 0 },
            out_buffer_size: 0,
            format_context: std::ptr::null_mut(),
            audio_codec_parameters: std::ptr::null_mut(),
            audio_codec: std::ptr::null_mut(),
            audio_stream_index: -1,
            codec_context: std::ptr::null_mut(),
            frame: std::ptr::null_mut(),
            packet: std::ptr::null_mut(),
            au_convert_ctx: std::ptr::null_mut(),
        };
    }
}

impl Clone for AudioContext {
    /// Shallow copy: every field is `Copy` (scalars, `AVRational`, raw
    /// pointers), so the clone ALIASES the same FFmpeg objects as the
    /// original. The `.clone()` calls the previous version made on these
    /// fields were redundant.
    fn clone(&self) -> Self {
        Self {
            channels: self.channels,
            sample_rate: self.sample_rate,
            out_channel_layout: self.out_channel_layout,
            out_nb_samples: self.out_nb_samples,
            out_sample_fmt: self.out_sample_fmt,
            out_sample_rate: self.out_sample_rate,
            time_base: self.time_base,
            out_buffer_size: self.out_buffer_size,
            format_context: self.format_context,
            audio_codec_parameters: self.audio_codec_parameters,
            audio_codec: self.audio_codec,
            audio_stream_index: self.audio_stream_index,
            codec_context: self.codec_context,
            frame: self.frame,
            packet: self.packet,
            au_convert_ctx: self.au_convert_ctx,
        }
    }
}

/// `AVCodecContext.get_format` callback installed when CUDA decoding is
/// enabled.
///
/// FFmpeg hands the decoder's candidate formats in `pix_fmts`, terminated by
/// `AV_PIX_FMT_NONE`, and the callback must return one of them. The previous
/// version returned `AV_PIX_FMT_CUDA` unconditionally, which is invalid when
/// the decoder does not offer CUDA; we now scan the list and only fall back
/// to the first offered format (or CUDA when the list is empty/null, which
/// preserves the old answer in the degenerate case).
pub unsafe extern "C" fn getHwFormat(
    ctx: *mut ffmpeg_sys_next::AVCodecContext,
    pix_fmts: *const ffmpeg_sys_next::AVPixelFormat,
) -> ffmpeg_sys_next::AVPixelFormat {
    let _ = ctx; // unused; signature fixed by the FFmpeg callback type
    let mut p = pix_fmts;
    while !p.is_null() && *p != ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_NONE {
        if *p == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_CUDA {
            return ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_CUDA;
        }
        p = p.offset(1);
    }
    // CUDA was not offered: return the first candidate so avcodec can still
    // proceed with software decoding.
    if pix_fmts.is_null() || *pix_fmts == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_NONE {
        ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_CUDA
    } else {
        *pix_fmts
    }
}

/// Opens the media file at `video_path`, finds its video stream and (first)
/// audio stream, opens decoders for both, and fills `video_ctx` / `audio_ctx`
/// with the resulting FFmpeg objects: format context, codec parameters,
/// opened codec contexts, scratch frames/packets, and (for audio) a
/// configured swresample context.
///
/// When `video_ctx.isNvidiaDecoder` is true, additionally probes the video
/// codec for a CUDA hardware configuration and, for codec/pixel-format
/// combinations that worked in practice (HEVC yuv420p/yuv444p, H.264/VP9
/// yuv420p), creates a CUDA hw device context and installs `getHwFormat`.
///
/// Panics (via `assert!`) when the file cannot be opened, no video stream
/// exists, or codec setup fails.
pub unsafe fn load_video(
    video_ctx: &mut VideoContext,
    audio_ctx: &mut AudioContext,
    video_path: &str,
) {
    let mut format_context = ffmpeg_sys_next::avformat_alloc_context();
    assert!(
        !format_context.is_null(),
        "ERROR could not allocate memory for Format Context"
    );
    let cstr_video_path = std::ffi::CString::new(video_path).unwrap();
    if ffmpeg_sys_next::avformat_open_input(
        &mut format_context,
        cstr_video_path.as_ptr(),
        std::ptr::null_mut(),
        std::ptr::null_mut(),
    ) != 0
    {
        assert!(false, "ERROR could not open the file");
    }
    println!(
        "Format {}, duration {} us",
        std::ffi::CStr::from_ptr((*(*format_context).iformat).long_name)
            .to_str()
            .unwrap(),
        (*format_context).duration
    );

    let mut video_codec_parameters: *mut ffmpeg_sys_next::AVCodecParameters = std::ptr::null_mut();
    let mut video_codec: *mut ffmpeg_sys_next::AVCodec = std::ptr::null_mut();
    let mut video_stream_index: i32 = -1;

    let mut audio_codec_parameters: *mut ffmpeg_sys_next::AVCodecParameters = std::ptr::null_mut();
    let mut audio_codec: *mut ffmpeg_sys_next::AVCodec = std::ptr::null_mut();
    let mut audio_stream_index: i32 = -1;

    // Walk every stream. NOTE(review): the video branch has no
    // `video_stream_index == -1` guard, so the LAST video stream wins,
    // while the audio branch keeps the FIRST audio stream — confirm this
    // asymmetry is intended.
    for i in 0..(*format_context).nb_streams {
        let av_streams = (*format_context).streams.offset(i as isize);
        let local_codec_parameters = (*(*av_streams)).codecpar;
        let local_codec = ffmpeg_sys_next::avcodec_find_decoder((*local_codec_parameters).codec_id);
        if local_codec.is_null() {
            println!("ERROR unsupported codec!");
            continue;
        }

        if (*local_codec_parameters).codec_type == ffmpeg_sys_next::AVMediaType::AVMEDIA_TYPE_VIDEO
        {
            video_stream_index = i as i32;
            video_codec_parameters = local_codec_parameters;
            video_codec = local_codec as *mut ffmpeg_sys_next::AVCodec;
            // NOTE(review): `video_path.as_ptr()` is NOT NUL-terminated (it is
            // a Rust &str, not a CString); av_dump_format expects a C string.
            // `cstr_video_path.as_ptr()` is available above — confirm and fix.
            ffmpeg_sys_next::av_dump_format(
                format_context,
                video_stream_index,
                video_path.as_ptr() as *const i8,
                0,
            );

            // Truncate the average frame rate to a whole fps.
            (*video_ctx).fps = ffmpeg_sys_next::av_q2d(
                (*(*(*format_context).streams).offset(i as isize)).avg_frame_rate,
            ) as i32;
            (*video_ctx).time_base = (*(*(*format_context).streams).offset(i as isize)).time_base;
            (*video_ctx).width = (*video_codec_parameters).width;
            (*video_ctx).height = (*video_codec_parameters).height;

            println!(
                "Video resolution: {} x {}",
                (*local_codec_parameters).width,
                (*local_codec_parameters).height
            );

            // Probe the decoder's hardware configurations for a CUDA entry.
            let mut index = 0;
            if (*video_ctx).isNvidiaDecoder {
                loop {
                    let config = ffmpeg_sys_next::avcodec_get_hw_config(local_codec, index);
                    if config == null() {
                        // Exhausted the list without finding CUDA support.
                        println!("fuweicong node cuda decoder");
                        (*video_ctx).isCuda = false;
                        break;
                    }
                    if (*config).device_type
                        == ffmpeg_sys_next::AVHWDeviceType::AV_HWDEVICE_TYPE_CUDA
                    {
                        println!("fuweicong {:?}", (*config).device_type);
                        println!("fuweicong {:?}", (*config).pix_fmt);
                        (*video_ctx).hw_config = config;
                        (*video_ctx).isCuda = true;
                        break;
                    }
                    index = index + 1;
                }
            }
        } else if (*local_codec_parameters).codec_type
            == ffmpeg_sys_next::AVMediaType::AVMEDIA_TYPE_AUDIO && audio_stream_index == -1
        {
            // TODO audio
            audio_stream_index = i as i32;
            audio_codec_parameters = local_codec_parameters;
            audio_codec = local_codec as *mut ffmpeg_sys_next::AVCodec;
            (*audio_ctx).time_base = (*(*(*format_context).streams).offset(i as isize)).time_base;
            (*audio_ctx).channels = (*local_codec_parameters).channels;
            (*audio_ctx).sample_rate = (*local_codec_parameters).sample_rate;
            // Resample to the source rate (no rate conversion).
            (*audio_ctx).out_sample_rate = (*local_codec_parameters).sample_rate;
            (*audio_ctx).out_channel_layout =
                ffmpeg_sys_next::av_get_default_channel_layout((*local_codec_parameters).channels)
                    as i64;

            audio_ctx.out_buffer_size = ffmpeg_sys_next::av_samples_get_buffer_size(
                std::ptr::null_mut(),
                ffmpeg_sys_next::av_get_channel_layout_nb_channels(
                    audio_ctx.out_channel_layout as u64,
                ),
                audio_ctx.out_nb_samples,
                audio_ctx.out_sample_fmt,
                1,
            ); // compute the audio output buffer size
            println!("{}", audio_ctx.out_buffer_size);
            println!(
                "Audio channels: {}, sample rate: {}",
                (*local_codec_parameters).channels,
                (*local_codec_parameters).sample_rate
            );
        }
        println!(
            "\tID: {:?}, bitrate: {}",
            (*local_codec).id,
            (*local_codec_parameters).bit_rate
        );
    }

    assert!(
        video_stream_index != -1,
        "File does not contain a video stream!"
    );

    if video_stream_index != -1 {
        let mut codec_context = ffmpeg_sys_next::avcodec_alloc_context3(video_codec);
        assert!(
            !codec_context.is_null(),
            "Failed to allocate memory for AVCodecContext"
        );

        if ffmpeg_sys_next::avcodec_parameters_to_context(codec_context, video_codec_parameters) < 0
        {
            assert!(false, "failed to copy codec params to codec context");
        }

        println!("codec_id {:?}", (*codec_context).codec_id);
        if ffmpeg_sys_next::avcodec_open2(codec_context, video_codec, std::ptr::null_mut()) < 0 {
            assert!(false, "failed to open codec through avcodec_open2");
        }
        (*video_ctx).format = (*codec_context).pix_fmt;

        if (*video_ctx).isNvidiaDecoder {
            // Some containers only expose the pixel format once decoding
            // starts; probe it by decoding one packet if it is still unknown.
            if (*video_ctx).format == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_NONE {
                (*video_ctx).format =
                    get_video_format(video_path, codec_context, video_stream_index);
            }

            // Only enable CUDA for codec/pixel-format pairs known to work.
            (*video_ctx).isCuda = false;
            if (*codec_context).codec_id == ffmpeg_sys_next::AVCodecID::AV_CODEC_ID_HEVC {
                // hevc yuv444p or yuv420p
                if (*video_ctx).format == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV444P
                    || (*video_ctx).format == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV420P
                {
                    (*video_ctx).isCuda = true;
                }
            } else if (*codec_context).codec_id == ffmpeg_sys_next::AVCodecID::AV_CODEC_ID_H264
                || (*codec_context).codec_id == ffmpeg_sys_next::AVCodecID::AV_CODEC_ID_VP9
            {
                if (*video_ctx).format == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV420P {
                    (*video_ctx).isCuda = true;
                }
            }

            if (*video_ctx).isCuda {
                // NOTE(review): av_hwdevice_ctx_create allocates its own
                // AVBufferRef and OVERWRITES this pointer, so the buffer
                // allocated here (width*height*3 bytes) leaks — it should
                // start as null. Confirm and remove the av_buffer_alloc.
                let mut hw_device_ctx: *mut ffmpeg_sys_next::AVBufferRef =
                    ffmpeg_sys_next::av_buffer_alloc(
                        ((*video_ctx).width * (*video_ctx).height * 3) as usize,
                    );

                if ffmpeg_sys_next::av_hwdevice_ctx_create(
                    &mut hw_device_ctx,
                    (*(*video_ctx).hw_config).device_type,
                    std::ptr::null_mut(),
                    std::ptr::null_mut(),
                    0,
                ) < 0
                {
                    println!("Failed to create specified HW device.\n");
                    // Creating the hw-device context failed; CUDA can't be used.
                    (*video_ctx).isCuda = false;
                } else {
                    // Re-create the codec context so it can be re-opened with
                    // the hw device attached.
                    // NOTE(review): the previously opened codec_context is
                    // never freed here — it leaks; avcodec_free_context it
                    // before reassigning. Confirm.
                    codec_context = ffmpeg_sys_next::avcodec_alloc_context3(video_codec);

                    if ffmpeg_sys_next::avcodec_parameters_to_context(
                        codec_context,
                        video_codec_parameters,
                    ) < 0
                    {
                        assert!(false, "failed to copy codec params to codec context");
                    }

                    (*codec_context).get_format = Some(getHwFormat);
                    (*codec_context).hw_device_ctx = hw_device_ctx;
                    (*codec_context).pix_fmt = (*video_ctx).format;
                    println!("codec_context {:?}", (*codec_context).pix_fmt);
                    if ffmpeg_sys_next::avcodec_open2(
                        codec_context,
                        video_codec,
                        std::ptr::null_mut(),
                    ) < 0
                    {
                        assert!(false, "failed to open codec through avcodec_open2");
                    }
                }
            }
        }

        let frame = ffmpeg_sys_next::av_frame_alloc();
        assert!(!frame.is_null(), "failed to allocate memory for AVFrame");
        let packet = ffmpeg_sys_next::av_packet_alloc();
        assert!(!packet.is_null(), "failed to allocate memory for AVPacket");

        // Publish everything into the caller's context.
        (*video_ctx).format_context = format_context;
        (*video_ctx).video_codec_parameters = video_codec_parameters;
        (*video_ctx).video_codec = video_codec;
        (*video_ctx).video_stream_index = video_stream_index;
        (*video_ctx).codec_context = codec_context;
        (*video_ctx).frame = frame;
        (*video_ctx).hw_frame = ffmpeg_sys_next::av_frame_alloc();
        (*video_ctx).packet = packet;
    }

    if audio_stream_index != -1 {
        let codec_context = ffmpeg_sys_next::avcodec_alloc_context3(audio_codec);
        assert!(
            !codec_context.is_null(),
            "Failed to allocate memory for AVCodecContext"
        );

        if ffmpeg_sys_next::avcodec_parameters_to_context(codec_context, audio_codec_parameters) < 0
        {
            assert!(false, "failed to copy codec params to codec context");
        }

        if ffmpeg_sys_next::avcodec_open2(codec_context, audio_codec, std::ptr::null_mut()) < 0 {
            assert!(false, "failed to open codec through avcodec_open2");
        }

        let frame = ffmpeg_sys_next::av_frame_alloc();
        assert!(!frame.is_null(), "failed to allocate memory for AVFrame");
        let packet = ffmpeg_sys_next::av_packet_alloc();
        assert!(!packet.is_null(), "failed to allocate memory for AVPacket");

        audio_ctx.au_convert_ctx = ffmpeg_sys_next::swr_alloc();
        audio_ctx.au_convert_ctx = ffmpeg_sys_next::swr_alloc_set_opts(
            audio_ctx.au_convert_ctx,
            audio_ctx.out_channel_layout,
            audio_ctx.out_sample_fmt,
            audio_ctx.out_sample_rate,
            ffmpeg_sys_next::av_get_default_channel_layout((*codec_context).channels),
            (*codec_context).sample_fmt,
            (*codec_context).sample_rate,
            0,
            std::ptr::null_mut(),
        ); // configure the resampler (in -> out format)

        ffmpeg_sys_next::swr_init(audio_ctx.au_convert_ctx); // apply the resampler parameters

        // Publish everything into the caller's context. Note format_context
        // is the SAME pointer stored in video_ctx — it is shared, not owned.
        (*audio_ctx).format_context = format_context;
        (*audio_ctx).audio_codec_parameters = audio_codec_parameters;
        (*audio_ctx).audio_codec = audio_codec;
        (*audio_ctx).audio_stream_index = audio_stream_index;
        (*audio_ctx).codec_context = codec_context;
        (*audio_ctx).frame = frame;
        (*audio_ctx).packet = packet;
    }
}

/*
When the pixel format cannot be read from the stream metadata up front, it
only becomes available once decoding has started.
*/
/// Probes the pixel format of the video stream at `video_stream_index` by
/// opening `video_path` a second time and feeding the first video packet to
/// `codec_context`, which makes the decoder fill in `pix_fmt`.
///
/// Returns `AV_PIX_FMT_NONE` when the file contains no packet for that
/// stream. Panics (via `assert!`) when the file cannot be opened.
///
/// The previous version returned from inside the read loop, leaking the
/// probe's format context and packet on every successful call (and never
/// unref'ing skipped packets); cleanup now runs on all paths.
pub unsafe fn get_video_format(
    video_path: &str,
    codec_context: *mut ffmpeg_sys_next::AVCodecContext,
    video_stream_index: i32,
) -> ffmpeg_sys_next::AVPixelFormat {
    let mut format_context = ffmpeg_sys_next::avformat_alloc_context();
    assert!(
        !format_context.is_null(),
        "ERROR could not allocate memory for Format Context"
    );
    let cstr_video_path = std::ffi::CString::new(video_path).unwrap();
    assert!(
        ffmpeg_sys_next::avformat_open_input(
            &mut format_context,
            cstr_video_path.as_ptr(),
            std::ptr::null_mut(),
            std::ptr::null_mut(),
        ) == 0,
        "ERROR could not open the file"
    );

    let mut packet = ffmpeg_sys_next::av_packet_alloc();
    let mut format = ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_NONE;
    while ffmpeg_sys_next::av_read_frame(format_context, packet) >= 0 {
        if (*packet).stream_index == video_stream_index {
            let response = ffmpeg_sys_next::avcodec_send_packet(codec_context, packet);
            if response < 0 {
                println!("Failed to decode packet");
            } else {
                println!("{:?}", (*codec_context).pix_fmt);
            }
            // Same answer on both paths, as before: whatever pix_fmt the
            // decoder now reports.
            format = (*codec_context).pix_fmt;
            break;
        }
        // Drop packets belonging to other streams before reusing the packet.
        ffmpeg_sys_next::av_packet_unref(packet);
    }

    ffmpeg_sys_next::avformat_close_input(&mut format_context);
    ffmpeg_sys_next::av_packet_free(&mut packet);
    format
}

/// Reads the next packet from `video_ctx.format_context` and deep-copies its
/// fields and buffers into `frame_packet`. Returns `false` at end of stream
/// or on read error.
///
/// `audio_ctx` and `data` are unused but kept for interface compatibility
/// with existing callers.
///
/// Fixes over the previous version: the per-call `AVPacket` struct is now
/// released with `av_packet_free` (`av_packet_unref` alone leaked the packet
/// struct itself on every call), and a packet with a null `buf` no longer
/// dereferences it.
pub unsafe fn read_packet(
    video_ctx: &mut VideoContext,
    audio_ctx: &mut AudioContext,
    frame_packet: &mut FramePacket,
    data: &mut Vec<u8>,
) -> bool {
    let _ = audio_ctx;
    let _ = data;
    let format_context = video_ctx.format_context;
    let mut packet = ffmpeg_sys_next::av_packet_alloc();

    let got_packet = ffmpeg_sys_next::av_read_frame(format_context, packet) >= 0;
    if got_packet {
        frame_packet.pts = (*packet).pts;
        frame_packet.dts = (*packet).dts;
        frame_packet.size = (*packet).size;
        frame_packet.stream_index = (*packet).stream_index;
        frame_packet.duration = (*packet).duration;
        frame_packet.pos = (*packet).pos;
        frame_packet.flags = (*packet).flags;

        frame_packet.data =
            slice::from_raw_parts((*packet).data, frame_packet.size as usize).to_vec();

        // AVPacket.buf may legitimately be null (non-refcounted packet);
        // the old unconditional deref would crash in that case.
        if (*packet).buf.is_null() {
            frame_packet.buffer_size = 0;
            frame_packet.buffer = Vec::new();
        } else {
            frame_packet.buffer_size = (*(*packet).buf).size as i32;
            frame_packet.buffer =
                slice::from_raw_parts((*(*packet).buf).data, frame_packet.buffer_size as usize)
                    .to_vec();
        }
    }

    // av_packet_free also unrefs, so this covers both the success and the
    // end-of-stream path.
    ffmpeg_sys_next::av_packet_free(&mut packet);
    got_packet
}

/// Rebuilds an `AVPacket` from the owned `frame_packet` snapshot, sends it to
/// the matching decoder (video or audio by stream index), and copies the
/// decoded plane/sample data into `decoder_data`. Returns `false` when the
/// decoder produced no frame yet (EAGAIN), the stream ended (EOF), or
/// decoding failed.
///
/// NOTE(review): the early `return false` paths below leak `packet` and
/// `frame` (allocated at the top and only released at the end) — confirm and
/// add cleanup.
pub unsafe fn decoder_frame(
    video_ctx: &mut VideoContext,
    audio_ctx: &mut AudioContext,
    decoder_data: &mut DecoderData,
    frame_packet: &mut FramePacket,
) -> bool {
    let packet = ffmpeg_sys_next::av_packet_alloc();
    let mut frame = ffmpeg_sys_next::av_frame_alloc();
    let mut hw_frame = video_ctx.hw_frame;
    // NOTE(review): this buffer's data pointer is immediately replaced below
    // with memory owned by frame_packet.buffer (a Rust Vec). When
    // av_packet_unref runs at the end it will try to av_free that Rust-owned
    // memory — double-free/UB risk, and the buffer allocated here leaks.
    // Confirm; av_packet_from_data or copying into the AVBuffer would be safe.
    (*packet).buf = ffmpeg_sys_next::av_buffer_alloc(frame_packet.buffer_size as usize);
    (*packet).pts = frame_packet.pts;
    (*packet).dts = frame_packet.dts;
    (*packet).size = frame_packet.size;
    (*packet).stream_index = frame_packet.stream_index;
    (*packet).duration = frame_packet.duration;
    (*packet).pos = frame_packet.pos;
    (*packet).flags = frame_packet.flags;
    (*(*packet).buf).size = frame_packet.buffer_size as usize;
    (*(*packet).buf).data = frame_packet.buffer.as_mut_ptr();
    (*packet).data = frame_packet.data.as_mut_ptr();

    let video_stream_index = video_ctx.video_stream_index;
    let audio_stream_index = audio_ctx.audio_stream_index;

    let mut response = 0;
    if (*packet).stream_index == video_stream_index {
        // NOTE(review): `now` is never read — leftover timing scaffold.
        let now = Instant::now();

        let codec_context = video_ctx.codec_context;
        response = ffmpeg_sys_next::avcodec_send_packet(codec_context, packet);
        if response < 0 {
            println!("Failed to decode packet");
            return false;
        }
        response = ffmpeg_sys_next::avcodec_receive_frame(codec_context, frame);
        if (response == ffmpeg_sys_next::AVERROR(ffmpeg_sys_next::EAGAIN))
            || (response == ffmpeg_sys_next::AVERROR_EOF)
        {
            // Decoder needs more input (or stream finished) — no frame yet.
            return false;
        } else if response < 0 {
            println!("Failed to decode packet");
            return false;
        }

        if (*video_ctx).isCuda {
            // TODO start: hardware decode
            // Download the decoded frame from GPU memory into hw_frame.
            let mut iRet = ffmpeg_sys_next::av_hwframe_transfer_data(hw_frame, frame, 0);
            if iRet < 0 {
                ffmpeg_sys_next::av_frame_unref(hw_frame);
                return false;
            }
            ffmpeg_sys_next::av_frame_copy_props(hw_frame, frame);
            // hw_frame.data now holds the frame downloaded from the hardware decoder.
            // TODO end: hardware decode
        } else {
            // Software decode: read directly from the decoded frame.
            hw_frame = frame;
        }
        decoder_data.format = (*codec_context).pix_fmt;

        if video_ctx.isCuda {
            // CUDA downloads yuv420p content as NV12; yuv444p stays planar.
            if video_ctx.format == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV444P {
                decoder_data.format = ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV444P;
            } else if video_ctx.format == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV420P {
                decoder_data.format = ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_NV12;
            } else {
                decoder_data.format = ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_NONE;
            }
        }

        if video_ctx.isCuda {
            // Copy plane sizes, then the plane bytes for the format at hand.
            // Plane byte counts are computed from width*height, which assumes
            // linesize == width. NOTE(review): padded strides would make these
            // copies short — confirm inputs have no row padding.
            for i in 0..8 {
                decoder_data.line_size.push((*hw_frame).linesize[i]);
            }

            if decoder_data.format == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_NV12 {
                // NV12: full-size Y plane + half-size interleaved UV plane.
                decoder_data
                    .data
                    .push(if (*hw_frame).data[0] == null_mut() {
                        Vec::new()
                    } else {
                        slice::from_raw_parts(
                            (*hw_frame).data[0],
                            (video_ctx.width * video_ctx.height) as usize,
                        )
                        .to_vec()
                    });
                decoder_data
                    .data
                    .push(if (*hw_frame).data[1] == null_mut() {
                        Vec::new()
                    } else {
                        slice::from_raw_parts(
                            (*hw_frame).data[1],
                            (video_ctx.width * video_ctx.height / 2) as usize,
                        )
                        .to_vec()
                    });
            }
            if decoder_data.format == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV420P {
                // YUV420P: full-size Y plane + two quarter-size chroma planes.
                decoder_data
                    .data
                    .push(if (*hw_frame).data[0] == null_mut() {
                        Vec::new()
                    } else {
                        slice::from_raw_parts(
                            (*hw_frame).data[0],
                            (video_ctx.width * video_ctx.height) as usize,
                        )
                        .to_vec()
                    });
                decoder_data
                    .data
                    .push(if (*hw_frame).data[1] == null_mut() {
                        Vec::new()
                    } else {
                        slice::from_raw_parts(
                            (*hw_frame).data[1],
                            (video_ctx.width * video_ctx.height / 4) as usize,
                        )
                        .to_vec()
                    });
                decoder_data
                    .data
                    .push(if (*hw_frame).data[2] == null_mut() {
                        Vec::new()
                    } else {
                        slice::from_raw_parts(
                            (*hw_frame).data[2],
                            (video_ctx.width * video_ctx.height / 4) as usize,
                        )
                        .to_vec()
                    });
            }
            if decoder_data.format == ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV444P {
                // YUV444P: three full-size planes.
                decoder_data
                    .data
                    .push(if (*hw_frame).data[0] == null_mut() {
                        Vec::new()
                    } else {
                        slice::from_raw_parts(
                            (*hw_frame).data[0],
                            (video_ctx.width * video_ctx.height) as usize,
                        )
                        .to_vec()
                    });
                decoder_data
                    .data
                    .push(if (*hw_frame).data[1] == null_mut() {
                        Vec::new()
                    } else {
                        slice::from_raw_parts(
                            (*hw_frame).data[1],
                            (video_ctx.width * video_ctx.height) as usize,
                        )
                        .to_vec()
                    });
                decoder_data
                    .data
                    .push(if (*hw_frame).data[2] == null_mut() {
                        Vec::new()
                    } else {
                        slice::from_raw_parts(
                            (*hw_frame).data[2],
                            (video_ctx.width * video_ctx.height) as usize,
                        )
                        .to_vec()
                    });
            }
        } else {
            // Software decode: copy the frame's own reference-counted buffers
            // as-is rather than computing plane sizes ourselves.
            for i in 0..8 {
                decoder_data.line_size.push((*hw_frame).linesize[i]);
                decoder_data.data.push(if (*hw_frame).buf[i] == null_mut() {
                    Vec::new()
                } else {
                    slice::from_raw_parts(
                        (*(*hw_frame).buf[i]).data,
                        (*(*hw_frame).buf[i]).size as usize,
                    )
                    .to_vec()
                })
            }
        }
    } else if ((*packet).stream_index == audio_stream_index) {
        // Upper bound on samples per channel passed to swr_convert.
        let MAX_AUDIO_FRAME_SIZE: i32 = 19200;
        let codec_context = audio_ctx.codec_context;

        response = ffmpeg_sys_next::avcodec_send_packet(codec_context, packet);
        if response < 0 {
            println!("Failed to decode packet");
            return false;
        }

        response = ffmpeg_sys_next::avcodec_receive_frame(codec_context, frame);
        if (response == ffmpeg_sys_next::AVERROR(ffmpeg_sys_next::EAGAIN))
            || (response == ffmpeg_sys_next::AVERROR_EOF)
        {
            return false;
        } else if response < 0 {
            println!("Failed to decode packet");
            return false;
        }
        // NOTE(review): reserve_exact only grows CAPACITY; data[0].len() stays
        // 0, so the samples swr_convert writes below are invisible through the
        // Vec API (and Vec considers that memory uninitialized). Confirm that
        // consumers read via raw pointer, or set_len after the convert.
        decoder_data.data.push(Vec::new());
        decoder_data.data[0].reserve_exact((audio_ctx.out_buffer_size as usize));
        let dest = [decoder_data.data[0].as_mut_ptr()];
        ffmpeg_sys_next::swr_convert(
            audio_ctx.au_convert_ctx,
            dest.as_ptr() as *mut *mut u8,
            MAX_AUDIO_FRAME_SIZE,
            (*frame).data.as_ptr() as *mut *const u8,
            (*frame).nb_samples,
        );
    }

    decoder_data.pts = (*frame).pts;
    ffmpeg_sys_next::av_frame_free(&mut frame);

    // NOTE(review): unref will release (*packet).buf, whose data pointer was
    // redirected to Rust-owned memory above (see top of function), and the
    // AVPacket struct itself is never av_packet_free'd — confirm.
    ffmpeg_sys_next::av_packet_unref(packet);
    return true;
}

/// Releases every FFmpeg resource owned by `video_ctx`.
///
/// The `av*_free` / `avformat_close_input` helpers null the pointers they are
/// handed, so the struct is left holding nulls and a second call is a no-op.
///
/// NOTE(review): `format_context` is shared with the `AudioContext` filled by
/// `load_video`; after this runs, `audio_ctx.format_context` dangles —
/// callers must not use the audio context afterwards.
pub unsafe fn free_video_data(video_ctx: &mut VideoContext) {
    ffmpeg_sys_next::sws_freeContext(video_ctx.sws_scale_ctx);
    // sws_freeContext does not null its argument; do it ourselves so a
    // repeated call cannot double-free.
    video_ctx.sws_scale_ctx = std::ptr::null_mut();
    // avformat_close_input both frees the context and nulls the pointer, so
    // the old follow-up avformat_free_context call was a no-op on null and
    // has been dropped.
    ffmpeg_sys_next::avformat_close_input(&mut video_ctx.format_context);
    ffmpeg_sys_next::avcodec_free_context(&mut video_ctx.codec_context);
    ffmpeg_sys_next::av_packet_free(&mut video_ctx.packet);
    ffmpeg_sys_next::av_frame_free(&mut video_ctx.frame);
    ffmpeg_sys_next::av_frame_free(&mut video_ctx.hw_frame);
}
