use crate::{CameraPacket, CustomError};
use ffmpeg_sys_next::{
    av_dump_format, av_find_input_format, av_frame_alloc, av_frame_free, av_frame_get_buffer,
    av_interleaved_write_frame, av_packet_alloc, av_packet_free, av_packet_rescale_ts,
    av_packet_unref, av_read_frame, av_samples_get_buffer_size, av_write_trailer,
    avcodec_alloc_context3, avcodec_find_decoder, avcodec_find_encoder, avcodec_open2,
    avcodec_parameters_from_context, avcodec_parameters_to_context, avcodec_receive_frame,
    avcodec_receive_packet, avcodec_send_frame, avcodec_send_packet, avformat_alloc_context,
    avformat_alloc_output_context2, avformat_close_input, avformat_free_context,
    avformat_new_stream, avformat_open_input, avformat_write_header, avio_closep, avio_open2,
    swr_alloc, swr_alloc_set_opts2, swr_convert, swr_init, AVChannelLayout, AVCodec,
    AVCodecContext, AVCodecParameters, AVFormatContext, AVInputFormat, AVMediaType, AVPacket,
    AVSampleFormat, AVStream, SwrContext, AVERROR, AVERROR_EOF, AVIO_FLAG_WRITE,
    AV_CH_LAYOUT_STEREO, AV_CODEC_FLAG_GLOBAL_HEADER, EAGAIN, FF_PROFILE_AAC_LOW,
};
use sdl2::{
    audio::{AudioCallback, AudioSpecDesired},
    Sdl, VideoSubsystem,
};
use tokio::sync::{broadcast, Mutex};

use std::{
    collections::VecDeque,
    ffi::{c_char, CString},
    ptr::{null, null_mut},
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
};

/// A raw chunk of audio bytes, used to move sample data between tasks.
pub struct AudioData {
    /// Owned byte buffer holding the raw sample data.
    pub data: Vec<u8>,
}

impl AudioData {
    /// Wraps an owned byte buffer into an `AudioData`.
    pub fn new(data: Vec<u8>) -> Self {
        Self { data }
    }
}

/// State shared with the SDL audio callback: a FIFO of decoded sample chunks.
///
/// NOTE(review): despite the name, nothing here generates a square wave —
/// the callback just replays whatever chunks were queued.
#[derive(Debug, Clone)]
pub struct SquareWave {
    // Each entry is one callback-sized chunk of interleaved i16 samples,
    // produced elsewhere and consumed by `AudioCallback::callback`.
    pub data: Arc<std::sync::Mutex<VecDeque<Vec<i16>>>>,
}

impl AudioCallback for SquareWave {
    type Channel = i16;

    /// SDL audio callback: pops the next queued sample chunk and copies it
    /// into the device buffer `out`.
    ///
    /// Fixes two defects in the original:
    /// - `copy_from_slice` panicked whenever the queued chunk's length did
    ///   not exactly match `out.len()`; we now copy the overlapping prefix
    ///   and zero-fill the remainder.
    /// - when the queue was empty the previous buffer contents were left in
    ///   `out` (stale audio replayed); we now emit silence instead.
    fn callback(&mut self, out: &mut [i16]) {
        let mut buffer = self.data.lock().unwrap();
        match buffer.pop_front() {
            None => {
                println!("audio callback no data.");
                // No decoded audio available: output silence, not stale data.
                out.fill(0);
            }
            Some(data) => {
                let n = data.len().min(out.len());
                out[..n].copy_from_slice(&data[..n]);
                // Pad with silence if the chunk was shorter than the device
                // buffer (previously this case panicked).
                out[n..].fill(0);
            }
        };
    }
}

/// Captures audio from an FFmpeg input (device or URL), decodes it,
/// resamples it, re-encodes it to AAC and muxes it into an output file,
/// fanning packets out through tokio broadcast channels.
///
/// NOTE(review): `#[derive(Clone)]` copies all the raw FFmpeg pointers, so
/// every clone aliases the same native contexts. Confirm that only one clone
/// drives each context at a time (clones are handed to spawned tasks).
#[derive(Clone)]
pub struct AudioPlayer {
    // Input-format short name passed to av_find_input_format.
    pub short_name: CString,
    // Device path / URL opened by avformat_open_input.
    pub url: CString,
    // Located input format; null until find_input_context runs.
    pub input_format: *const AVInputFormat,
    // Demuxer context for the input side; null until alloc_format_context.
    pub format_context: *mut AVFormatContext,
    // Index of the selected audio stream; -1 until stream_index finds one.
    pub audio_index: isize,
    // Pointer into format_context's stream array for the audio stream.
    pub av_stream: *mut *mut AVStream,
    // Codec parameters of the selected audio stream (owned by FFmpeg).
    pub codec_parameters: *mut AVCodecParameters,
    // Decoder matching the input stream's codec id.
    pub av_codec: *const AVCodec,
    // Decoder context opened from av_codec.
    pub codec_context: *mut AVCodecContext,
    // Resampler converting decoder output to the encoder's format/rate.
    pub swr_context: *mut SwrContext,
    // Optional SDL handles (playback path; currently unused — see the
    // commented-out init_sdl/player methods).
    pub sdl: Option<Sdl>,
    pub video_subsystem: Option<VideoSubsystem>,
    // Input channel layout; points into codec_parameters (FFmpeg-owned).
    pub channel_layout: *const AVChannelLayout,
    // Output channel layout; stream_index sets it equal to channel_layout.
    pub out_channel_layout: *const AVChannelLayout,
    // Input / output sample formats and rates used to configure swr.
    pub sample_fmt: AVSampleFormat,
    pub out_sample_fmt: AVSampleFormat,
    pub sample_rate: i32,
    pub out_sample_rate: i32,
    // NOTE(review): never assigned by the visible code (the assignment in
    // stream_index is commented out) — stays 0.
    pub channels: i32,
    // Byte size of one 1024-sample output buffer (get_buffer_size).
    pub out_buffer_size: i32,
    // FIFO feeding the SDL audio callback (see SquareWave).
    pub audio_callback_data: Arc<std::sync::Mutex<VecDeque<Vec<i16>>>>,

    // Output (muxer) side: context, target file, AAC encoder and stream.
    pub out_format_context: *mut AVFormatContext,
    pub out_file_name: CString,
    pub out_codec: *const AVCodec,
    pub out_codec_context: *mut AVCodecContext,
    pub out_av_stream: *mut AVStream,

    // Cleared by the owner to request shutdown of the capture/encode loops.
    pub running: Arc<AtomicBool>,
    // Set to true by read_from_audio once it has observed shutdown.
    pub end: Arc<Mutex<bool>>,
}

// SAFETY: AudioPlayer holds raw FFmpeg pointers, which are neither Send nor
// Sync by default. These impls assert that cross-thread access is externally
// serialized (clones are moved into spawned tasks). NOTE(review): nothing in
// the type enforces that — confirm no two tasks touch the same native
// context concurrently.
unsafe impl Send for AudioPlayer {}
unsafe impl Sync for AudioPlayer {}

impl AudioPlayer {
    pub fn new(
        short_name: String,
        url: String,
        out_file_name: String,
        running: Arc<AtomicBool>,
    ) -> Self {
        AudioPlayer {
            short_name: CString::new(short_name).unwrap(),
            url: CString::new(url).unwrap(),
            input_format: null_mut(),
            format_context: null_mut(),
            audio_index: -1,
            av_stream: null_mut(),
            codec_parameters: null_mut(),
            av_codec: null_mut(),
            codec_context: null_mut(),
            swr_context: null_mut(),
            sdl: None,
            video_subsystem: None,
            channel_layout: null_mut(),
            out_channel_layout: null_mut(),
            sample_fmt: AVSampleFormat::AV_SAMPLE_FMT_S16,
            out_sample_fmt: AVSampleFormat::AV_SAMPLE_FMT_FLTP,
            sample_rate: 0,
            out_sample_rate: 44100,
            channels: 0,
            out_buffer_size: 0,
            audio_callback_data: Arc::new(std::sync::Mutex::new(VecDeque::new())),
            out_format_context: null_mut(),
            out_file_name: CString::new(out_file_name).unwrap(),
            out_codec: null_mut(),
            out_codec_context: null_mut(),
            out_av_stream: null_mut(),
            running,
            end: Arc::new(Mutex::new(false)),
        }
    }

    /// Looks up the input format (e.g. an avdevice) by its short name.
    ///
    /// Fix: `CString::as_ptr` already yields `*const c_char`; the original's
    /// extra `as *const i8` cast fails to compile on targets where `c_char`
    /// is `u8` (e.g. aarch64 Linux).
    pub async fn find_input_context(&mut self) -> Result<(), CustomError> {
        self.input_format = unsafe { av_find_input_format(self.short_name.as_ptr()) };
        Ok(())
    }

    /// Allocates an empty demuxer context for the input side.
    pub async fn alloc_format_context(&mut self) -> Result<(), CustomError> {
        let ctx = unsafe { avformat_alloc_context() };
        self.format_context = ctx;
        Ok(())
    }

    /// Opens the capture device / URL using the previously located input
    /// format; on failure the FFmpeg error code is mapped to `CustomError`.
    pub async fn open_input(&mut self) -> Result<(), CustomError> {
        let ret = unsafe {
            avformat_open_input(
                &mut self.format_context,
                self.url.as_ptr() as *const c_char,
                self.input_format,
                null_mut(),
            )
        };
        if ret >= 0 {
            Ok(())
        } else {
            Err(CustomError::FfmepgError("cannot open input ".to_owned()))
        }
    }

    /// Dumps the demuxer's stream layout to stderr (debug aid only).
    pub async fn dump_format(&self) -> Result<(), CustomError> {
        let url_ptr = self.url.as_ptr() as *const c_char;
        // Second arg: stream index to highlight; last arg 0 = input context.
        unsafe { av_dump_format(self.format_context, 0, url_ptr, 0) };
        Ok(())
    }

    pub async fn stream_index(&mut self) -> Result<(), CustomError> {
        for index in 0..unsafe { (*self.format_context).nb_streams } as isize {
            if unsafe { (*(*(*(*self.format_context).streams).offset(index)).codecpar).codec_type }
                == AVMediaType::AVMEDIA_TYPE_AUDIO
            {
                self.audio_index = index;
                self.av_stream = unsafe { (*self.format_context).streams.offset(index) };
                self.codec_parameters = unsafe { (**self.av_stream).codecpar };
                self.sample_rate = unsafe { (*self.codec_parameters).sample_rate };
                //self.channels = unsafe { (*self.codec_parameters).channels };
                self.channel_layout = unsafe { &(*self.codec_parameters).ch_layout };
                //self.out_sample_rate = self.sample_rate;
                self.out_channel_layout = self.channel_layout;
            }
        }
        Ok(())
    }

    pub async fn find_decoder(&mut self) -> Result<(), CustomError> {
        self.av_codec = unsafe { avcodec_find_decoder((*self.codec_parameters).codec_id) };
        Ok(())
    }

    pub async fn alloc_codec_context(&mut self) -> Result<(), CustomError> {
        self.codec_context = unsafe { avcodec_alloc_context3(self.av_codec) };
        Ok(())
    }

    /// Copies the stream's codec parameters into the decoder context so it
    /// can be opened with the correct settings.
    pub async fn parameters_to_context(&mut self) -> Result<(), CustomError> {
        let ret =
            unsafe { avcodec_parameters_to_context(self.codec_context, self.codec_parameters) };
        if ret >= 0 {
            Ok(())
        } else {
            Err(CustomError::FfmepgError(
                "avcodec parameters to context error".to_owned(),
            ))
        }
    }

    /// Opens the decoder, then records the sample format the codec actually
    /// negotiated (needed to configure the resampler).
    pub async fn open_avcodec(&mut self) -> Result<(), CustomError> {
        if unsafe { avcodec_open2(self.codec_context, self.av_codec, null_mut()) } < 0 {
            return Err(CustomError::FfmepgError("open avcodec error".to_owned()));
        }
        self.sample_fmt = unsafe { (*self.codec_context).sample_fmt };
        Ok(())
    }

    pub async fn get_buffer_size(&mut self) -> Result<(), CustomError> {
        self.out_buffer_size = unsafe {
            av_samples_get_buffer_size(
                null_mut(),
                (*self.out_channel_layout).nb_channels,
                1024,
                self.out_sample_fmt,
                1,
            )
        };
        Ok(())
    }

    pub async fn init_swr_context(&mut self) -> Result<(), CustomError> {
        let mut swr_context = unsafe { swr_alloc() };
        self.swr_context = swr_context;
        unsafe {
            swr_alloc_set_opts2(
                &mut swr_context,
                self.out_channel_layout,
                self.out_sample_fmt,
                self.out_sample_rate,
                self.channel_layout,
                self.sample_fmt,
                self.sample_rate,
                0,
                null_mut(),
            )
        };
        unsafe { swr_init(self.swr_context) };
        Ok(())
    }

    //****************** Configure and open the output encoder ******************
    /// Pre-allocates an output (muxer) context.
    ///
    /// NOTE(review): `alloc_output_context` later calls
    /// `avformat_alloc_output_context2`, which allocates its own context into
    /// the same field — confirm this pre-allocation is actually needed.
    pub async fn alloc_out_format_context(&mut self) -> Result<(), CustomError> {
        let ctx = unsafe { avformat_alloc_context() };
        self.out_format_context = ctx;
        Ok(())
    }

    pub async fn alloc_output_context(&mut self) -> Result<(), CustomError> {
        unsafe {
            avformat_alloc_output_context2(
                &mut self.out_format_context,
                null_mut(),
                null_mut(),
                self.out_file_name.as_ptr() as *const c_char,
            );
        }
        Ok(())
    }

    /// Opens the output file for writing and attaches the resulting AVIO
    /// handle to the muxer context.
    pub async fn avio_open(&mut self) -> Result<(), CustomError> {
        let ret = unsafe {
            avio_open2(
                &mut (*self.out_format_context).pb,
                self.out_file_name.as_ptr() as *const c_char,
                AVIO_FLAG_WRITE,
                null_mut(), // no interrupt callback
                null_mut(), // no options
            )
        };
        if ret < 0 {
            println!("avio_open fail\n");
            return Err(CustomError::FfmepgError("open avio failed".to_owned()));
        }
        Ok(())
    }

    /// Locates the AAC encoder in the linked FFmpeg build.
    pub async fn find_encoder(&mut self) -> Result<(), CustomError> {
        let codec = unsafe { avcodec_find_encoder(ffmpeg_sys_next::AVCodecID::AV_CODEC_ID_AAC) };
        if codec.is_null() {
            println!("Can not find audio encoder! \n");
            return Err(CustomError::FfmepgError(
                "find encoder aac failed".to_owned(),
            ));
        }
        self.out_codec = codec;
        Ok(())
    }

    pub async fn alloc_out_codec_context(&mut self) -> Result<(), CustomError> {
        unsafe {
            let out_codec_ctx: *mut AVCodecContext = avcodec_alloc_context3(self.out_codec);
            (*out_codec_ctx).codec_type = AVMediaType::AVMEDIA_TYPE_AUDIO;
            (*out_codec_ctx).codec_id = ffmpeg_sys_next::AVCodecID::AV_CODEC_ID_AAC;
            (*out_codec_ctx).sample_fmt = ffmpeg_sys_next::AVSampleFormat::AV_SAMPLE_FMT_FLTP;
            (*out_codec_ctx).sample_rate = self.out_sample_rate;
            //(*out_codec_ctx).channel_layout = AV_CH_LAYOUT_STEREO;
            (*out_codec_ctx).ch_layout = *self.out_channel_layout;
            (*out_codec_ctx).bit_rate = 128000;
            //(*out_codec_ctx).channels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
            (*out_codec_ctx).profile = FF_PROFILE_AAC_LOW;
            (*out_codec_ctx).frame_size = 1024;
            (*out_codec_ctx).codec_tag = 0;
            (*out_codec_ctx).flags = AV_CODEC_FLAG_GLOBAL_HEADER as i32;
            self.out_codec_context = out_codec_ctx;
        }
        Ok(())
    }

    /// Opens the configured AAC encoder.
    pub async fn open_out_avcodec(&mut self) -> Result<(), CustomError> {
        match unsafe { avcodec_open2(self.out_codec_context, self.out_codec, null_mut()) } {
            ret if ret < 0 => {
                println!("open out avcodec fail ! \n");
                Err(CustomError::FfmepgError(
                    "open out avcodec failed".to_owned(),
                ))
            }
            _ => Ok(()),
        }
    }

    pub async fn new_stream(&mut self) -> Result<(), CustomError> {
        unsafe {
            self.out_av_stream = avformat_new_stream(self.out_format_context, self.out_codec);
        }
        Ok(())
    }

    pub async fn out_parameters_to_context(&self) -> Result<(), CustomError> {
        unsafe {
            avcodec_parameters_from_context((*self.out_av_stream).codecpar, self.out_codec_context);
        }
        Ok(())
    }

    pub async fn write_header(&self) -> Result<(), CustomError> {
        unsafe {
            avformat_write_header(self.out_format_context, null_mut());
        }
        Ok(())
    }

    // pub async fn init_sdl(&mut self) -> Result<(), CustomError> {
    //     let sdl: Sdl = sdl2::init().unwrap();
    //     let video_subsystem = sdl.video().unwrap();
    //     self.sdl = Some(sdl);
    //     self.video_subsystem = Some(video_subsystem);
    //     Ok(())
    // }

    // pub async fn read_and_codec(&self) -> Result<(), CustomError> {
    //     let mut frame_count = 0;
    //     loop {
    //         if !self.running.load(Ordering::SeqCst) {
    //             let mut re = unsafe { avcodec_send_frame(self.out_codec_context, null_mut()) };

    //             let audio_pkt = unsafe { av_packet_alloc() };
    //             while re == 0 {
    //                 re = unsafe { avcodec_receive_packet(self.out_codec_context, audio_pkt) };

    //                 if unsafe { (*audio_pkt).size } > 0 {
    //                     unsafe {
    //                         (*audio_pkt).stream_index = self.audio_index as i32;
    //                     }
    //                     unsafe {
    //                         av_packet_rescale_ts(
    //                             audio_pkt,
    //                             (*self.out_codec_context).time_base,
    //                             (*self.out_av_stream).time_base,
    //                         );
    //                     }
    //                     unsafe {
    //                         av_interleaved_write_frame(self.out_format_context, audio_pkt);
    //                     }
    //                 }
    //             }
    //             unsafe {
    //                 av_write_trailer(self.out_format_context);
    //             }
    //             return Ok(());
    //         }

    //         let pkt: *mut AVPacket = unsafe { av_packet_alloc() };
    //         if unsafe { av_read_frame(self.format_context, pkt) } < 0 {
    //             unsafe { av_packet_unref(pkt) };
    //             return Err(CustomError::FfmepgError("Read frame failed".to_owned()));
    //         }

    //         if unsafe { (*pkt).stream_index } != self.audio_index as i32 {
    //             unsafe { av_packet_unref(pkt) };
    //             continue;
    //         }

    //         if unsafe { avcodec_send_packet(self.codec_context, pkt) } < 0 {
    //             unsafe { av_packet_unref(pkt) };
    //             continue;
    //         }
    //         unsafe { av_packet_unref(pkt) };

    //         loop {
    //             let mut frame = unsafe { av_frame_alloc() };
    //             let ret = unsafe { avcodec_receive_frame(self.codec_context, frame) };
    //             if ret == AVERROR(EAGAIN) || ret == AVERROR_EOF {
    //                 unsafe { av_frame_free(&mut frame) };
    //                 break;
    //             } else if ret < 0 {
    //                 unsafe { av_frame_free(&mut frame) };
    //                 return Err(CustomError::FfmepgError("Decoding error".to_owned()));
    //             }

    //             let mut out_frame = unsafe {
    //                 av_frame_alloc()
    //             };
    //             unsafe {
    //                 (*out_frame).sample_rate = 44100;
    //                 (*out_frame).nb_samples = (*self.out_codec_context).frame_size;
    //                 (*out_frame).format = 8; //ffmpeg_sys_next::AVSampleFormat::AV_SAMPLE_FMT_FLTP
    //                                          //(*out_frame).ch_layout.nb_channels = 2;
    //                 (*out_frame).channel_layout = AV_CH_LAYOUT_STEREO;
    //                 //(*out_frame).channels = (*out_codec_ctx).channels;
    //                 //pts递增nb_samples
    //                 (*out_frame).pts = frame_count * (*out_frame).nb_samples as i64;
    //                 av_frame_get_buffer(out_frame, 0);
    //             }

    //             frame_count = frame_count + 1;

    //             unsafe {
    //                 swr_convert(
    //                     self.swr_context,
    //                     (*out_frame).data.as_ptr() as *mut *mut u8,
    //                     (*out_frame).nb_samples,
    //                     (*frame).data.as_ptr() as *mut *const u8,
    //                     (*frame).nb_samples,
    //                 )
    //             };

    //             unsafe { av_frame_free(&mut frame) };

    //             let audio_send_frame_ret =
    //                 unsafe { avcodec_send_frame(self.out_codec_context, out_frame) };

    //             let audio_pkt = unsafe { av_packet_alloc() };
    //             if audio_send_frame_ret == 0 {
    //                 unsafe { avcodec_receive_packet(self.out_codec_context, audio_pkt) };

    //                 if unsafe { (*audio_pkt).size } > 0 {
    //                     unsafe {
    //                         (*audio_pkt).stream_index = self.audio_index as i32;
    //                     }
    //                     unsafe {
    //                         av_packet_rescale_ts(
    //                             audio_pkt,
    //                             (*self.out_codec_context).time_base,
    //                             (*self.out_av_stream).time_base,
    //                         );
    //                     }

    //                     unsafe { av_interleaved_write_frame(self.out_format_context, audio_pkt) };
    //                 }
    //             }
    //             unsafe {
    //                 av_packet_unref(audio_pkt);
    //             }
    //             unsafe { av_frame_free(&mut out_frame) };
    //         }
    //     }
    //     //Ok(())
    // }

    // pub async fn async_read_and_codec(&mut self) -> Result<(), CustomError> {
    //     let audio_player_clone = self.clone();
    //     tokio::spawn(async move {
    //         let _ = audio_player_clone.read_and_codec().await;
    //     });
    //     Ok(())
    // }

    /// Spawns the capture loop (`read_from_audio`) onto the tokio runtime.
    /// The clone shares the same raw FFmpeg pointers and atomics.
    pub async fn async_read_from_audio(
        &mut self,
        in_packet_sender: broadcast::Sender<CameraPacket>,
    ) -> Result<(), CustomError> {
        let reader = self.clone();
        tokio::spawn(async move {
            let _ = reader.read_from_audio(in_packet_sender).await;
        });
        Ok(())
    }

    pub async fn read_from_audio(
        &self,
        in_packet_sender: broadcast::Sender<CameraPacket>,
    ) -> Result<(), CustomError> {
        loop {
            let pkt: *mut AVPacket = unsafe { av_packet_alloc() };
            if unsafe { av_read_frame(self.format_context, pkt) } < 0 {
                unsafe { av_packet_unref(pkt) };
                return Err(CustomError::FfmepgError("Read frame failed".to_owned()));
            }

            if unsafe { (*pkt).stream_index } != self.audio_index as i32 {
                unsafe { av_packet_unref(pkt) };
                continue;
            }

            let mut camera_packet = CameraPacket::default();
            unsafe {
                camera_packet.copy(pkt);
            };
            unsafe { av_packet_unref(pkt) };

            in_packet_sender.send(camera_packet).unwrap();
            if !self.running.load(Ordering::SeqCst) {
                let mut end_lock = self.end.lock().await;
                *end_lock = true;
                return Ok(());
            }
            tokio::task::yield_now().await;
        }
        //Ok(())
    }

    /// Spawns the decode/encode/mux loop (`write_data`) onto the tokio
    /// runtime; the clone shares the same raw FFmpeg pointers and atomics.
    pub async fn async_write_data(
        &mut self,
        file_packet_receiver: Arc<Mutex<broadcast::Receiver<CameraPacket>>>,
    ) -> Result<(), CustomError> {
        let mut writer = self.clone();
        tokio::spawn(async move {
            let _ = writer.write_data(file_packet_receiver).await;
        });
        Ok(())
    }

    /// Drains `CameraPacket`s from the broadcast receiver, decodes them with
    /// the input codec, resamples into the encoder's format and muxes the
    /// resulting AAC packets into the output file. Runs until `running` is
    /// cleared, then flushes the encoder and writes the container trailer.
    pub async fn write_data(
        &mut self,
        file_packet_receiver: Arc<Mutex<broadcast::Receiver<CameraPacket>>>,
    ) -> Result<(), CustomError> {
        // Monotonic frame counter used to synthesize pts below.
        let mut frame_count = 0;
        loop {
            if !self.running.load(Ordering::SeqCst) {
                // Shutdown path: send a null frame to put the encoder into
                // flush mode, drain all pending packets, then finalize.
                let mut re = unsafe { avcodec_send_frame(self.out_codec_context, null_mut()) };

                // NOTE(review): this AVPacket is unref'd implicitly by
                // receive_packet but never av_packet_free'd — it leaks once
                // per shutdown.
                let audio_pkt = unsafe { av_packet_alloc() };
                while re == 0 {
                    re = unsafe { avcodec_receive_packet(self.out_codec_context, audio_pkt) };

                    if unsafe { (*audio_pkt).size } > 0 {
                        unsafe {
                            (*audio_pkt).stream_index = self.audio_index as i32;
                        }
                        // Rescale timestamps from the encoder's time base to
                        // the muxer stream's time base.
                        unsafe {
                            av_packet_rescale_ts(
                                audio_pkt,
                                (*self.out_codec_context).time_base,
                                (*self.out_av_stream).time_base,
                            );
                        }
                        unsafe {
                            av_interleaved_write_frame(self.out_format_context, audio_pkt);
                        }
                    }
                }
                unsafe {
                    av_write_trailer(self.out_format_context);
                }
                return Ok(());
            }

            // Block until the capture task broadcasts the next packet; the
            // lock is scoped so it is released before the FFI work below.
            let mut audio_packet = {
                let mut file_packet_recevicer_lock = file_packet_receiver.lock().await;
                let audio_packet = file_packet_recevicer_lock.recv().await;

                match audio_packet {
                    Ok(packet) => packet.clone(),
                    Err(_) => {
                        return Err(CustomError::FfmepgError("camera packet error".to_owned()))
                    }
                }
            };

            // Rehydrate the broadcast payload into a native AVPacket.
            // NOTE(review): pkt is unref'd but never av_packet_free'd — the
            // struct leaks each iteration.
            let pkt: *mut AVPacket = unsafe { av_packet_alloc() };
            unsafe {
                audio_packet.to_av_packet(pkt);
            }

            if unsafe { avcodec_send_packet(self.codec_context, pkt) } < 0 {
                unsafe { av_packet_unref(pkt) };
                continue;
            }
            unsafe { av_packet_unref(pkt) };

            // One sent packet may yield several decoded frames.
            loop {
                let mut frame = unsafe { av_frame_alloc() };
                let ret = unsafe { avcodec_receive_frame(self.codec_context, frame) };
                if ret == AVERROR(EAGAIN) || ret == AVERROR_EOF {
                    // Decoder needs more input (or is drained) — back to recv.
                    unsafe { av_frame_free(&mut frame) };
                    break;
                } else if ret < 0 {
                    unsafe { av_frame_free(&mut frame) };
                    return Err(CustomError::FfmepgError("Decoding error".to_owned()));
                }

                // Destination frame for the resampled samples, shaped to
                // match the AAC encoder's expectations.
                let mut out_frame = unsafe { av_frame_alloc() };
                unsafe {
                    (*out_frame).sample_rate = 44100;
                    (*out_frame).nb_samples = (*self.out_codec_context).frame_size;
                    (*out_frame).format = 8; //ffmpeg_sys_next::AVSampleFormat::AV_SAMPLE_FMT_FLTP
                                             //(*out_frame).ch_layout.nb_channels = 2;
                    //(*out_frame).channel_layout = AV_CH_LAYOUT_STEREO;
                    (*out_frame).ch_layout = *self.out_channel_layout;
                    //(*out_frame).channels = (*out_codec_ctx).channels;
                    // pts advances by nb_samples per encoded frame
                    (*out_frame).pts = frame_count * (*out_frame).nb_samples as i64;
                    av_frame_get_buffer(out_frame, 0);
                }

                frame_count = frame_count + 1;

                // Resample decoded samples into the encoder's format.
                // NOTE(review): swr_convert's return value (samples actually
                // produced) is ignored — confirm it always equals nb_samples.
                unsafe {
                    swr_convert(
                        self.swr_context,
                        (*out_frame).data.as_ptr() as *mut *mut u8,
                        (*out_frame).nb_samples,
                        (*frame).data.as_ptr() as *mut *const u8,
                        (*frame).nb_samples,
                    )
                };

                unsafe { av_frame_free(&mut frame) };

                let audio_send_frame_ret =
                    unsafe { avcodec_send_frame(self.out_codec_context, out_frame) };

                // NOTE(review): only ONE receive_packet per send_frame; if
                // the encoder ever buffers more than one packet the extras
                // are delayed until shutdown. audio_pkt also leaks (no
                // av_packet_free).
                let audio_pkt = unsafe { av_packet_alloc() };
                if audio_send_frame_ret == 0 {
                    unsafe { avcodec_receive_packet(self.out_codec_context, audio_pkt) };

                    if unsafe { (*audio_pkt).size } > 0 {
                        unsafe {
                            (*audio_pkt).stream_index = self.audio_index as i32;
                        }
                        unsafe {
                            av_packet_rescale_ts(
                                audio_pkt,
                                (*self.out_codec_context).time_base,
                                (*self.out_av_stream).time_base,
                            );
                        }

                        unsafe { av_interleaved_write_frame(self.out_format_context, audio_pkt) };
                    }
                }
                unsafe {
                    av_packet_unref(audio_pkt);
                }
                unsafe { av_frame_free(&mut out_frame) };
            }
        }
        // Unreachable: the loop above only exits via `return`.
        Ok(())
    }

    // pub async fn player(&mut self, sdl: Sdl) -> Result<(), CustomError> {
    //     let audio_subsystem = sdl.audio().unwrap();

    //     let desired_spec = AudioSpecDesired {
    //         freq: Some(self.out_sample_rate),
    //         channels: Some(self.channels as u8),
    //         samples: Some(1024),
    //     };

    //     let device = audio_subsystem
    //         .open_playback(None, &desired_spec, |_spec| {
    //             // initialize the audio callback
    //             SquareWave {
    //                 data: self.audio_callback_data.clone(),
    //             }
    //         })
    //         .unwrap();

    //     device.resume();
    //     println!("222222222222222222222222");

    //     std::thread::sleep(Duration::from_secs(10));

    //     Ok(())
    // }

    pub async fn close(&self) -> Result<(), CustomError> {
        unsafe {
            //avformat_close_input(&mut &mut *self.input_format);
            avformat_free_context(self.format_context);
            avformat_free_context(self.out_format_context);
        }
        Ok(())
    }
}
