use ffmpeg_next::{
    software::scaling::{context::Context as SwsContext, flag::Flags},
    util::frame::{Audio, Video},
    util::format::pixel::Pixel,
    Stream,
    codec::Context as CodecContext,
};
use std::collections::VecDeque;
use std::time::Duration;
use anyhow::Result;
use tauri::Window;
use std::sync::{Arc, Mutex};

use super::events::{send_video_frame, send_audio_frame};

// Cap on queued frames to avoid unbounded memory growth.
// NOTE(review): this constant appears unused in this file — both processors
// use their configurable `queue_size` field instead; confirm before removing.
const MAX_QUEUE_SIZE: usize = 5;  // limit queue size to avoid memory overflow

/// A single decoded media frame paired with its presentation timing.
///
/// `T` is the payload type: `Vec<u8>` for RGB24 pixel data (video) or
/// `Vec<f32>` for interleaved samples (audio).
#[derive(Debug, Clone)]
pub struct Frame<T> {
    /// Raw frame payload (pixel bytes or audio samples).
    pub data: T,
    /// Presentation timestamp relative to stream start.
    pub timestamp: Duration,
    /// How long this frame should be displayed / played.
    pub duration: Duration,
}

/// Decode-side video pipeline state: converts decoded frames to RGB24 and
/// keeps a bounded queue of recently emitted frames.
pub struct VideoProcessor {
    // Output frame width in pixels (taken from the decoder).
    width: u32,
    // Output frame height in pixels (taken from the decoder).
    height: u32,
    // Average stream frame rate; used to drop frames that arrive too fast.
    frame_rate: f64,
    // Source pixel format reported by the decoder.
    format: Pixel,
    // Shared software scaler converting `format` -> RGB24.
    scaler: Arc<Mutex<SwsContext>>,
    // Bounded FIFO of the most recent RGB24 frames.
    frame_queue: VecDeque<Frame<Vec<u8>>>,
    // Timestamp of the last frame actually emitted (used for pacing).
    last_frame_time: Option<Duration>,
    // Configured buffer size in bytes (stored; not otherwise used here).
    buffer_size: usize,
    // Maximum number of frames retained in `frame_queue`.
    queue_size: usize,
    // Whether hardware acceleration has been requested/enabled.
    hardware_acceleration: bool,
}

impl VideoProcessor {
    pub fn new_with_config(
        stream: Stream,
        buffer_size: usize,
        queue_size: usize,
    ) -> Result<Self> {
        let codec_context = CodecContext::from_parameters(stream.parameters())?;
        let video_decoder = codec_context.decoder().video()?;
        
        let width = video_decoder.width() as u32;
        let height = video_decoder.height() as u32;
        let format = video_decoder.format();

        let scaler = SwsContext::get(
            format,
            width as u32,
            height as u32,
            Pixel::RGB24,
            width as u32,
            height as u32,
            Flags::BILINEAR,
        )?;
        let scaler = Arc::new(Mutex::new(scaler));

        Ok(Self {
            width,
            height,
            frame_rate: stream.avg_frame_rate().into(),
            format,
            scaler,
            frame_queue: VecDeque::with_capacity(queue_size),
            last_frame_time: None,
            buffer_size,
            queue_size,
            hardware_acceleration: false,
        })
    }

    pub fn enable_hardware_acceleration(&mut self) -> Result<()> {
        log::info!("Enabling hardware acceleration for video processing");
        // 这里添加硬件加速的具体实现
        self.hardware_acceleration = true;
        Ok(())
    }

    pub fn update_config(
        &mut self,
        buffer_size: usize,
        queue_size: usize,
        hardware_acceleration: bool,
    ) -> Result<()> {
        log::info!(
            "Updating video processor config: buffer_size={}KB, queue_size={}, hw_accel={}",
            buffer_size/1024, queue_size, hardware_acceleration
        );

        self.buffer_size = buffer_size;
        self.queue_size = queue_size;

        if hardware_acceleration && !self.hardware_acceleration {
            self.enable_hardware_acceleration()?;
        }

        // 调整队列大小
        if self.frame_queue.capacity() != queue_size {
            let mut new_queue = VecDeque::with_capacity(queue_size);
            new_queue.extend(self.frame_queue.drain(..));
            self.frame_queue = new_queue;
        }

        Ok(())
    }

    pub fn process_frame(
        &mut self,
        frame: &Video,
        timestamp: Duration,
        duration: Duration,
        window: &Window,
    ) -> Result<()> {
        // 帧率控制
        if let Some(last_time) = self.last_frame_time {
            let frame_interval = Duration::from_secs_f64(1.0 / self.frame_rate);
            if timestamp - last_time < frame_interval {
                return Ok(());  // 跳过过快的帧
            }
        }

        // 创建 RGB 帧缓冲区
        let mut rgb_frame = Video::empty();
        unsafe {
            rgb_frame.alloc(Pixel::RGB24, self.width, self.height);
        }

        // 获取 scaler 的锁并使用
        let mut scaler = self.scaler.lock().unwrap();
        scaler.run(frame, &mut rgb_frame)?;

        // 优化的数据复制
        let stride = rgb_frame.stride(0);
        let height = self.height as usize;
        let width = self.width as usize;
        let row_size = width * 3;  // RGB24 格式

        let mut data = Vec::with_capacity(height * row_size);
        let src = rgb_frame.data(0);

        for y in 0..height {
            let start = y * stride;
            let end = start + row_size;
            data.extend_from_slice(&src[start..end]);
        }

        // 发送帧到前端
        send_video_frame(
            window,
            self.width,
            self.height,
            data.clone(),
            timestamp,
            duration,
        )?;

        // 更新队列
        if self.frame_queue.len() >= self.queue_size {
            self.frame_queue.pop_front();
        }
        self.frame_queue.push_back(Frame {
            data,
            timestamp,
            duration,
        });

        self.last_frame_time = Some(timestamp);

        Ok(())
    }

    pub fn clear_queue(&mut self) {
        self.frame_queue.clear();
        self.last_frame_time = None;
    }

    pub fn get_frame_rate(&self) -> f64 {
        self.frame_rate
    }

    pub fn get_dimensions(&self) -> (u32, u32) {
        (self.width, self.height)
    }
}

/// Decode-side audio pipeline state: interleaves planar samples and keeps
/// a bounded queue of recently emitted frames.
pub struct AudioProcessor {
    // Number of audio channels to interleave.
    channels: u32,
    // Sample rate in Hz.
    sample_rate: u32,
    // Bounded FIFO of the most recent interleaved sample buffers.
    frame_queue: VecDeque<Frame<Vec<f32>>>,
    // Timestamp of the last processed frame (set but not read in this file).
    last_frame_time: Option<Duration>,
    // Configured buffer size in bytes (stored; not otherwise used here).
    buffer_size: usize,
    // Maximum number of frames retained in `frame_queue`.
    queue_size: usize,
}

impl AudioProcessor {
    /// Creates an audio processor for the given channel count and sample
    /// rate, with a bounded frame queue of `queue_size` entries.
    pub fn new_with_config(
        channels: u32,
        sample_rate: u32,
        buffer_size: usize,
        queue_size: usize,
    ) -> Result<Self> {
        log::debug!(
            "Creating audio processor: {} channels @{}Hz, buffer_size={}KB, queue_size={}",
            channels, sample_rate, buffer_size / 1024, queue_size
        );

        Ok(Self {
            channels,
            sample_rate,
            frame_queue: VecDeque::with_capacity(queue_size),
            last_frame_time: None,
            buffer_size,
            queue_size,
        })
    }

    /// Applies a new buffer/queue configuration. Shrinking the queue drops
    /// the oldest frames.
    pub fn update_config(
        &mut self,
        buffer_size: usize,
        queue_size: usize,
    ) -> Result<()> {
        log::info!(
            "Updating audio processor config: buffer_size={}KB, queue_size={}",
            buffer_size / 1024, queue_size
        );

        self.buffer_size = buffer_size;
        self.queue_size = queue_size;

        // Evict the oldest frames if the queue was shrunk. We deliberately
        // do NOT compare `capacity()` against `queue_size`: VecDeque may
        // over-allocate, so that comparison would rebuild the queue on
        // nearly every call without actually dropping excess frames.
        while self.frame_queue.len() > queue_size {
            self.frame_queue.pop_front();
        }

        Ok(())
    }

    /// Interleaves the planar samples of `frame`, forwards them to the
    /// frontend window, and records them in the bounded frame queue.
    ///
    /// NOTE(review): this assumes the decoder emits planar f32 audio (one
    /// plane per channel) and that `frame` has at least `self.channels`
    /// planes — `plane()` will panic otherwise (e.g. on packed formats).
    /// Confirm the decoder is configured for f32 planar output.
    pub fn process_frame(
        &mut self,
        frame: &Audio,
        timestamp: Duration,
        duration: Duration,
        window: &Window,
    ) -> Result<()> {
        let samples = frame.samples();
        let channels = self.channels as usize;

        // Hoist the per-channel plane lookups out of the per-sample loop.
        let planes: Vec<&[f32]> =
            (0..channels).map(|ch| frame.plane::<f32>(ch)).collect();

        // Interleave planar channels: [L0, R0, L1, R1, ...].
        let mut audio_data = Vec::with_capacity(samples * channels);
        for i in 0..samples {
            for plane in &planes {
                audio_data.push(plane[i]);
            }
        }

        // Forward to the frontend. The clone is required because the same
        // samples are also retained in the local queue below.
        send_audio_frame(
            window,
            audio_data.clone(),
            self.channels,
            self.sample_rate,
            timestamp,
            duration,
        )?;

        // Keep at most `queue_size` recent frames, evicting the oldest.
        if self.frame_queue.len() >= self.queue_size {
            self.frame_queue.pop_front();
        }
        self.frame_queue.push_back(Frame {
            data: audio_data,
            timestamp,
            duration,
        });

        self.last_frame_time = Some(timestamp);

        Ok(())
    }

    /// Clears all queued frames and resets the last-frame timestamp.
    pub fn clear_queue(&mut self) {
        self.frame_queue.clear();
        self.last_frame_time = None;
    }

    /// Channel count this processor was configured with.
    pub fn get_channels(&self) -> u32 {
        self.channels
    }

    /// Sample rate (Hz) this processor was configured with.
    pub fn get_sample_rate(&self) -> u32 {
        self.sample_rate
    }
}
