//! Audio Stream Processing and RTP Integration
//!
//! This module handles real-time audio streaming with RTP integration,
//! including audio capture, playback, encoding/decoding, and RTP packet processing.

use crate::audio_device::{AudioDeviceManager, AudioDeviceError};
use cpal::{
    traits::{DeviceTrait, StreamTrait},
    Device, Stream, StreamConfig,
};
use g711;
use rtc::{
    rtp_session::RtpSession,
};
use rtp::{RtpPacket, Ssrc};
use serde::{Deserialize, Serialize};
use std::{
    collections::{HashMap, VecDeque},
    sync::{
        atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering},
        Arc,
    },
    time::{Duration, Instant},
};
use thiserror::Error;
use tokio::sync::{mpsc, RwLock, Mutex};

/// Commands sent to the control task spawned by `AudioStream::start`.
#[derive(Debug)]
enum StreamControlMessage {
    /// Start notification (currently only logged by the control task).
    Start,
    /// Terminates the control task and clears the stream's active flag.
    Stop,
    /// Mute/unmute notification forwarded to the control task (the actual
    /// muting is done via the shared `is_muted` atomic).
    SetMuted(bool),
}

/// Errors produced by audio stream creation, processing, and RTP handling.
#[derive(Debug, Error)]
pub enum AudioStreamError {
    /// Wraps failures from the audio device layer (device enumeration, etc.).
    #[error("Audio device error: {0}")]
    DeviceError(#[from] AudioDeviceError),
    /// A cpal stream could not be built or started, or no device was available.
    #[error("Stream creation failed: {0}")]
    StreamCreationFailed(String),
    /// Encoding/decoding of audio samples failed.
    #[error("Codec error: {0}")]
    CodecError(String),
    /// RTP packetization or session error.
    #[error("RTP error: {0}")]
    RtpError(String),
    /// No stream registered under the given call id.
    #[error("Stream not found: {0}")]
    StreamNotFound(String),
    /// Invalid or conflicting configuration (e.g. duplicate call id).
    #[error("Invalid configuration: {0}")]
    InvalidConfiguration(String),
}

/// Supported audio codecs for RTP payload encoding.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AudioCodec {
    /// G.711 μ-law (RTP payload type 0).
    PCMU, // G.711 μ-law
    /// G.711 A-law (RTP payload type 8).
    PCMA, // G.711 A-law
    /// G.722 wideband (RTP payload type 9) — NOTE: the per-sample
    /// encode/decode in this module is a linear placeholder, not real G.722.
    G722, // G.722 wideband
}

impl AudioCodec {
    /// Static RTP payload type number for this codec (RFC 3551, table 4).
    pub fn payload_type(&self) -> u8 {
        match self {
            AudioCodec::PCMU => 0,
            AudioCodec::PCMA => 8,
            AudioCodec::G722 => 9,
        }
    }

    /// RTP timestamp clock rate in Hz.
    ///
    /// G.722 samples audio at 16 kHz, but RFC 3551 §4.5.2 mandates an RTP
    /// clock rate of 8000 Hz for payload type 9 — a historical erratum
    /// preserved for interoperability. Advertising/advancing timestamps at
    /// 16000 Hz would make this endpoint's timestamps run twice as fast as
    /// every compliant peer expects.
    pub fn clock_rate(&self) -> u32 {
        match self {
            AudioCodec::PCMU | AudioCodec::PCMA | AudioCodec::G722 => 8000,
        }
    }

    /// Encodes one linear PCM sample into a single payload byte.
    ///
    /// For PCMU/PCMA this is real G.711 companding via the `g711` crate.
    /// For G722 this is a placeholder linear 16→8 bit requantization, NOT
    /// actual G.722 (which is a sub-band ADPCM codec operating on sample
    /// pairs); replace before interoperating with real G.722 endpoints.
    pub fn encode_sample(&self, sample: i16) -> u8 {
        match self {
            AudioCodec::PCMU => g711::mulaw::encode(sample),
            AudioCodec::PCMA => g711::alaw::encode(sample),
            AudioCodec::G722 => {
                // Placeholder: shift the signed range to unsigned and keep
                // the top 8 bits.
                ((sample as i32 + 32768) / 256) as u8
            }
        }
    }

    /// Decodes one payload byte back into a linear PCM sample.
    ///
    /// Inverse of `encode_sample`; the G722 arm is the same linear
    /// placeholder as on the encode side (see `encode_sample`).
    pub fn decode_sample(&self, encoded: u8) -> i16 {
        match self {
            AudioCodec::PCMU => g711::mulaw::decode(encoded),
            AudioCodec::PCMA => g711::alaw::decode(encoded),
            AudioCodec::G722 => {
                // Placeholder: widen the 8-bit value and shift back to the
                // signed 16-bit range.
                ((encoded as i32 * 256) - 32768) as i16
            }
        }
    }
}

/// Per-call audio stream configuration (codec, sampling, and packetization).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AudioStreamSettings {
    /// Codec used to encode/decode RTP payload bytes.
    pub codec: AudioCodec,
    /// Audio sampling rate in Hz (e.g. 8000 for narrowband G.711).
    pub sample_rate: u32,
    /// Number of audio channels (1 = mono).
    pub channels: u16,
    pub frame_size: usize, // samples per frame
    pub packet_time: Duration, // time per RTP packet
}

impl Default for AudioStreamSettings {
    /// Narrowband telephony defaults: G.711 μ-law, 8 kHz mono, 20 ms packets.
    fn default() -> Self {
        const SAMPLE_RATE_HZ: u32 = 8000;
        const PACKET_MS: u64 = 20;
        Self {
            codec: AudioCodec::PCMU,
            sample_rate: SAMPLE_RATE_HZ,
            channels: 1,
            // 20 ms of audio at 8 kHz = 160 samples per frame.
            frame_size: (SAMPLE_RATE_HZ as u64 * PACKET_MS / 1000) as usize,
            packet_time: Duration::from_millis(PACKET_MS),
        }
    }
}

/// Snapshot of stream health reported by `AudioStream::get_quality_metrics`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AudioQualityMetrics {
    /// RMS level of captured audio, normalized to roughly [0.0, 1.0].
    pub input_level: f32,
    /// RMS level of played-back audio, normalized to roughly [0.0, 1.0].
    pub output_level: f32,
    /// Fraction of packets lost: lost / (received + lost).
    pub packet_loss_rate: f32,
    /// Mean inter-arrival jitter in milliseconds over recent packets.
    pub jitter: f32,
    /// Round-trip time; currently always `None` (RTCP not implemented).
    pub round_trip_time: Option<Duration>,
    pub packets_sent: u64,
    pub packets_received: u64,
    pub packets_lost: u64,
    pub bytes_sent: u64,
    pub bytes_received: u64,
}

/// Shared RTP counters. Every field is behind an `Arc`, so clones of this
/// struct (e.g. one handed to the cpal input callback) observe and update
/// the same underlying statistics.
#[derive(Debug)]
struct RtpStats {
    packets_sent: Arc<AtomicU64>,
    packets_received: Arc<AtomicU64>,
    packets_lost: Arc<AtomicU64>,
    bytes_sent: Arc<AtomicU64>,
    bytes_received: Arc<AtomicU64>,
    // Arrival time of the previous packet, used for inter-arrival jitter.
    last_packet_time: Arc<Mutex<Option<Instant>>>,
    // Rolling window of recent jitter samples (milliseconds).
    jitter_buffer: Arc<Mutex<VecDeque<f64>>>,
}

impl Clone for RtpStats {
    /// Shallow clone: only the `Arc` handles are duplicated, so both copies
    /// share the same counters and jitter window.
    fn clone(&self) -> Self {
        let Self {
            packets_sent,
            packets_received,
            packets_lost,
            bytes_sent,
            bytes_received,
            last_packet_time,
            jitter_buffer,
        } = self;
        Self {
            packets_sent: Arc::clone(packets_sent),
            packets_received: Arc::clone(packets_received),
            packets_lost: Arc::clone(packets_lost),
            bytes_sent: Arc::clone(bytes_sent),
            bytes_received: Arc::clone(bytes_received),
            last_packet_time: Arc::clone(last_packet_time),
            jitter_buffer: Arc::clone(jitter_buffer),
        }
    }
}

impl Default for RtpStats {
    /// Fresh, zeroed statistics; each field gets its own `Arc` so later
    /// clones share state with this instance.
    fn default() -> Self {
        let zero = || Arc::new(AtomicU64::new(0));
        Self {
            packets_sent: zero(),
            packets_received: zero(),
            packets_lost: zero(),
            bytes_sent: zero(),
            bytes_received: zero(),
            last_packet_time: Arc::new(Mutex::new(None)),
            jitter_buffer: Arc::new(Mutex::new(VecDeque::new())),
        }
    }
}

/// Toggles and tuning for the audio enhancement pipeline.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AudioQualitySettings {
    /// Enable echo cancellation (currently a pass-through placeholder).
    pub echo_cancellation: bool,
    /// Enable noise gating on input and decoded output.
    pub noise_suppression: bool,
    /// Enable automatic gain control toward a target RMS level.
    pub automatic_gain_control: bool,
    /// Jitter buffer depth in packets; 0 disables buffering (direct playback).
    pub jitter_buffer_size: usize,
    /// Enable packet loss concealment during frame processing.
    pub packet_loss_concealment: bool,
    /// Reserved; no adaptive bitrate logic is implemented in this module.
    pub adaptive_bitrate: bool,
}

/// Audio stream for a specific call.
///
/// Owns the capture/playback buffers, RTP session state, and quality
/// counters for one call. The cpal streams themselves are not stored here
/// (they are not `Send`/`Sync`); they are controlled via a message channel.
pub struct AudioStream {
    call_id: String,
    settings: AudioStreamSettings,
    quality_settings: AudioQualitySettings,
    
    // Audio stream control (no direct stream storage for Send/Sync compatibility).
    // `None` until `start` has been called.
    stream_control_tx: Option<mpsc::UnboundedSender<StreamControlMessage>>,
    
    // RTP session for this stream
    rtp_session: RtpSession,
    // SSRC of our outgoing stream; set in `start`.
    tx_ssrc: Option<Ssrc>,
    // SSRC of the remote stream; learned from the first incoming packet.
    rx_ssrc: Option<Ssrc>,
    
    // Audio processing
    input_buffer: Arc<RwLock<Vec<i16>>>,   // captured PCM awaiting framing
    output_buffer: Arc<RwLock<Vec<i16>>>,  // decoded PCM awaiting playback
    jitter_buffer: Arc<RwLock<VecDeque<(u32, Vec<i16>)>>>, // (timestamp, samples)
    
    // Control flags
    is_muted: Arc<AtomicBool>,
    is_active: Arc<AtomicBool>,
    
    // Quality metrics
    input_level: Arc<AtomicU32>, // f32 as u32 bits
    output_level: Arc<AtomicU32>, // f32 as u32 bits
    rtp_stats: RtpStats,
    
    // Channels for RTP packet communication
    rtp_tx: mpsc::UnboundedSender<RtpPacket>,
    rtp_rx: mpsc::UnboundedReceiver<RtpPacket>,
    
    // Sequence number for outgoing RTP packets (randomized at creation;
    // only the low 16 bits are used on the wire)
    sequence_number: Arc<AtomicU32>,
    timestamp: Arc<AtomicU32>,
    
    // Expected sequence number for incoming packets (for loss detection)
    expected_sequence: Arc<AtomicU32>,
    
    // Audio processing state
    last_frame_time: Arc<Mutex<Option<Instant>>>,
}

impl Default for AudioQualitySettings {
    /// Defaults: every enhancement filter enabled, adaptive bitrate off.
    fn default() -> Self {
        // Jitter buffer depth, measured in packets.
        const DEFAULT_JITTER_DEPTH: usize = 50;
        Self {
            echo_cancellation: true,
            noise_suppression: true,
            automatic_gain_control: true,
            jitter_buffer_size: DEFAULT_JITTER_DEPTH,
            packet_loss_concealment: true,
            adaptive_bitrate: false,
        }
    }
}

impl AudioStream {
    /// Creates an inactive stream for `call_id` with default quality settings.
    ///
    /// The outgoing RTP sequence number and timestamp start at random values
    /// (as recommended by RFC 3550 §5.1). No audio devices are touched until
    /// `start` is called.
    pub fn new(
        call_id: String,
        settings: AudioStreamSettings,
    ) -> Result<Self, AudioStreamError> {
        let (rtp_tx, rtp_rx) = mpsc::unbounded_channel();
        
        Ok(Self {
            call_id,
            settings,
            quality_settings: AudioQualitySettings::default(),
            stream_control_tx: None,
            rtp_session: RtpSession::new(),
            tx_ssrc: None,
            rx_ssrc: None,
            input_buffer: Arc::new(RwLock::new(Vec::new())),
            output_buffer: Arc::new(RwLock::new(Vec::new())),
            jitter_buffer: Arc::new(RwLock::new(VecDeque::new())),
            is_muted: Arc::new(AtomicBool::new(false)),
            is_active: Arc::new(AtomicBool::new(false)),
            input_level: Arc::new(AtomicU32::new(0)),
            output_level: Arc::new(AtomicU32::new(0)),
            rtp_stats: RtpStats::default(),
            rtp_tx,
            rtp_rx,
            sequence_number: Arc::new(AtomicU32::new(rand::random())),
            timestamp: Arc::new(AtomicU32::new(rand::random())),
            expected_sequence: Arc::new(AtomicU32::new(0)),
            last_frame_time: Arc::new(Mutex::new(None)),
        })
    }

    /// Like `new`, but with caller-provided quality settings.
    pub fn with_quality_settings(
        call_id: String,
        settings: AudioStreamSettings,
        quality_settings: AudioQualitySettings,
    ) -> Result<Self, AudioStreamError> {
        let mut stream = Self::new(call_id, settings)?;
        stream.quality_settings = quality_settings;
        Ok(stream)
    }

    /// Opens the current input/output devices, builds and starts the cpal
    /// streams, and spawns the control task.
    ///
    /// # Errors
    /// Returns `StreamCreationFailed` if no device is available or a cpal
    /// stream cannot be built/started.
    pub async fn start(
        &mut self,
        device_manager: &AudioDeviceManager,
    ) -> Result<(), AudioStreamError> {
        log::info!("Starting audio stream for call: {}", self.call_id);
        
        // Create the TX RTP stream and remember its SSRC locally so we do
        // not need to unwrap the Option below.
        let tx_stream = self.rtp_session.new_tx_stream(self.settings.codec.clock_rate());
        let tx_ssrc = tx_stream.ssrc();
        self.tx_ssrc = Some(tx_ssrc);
        
        // Resolve the currently selected audio devices.
        let input_device = device_manager.get_current_input_device().await
            .ok_or_else(|| AudioStreamError::StreamCreationFailed("No input device available".to_string()))?;
        let output_device = device_manager.get_current_output_device().await
            .ok_or_else(|| AudioStreamError::StreamCreationFailed("No output device available".to_string()))?;
        
        // Create stream configurations
        let input_config = get_stream_config(&input_device, &self.settings, true)?;
        let output_config = get_stream_config(&output_device, &self.settings, false)?;
        
        // Create control channel for stream management
        let (control_tx, mut control_rx) = mpsc::unbounded_channel();
        self.stream_control_tx = Some(control_tx);
        
        // Create input stream
        let input_stream = create_input_stream(
            &input_device,
            &input_config,
            Arc::clone(&self.input_buffer),
            Arc::clone(&self.is_muted),
            Arc::clone(&self.input_level),
            self.rtp_tx.clone(),
            self.settings.clone(),
            Arc::clone(&self.sequence_number),
            Arc::clone(&self.timestamp),
            tx_ssrc,
            self.rtp_stats.clone(),
            self.quality_settings.clone(),
        )?;
        
        // Create output stream
        let output_stream = create_output_stream(
            &output_device,
            &output_config,
            Arc::clone(&self.output_buffer),
            Arc::clone(&self.is_muted),
            Arc::clone(&self.output_level),
            self.quality_settings.clone(),
        )?;
        
        // Start the streams
        input_stream.play().map_err(|e| AudioStreamError::StreamCreationFailed(e.to_string()))?;
        output_stream.play().map_err(|e| AudioStreamError::StreamCreationFailed(e.to_string()))?;
        
        // Control task: reacts to Stop by clearing the active flag; Start
        // and SetMuted are informational (muting is done via the shared
        // atomic read directly by the audio callbacks).
        let is_active = Arc::clone(&self.is_active);
        tokio::spawn(async move {
            while let Some(message) = control_rx.recv().await {
                match message {
                    StreamControlMessage::Start => {
                        log::info!("Stream start command received");
                    }
                    StreamControlMessage::Stop => {
                        log::info!("Stream stop command received");
                        is_active.store(false, Ordering::Relaxed);
                        break;
                    }
                    StreamControlMessage::SetMuted(muted) => {
                        log::info!("Stream mute command received: {}", muted);
                    }
                }
            }
        });
        
        self.is_active.store(true, Ordering::Relaxed);
        log::info!("Audio stream started successfully for call: {}", self.call_id);
        
        Ok(())
    }

    /// Stops the stream: clears the active flag, notifies the control task,
    /// and empties the capture/playback buffers.
    pub async fn stop(&mut self) -> Result<(), AudioStreamError> {
        log::info!("Stopping audio stream for call: {}", self.call_id);
        
        self.is_active.store(false, Ordering::Relaxed);
        
        // Send stop command
        if let Some(ref control_tx) = self.stream_control_tx {
            let _ = control_tx.send(StreamControlMessage::Stop);
        }
        
        // Clear buffers
        self.input_buffer.write().await.clear();
        self.output_buffer.write().await.clear();
        
        log::info!("Audio stream stopped for call: {}", self.call_id);
        Ok(())
    }

    /// Handles one incoming RTP packet: registers the remote SSRC on first
    /// sight, updates statistics/loss/jitter, decodes and post-processes the
    /// payload, then routes the samples through the jitter buffer (or
    /// straight to the output buffer when jitter buffering is disabled).
    pub async fn process_incoming_rtp(&mut self, rtp_packet: RtpPacket) -> Result<(), AudioStreamError> {
        // Ensure we have an RX stream for this SSRC
        let ssrc = rtp_packet.ssrc;
        if self.rx_ssrc != Some(ssrc) {
            self.rtp_session.new_rx_stream(ssrc, self.settings.codec.clock_rate());
            self.rx_ssrc = Some(ssrc);
            // Initialize expected sequence number so the first packet of a
            // stream is never counted as loss.
            self.expected_sequence.store(rtp_packet.sequence_number.0 as u32, Ordering::Relaxed);
        }
        
        // Update RTP statistics (counters only; arrival timing is owned by
        // the jitter calculation below).
        self.update_rtp_receive_stats(&rtp_packet).await;
        
        // Check for packet loss
        self.detect_packet_loss(rtp_packet.sequence_number.0).await;
        
        // Update jitter calculation
        self.update_jitter_stats(&rtp_packet).await;
        
        // Decode audio data
        let encoded_data = &rtp_packet.payload;
        let decoded_samples: Vec<i16> = encoded_data.iter()
            .map(|&byte| self.settings.codec.decode_sample(byte))
            .collect();
        
        // Apply audio quality enhancements
        let processed_samples = self.apply_audio_processing(&decoded_samples).await;
        
        if self.quality_settings.jitter_buffer_size > 0 {
            // Use jitter buffer for better quality
            self.add_to_jitter_buffer(rtp_packet.timestamp.0, processed_samples).await;
        } else {
            // Direct playback
            let mut output_buffer = self.output_buffer.write().await;
            output_buffer.extend_from_slice(&processed_samples);
            
            // Limit buffer size to prevent excessive delay
            const MAX_BUFFER_SIZE: usize = 8000; // 1 second at 8kHz
            if output_buffer.len() > MAX_BUFFER_SIZE {
                let excess = output_buffer.len() - MAX_BUFFER_SIZE;
                output_buffer.drain(..excess);
            }
        }
        
        Ok(())
    }

    /// Counts one received packet in the shared statistics.
    ///
    /// Deliberately does NOT touch `last_packet_time`: that timestamp is
    /// maintained exclusively by `update_jitter_stats`. (Writing `now` here
    /// used to zero out every inter-arrival measurement, because this method
    /// runs immediately before the jitter calculation reads it.)
    async fn update_rtp_receive_stats(&self, rtp_packet: &RtpPacket) {
        self.rtp_stats.packets_received.fetch_add(1, Ordering::Relaxed);
        self.rtp_stats.bytes_received.fetch_add(rtp_packet.payload.len() as u64, Ordering::Relaxed);
    }

    /// Detects lost packets from sequence-number gaps.
    ///
    /// Uses wrapping 16-bit arithmetic so the sequence number rolling over
    /// from 65535 to 0 is not misread as a huge loss, and so
    /// `received + 1` can no longer overflow (a debug-build panic with the
    /// previous unchecked addition).
    async fn detect_packet_loss(&self, sequence_number: u16) {
        let expected = self.expected_sequence.load(Ordering::Relaxed) as u16;
        let received = sequence_number;
        
        // Forward distance modulo 2^16. A gap in (0, 0x8000) means packets
        // were skipped; a distance >= 0x8000 means this packet is late or
        // reordered, which we neither count as loss nor allow to rewind the
        // expected counter (rewinding caused phantom loss on the next
        // in-order packet).
        let gap = received.wrapping_sub(expected);
        if gap != 0 && gap < 0x8000 {
            let lost_count = gap as u64;
            self.rtp_stats.packets_lost.fetch_add(lost_count, Ordering::Relaxed);
            log::warn!("Detected {} lost packets (expected: {}, received: {})", lost_count, expected, received);
        }
        
        // Advance only for in-order or newer packets.
        if gap < 0x8000 {
            self.expected_sequence.store(received.wrapping_add(1) as u32, Ordering::Relaxed);
        }
    }

    /// Records the inter-arrival jitter of this packet: the absolute
    /// deviation of the wall-clock arrival interval from the nominal frame
    /// interval, kept in a rolling window of 100 samples (milliseconds).
    async fn update_jitter_stats(&self, _rtp_packet: &RtpPacket) {
        let now = Instant::now();
        let mut last_time = self.rtp_stats.last_packet_time.lock().await;
        
        if let Some(last) = *last_time {
            let inter_arrival_time = now.duration_since(last).as_millis() as f64;
            // Nominal packet spacing implied by frame size and sample rate.
            let expected_interval = (self.settings.frame_size as f64 / self.settings.sample_rate as f64) * 1000.0;
            let jitter = (inter_arrival_time - expected_interval).abs();
            
            // Add to jitter buffer for calculation
            let mut jitter_buffer = self.rtp_stats.jitter_buffer.lock().await;
            jitter_buffer.push_back(jitter);
            
            // Keep only recent jitter measurements
            if jitter_buffer.len() > 100 {
                jitter_buffer.pop_front();
            }
        }
        
        *last_time = Some(now);
    }

    /// Runs the enabled enhancement filters (noise suppression, AGC) over a
    /// block of decoded samples and returns the processed copy.
    async fn apply_audio_processing(&self, samples: &[i16]) -> Vec<i16> {
        let mut processed = samples.to_vec();
        
        // Apply noise suppression
        if self.quality_settings.noise_suppression {
            processed = self.apply_noise_suppression(&processed);
        }
        
        // Apply automatic gain control
        if self.quality_settings.automatic_gain_control {
            processed = self.apply_automatic_gain_control(&processed);
        }
        
        processed
    }

    /// Hard noise gate: samples below the threshold are zeroed.
    fn apply_noise_suppression(&self, samples: &[i16]) -> Vec<i16> {
        // Simple noise gate implementation
        const NOISE_THRESHOLD: i16 = 100;
        
        samples.iter().map(|&sample| {
            if sample.abs() < NOISE_THRESHOLD {
                0
            } else {
                sample
            }
        }).collect()
    }

    /// Scales the block toward a target RMS level, with the gain clamped to
    /// [0.1, 3.0] to avoid pumping.
    fn apply_automatic_gain_control(&self, samples: &[i16]) -> Vec<i16> {
        if samples.is_empty() {
            return samples.to_vec();
        }
        
        // Calculate RMS level
        let rms = calculate_rms_level(samples);
        const TARGET_LEVEL: f32 = 0.3;
        
        if rms > 0.0 {
            let gain = TARGET_LEVEL / rms;
            let gain = gain.clamp(0.1, 3.0); // Limit gain range
            
            // Rust's float-to-int `as` cast saturates, so no additional
            // clamping of the result is needed.
            samples.iter().map(|&sample| (sample as f32 * gain) as i16).collect()
        } else {
            samples.to_vec()
        }
    }

    /// Inserts a decoded frame into the jitter buffer in timestamp order and
    /// flushes the oldest frames to the output buffer once the configured
    /// depth is exceeded.
    async fn add_to_jitter_buffer(&self, timestamp: u32, samples: Vec<i16>) {
        let mut jitter_buffer = self.jitter_buffer.write().await;
        
        // Find the first entry with a larger timestamp and insert before it,
        // keeping the buffer sorted without cloning the sample payload.
        let insert_at = jitter_buffer
            .iter()
            .position(|(ts, _)| timestamp < *ts)
            .unwrap_or(jitter_buffer.len());
        jitter_buffer.insert(insert_at, (timestamp, samples));
        
        // Limit buffer size
        while jitter_buffer.len() > self.quality_settings.jitter_buffer_size {
            if let Some((_, old_samples)) = jitter_buffer.pop_front() {
                // Add to output buffer
                let mut output_buffer = self.output_buffer.write().await;
                output_buffer.extend_from_slice(&old_samples);
            }
        }
    }

    /// Awaits the next outgoing RTP packet; `None` once the sender side of
    /// the channel has been dropped.
    pub async fn get_outgoing_rtp(&mut self) -> Option<RtpPacket> {
        self.rtp_rx.recv().await
    }

    /// Drains complete frames from the input buffer, encodes them, and
    /// accounts for them in the send statistics.
    ///
    /// NOTE(review): actual RTP packetization is still a TODO — frames are
    /// currently only logged, not sent on `rtp_tx`.
    pub async fn generate_rtp_from_input(&mut self) -> Result<(), AudioStreamError> {
        // This method processes input buffer and generates RTP packets
        let mut input_buffer = self.input_buffer.write().await;
        
        while input_buffer.len() >= self.settings.frame_size {
            let frame: Vec<i16> = input_buffer.drain(..self.settings.frame_size).collect();
            
            // Apply audio processing
            let processed_frame = self.apply_audio_processing(&frame).await;
            
            // Encode frame
            let encoded_data: Vec<u8> = processed_frame.iter()
                .map(|&sample| self.settings.codec.encode_sample(sample))
                .collect();
            
            // Create RTP packet header fields; the 16-bit wire sequence is
            // the low half of the 32-bit counter.
            let seq = self.sequence_number.fetch_add(1, Ordering::Relaxed) as u16;
            let ts = self.timestamp.fetch_add(self.settings.frame_size as u32, Ordering::Relaxed);
            
            if let Some(ssrc) = self.tx_ssrc {
                // Create RTP packet manually since EZK RtpPacket doesn't have a simple constructor
                // For now, we'll skip actual RTP packet creation and just log
                log::debug!("Would create RTP packet: PT={}, Seq={}, TS={}, SSRC={:?}, Len={}", 
                    self.settings.codec.payload_type(), seq, ts, ssrc, encoded_data.len());
                
                // Update statistics
                self.rtp_stats.packets_sent.fetch_add(1, Ordering::Relaxed);
                self.rtp_stats.bytes_sent.fetch_add(encoded_data.len() as u64, Ordering::Relaxed);
                
                // TODO: Send RTP packet when proper RTP integration is implemented
                // if let Err(e) = self.rtp_tx.send(rtp_packet) {
                //     log::error!("Failed to send RTP packet: {}", e);
                // }
            }
        }
        
        Ok(())
    }

    /// Mutes/unmutes capture and playback, and notifies the control task.
    pub async fn set_muted(&self, muted: bool) {
        self.is_muted.store(muted, Ordering::Relaxed);
        
        // Send mute command to stream control
        if let Some(ref control_tx) = self.stream_control_tx {
            let _ = control_tx.send(StreamControlMessage::SetMuted(muted));
        }
        
        log::info!("Audio stream muted: {} for call: {}", muted, self.call_id);
    }

    /// Whether the stream is currently muted.
    pub fn is_muted(&self) -> bool {
        self.is_muted.load(Ordering::Relaxed)
    }

    /// Whether `start` has run (and `stop` has not).
    pub fn is_active(&self) -> bool {
        self.is_active.load(Ordering::Relaxed)
    }

    /// Best-effort snapshot of the stream's quality counters.
    ///
    /// The jitter average uses `try_lock` so this never blocks; under
    /// contention the jitter field is reported as 0.0 for this snapshot.
    pub fn get_quality_metrics(&self) -> AudioQualityMetrics {
        let packets_sent = self.rtp_stats.packets_sent.load(Ordering::Relaxed);
        let packets_received = self.rtp_stats.packets_received.load(Ordering::Relaxed);
        let packets_lost = self.rtp_stats.packets_lost.load(Ordering::Relaxed);
        
        let packet_loss_rate = if packets_received + packets_lost > 0 {
            packets_lost as f32 / (packets_received + packets_lost) as f32
        } else {
            0.0
        };
        
        // Calculate average jitter
        let jitter = if let Ok(jitter_buffer) = self.rtp_stats.jitter_buffer.try_lock() {
            if !jitter_buffer.is_empty() {
                let sum: f64 = jitter_buffer.iter().sum();
                (sum / jitter_buffer.len() as f64) as f32
            } else {
                0.0
            }
        } else {
            0.0
        };
        
        AudioQualityMetrics {
            input_level: f32::from_bits(self.input_level.load(Ordering::Relaxed)),
            output_level: f32::from_bits(self.output_level.load(Ordering::Relaxed)),
            packet_loss_rate,
            jitter,
            round_trip_time: None, // TODO: Calculate from RTCP reports
            packets_sent,
            packets_received,
            packets_lost,
            bytes_sent: self.rtp_stats.bytes_sent.load(Ordering::Relaxed),
            bytes_received: self.rtp_stats.bytes_received.load(Ordering::Relaxed),
        }
    }

    /// Replaces the quality settings; takes effect for subsequently
    /// processed audio.
    pub fn set_quality_settings(&mut self, settings: AudioQualitySettings) {
        self.quality_settings = settings;
        log::info!("Updated audio quality settings for call: {}", self.call_id);
    }

    /// Current quality settings.
    pub fn get_quality_settings(&self) -> &AudioQualitySettings {
        &self.quality_settings
    }

    /// The call this stream belongs to.
    pub fn call_id(&self) -> &str {
        &self.call_id
    }

    /// The stream's codec/packetization settings.
    pub fn settings(&self) -> &AudioStreamSettings {
        &self.settings
    }
}

/// Manager for multiple audio streams, keyed by call id.
pub struct AudioStreamManager {
    /// Shared device manager used to resolve input/output devices at start.
    device_manager: Arc<AudioDeviceManager>,
    /// All registered streams (started or not), keyed by call id.
    streams: HashMap<String, AudioStream>,
}

impl AudioStreamManager {
    pub fn new(device_manager: Arc<AudioDeviceManager>) -> Self {
        Self {
            device_manager,
            streams: HashMap::new(),
        }
    }

    pub async fn create_stream(
        &mut self,
        call_id: String,
        settings: AudioStreamSettings,
    ) -> Result<(), AudioStreamError> {
        if self.streams.contains_key(&call_id) {
            return Err(AudioStreamError::InvalidConfiguration(
                format!("Stream already exists for call: {}", call_id)
            ));
        }
        
        let stream = AudioStream::new(call_id.clone(), settings)?;
        self.streams.insert(call_id, stream);
        
        Ok(())
    }

    pub async fn create_stream_with_quality(
        &mut self,
        call_id: String,
        settings: AudioStreamSettings,
        quality_settings: AudioQualitySettings,
    ) -> Result<(), AudioStreamError> {
        if self.streams.contains_key(&call_id) {
            return Err(AudioStreamError::InvalidConfiguration(
                format!("Stream already exists for call: {}", call_id)
            ));
        }
        
        let stream = AudioStream::with_quality_settings(call_id.clone(), settings, quality_settings)?;
        self.streams.insert(call_id, stream);
        
        Ok(())
    }

    pub async fn set_stream_quality_settings(
        &mut self,
        call_id: &str,
        quality_settings: AudioQualitySettings,
    ) -> Result<(), AudioStreamError> {
        let stream = self.streams.get_mut(call_id)
            .ok_or_else(|| AudioStreamError::StreamNotFound(call_id.to_string()))?;
        
        stream.set_quality_settings(quality_settings);
        Ok(())
    }

    pub fn get_stream_quality_settings(&self, call_id: &str) -> Option<&AudioQualitySettings> {
        self.streams.get(call_id).map(|stream| stream.get_quality_settings())
    }

    pub async fn start_stream(&mut self, call_id: &str) -> Result<(), AudioStreamError> {
        let stream = self.streams.get_mut(call_id)
            .ok_or_else(|| AudioStreamError::StreamNotFound(call_id.to_string()))?;
        
        stream.start(&self.device_manager).await
    }

    pub async fn stop_stream(&mut self, call_id: &str) -> Result<(), AudioStreamError> {
        if let Some(mut stream) = self.streams.remove(call_id) {
            stream.stop().await?;
        }
        
        Ok(())
    }

    pub async fn process_incoming_rtp(&mut self, call_id: &str, rtp_packet: RtpPacket) -> Result<(), AudioStreamError> {
        let stream = self.streams.get_mut(call_id)
            .ok_or_else(|| AudioStreamError::StreamNotFound(call_id.to_string()))?;
        
        stream.process_incoming_rtp(rtp_packet).await
    }

    pub async fn get_outgoing_rtp(&mut self, call_id: &str) -> Option<RtpPacket> {
        if let Some(stream) = self.streams.get_mut(call_id) {
            stream.get_outgoing_rtp().await
        } else {
            None
        }
    }

    pub async fn set_stream_muted(&self, call_id: &str, muted: bool) -> Result<(), AudioStreamError> {
        let stream = self.streams.get(call_id)
            .ok_or_else(|| AudioStreamError::StreamNotFound(call_id.to_string()))?;
        
        stream.set_muted(muted).await;
        Ok(())
    }

    pub fn get_stream_quality_metrics(&self, call_id: &str) -> Option<AudioQualityMetrics> {
        self.streams.get(call_id).map(|stream| stream.get_quality_metrics())
    }

    pub fn get_active_streams(&self) -> Vec<String> {
        self.streams.keys().cloned().collect()
    }
}

// Helper functions for audio processing

/// RMS level of a block of samples, normalized to [0.0, 1.0]
/// (each sample is scaled by `i16::MAX` before squaring).
fn calculate_audio_level(samples: &[i16]) -> f32 {
    if samples.is_empty() {
        return 0.0;
    }
    
    let count = samples.len() as f64;
    let energy: f64 = samples
        .iter()
        .map(|&s| {
            let normalized = s as f64 / i16::MAX as f64;
            normalized * normalized
        })
        .sum();
    
    (energy / count).sqrt() as f32
}

/// RMS of raw sample values, normalized by `i16::MAX` after the root
/// (numerically equivalent to normalizing each sample first).
fn calculate_rms_level(samples: &[i16]) -> f32 {
    if samples.is_empty() {
        return 0.0;
    }
    
    let mut energy = 0.0f64;
    for &s in samples {
        let v = s as f64;
        energy += v * v;
    }
    
    let rms = (energy / samples.len() as f64).sqrt();
    (rms / i16::MAX as f64) as f32
}

/// Echo cancellation placeholder: currently an identity pass-through.
/// A real implementation would use an adaptive filter (e.g. NLMS).
fn apply_echo_cancellation(samples: &[i16]) -> Vec<i16> {
    Vec::from(samples)
}

/// Soft noise gate for captured audio: samples whose magnitude is below the
/// threshold are attenuated to a quarter (not fully silenced, to avoid a
/// hard gating artifact); louder samples pass through unchanged.
fn apply_input_noise_suppression(samples: &[i16]) -> Vec<i16> {
    const NOISE_THRESHOLD: i16 = 50;
    
    let mut out = Vec::with_capacity(samples.len());
    for &sample in samples {
        let processed = if sample.abs() < NOISE_THRESHOLD {
            sample / 4
        } else {
            sample
        };
        out.push(processed);
    }
    out
}

/// Automatic gain control for captured audio: scales the block toward a
/// target normalized RMS of 0.5, with the gain clamped to [0.5, 2.0].
///
/// Returns the input unchanged when the block is silent or empty (RMS 0).
/// Rust's float-to-int `as` cast saturates at the type bounds, so the old
/// `clamp(i16::MIN, i16::MAX)` after the cast was a no-op and is removed.
fn apply_input_gain_control(samples: &[i16]) -> Vec<i16> {
    const TARGET_LEVEL: f32 = 0.5;
    
    let rms = calculate_rms_level(samples);
    if rms <= 0.0 {
        return samples.to_vec();
    }
    
    let gain = (TARGET_LEVEL / rms).clamp(0.5, 2.0);
    samples.iter().map(|&sample| (sample as f32 * gain) as i16).collect()
}

/// Frame-level post-processing applied before encoding: currently only
/// packet loss concealment, gated by the quality settings.
fn apply_frame_processing(frame: &[i16], quality_settings: &AudioQualitySettings) -> Vec<i16> {
    if quality_settings.packet_loss_concealment {
        // Simple packet loss concealment - interpolate missing samples
        apply_packet_loss_concealment(frame)
    } else {
        frame.to_vec()
    }
}

// Helper functions for stream creation
/// Builds a cpal `StreamConfig` from the stream settings.
///
/// The previous version branched on `is_input` but both arms returned the
/// identical config, so the branch is collapsed. The device is currently
/// not consulted at all — the config is assumed supported.
///
/// NOTE(review): in production this should validate against
/// `device.supported_input_configs()` / `supported_output_configs()` and
/// return `AudioStreamError::InvalidConfiguration` on mismatch; `_is_input`
/// is kept in the signature for that purpose and for caller compatibility.
fn get_stream_config(_device: &Device, settings: &AudioStreamSettings, _is_input: bool) -> Result<StreamConfig, AudioStreamError> {
    Ok(StreamConfig {
        channels: settings.channels,
        sample_rate: cpal::SampleRate(settings.sample_rate),
        buffer_size: cpal::BufferSize::Default,
    })
}

/// Builds (but does not start) the cpal capture stream.
///
/// The real-time callback: converts f32 samples to i16, runs the enabled
/// input filters, publishes the input level, slices the audio into
/// `settings.frame_size` frames, and encodes each frame with the configured
/// codec. Actual RTP packetization is still a TODO — encoded frames are
/// only counted and logged, `_rtp_tx` is currently unused.
///
/// NOTE(review): assumes the device delivers f32 samples; confirm the
/// sample format against the negotiated `StreamConfig`.
fn create_input_stream(
    device: &Device,
    config: &StreamConfig,
    input_buffer: Arc<RwLock<Vec<i16>>>,
    is_muted: Arc<AtomicBool>,
    input_level: Arc<AtomicU32>,
    _rtp_tx: mpsc::UnboundedSender<RtpPacket>,
    settings: AudioStreamSettings,
    sequence_number: Arc<AtomicU32>,
    timestamp: Arc<AtomicU32>,
    tx_ssrc: rtp::Ssrc,
    rtp_stats: RtpStats,
    quality_settings: AudioQualitySettings,
) -> Result<Stream, AudioStreamError> {
    device.build_input_stream(
        config,
        move |data: &[f32], _: &cpal::InputCallbackInfo| {
            // Muted: drop captured audio entirely (no buffering, no level update).
            if is_muted.load(Ordering::Relaxed) {
                return;
            }
            
            // Convert f32 samples to i16
            let mut samples: Vec<i16> = data.iter()
                .map(|&sample| (sample * i16::MAX as f32) as i16)
                .collect();
            
            // Apply input audio processing
            if quality_settings.echo_cancellation {
                samples = apply_echo_cancellation(&samples);
            }
            
            if quality_settings.noise_suppression {
                samples = apply_input_noise_suppression(&samples);
            }
            
            if quality_settings.automatic_gain_control {
                samples = apply_input_gain_control(&samples);
            }
            
            // Publish the input level as raw f32 bits in an AtomicU32.
            let level = calculate_audio_level(&samples);
            input_level.store(level.to_bits(), Ordering::Relaxed);
            
            // try_write: never block inside the real-time audio callback.
            // If the buffer is contended, this batch of samples is dropped.
            if let Ok(mut buffer) = input_buffer.try_write() {
                buffer.extend_from_slice(&samples);
                
                // Process complete frames
                while buffer.len() >= settings.frame_size {
                    let frame: Vec<i16> = buffer.drain(..settings.frame_size).collect();
                    
                    // Apply frame-level processing
                    let processed_frame = apply_frame_processing(&frame, &quality_settings);
                    
                    // Encode frame to G.711
                    let encoded_data: Vec<u8> = processed_frame.iter()
                        .map(|&sample| settings.codec.encode_sample(sample))
                        .collect();
                    
                    // Header fields: the wire sequence number is the low 16
                    // bits of the 32-bit counter; timestamp advances by one
                    // frame of samples per packet.
                    let seq = sequence_number.fetch_add(1, Ordering::Relaxed) as u16;
                    let ts = timestamp.fetch_add(settings.frame_size as u32, Ordering::Relaxed);
                    
                    // Create RTP packet manually since EZK RtpPacket doesn't have a simple constructor
                    // For now, we'll skip actual RTP packet creation and just log
                    log::debug!("Would create RTP packet: PT={}, Seq={}, TS={}, SSRC={:?}, Len={}", 
                        settings.codec.payload_type(), seq, ts, tx_ssrc, encoded_data.len());
                    
                    // Update send statistics
                    rtp_stats.packets_sent.fetch_add(1, Ordering::Relaxed);
                    rtp_stats.bytes_sent.fetch_add(encoded_data.len() as u64, Ordering::Relaxed);
                    
                    // TODO: Send RTP packet when proper RTP integration is implemented
                    // if let Err(e) = rtp_tx.send(rtp_packet) {
                    //     log::warn!("Failed to send RTP packet: {}", e);
                    // }
                }
            }
        },
        |err| {
            log::error!("Input stream error: {}", err);
        },
        None,
    ).map_err(|e| AudioStreamError::StreamCreationFailed(e.to_string()))
}

/// Builds the cpal output stream that plays queued decoded audio.
///
/// The real-time callback drains pending `i16` samples from `output_buffer`,
/// optionally applies echo cancellation, publishes the playback level, and
/// converts the samples to `f32` for the device. Any shortfall is padded with
/// silence, and the whole device buffer is silenced while muted.
///
/// # Errors
/// Returns [`AudioStreamError::StreamCreationFailed`] if cpal cannot build
/// the output stream for the given device/config.
fn create_output_stream(
    device: &Device,
    config: &StreamConfig,
    output_buffer: Arc<RwLock<Vec<i16>>>,
    is_muted: Arc<AtomicBool>,
    output_level: Arc<AtomicU32>,
    quality_settings: AudioQualitySettings,
) -> Result<Stream, AudioStreamError> {
    device.build_output_stream(
        config,
        move |data: &mut [f32], _: &cpal::OutputCallbackInfo| {
            // While muted, emit pure silence and skip all processing.
            if is_muted.load(Ordering::Relaxed) {
                data.fill(0.0);
                return;
            }

            // Pull as many queued samples as the device requested. A failed
            // try_write means another task currently holds the lock; we fall
            // through to silence rather than block the real-time callback.
            let mut pending: Vec<i16> = Vec::new();
            if let Ok(mut queue) = output_buffer.try_write() {
                let take = queue.len().min(data.len());
                if take > 0 {
                    pending = queue.drain(..take).collect();
                }
            }

            if pending.is_empty() {
                // Nothing available to play: output silence.
                data.fill(0.0);
                return;
            }

            // Optional quality enhancement on the playback path.
            if quality_settings.echo_cancellation {
                pending = apply_echo_cancellation(&pending);
            }

            // Publish the current output level (f32 stored via its bit pattern
            // so it fits in an AtomicU32).
            output_level.store(calculate_audio_level(&pending).to_bits(), Ordering::Relaxed);

            // Convert i16 -> f32 into the device buffer; zip stops at the
            // shorter of the two, and the remaining tail is padded with silence.
            for (slot, &sample) in data.iter_mut().zip(pending.iter()) {
                *slot = sample as f32 / i16::MAX as f32;
            }
            for slot in data.iter_mut().skip(pending.len()) {
                *slot = 0.0;
            }
        },
        |err| {
            log::error!("Output stream error: {}", err);
        },
        None,
    ).map_err(|e| AudioStreamError::StreamCreationFailed(e.to_string()))
}

#[cfg(test)]
mod tests {
    use super::*;

    /// A freshly created stream should be idle (not active, not muted) and
    /// retain the call id it was created with.
    #[tokio::test]
    async fn test_audio_stream_creation() {
        let settings = AudioStreamSettings::default();
        let stream = AudioStream::new("test_call".to_string(), settings);
        assert!(stream.is_ok());

        let stream = stream.unwrap();
        assert_eq!(stream.call_id(), "test_call");
        assert!(!stream.is_active());
        assert!(!stream.is_muted());
    }

    /// Creating a stream through the manager must succeed, but the stream is
    /// not considered active until it has been explicitly started.
    #[tokio::test]
    async fn test_audio_stream_manager() {
        let device_manager = Arc::new(AudioDeviceManager::new().unwrap());
        let mut manager = AudioStreamManager::new(device_manager);

        let settings = AudioStreamSettings::default();
        let result = manager.create_stream("test_call".to_string(), settings).await;
        assert!(result.is_ok());

        let active_streams = manager.get_active_streams();
        assert_eq!(active_streams.len(), 0); // Stream created but not started
    }

    /// Level metering: non-silent input yields a level in (0, 1]; pure
    /// silence yields exactly 0.
    #[test]
    fn test_audio_level_calculation() {
        let samples = vec![1000, -1000, 500, -500];
        let level = calculate_audio_level(&samples);
        assert!(level > 0.0 && level <= 1.0);

        let silence = vec![0; 100];
        let level = calculate_audio_level(&silence);
        assert_eq!(level, 0.0);
    }

    /// Round-trips a sample through the G.711 μ-law codec.
    #[test]
    fn test_codec_encoding_decoding() {
        let codec = AudioCodec::PCMU;
        let original_sample = 1000i16;

        let encoded = codec.encode_sample(original_sample);
        let decoded = codec.decode_sample(encoded);

        // G.711 is lossy, so we allow some difference. Widen to i32 before
        // subtracting: for extreme samples an i16 subtraction could overflow
        // (panic in debug builds).
        let difference = (i32::from(original_sample) - i32::from(decoded)).abs();
        assert!(difference < 100); // Allow some quantization error
    }
}

/// Conceal a lost packet by synthesizing replacement audio.
///
/// Placeholder implementation: returns the input unchanged as an owned
/// vector. A production PLC would extrapolate or interpolate from
/// neighboring frames instead of passing samples through verbatim.
fn apply_packet_loss_concealment(samples: &[i16]) -> Vec<i16> {
    samples.to_owned()
}

