use anyhow::Result;
use cpal::{
    traits::{DeviceTrait, HostTrait, StreamTrait},
};
use std::sync::{Arc, Mutex};
use tracing::{error, info, debug};

/// Cross-thread audio capture/playback state.
///
/// All buffers are shared with the microphone capture thread through
/// `Arc<Mutex<...>>`; the main thread drains processed frames via
/// `get_input_data` while the capture thread fills them.
pub struct AudioIO {
    // Raw samples from the most recent capture callback
    // (overwritten each callback; kept for backwards compatibility).
    input_buffer: Arc<Mutex<Vec<f32>>>,
    // Samples queued for playback; not yet wired to a speaker stream.
    output_buffer: Arc<Mutex<Vec<f32>>>,
    sample_rate: u32,
    channels: u16,
    // Run flag polled by the capture thread's keep-alive loop.
    is_running: Arc<Mutex<bool>>,
    // Join handle of the capture thread, taken and joined in `stop`.
    input_thread: Option<std::thread::JoinHandle<()>>,
    // Resampled 8 kHz mono samples, drained in 160-sample (20 ms) frames.
    resample_buffer: Arc<Mutex<Vec<f32>>>,
    // Accumulates raw callback data until a full 20 ms frame is available,
    // so samples are never dropped between callbacks.
    frame_buffer: Arc<Mutex<Vec<f32>>>,
    // Count of 20 ms frames processed so far.
    frame_counter: Arc<Mutex<u64>>,
}

impl std::fmt::Debug for AudioIO {
    /// Compact debug view of the IO state (buffers omitted).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Recover the flag even if the mutex was poisoned by a panicking
        // capture thread: Debug formatting itself must never panic.
        let is_running = match self.is_running.lock() {
            Ok(guard) => *guard,
            Err(poisoned) => *poisoned.into_inner(),
        };
        f.debug_struct("AudioIO")
            .field("sample_rate", &self.sample_rate)
            .field("channels", &self.channels)
            .field("is_running", &is_running)
            .finish()
    }
}

impl AudioIO {
    /// Creates an idle `AudioIO`; no device is opened until `start`.
    ///
    /// `sample_rate`/`channels` are the fallback stream parameters used
    /// when no suitable device configuration can be found.
    pub fn new(sample_rate: u32, channels: u16) -> Result<Self> {
        info!("初始化音频IO，采样率: {}Hz, 声道数: {}", sample_rate, channels);

        // Small helper so each shared buffer is built the same way.
        let shared_vec = || Arc::new(Mutex::new(Vec::new()));

        Ok(Self {
            input_buffer: shared_vec(),
            output_buffer: shared_vec(),
            sample_rate,
            channels,
            is_running: Arc::new(Mutex::new(false)),
            input_thread: None,
            resample_buffer: shared_vec(),
            // Pre-allocate so the first capture callbacks don't reallocate.
            frame_buffer: Arc::new(Mutex::new(Vec::with_capacity(1024))),
            frame_counter: Arc::new(Mutex::new(0)),
        })
    }

    /// Marks the IO as running and spawns the microphone capture thread.
    ///
    /// The thread keeps running until `stop` clears the shared flag.
    pub fn start(&mut self) -> Result<()> {
        info!("启动音频IO...");

        // Raise the run flag before spawning so the thread sees it set.
        *self.is_running.lock().unwrap() = true;

        // Hand clones of every shared buffer to the capture thread.
        let input_buffer = Arc::clone(&self.input_buffer);
        let is_running = Arc::clone(&self.is_running);
        let resample_buffer = Arc::clone(&self.resample_buffer);
        let frame_buffer = Arc::clone(&self.frame_buffer);
        let frame_counter = Arc::clone(&self.frame_counter);
        let (sample_rate, channels) = (self.sample_rate, self.channels);

        self.input_thread = Some(std::thread::spawn(move || {
            info!("麦克风输入线程已启动");
            // cpal is initialised on this dedicated thread.
            if let Err(e) = Self::run_microphone_input(
                input_buffer,
                is_running,
                sample_rate,
                channels,
                resample_buffer,
                frame_buffer,
                frame_counter,
            ) {
                error!("麦克风输入线程错误: {}", e);
            }
            info!("麦克风输入线程已停止");
        }));

        info!("音频IO已启动");
        Ok(())
    }

    /// Body of the microphone capture thread: opens the default input
    /// device, accumulates callback data into 20 ms frames, resamples
    /// them to 8 kHz mono into `resample_buffer`, and keeps the stream
    /// alive until `is_running` is cleared.
    ///
    /// NOTE(review): the data callback is typed for `f32` samples, but
    /// `best_config` below never checks `sample_format()` — if the first
    /// supported config is e.g. I16, `build_input_stream` may fail at
    /// runtime. Confirm against the cpal documentation.
    fn run_microphone_input(
        input_buffer: Arc<Mutex<Vec<f32>>>,
        is_running: Arc<Mutex<bool>>,
        sample_rate: u32,
        channels: u16,
        resample_buffer: Arc<Mutex<Vec<f32>>>,
        frame_buffer: Arc<Mutex<Vec<f32>>>,
        frame_counter: Arc<Mutex<u64>>,
    ) -> Result<()> {
        let host = cpal::default_host();
        let device = host.default_input_device()
            .ok_or_else(|| anyhow::anyhow!("没有找到默认输入设备"))?;
        
        info!("使用麦克风设备: {}", device.name()?);
        
        // Log every configuration the device supports, for diagnostics.
        let supported_configs = device.supported_input_configs()?;
        info!("设备支持的配置:");
        for config in supported_configs {
            info!("  采样率范围: {:?}-{:?}, 声道数: {:?}, 格式: {:?}", 
                  config.min_sample_rate(), config.max_sample_rate(), config.channels(), config.sample_format());
        }
        
        // Pick the first supported config that can do at least 8 kHz mono.
        let mut best_config = None;
        for config in device.supported_input_configs()? {
            if config.max_sample_rate().0 >= 8000 && config.channels() >= 1 {
                best_config = Some(config);
                break;
            }
        }
        
        let config = if let Some(supported_config) = best_config {
            info!("使用设备支持的配置: 采样率 {:?}, 声道数 {:?}", 
                  supported_config.max_sample_rate(), supported_config.channels());
            cpal::StreamConfig {
                channels: supported_config.channels(),
                sample_rate: supported_config.max_sample_rate(),
                buffer_size: cpal::BufferSize::Default,
            }
        } else {
            // No suitable configuration found: fall back to the caller's
            // requested rate and channel count.
            info!("使用默认音频配置");
            cpal::StreamConfig {
                channels,
                sample_rate: cpal::SampleRate(sample_rate),
                buffer_size: cpal::BufferSize::Default,
            }
        };
        
        let input_data_fn = move |data: &[f32], _: &cpal::InputCallbackInfo| {
            // Accumulate callback data into the frame buffer instead of
            // clearing it each time, so no samples are dropped between
            // callbacks of arbitrary size.
            {
                let mut frame_buf = frame_buffer.lock().unwrap();
                frame_buf.extend_from_slice(data);
                
                // Process whenever at least one full frame has accumulated.
                let target_frame_size = (config.sample_rate.0 as f32 * 0.02) as usize * config.channels as usize; // 20 ms of data
                
                while frame_buf.len() >= target_frame_size {
                    // Pull exactly one frame off the front of the buffer.
                    let frame_data: Vec<f32> = frame_buf.drain(0..target_frame_size).collect();
                    
                    // Decide whether the frame contains actual audio.
                    let avg_volume = frame_data.iter().map(|x| x.abs()).sum::<f32>() / frame_data.len() as f32;
                    let has_audio = avg_volume > 0.005; // low threshold so quiet speech isn't lost
                    
                    if has_audio {
                        debug!("处理音频帧: {} 个样本, 平均音量: {:.4}", frame_data.len(), avg_volume);
                        
                        // Resample/convert the frame with the simplified
                        // processing path (errors drop the frame silently).
                        if let Ok(processed_data) = Self::process_audio_frame(&frame_data, config.channels, config.sample_rate.0) {
                            let mut resample_buf = resample_buffer.lock().unwrap();
                            // Append rather than replace, to keep the stream
                            // continuous for the consumer.
                            if resample_buf.len() > 320 { // buffer too large: drop the oldest data
                                resample_buf.drain(0..160);
                            }
                            resample_buf.extend_from_slice(&processed_data);
                        }
                    } else {
                        // Silence still produces a frame so the consumer's
                        // timing stays intact.
                        let mut resample_buf = resample_buffer.lock().unwrap();
                        if resample_buf.len() > 320 {
                            resample_buf.drain(0..160);
                        }
                        resample_buf.extend_from_slice(&vec![0.0; 160]); // one 160-sample silence frame
                        debug!("添加静音帧");
                    }
                    
                    // One more 20 ms frame handled.
                    {
                        let mut counter = frame_counter.lock().unwrap();
                        *counter += 1;
                    }
                }
            }
            
            // Also mirror the raw callback data into the legacy buffer
            // (kept for backwards compatibility).
            {
                let mut buffer = input_buffer.lock().unwrap();
                buffer.clear();
                buffer.extend_from_slice(data);
            }
        };
        
        let input_stream = device.build_input_stream(
            &config,
            input_data_fn,
            |err| error!("麦克风输入错误: {}", err),
            None,
        )?;
        
        input_stream.play()?;
        
        // Keep the stream (and this thread) alive until `stop` is called.
        while *is_running.lock().unwrap() {
            std::thread::sleep(std::time::Duration::from_millis(100));
        }
        
        info!("麦克风输入已停止");
        Ok(())
    }

    /// Converts one captured frame into a 160-sample, 8 kHz mono frame.
    ///
    /// Stereo input is mixed down by averaging channel pairs, then the
    /// signal is nearest-neighbour resampled to 8 kHz with a fixed 2.5x
    /// gain and a hard clip at +/-0.95, followed by the basic in-place
    /// conditioning pass. Deliberately simple to avoid over-processing.
    fn process_audio_frame(
        input: &[f32], 
        input_channels: u16, 
        input_sample_rate: u32
    ) -> Result<Vec<f32>> {
        // Down-mix to mono when the capture device delivered stereo;
        // a trailing unpaired sample is passed through as-is.
        let mono: Vec<f32> = if input_channels == 2 {
            input
                .chunks(2)
                .map(|pair| {
                    if pair.len() == 2 {
                        (pair[0] + pair[1]) * 0.5
                    } else {
                        pair[0]
                    }
                })
                .collect()
        } else {
            input.to_vec()
        };

        // Nothing captured: emit a full frame of silence instead.
        if mono.is_empty() {
            return Ok(vec![0.0; 160]);
        }

        // Nearest-neighbour resample to exactly 160 samples @ 8 kHz.
        let step = input_sample_rate as f32 / 8000.0;
        let mut frame: Vec<f32> = (0..160)
            .map(|i| match mono.get((i as f32 * step) as usize) {
                // Boost (2.5x) and clip so the far end hears a usable
                // level without wrap-around; nearest-neighbour keeps the
                // computation cheap and avoids interpolation noise.
                Some(&s) => (s * 2.5).max(-0.95).min(0.95),
                None => 0.0,
            })
            .collect();

        // Final light conditioning pass.
        Self::apply_basic_processing(&mut frame);

        Ok(frame)
    }
    
    /// Basic in-place conditioning for an outgoing 8 kHz frame:
    /// DC-offset removal, a soft noise gate, a fixed 1.8x make-up gain,
    /// and hard limiting to keep samples inside +/-0.95.
    fn apply_basic_processing(data: &mut [f32]) {
        // Guard the mean computation against an empty frame (consistent
        // with `remove_dc_offset`), which would otherwise divide by zero.
        if data.is_empty() {
            return;
        }

        // 1. Remove DC offset so the gate and gain operate around zero.
        let mean = data.iter().sum::<f32>() / data.len() as f32;
        for sample in data.iter_mut() {
            *sample -= mean;
        }

        // Per-sample chain, applied in order:
        // 2. Noise gate: attenuate (don't zero) very quiet samples so
        //    low-level speech survives (threshold 0.005, attenuation 0.3).
        // 3. Fixed 1.8x make-up gain.
        // 4. Hard limit at +/-0.95 to avoid clipping downstream.
        for sample in data.iter_mut() {
            if sample.abs() < 0.005 {
                *sample *= 0.3;
            }
            *sample = (*sample * 1.8).clamp(-0.95, 0.95);
        }
    }

    /// Full-quality resampler: converts `input` (arbitrary rate/channels)
    /// into a fixed 160-sample frame (20 ms @ 8 kHz) using pre-filtering
    /// and cubic interpolation (modelled on the temp_ezk project).
    ///
    /// NOTE(review): appears to have no caller in this file —
    /// `process_audio_frame` is used instead — so the compiler will warn
    /// about dead code unless it is called elsewhere.
    fn resample_audio(
        input: &[f32], 
        input_channels: u16, 
        input_sample_rate: u32, 
        output_channels: u16, 
        output_sample_rate: u32
    ) -> Result<Vec<f32>> {
        // Formats already match: only the preprocessing chain is applied.
        if input_channels == output_channels && input_sample_rate == output_sample_rate {
            return Ok(Self::apply_audio_preprocessing(input));
        }
        
        // Stereo-to-mono: average each L/R pair before resampling.
        let mono_input = if input_channels == 2 && output_channels == 1 {
            let mut mono = Vec::new();
            for i in (0..input.len()).step_by(2) {
                if i + 1 < input.len() {
                    // Equal-weight mix of the two channels.
                    mono.push((input[i] + input[i + 1]) * 0.5);
                } else {
                    mono.push(input[i]);
                }
            }
            mono
        } else {
            input.to_vec()
        };
        
        // Empty input: return one frame of silence.
        if mono_input.is_empty() {
            return Ok(vec![0.0; 160]);
        }
        
        // Pre-filter before resampling (high-pass, compression, DC removal).
        let preprocessed_input = Self::apply_audio_preprocessing(&mono_input);
        
        // Step between consecutive source positions per output sample.
        let ratio = input_sample_rate as f32 / output_sample_rate as f32;
        
        // Fixed 160-sample output (20 ms @ 8 kHz).
        let target_samples = 160;
        let mut output = Vec::with_capacity(target_samples);
        
        // Cubic interpolation reduces aliasing noise versus nearest/linear.
        for i in 0..target_samples {
            let src_index = i as f32 * ratio;
            let index = src_index as usize;
            let frac = src_index - index as f32;
            
            if index < preprocessed_input.len() {
                let sample = if index + 1 < preprocessed_input.len() && frac > 0.0 {
                    // Interpolate only when a right-hand neighbour exists
                    // and the position is truly fractional.
                    Self::cubic_interpolate(&preprocessed_input, index, frac)
                } else {
                    preprocessed_input[index]
                };
                
                // Clip just inside full scale to leave headroom.
                output.push(sample.max(-0.95).min(0.95));
            } else {
                output.push(0.0);
            }
        }
        
        // Low-pass + volume normalisation on the way out.
        Ok(Self::apply_output_postprocessing(&output))
    }
    
    /// Pre-conditions a mono capture buffer before resampling:
    /// 300 Hz high-pass, dynamic-range compression, then DC removal,
    /// in that order.
    fn apply_audio_preprocessing(input: &[f32]) -> Vec<f32> {
        let mut buf = input.to_vec();
        Self::apply_highpass_filter(&mut buf, 300.0, 8000.0); // 1. drop low-frequency rumble (< 300 Hz)
        Self::apply_dynamic_range_compression(&mut buf); // 2. tame peaks for speech clarity
        Self::remove_dc_offset(&mut buf); // 3. re-centre the signal around zero
        buf
    }
    
    /// Cubic interpolation between `data[index]` and `data[index + 1]`
    /// at fractional position `frac` in [0, 1); higher quality than
    /// linear interpolation. Missing neighbours at either edge of the
    /// slice are substituted with the centre sample.
    fn cubic_interpolate(data: &[f32], index: usize, frac: f32) -> f32 {
        let len = data.len();

        // Four-point stencil around the interpolation interval, with
        // edge samples clamped to `data[index]`.
        let y1 = data[index];
        let y0 = if index == 0 { y1 } else { data[index - 1] };
        let y2 = if index + 1 < len { data[index + 1] } else { y1 };
        let y3 = if index + 2 < len { data[index + 2] } else { y1 };

        // Cubic polynomial coefficients.
        let a0 = y3 - y2 - y0 + y1;
        let a1 = y0 - y1 - a0;
        let a2 = y2 - y0;
        let a3 = y1;

        // Evaluated term-by-term (kept identical to preserve float results).
        a0 * frac * frac * frac + a1 * frac * frac + a2 * frac + a3
    }
    
    /// Single-pole high-pass filter applied in place.
    /// `data[0]` seeds the filter state and is left untouched; slices
    /// shorter than two samples are returned unchanged.
    fn apply_highpass_filter(data: &mut [f32], cutoff_freq: f32, sample_rate: f32) {
        if data.len() < 2 {
            return; // nothing to filter
        }

        // First-order RC coefficient for the requested cutoff.
        let rc = 1.0 / (2.0 * std::f32::consts::PI * cutoff_freq);
        let dt = 1.0 / sample_rate;
        let alpha = rc / (rc + dt);

        let mut last_in = data[0];
        let mut last_out = data[0];
        for sample in data.iter_mut().skip(1) {
            let x = *sample;
            let y = alpha * (last_out + x - last_in);
            *sample = y;
            last_in = x;
            last_out = y;
        }
    }
    
    /// Hard-knee compressor: samples whose magnitude exceeds 0.7 have
    /// the excess reduced by a 4:1 ratio, preserving the sample's sign.
    fn apply_dynamic_range_compression(data: &mut [f32]) {
        const THRESHOLD: f32 = 0.7;
        const RATIO: f32 = 4.0;

        for sample in data.iter_mut() {
            let magnitude = sample.abs();
            if magnitude > THRESHOLD {
                // Compress only the part above the knee.
                let compressed = THRESHOLD + (magnitude - THRESHOLD) / RATIO;
                *sample = compressed.copysign(*sample);
            }
        }
    }
    
    /// Subtracts the arithmetic mean from every sample, re-centring the
    /// signal around zero. Empty slices are left untouched.
    fn remove_dc_offset(data: &mut [f32]) {
        if data.is_empty() {
            return;
        }

        let mean = data.iter().sum::<f32>() / data.len() as f32;
        data.iter_mut().for_each(|s| *s -= mean);
    }
    
    /// Final shaping of a resampled frame: a gentle 3.4 kHz low-pass to
    /// strip high-frequency resampling artefacts, then volume
    /// normalisation.
    fn apply_output_postprocessing(input: &[f32]) -> Vec<f32> {
        let mut shaped = input.to_vec();
        Self::apply_lowpass_filter(&mut shaped, 3400.0, 8000.0);
        Self::normalize_volume(&mut shaped);
        shaped
    }
    
    /// Single-pole low-pass filter applied in place.
    /// `data[0]` seeds the filter state and is left untouched; slices
    /// shorter than two samples are returned unchanged.
    fn apply_lowpass_filter(data: &mut [f32], cutoff_freq: f32, sample_rate: f32) {
        if data.len() < 2 {
            return; // nothing to filter
        }

        // First-order RC coefficient for the requested cutoff.
        let rc = 1.0 / (2.0 * std::f32::consts::PI * cutoff_freq);
        let dt = 1.0 / sample_rate;
        let alpha = dt / (rc + dt);

        let mut state = data[0];
        for sample in data.iter_mut().skip(1) {
            state = state + alpha * (*sample - state);
            *sample = state;
        }
    }
    
    /// Peak-based volume normalisation: very quiet frames are boosted
    /// toward a 0.3 peak, very loud frames attenuated toward 0.8;
    /// mid-range frames pass through unchanged.
    fn normalize_volume(data: &mut [f32]) {
        if data.is_empty() {
            return;
        }

        let peak = data.iter().map(|x| x.abs()).fold(0.0, f32::max);
        let gain = if peak > 0.0 && peak < 0.1 {
            Some(0.3 / peak) // too quiet: boost
        } else if peak > 0.9 {
            Some(0.8 / peak) // too loud: attenuate
        } else {
            None // acceptable level: leave alone
        };

        if let Some(g) = gain {
            for sample in data.iter_mut() {
                *sample *= g;
            }
        }
    }
  

    /// Signals the capture thread to exit and blocks until it has joined.
    pub fn stop(&mut self) {
        info!("停止音频IO...");

        // Clear the run flag first so the capture loop can observe it.
        *self.is_running.lock().unwrap() = false;

        // take() detaches the handle so we can join it by value; a join
        // error only means the thread panicked, which we just log.
        if let Some(handle) = self.input_thread.take() {
            if let Err(e) = handle.join() {
                error!("等待麦克风输入线程结束时出错: {:?}", e);
            }
        }

        info!("音频IO已停止");
    }

    /// Pops one 20 ms frame (160 samples @ 8 kHz) from the resample
    /// queue, or `None` when a full frame has not accumulated yet —
    /// partial frames are never returned, to avoid choppy audio.
    pub fn get_input_data(&mut self) -> Option<Vec<f32>> {
        let mut queue = self.resample_buffer.lock().unwrap();

        if queue.len() < 160 {
            return None;
        }

        // Remove exactly one frame; the remainder stays queued.
        let frame: Vec<f32> = queue.drain(0..160).collect();
        debug!("提取音频帧: {} 个样本，剩余: {} 个样本", frame.len(), queue.len());
        Some(frame)
    }

    /// Replaces the playback buffer with `data`.
    ///
    /// Actual speaker output is not implemented yet; the samples are
    /// only retained in `output_buffer`.
    pub fn send_output_data(&self, data: Vec<f32>) -> Result<()> {
        {
            let mut playback = self.output_buffer.lock().unwrap();
            playback.clear();
            playback.extend_from_slice(&data);
        }

        debug!("接收到音频数据用于播放: {} 个样本", data.len());
        Ok(())
    }
}

// Audio sample-format conversion helpers (kept for future use).
#[allow(dead_code)]
pub fn f32_to_u8(samples: &[f32]) -> Vec<u8> {
    // Map f32 in [-1.0, 1.0] (clamped) onto the full u8 range [0, 255].
    samples
        .iter()
        .map(|&s| ((s.max(-1.0).min(1.0) + 1.0) * 127.5) as u8)
        .collect()
}

#[allow(dead_code)]
pub fn u8_to_f32(samples: &[u8]) -> Vec<f32> {
    // Inverse of `f32_to_u8`: map u8 [0, 255] back onto f32 [-1.0, 1.0].
    samples.iter().map(|&b| (b as f32 / 127.5) - 1.0).collect()
}

// PCMU (mu-law) encoder, tuned to keep speech levels audible.
pub fn f32_to_pcmu(samples: &[f32]) -> Vec<u8> {
    samples
        .iter()
        .map(|&s| {
            // Clip just inside +/-1.0, then scale to 16-bit PCM with a
            // little headroom (32000 rather than 32767) before encoding
            // with the standard mu-law algorithm.
            let pcm = (s.max(-0.99).min(0.99) * 32000.0) as i16;
            pcm_to_mulaw(pcm)
        })
        .collect()
}

// Audio-quality probe (kept for future use): returns
// (average |sample|, peak |sample|, whether the frame looks non-silent).
#[allow(dead_code)]
pub fn check_audio_quality(samples: &[f32]) -> (f32, f32, bool) {
    if samples.is_empty() {
        return (0.0, 0.0, false);
    }

    let (mut sum, mut peak) = (0.0f32, 0.0f32);
    for &s in samples {
        let magnitude = s.abs();
        sum += magnitude;
        peak = peak.max(magnitude);
    }

    let avg = sum / samples.len() as f32;
    // Low threshold so quiet speech still counts as audio.
    (avg, peak, avg > 0.001)
}

// PCMU (mu-law) decoder: bytes -> f32 samples in [-1.0, 1.0], with a
// simple noise gate and a fixed receive-side gain.
pub fn pcmu_to_f32(samples: &[u8]) -> Vec<f32> {
    samples
        .iter()
        .map(|&byte| {
            let decoded = mulaw_to_pcm(byte) as f32 / 32767.0;
            if decoded.abs() < 0.003 {
                0.0 // below the noise gate: treat as silence
            } else {
                decoded * 2.0 // 2x receive gain so playback is audible
            }
        })
        .collect()
}

impl AudioIO {
    /// Post-processing after mu-law decode, to improve received audio:
    /// noise gate, speech-band boost, then RMS-based volume balancing.
    fn apply_decode_postprocessing(input: &[f32]) -> Vec<f32> {
        let mut out = input.to_vec();
        Self::apply_decode_noise_reduction(&mut out); // 1. strip decode hiss
        Self::apply_voice_enhancement(&mut out); // 2. lift the speech range
        Self::apply_volume_balance(&mut out); // 3. even out loudness
        out
    }
    
    /// Gate for decoder hiss: samples whose magnitude is below 0.02 are
    /// attenuated to 10% (not zeroed, to avoid hard gating artefacts).
    fn apply_decode_noise_reduction(data: &mut [f32]) {
        const GATE: f32 = 0.02;
        const ATTENUATION: f32 = 0.1;

        for sample in data.iter_mut() {
            if sample.abs() < GATE {
                *sample *= ATTENUATION;
            }
        }
    }
    
    /// Mild 1.2x boost for mid-level samples — strictly between 0.1 and
    /// 0.8 in magnitude — which are typically speech; very quiet and
    /// already-loud samples are left alone.
    fn apply_voice_enhancement(data: &mut [f32]) {
        for sample in data.iter_mut() {
            let level = sample.abs();
            // Bounds are exclusive on both ends.
            if level > 0.1 && level < 0.8 {
                *sample *= 1.2;
            }
        }
    }
    
    /// RMS-based loudness balancing: frames far below the 0.3 RMS target
    /// (under half of it) are boosted with a gain capped at 3x and then
    /// clipped to +/-0.95; all other frames pass through unchanged.
    fn apply_volume_balance(data: &mut [f32]) {
        if data.is_empty() {
            return;
        }

        let rms = (data.iter().map(|x| x * x).sum::<f32>() / data.len() as f32).sqrt();
        let target_rms = 0.3;

        // Only boost genuinely quiet, non-silent frames.
        if !(rms > 0.0 && rms < target_rms * 0.5) {
            return;
        }

        let gain = (target_rms / rms).min(3.0); // cap the boost at 3x
        for sample in data.iter_mut() {
            *sample = (*sample * gain).max(-0.95).min(0.95); // prevent clipping
        }
    }
}

// mu-law encoding lookup table (deprecated: the algorithmic
// pcm_to_mulaw / mulaw_to_pcm below are used instead; retained for
// reference only, hence the leading-underscore name).
const _MULAW_TABLE: [u8; 512] = [
    0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x05, 0x05, 0x06, 0x06, 0x07, 0x07,
    0x08, 0x08, 0x09, 0x09, 0x0A, 0x0A, 0x0B, 0x0B, 0x0C, 0x0C, 0x0D, 0x0D, 0x0E, 0x0E, 0x0F, 0x0F,
    0x10, 0x10, 0x11, 0x11, 0x12, 0x12, 0x13, 0x13, 0x14, 0x14, 0x15, 0x15, 0x16, 0x16, 0x17, 0x17,
    0x18, 0x18, 0x19, 0x19, 0x1A, 0x1A, 0x1B, 0x1B, 0x1C, 0x1C, 0x1D, 0x1D, 0x1E, 0x1E, 0x1F, 0x1F,
    0x20, 0x20, 0x21, 0x21, 0x22, 0x22, 0x23, 0x23, 0x24, 0x24, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27,
    0x28, 0x28, 0x29, 0x29, 0x2A, 0x2A, 0x2B, 0x2B, 0x2C, 0x2C, 0x2D, 0x2D, 0x2E, 0x2E, 0x2F, 0x2F,
    0x30, 0x30, 0x31, 0x31, 0x32, 0x32, 0x33, 0x33, 0x34, 0x34, 0x35, 0x35, 0x36, 0x36, 0x37, 0x37,
    0x38, 0x38, 0x39, 0x39, 0x3A, 0x3A, 0x3B, 0x3B, 0x3C, 0x3C, 0x3D, 0x3D, 0x3E, 0x3E, 0x3F, 0x3F,
    0x40, 0x40, 0x41, 0x41, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x45, 0x45, 0x46, 0x46, 0x47, 0x47,
    0x48, 0x48, 0x49, 0x49, 0x4A, 0x4A, 0x4B, 0x4B, 0x4C, 0x4C, 0x4D, 0x4D, 0x4E, 0x4E, 0x4F, 0x4F,
    0x50, 0x50, 0x51, 0x51, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x57, 0x57,
    0x58, 0x58, 0x59, 0x59, 0x5A, 0x5A, 0x5B, 0x5B, 0x5C, 0x5C, 0x5D, 0x5D, 0x5E, 0x5E, 0x5F, 0x5F,
    0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x64, 0x64, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67,
    0x68, 0x68, 0x69, 0x69, 0x6A, 0x6A, 0x6B, 0x6B, 0x6C, 0x6C, 0x6D, 0x6D, 0x6E, 0x6E, 0x6F, 0x6F,
    0x70, 0x70, 0x71, 0x71, 0x72, 0x72, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x76, 0x76, 0x77, 0x77,
    0x78, 0x78, 0x79, 0x79, 0x7A, 0x7A, 0x7B, 0x7B, 0x7C, 0x7C, 0x7D, 0x7D, 0x7E, 0x7E, 0x7F, 0x7F,
    0x80, 0x80, 0x81, 0x81, 0x82, 0x82, 0x83, 0x83, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x87, 0x87,
    0x88, 0x88, 0x89, 0x89, 0x8A, 0x8A, 0x8B, 0x8B, 0x8C, 0x8C, 0x8D, 0x8D, 0x8E, 0x8E, 0x8F, 0x8F,
    0x90, 0x90, 0x91, 0x91, 0x92, 0x92, 0x93, 0x93, 0x94, 0x94, 0x95, 0x95, 0x96, 0x96, 0x97, 0x97,
    0x98, 0x98, 0x99, 0x99, 0x9A, 0x9A, 0x9B, 0x9B, 0x9C, 0x9C, 0x9D, 0x9D, 0x9E, 0x9E, 0x9F, 0x9F,
    0xA0, 0xA0, 0xA1, 0xA1, 0xA2, 0xA2, 0xA3, 0xA3, 0xA4, 0xA4, 0xA5, 0xA5, 0xA6, 0xA6, 0xA7, 0xA7,
    0xA8, 0xA8, 0xA9, 0xA9, 0xAA, 0xAA, 0xAB, 0xAB, 0xAC, 0xAC, 0xAD, 0xAD, 0xAE, 0xAE, 0xAF, 0xAF,
    0xB0, 0xB0, 0xB1, 0xB1, 0xB2, 0xB2, 0xB3, 0xB3, 0xB4, 0xB4, 0xB5, 0xB5, 0xB6, 0xB6, 0xB7, 0xB7,
    0xB8, 0xB8, 0xB9, 0xB9, 0xBA, 0xBA, 0xBB, 0xBB, 0xBC, 0xBC, 0xBD, 0xBD, 0xBE, 0xBE, 0xBF, 0xBF,
    0xC0, 0xC0, 0xC1, 0xC1, 0xC2, 0xC2, 0xC3, 0xC3, 0xC4, 0xC4, 0xC5, 0xC5, 0xC6, 0xC6, 0xC7, 0xC7,
    0xC8, 0xC8, 0xC9, 0xC9, 0xCA, 0xCA, 0xCB, 0xCB, 0xCC, 0xCC, 0xCD, 0xCD, 0xCE, 0xCE, 0xCF, 0xCF,
    0xD0, 0xD0, 0xD1, 0xD1, 0xD2, 0xD2, 0xD3, 0xD3, 0xD4, 0xD4, 0xD5, 0xD5, 0xD6, 0xD6, 0xD7, 0xD7,
    0xD8, 0xD8, 0xD9, 0xD9, 0xDA, 0xDA, 0xDB, 0xDB, 0xDC, 0xDC, 0xDD, 0xDD, 0xDE, 0xDE, 0xDF, 0xDF,
    0xE0, 0xE0, 0xE1, 0xE1, 0xE2, 0xE2, 0xE3, 0xE3, 0xE4, 0xE4, 0xE5, 0xE5, 0xE6, 0xE6, 0xE7, 0xE7,
    0xE8, 0xE8, 0xE9, 0xE9, 0xEA, 0xEA, 0xEB, 0xEB, 0xEC, 0xEC, 0xED, 0xED, 0xEE, 0xEE, 0xEF, 0xEF,
    0xF0, 0xF0, 0xF1, 0xF1, 0xF2, 0xF2, 0xF3, 0xF3, 0xF4, 0xF4, 0xF5, 0xF5, 0xF6, 0xF6, 0xF7, 0xF7,
    0xF8, 0xF8, 0xF9, 0xF9, 0xFA, 0xFA, 0xFB, 0xFB, 0xFC, 0xFC, 0xFD, 0xFD, 0xFE, 0xFE, 0xFF, 0xFF,
];

// Standard ITU-style mu-law encoder (ported from the temp_ezk project).
//
// Sign convention used throughout this file: the high bit (0x80) is SET
// for non-negative PCM and CLEAR for negative PCM, matching the
// companion decoder `mulaw_to_pcm`.
fn pcm_to_mulaw(x: i16) -> u8 {
    // Quantise the magnitude to 13 bits and add the mu-law bias of 33.
    let biased = if x < 0 {
        ((!x) >> 2) + 33
    } else {
        (x >> 2) + 33
    };
    let magnitude = biased.min(0x1FFF);

    // Locate the segment: one plus the bit position of the highest set
    // bit of `magnitude >> 6`.
    let mut segment = 1;
    let mut probe = magnitude >> 6;
    while probe != 0 {
        segment += 1;
        probe >>= 1;
    }

    // Pack segment (high nibble) and inverted mantissa (low nibble).
    let high_nibble = 0x8 - segment;
    let low_nibble = 0xF - ((magnitude >> segment) & 0xF);
    let mut encoded = (high_nibble << 4) | low_nibble;

    // Non-negative input sets the sign bit (see convention above).
    if x >= 0 {
        encoded |= 0x0080;
    }

    encoded as u8
}

// Standard ITU-style mu-law decoder (ported from the temp_ezk project);
// exact inverse of the sign convention used by `pcm_to_mulaw`.
fn mulaw_to_pcm(y: u8) -> i16 {
    let code = y as i16;
    // High bit set means non-negative (see the encoder above).
    let sign: i16 = if code < 0x0080 { -1 } else { 1 };

    // Undo the bitwise inversion, then split exponent and mantissa.
    let complement = !code;
    let exponent = (complement >> 4) & 0x7;
    let mantissa = complement & 0xF;
    let segment = exponent + 1;
    let step = 4 << segment;

    // Reconstruct the magnitude and remove the 4 * 33 encoding bias.
    sign * ((0x0080 << exponent) + step * mantissa + step / 2 - 4 * 33)
}