use anyhow::{Context, Result};
use cpal::{
    traits::{DeviceTrait, HostTrait, StreamTrait},
    BufferSize, SampleFormat, StreamConfig,
};
use std::path::Path;
use std::sync::mpsc;
use std::thread;
use vosk::{Model, Recognizer};

/// Entry point: loads a Vosk speech-recognition model (Chinese, `vosk-model-cn-0.22`)
/// and constructs a recognizer configured for 16 kHz audio.
///
/// The full live microphone-capture / transcription pipeline below is currently
/// disabled (block-commented out); as written, only model and recognizer setup
/// actually executes before returning.
///
/// # Errors
/// Returns an error (with `anyhow` context) if the model directory cannot be
/// loaded or the recognizer cannot be created.
fn main() -> Result<()> {
    // Initialize the Vosk model and recognizer.
    let model_path = "vosk-model-cn-0.22"; // Replace with the path to your model.
    let model = Model::new(model_path).context("Failed to load Vosk model")?;
    // 16000.0 must match the sample rate of the audio fed into the recognizer
    // (the disabled stream below also requests a 16 kHz input config).
    let recognizer = Recognizer::new(&model, 16000.0).context("Failed to create recognizer")?;

/*
    // NOTE(review): the entire pipeline below is disabled. While it stays
    // disabled, `recognizer` above is never consumed, so the compiler will
    // emit an unused-variable warning. Kept verbatim for re-enabling.

    // Set up audio input.
    let host = cpal::default_host();
    let input_device = host
        .default_input_device()
        .context("Failed to get default input device")?;

    // Print device information.
    println!("Using input device: {}", input_device.name()?);

    // Find a suitable input configuration: mono, f32 samples, 16 kHz.
    let config = input_device
        .supported_input_configs()?
        .find(|c| c.sample_format() == SampleFormat::F32 && c.channels() == 1)
        .context("No suitable input config found")?
        .with_sample_rate(cpal::SampleRate(16000))
        .config();

    println!("Audio input config: {:?}", config);

    // Channels: one carries raw audio to the recognition thread, the other
    // carries finalized recognition results back to the main thread.
    let (audio_tx, audio_rx) = mpsc::channel();
    let (result_tx, result_rx) = mpsc::channel();

    // Spawn the recognition thread; it owns the recognizer and drains audio_rx.
    thread::spawn(move || {
        let mut recognizer = recognizer;
        for audio_data in audio_rx {
            // NOTE(review): in the vosk crate, `accept_waveform` expects i16
            // samples (&[i16]), not a byte buffer — verify this compiles and
            // behaves as intended before re-enabling.
            if recognizer.accept_waveform(&audio_data) {
                let result = recognizer.result();
                result_tx.send(result.text).unwrap();
            } else {
                let partial = recognizer.partial_result();
                if !partial.partial.is_empty() {
                    println!("Partial: {}", partial.partial);
                }
            }
        }
    });

    // Start the audio stream; the callback converts f32 samples to i16
    // little-endian bytes and forwards them to the recognition thread.
    let stream = input_device.build_input_stream(
        &config,
        move |data: &[f32], _: &_| {
            let audio_data: Vec<u8> = data
                .iter()
                .flat_map(|sample| {
                    let i16_sample = (sample * i16::MAX as f32) as i16;
                    i16_sample.to_le_bytes()
                })
                .collect();
            audio_tx.send(audio_data).unwrap();
        },
        |err| eprintln!("Audio stream error: {:?}", err),
        None,
    )?;

    stream.play()?;

    println!("Listening... Press Ctrl+C to stop.");

    // Main thread: print each finalized recognition result as it arrives.
    for recognized_text in result_rx {
        if !recognized_text.is_empty() {
            println!("Recognized: {}", recognized_text);
        }
    }
*/
    Ok(())
}