#!/usr/bin/env python3

import sys
import os 
# NOTE(review): setting PYTHONUTF8 here has no effect on the CURRENT
# interpreter — CPython reads it at startup. It only influences child
# processes inheriting this environment. TODO confirm intent.
os.environ["PYTHONUTF8"] = "1"
import argparse
from pathlib import Path
from typing import Dict, List, Tuple, Optional
import logging
from collections import defaultdict
import time
import numpy as np
import sherpa_onnx
from logging.handlers import RotatingFileHandler
try:
    import sounddevice as sd
except ImportError:
    sys.exit("Please install sounddevice: pip install sounddevice")

try:
    import soundfile as sf
except ImportError:
    sys.exit("Please install soundfile: pip install soundfile")

# Global default configuration. Values may be overridden per-instance via
# the `config` argument of ASRProcessor; all paths are relative to the CWD.
DEFAULT_CONFIG = {
    "sample_rate": 16000,
    "vad_threshold": 0.6,
    "min_silence_duration": 0.25,
    "min_speech_duration": 0.25,
    "models_dir": "./models",
    "speaker_file": "./speakers/speakers.txt",
    "num_threads": 6,
    "vad_buffer_seconds": 100,
    "provider": "cpu",
    "fire_red_asr_model_encoder": "./models/sherpa-onnx-fire-red-asr-large-zh_en-2025-02-16/encoder.int8.onnx",
    "fire_red_asr_model_decoder": "./models/sherpa-onnx-fire-red-asr-large-zh_en-2025-02-16/decoder.int8.onnx",
    "fire_red_asr_model_tokens": "./models/sherpa-onnx-fire-red-asr-large-zh_en-2025-02-16/tokens.txt",
    "paraformer_model": "./models/sherpa-onnx-paraformer-zh-small-2024-03-09/model.int8.onnx",
    # BUGFIX: was "/models/..." — the leading "." was missing, so this
    # pointed at the filesystem root instead of ./models like every
    # other model path in this dict.
    "paraformer_tokens": "./models/sherpa-onnx-paraformer-zh-small-2024-03-09/tokens.txt",
    "vad_model": "./models/silero_vad.onnx",
    "speaker_embbeding_model": "./models/wespeaker_zh_cnceleb_resnet34.onnx",
    "punctuation_model": "./models/sherpa-onnx-punct-ct-transformer-zh-en-vocab272727-2024-04-12/model.onnx",
}

class ASRProcessor:
    """Speech-processing core wrapping ASR, speaker identification and VAD.

    Pipeline: the VAD splits the incoming audio stream into speech
    segments; each segment is matched against the enrolled speaker
    database, transcribed by the offline recognizer, punctuated, and
    logged.
    """

    def __init__(self, config: Optional[Dict] = None):
        """Merge *config* over DEFAULT_CONFIG and build all components.

        Args:
            config: optional overrides for DEFAULT_CONFIG keys.

        Raises:
            FileNotFoundError: if a required model file does not exist.
            RuntimeError: if the speaker database cannot be built.
        """
        self.config = DEFAULT_CONFIG | (config or {})
        self._validate_paths()

        # Build components. Order matters: the speaker database needs the
        # embedding extractor's output dimension.
        self.recognizer = self._init_recognizer()
        self.extractor = self._init_speaker_embedding()
        self.punctuator = self._init_punctuator()
        self.speaker_db = self._init_speaker_database()
        self.vad = self._init_vad()

        logging.info("ASR Processor initialized successfully")

    def _validate_paths(self):
        """Check that every model file the configuration will use exists.

        BUGFIX: the original always validated the FireRedASR files even
        though _init_recognizer prefers the Paraformer model when it is
        configured; now only the recognizer branch that will actually be
        built is validated.
        """
        required_files = {
            "vad_model": self.config["vad_model"],
            "speaker_model": self.config["speaker_embbeding_model"],
            "punctuation_model": self.config["punctuation_model"],
        }
        if self.config["paraformer_model"]:
            required_files["asr_model"] = self.config["paraformer_model"]
            required_files["asr_tokens"] = self.config["paraformer_tokens"]
        else:
            required_files["asr_encoder"] = self.config["fire_red_asr_model_encoder"]
            required_files["asr_decoder"] = self.config["fire_red_asr_model_decoder"]
            required_files["asr_tokens"] = self.config["fire_red_asr_model_tokens"]

        for key, path in required_files.items():
            if not Path(path).exists():
                raise FileNotFoundError(f"Model file not found: {path}")

    def _init_recognizer(self) -> "sherpa_onnx.OfflineRecognizer":
        """Create the offline recognizer (Paraformer preferred, FireRedASR fallback).

        BUGFIX: the original wrote f"{self.config["key"]}" with the same
        quote character nested inside the f-string — a syntax error before
        Python 3.12. The config values are already strings, so the
        f-string wrappers are dropped entirely. Also replaced a stray
        print() with logging and passed `provider` to the Paraformer
        branch for consistency with the FireRedASR branch.
        """
        logging.debug("paraformer model: %s", self.config["paraformer_model"])
        if self.config["paraformer_model"]:
            recognizer = sherpa_onnx.OfflineRecognizer.from_paraformer(
                paraformer=self.config["paraformer_model"],
                tokens=self.config["paraformer_tokens"],
                num_threads=self.config["num_threads"],
                provider=self.config["provider"],
            )
        else:
            recognizer = sherpa_onnx.OfflineRecognizer.from_fire_red_asr(
                encoder=self.config["fire_red_asr_model_encoder"],
                decoder=self.config["fire_red_asr_model_decoder"],
                tokens=self.config["fire_red_asr_model_tokens"],
                num_threads=self.config["num_threads"],
                provider=self.config["provider"],
                debug=True,
            )
        return recognizer

    def _init_speaker_embedding(self) -> "sherpa_onnx.SpeakerEmbeddingExtractor":
        """Create the speaker-embedding extractor."""
        config = sherpa_onnx.SpeakerEmbeddingExtractorConfig(
            model=self.config["speaker_embbeding_model"],
            num_threads=self.config["num_threads"],
            provider=self.config["provider"],
        )
        return sherpa_onnx.SpeakerEmbeddingExtractor(config)

    def _init_punctuator(self) -> "sherpa_onnx.OfflinePunctuation":
        """Create the punctuation restoration model."""
        config = sherpa_onnx.OfflinePunctuationConfig(
            model=sherpa_onnx.OfflinePunctuationModelConfig(
                ct_transformer=self.config["punctuation_model"]
            )
        )
        return sherpa_onnx.OfflinePunctuation(config)

    def _init_speaker_database(self) -> "sherpa_onnx.SpeakerEmbeddingManager":
        """Register every enrolled speaker's mean embedding in a manager.

        Raises:
            RuntimeError: if a speaker cannot be registered.
        """
        manager = sherpa_onnx.SpeakerEmbeddingManager(self.extractor.dim)

        speakers = self._load_speaker_list()
        for name, files in speakers.items():
            embedding = self._compute_mean_embedding(files)
            if not manager.add(name, embedding):
                raise RuntimeError(f"Failed to register speaker: {name}")
        return manager

    def _load_speaker_list(self) -> Dict[str, List[str]]:
        """Load the speaker enrollment list.

        File format: one "<name> <audio-path>" pair per line; blank lines
        and lines starting with '#' are skipped. A name may appear on
        several lines to enroll multiple recordings.

        Raises:
            RuntimeError: wrapping any error while reading the file.
        """
        speakers = defaultdict(list)
        try:
            with open(self.config["speaker_file"], encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if not line or line.startswith("#"):
                        continue
                    if len(fields := line.split(maxsplit=1)) == 2:
                        speakers[fields[0]].append(fields[1])
        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise RuntimeError(f"Error loading speaker file: {e}") from e
        return speakers

    def _compute_mean_embedding(self, files: List[str]) -> np.ndarray:
        """Return the mean embedding over *files* (zeros if none succeed)."""
        embeddings = []
        for f in files:
            try:
                samples, sr = self._load_audio(f)
                stream = self.extractor.create_stream()
                stream.accept_waveform(sr, samples)
                stream.input_finished()
                embeddings.append(np.array(self.extractor.compute(stream)))
            except Exception as e:
                # Best-effort enrollment: skip unreadable files, keep going.
                logging.warning(f"Error processing {f}: {e}")
        return np.mean(embeddings, axis=0) if embeddings else np.zeros(self.extractor.dim)

    def _init_vad(self) -> "sherpa_onnx.VoiceActivityDetector":
        """Create the silero-based voice activity detector."""
        vad_config = sherpa_onnx.VadModelConfig()
        vad_config.silero_vad.model = self.config["vad_model"]
        vad_config.silero_vad.min_silence_duration = self.config["min_silence_duration"]
        vad_config.silero_vad.min_speech_duration = self.config["min_speech_duration"]
        vad_config.sample_rate = self.config["sample_rate"]

        if not vad_config.validate():
            raise ValueError("Invalid VAD configuration")

        return sherpa_onnx.VoiceActivityDetector(
            config=vad_config,
            buffer_size_in_seconds=self.config["vad_buffer_seconds"],
        )

    @staticmethod
    def _load_audio(filename: str) -> Tuple[np.ndarray, int]:
        """Read an audio file, returning (mono float32 samples, sample rate).

        Only the first channel is kept.
        """
        data, sr = sf.read(filename, always_2d=True, dtype="float32")
        return np.ascontiguousarray(data[:, 0]), sr

    def process_audio_stream(self, audio_generator):
        """Drive the VAD over a stream of audio chunks and process segments.

        Args:
            audio_generator: yields mono float32 numpy arrays at the
                configured sample rate.

        Returns:
            The number of speech segments processed.
        """
        buffer = np.array([], dtype=np.float32)
        idx = 0
        # Hoist the loop-invariant VAD window size out of the inner loops.
        window_size = self.vad.config.silero_vad.window_size

        for samples in audio_generator:
            buffer = np.concatenate([buffer, samples])

            # Feed the VAD in complete windows only.
            while len(buffer) >= window_size:
                self.vad.accept_waveform(buffer[:window_size])
                buffer = buffer[window_size:]

                # Drain all segments detected so far.
                while not self.vad.empty():
                    segment = self.vad.front
                    # Skip segments shorter than 0.5 s — too little audio
                    # for reliable speaker ID / transcription.
                    if len(segment.samples) < 0.5 * self.config["sample_rate"]:
                        self.vad.pop()
                        continue

                    idx = self._process_segment(segment.samples, idx)
                    self.vad.pop()
        return idx

    def _process_segment(self, samples: np.ndarray, idx: int) -> int:
        """Identify the speaker, transcribe one segment, log the result.

        Returns the incremented segment index.
        """
        # Speaker identification. NOTE(review): this reuses "vad_threshold"
        # as the speaker-similarity threshold — confirm that is intended.
        spk_embedding = self._get_speaker_embedding(samples)
        speaker = self.speaker_db.search(spk_embedding, self.config["vad_threshold"]) or "unknown"

        # Speech recognition
        text = self._transcribe_audio(samples)

        # Emit the result
        logging.info(f"{idx}-{speaker}: {text}")
        return idx + 1

    def _get_speaker_embedding(self, samples: np.ndarray) -> np.ndarray:
        """Compute the speaker-embedding vector for one segment."""
        stream = self.extractor.create_stream()
        stream.accept_waveform(self.config["sample_rate"], samples)
        stream.input_finished()
        return np.array(self.extractor.compute(stream))

    def _transcribe_audio(self, samples: np.ndarray) -> str:
        """Transcribe one segment and restore punctuation on the text."""
        stream = self.recognizer.create_stream()
        stream.accept_waveform(self.config["sample_rate"], samples)
        self.recognizer.decode_stream(stream)
        return self.punctuator.add_punctuation(stream.result.text)


def mic_input_generator(config: Dict):
    """Yield 100 ms float32 mono chunks captured from the default microphone."""
    logging.info("Starting microphone input...")
    frames_per_block = int(config["sample_rate"] * 0.1)  # 100 ms per block
    stream = sd.InputStream(
        samplerate=config["sample_rate"],
        channels=1,
        dtype="float32",
    )
    with stream:
        while True:
            block, _ = stream.read(frames_per_block)
            yield block.ravel()


def file_input_generator(filename: str, config: Dict):
    """Yield 100 ms mono float32 chunks read from a WAV file.

    Raises:
        ValueError: if the file's sample rate differs from the configured one.
    """
    audio, file_rate = sf.read(filename, dtype="float32", always_2d=True)
    expected = config["sample_rate"]
    if file_rate != expected:
        raise ValueError(f"Sample rate mismatch: file {file_rate} vs config {expected}")

    step = expected // 10  # 100 ms worth of frames
    offset = 0
    total = len(audio)
    while offset < total:
        # First channel only; ravel() yields a flat 1-D chunk.
        yield audio[offset:offset + step, 0].ravel()
        offset += step


def main():
    """CLI entry point: parse arguments, configure logging, run the pipeline."""
    # Command-line arguments (help strings are user-facing, kept as-is)
    parser = argparse.ArgumentParser(description="语音识别系统")
    parser.add_argument("--input", type=str, default="mic",
                      help="输入源：mic 或 wav文件路径")
    parser.add_argument("--debug", action="store_true", help="启用调试模式")
    args = parser.parse_args()

    # Configure logging exactly once. BUGFIX: the original called
    # logging.basicConfig() twice; the second call — the one carrying the
    # rotating file handler and the timestamped format — was a silent
    # no-op because the root logger already had a handler from the first
    # call, so nothing was ever written to logs/app.log.
    log_dir = Path("./logs")
    log_dir.mkdir(exist_ok=True)  # create the log directory if needed
    logging.basicConfig(
        level=logging.DEBUG if args.debug else logging.INFO,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=[
            RotatingFileHandler(
                log_dir / "app.log",
                maxBytes=10 * 1024 * 1024,  # 10MB per file
                backupCount=5,
                encoding="utf-8",
            ),
            logging.StreamHandler(),
        ],
    )

    try:
        processor = ASRProcessor(config={
            "sample_rate": 16000,
            "vad_threshold": 0.6,
        })

        if args.input.lower() == "mic":
            generator = mic_input_generator(processor.config)
        else:
            generator = file_input_generator(args.input, processor.config)

        processor.process_audio_stream(generator)

    except KeyboardInterrupt:
        logging.info("\n程序主动终止")
    except Exception as e:
        logging.error(f"发生严重错误: {e}", exc_info=args.debug)
        sys.exit(1)


if __name__ == "__main__":
    main()
