#!/usr/bin/env python3

import sys
import os
os.environ["PYTHONUTF8"] = "1"
import argparse
from pathlib import Path
from typing import Dict, List, Tuple, Optional
import logging
from collections import defaultdict
import time
import numpy as np
import sherpa_onnx
from logging.handlers import RotatingFileHandler
import sounddevice as sd
import soundfile as sf
import os
from datetime import datetime
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout,
                             QPushButton, QLabel, QTextEdit, QFileDialog, QListWidget,
                             QInputDialog, QTabWidget, QMessageBox,QComboBox)
from PyQt5.QtCore import QThread, pyqtSignal, Qt, QTimer
from dataclasses import dataclass
from datetime import timedelta


# Global default configuration (paths are relative to the working directory).
DEFAULT_CONFIG = {
    "sample_rate": 16000,           # audio sample rate in Hz, used for capture, VAD and ASR
    "vad_threshold": 0.6,           # similarity threshold passed to speaker_db.search (despite the "vad" name)
    "min_silence_duration": 0.25,   # VAD: seconds of silence that close a speech segment
    "min_speech_duration": 0.25,    # VAD: minimum speech length (s) to report a segment
    "max_speech_duration": 10,      # VAD: maximum speech segment length in seconds
    "models_dir": "./models",       # base model directory (not referenced in the code visible here)
    "speaker_dir":"./speakers",     # where newly recorded speaker samples are saved
    "speaker_file": "./speakers/speakers.txt",  # speaker registry: one "name<whitespace>audio_path" per line
    "num_threads": 6,               # ONNX runtime threads for every model
    "vad_buffer_seconds": 100,      # VoiceActivityDetector internal buffer size (seconds)
    "provider": "cpu",              # onnxruntime execution provider
    # Active ASR model: "paraformer", "fire_red", "sense_voice" or "zipformer"
    "current_model": "paraformer",
    "fire_red_asr_model_encoder": "./models/sherpa-onnx-fire-red-asr-large-zh_en-2025-02-16/encoder.int8.onnx",
    "fire_red_asr_model_decoder": "./models/sherpa-onnx-fire-red-asr-large-zh_en-2025-02-16/decoder.int8.onnx",
    "fire_red_asr_model_tokens": "./models/sherpa-onnx-fire-red-asr-large-zh_en-2025-02-16/tokens.txt",
    "paraformer_model":"./models/sherpa-onnx-paraformer-zh-small-2024-03-09/model.int8.onnx",
    "paraformer_tokens":"./models/sherpa-onnx-paraformer-zh-small-2024-03-09/tokens.txt",
    "sense_voice_ctc_model": "./models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx",
    "sense_voice_ctc_tokens": "./models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt",
    "sense_voice_ctc_use_itn": True,  # forwarded as use_itn to the sense_voice recognizer
    "zipformer_ctc_model":"./models/sherpa-onnx-zipformer-ctc-zh-int8-2025-07-03/model.int8.onnx",
    "zipformer_ctc_tokens": "./models/sherpa-onnx-zipformer-ctc-zh-int8-2025-07-03/tokens.txt",
    "vad_model": "./models/silero_vad.onnx",  # silero VAD model file
    "speaker_embbeding_model": "./models/wespeaker_zh_cnceleb_resnet34.onnx",  # NOTE: the "embbeding" misspelling IS the canonical key
    "punctuation_model": "./models/sherpa-onnx-punct-ct-transformer-zh-en-vocab272727-2024-04-12/model.onnx",
    "wav_dir" : "./wavs",        # final merged recordings of a microphone session
    "temp_dir" : "./temp",       # temporary per-chunk WAV files while recording
    "temp_wav_length": 30,       # seconds of audio per temporary WAV chunk
    "log_dir" : "./logs"         # log directory (consumer not visible in this chunk)
}


class SharedConfig:
    """Dictionary-like configuration shared across threads and widgets.

    Listeners registered via :meth:`add_listener` are invoked as
    ``callback(key, old_value, new_value)`` whenever a key's value actually
    changes (no call when the new value equals the old one).
    """

    def __init__(self, initial_config):
        # Work on a copy so the caller's dict is never mutated.
        self._config = initial_config.copy()
        self._listeners = []

    def add_listener(self, callback):
        """Register ``callback(key, old_value, new_value)`` for change events."""
        self._listeners.append(callback)

    def update(self, new_config):
        """Bulk-update keys from a mapping.

        Bug fix: previously this wrote into the dict directly, silently
        bypassing the listener notifications that ``__setitem__`` performs.
        It now routes every assignment through ``__setitem__`` so listeners
        fire for each changed key.
        """
        for key, value in new_config.items():
            self[key] = value

    def __getitem__(self, key):
        return self._config[key]

    def __setitem__(self, key, value):
        old_value = self._config.get(key)
        self._config[key] = value
        # Notify only on an actual change.
        if old_value != value:
            for callback in self._listeners:
                callback(key, old_value, value)

    def copy(self):
        """Return a shallow copy of the underlying configuration dict."""
        return self._config.copy()



@dataclass
class Segment:
    """A recognized speech segment on an absolute timeline.

    Attributes:
        start: segment start time in seconds.
        duration: segment length in seconds.
        text: recognized text for the segment (may be empty).
    """
    start: float
    duration: float
    text: str = ""

    @property
    def end(self):
        """Segment end time in seconds."""
        return self.start + self.duration

    @staticmethod
    def _format_timestamp(seconds: float) -> str:
        """Format seconds as SRT-style ``H:MM:SS,mmm``.

        Bug fix: the original sliced ``str(timedelta(...))[:-3]``, which
        breaks for whole seconds because timedelta omits the fractional
        part there (``str(timedelta(seconds=2)) == "0:00:02"`` would have
        become ``"0:00"``).
        """
        millis = int(round(seconds * 1000))
        secs, ms = divmod(millis, 1000)
        mins, secs = divmod(secs, 60)
        hours, mins = divmod(mins, 60)
        return f"{hours}:{mins:02d}:{secs:02d},{ms:03d}"

    def __str__(self):
        # "start --> end" header on one line, recognized text on the next.
        start_ts = self._format_timestamp(self.start)
        end_ts = self._format_timestamp(self.end)
        return f"{start_ts} --> {end_ts}\n{self.text}"


class ASRProcessor:
    """Core speech-processing engine: ASR, speaker ID, punctuation and VAD.

    Built entirely from the shared configuration:
      * an offline ASR recognizer (selected by ``current_model``),
      * a speaker-embedding extractor plus a registered-speaker database,
      * a punctuation-restoration model,
      * a validated VAD model config; per-stream detector instances are
        created on demand via :meth:`create_vad_instance`.
    """

    def __init__(self, shared_config):
        # Keep a reference to the live shared config (not a copy) so model
        # switches made elsewhere are visible here as well.
        self.config = shared_config
        self._validate_paths()
        logging.info(f"初始化ASRProcessor，当前模型: {self.config['current_model']}")
        self.recognizer = self._init_recognizer()
        self.extractor = self._init_speaker_embedding()
        self.punctuator = self._init_punctuator()
        # Must follow extractor init: the speaker DB needs extractor.dim.
        self.speaker_db = self._init_speaker_database()
        self.vad_config = self._init_vad_config()  # store the VAD config, not an instance
        logging.info("ASR Processor initialized successfully")

    def reload_recognizer(self):
        """Rebuild the recognizer from the current config (after a model switch)."""
        self.recognizer = self._init_recognizer()
        logging.info(f"已重新加载模型: {self.config['current_model']}")

    def _validate_paths(self):
        """Check that every model file required by the current config exists.

        Bug fix: the original always validated the fire_red model files —
        even when another model (e.g. the default paraformer) was selected —
        and never validated the selected model's own files.

        Raises:
            FileNotFoundError: if a required model file is missing.
        """
        per_model_keys = {
            "fire_red": ("fire_red_asr_model_encoder",
                         "fire_red_asr_model_decoder",
                         "fire_red_asr_model_tokens"),
            "paraformer": ("paraformer_model", "paraformer_tokens"),
            "sense_voice": ("sense_voice_ctc_model", "sense_voice_ctc_tokens"),
            "zipformer": ("zipformer_ctc_model", "zipformer_ctc_tokens"),
        }
        # Unknown model names fall back to paraformer, mirroring _init_recognizer.
        asr_keys = per_model_keys.get(self.config["current_model"],
                                      per_model_keys["paraformer"])

        required_files = {key: self.config[key] for key in asr_keys}
        required_files["vad_model"] = self.config["vad_model"]
        # "embbeding" misspelling is the actual config key; kept for compatibility.
        required_files["speaker_model"] = self.config["speaker_embbeding_model"]
        required_files["punctuation_model"] = self.config["punctuation_model"]

        for key, path in required_files.items():
            if not Path(path).exists():
                raise FileNotFoundError(f"Model file not found: {path}")

    def _init_recognizer(self) -> sherpa_onnx.OfflineRecognizer:
        """Create the offline recognizer for the configured model.

        Unrecognized ``current_model`` values fall back to paraformer.
        """
        logging.info(f"正在初始化{self.config['current_model']}模型...")
        model = self.config["current_model"]
        if model == "fire_red":
            recognizer = sherpa_onnx.OfflineRecognizer.from_fire_red_asr(
                encoder=self.config["fire_red_asr_model_encoder"],
                decoder=self.config["fire_red_asr_model_decoder"],
                tokens=self.config["fire_red_asr_model_tokens"],
                num_threads=self.config["num_threads"],
                provider=self.config["provider"],
                debug=True,
            )
        elif model == "sense_voice":
            recognizer = sherpa_onnx.OfflineRecognizer.from_sense_voice(
                model=self.config["sense_voice_ctc_model"],
                tokens=self.config["sense_voice_ctc_tokens"],
                num_threads=self.config["num_threads"],
                provider=self.config["provider"],
                use_itn=self.config["sense_voice_ctc_use_itn"],
                debug=True,
            )
        elif model == "zipformer":
            recognizer = sherpa_onnx.OfflineRecognizer.from_zipformer_ctc(
                model=self.config["zipformer_ctc_model"],
                tokens=self.config["zipformer_ctc_tokens"],
                num_threads=self.config["num_threads"],
                provider=self.config["provider"],
                debug=True,
            )
        else:  # "paraformer" and any unknown value
            recognizer = sherpa_onnx.OfflineRecognizer.from_paraformer(
                paraformer=self.config["paraformer_model"],
                tokens=self.config["paraformer_tokens"],
                num_threads=self.config["num_threads"],
            )

        logging.info(f"{self.config['current_model']}模型初始化完成")
        return recognizer

    def _init_speaker_embedding(self) -> sherpa_onnx.SpeakerEmbeddingExtractor:
        """Create the speaker-embedding extractor used for speaker ID."""
        config = sherpa_onnx.SpeakerEmbeddingExtractorConfig(
            model=self.config["speaker_embbeding_model"],
            num_threads=self.config["num_threads"],
            provider=self.config["provider"],
        )
        return sherpa_onnx.SpeakerEmbeddingExtractor(config)

    def _init_punctuator(self) -> sherpa_onnx.OfflinePunctuation:
        """Create the offline punctuation-restoration model."""
        config = sherpa_onnx.OfflinePunctuationConfig(
            model=sherpa_onnx.OfflinePunctuationModelConfig(
                ct_transformer=self.config["punctuation_model"]
            )
        )
        return sherpa_onnx.OfflinePunctuation(config)

    def _init_speaker_database(self) -> sherpa_onnx.SpeakerEmbeddingManager:
        """Register every speaker from the speaker file into an embedding manager.

        Raises:
            RuntimeError: if a speaker cannot be registered.
        """
        manager = sherpa_onnx.SpeakerEmbeddingManager(self.extractor.dim)
        for name, files in self._load_speaker_list().items():
            embedding = self._compute_mean_embedding(files)
            if not manager.add(name, embedding):
                raise RuntimeError(f"Failed to register speaker: {name}")
        return manager

    def _load_speaker_list(self) -> Dict[str, List[str]]:
        """Parse the speaker file into ``{name: [audio_file, ...]}``.

        File format: one ``name<whitespace>path`` pair per line; blank lines
        and lines starting with ``#`` are ignored.

        Raises:
            RuntimeError: on any I/O or parse failure.
        """
        speakers = defaultdict(list)
        try:
            with open(self.config["speaker_file"]) as f:
                for line in f:
                    line = line.strip()
                    if not line or line.startswith("#"):
                        continue
                    if len(fields := line.split(maxsplit=1)) == 2:
                        speakers[fields[0]].append(fields[1])
        except Exception as e:
            raise RuntimeError(f"Error loading speaker file: {e}") from e
        return speakers

    def _compute_mean_embedding(self, files: List[str]) -> np.ndarray:
        """Average the embeddings of all audio files for one speaker.

        Unreadable files are skipped with a warning; returns a zero vector
        when no file could be processed.
        """
        embeddings = []
        for f in files:
            try:
                samples, sr = self._load_audio(f)
                stream = self.extractor.create_stream()
                stream.accept_waveform(sr, samples)
                stream.input_finished()
                embeddings.append(np.array(self.extractor.compute(stream)))
            except Exception as e:
                logging.warning(f"Error processing {f}: {e}")
        return np.mean(embeddings, axis=0) if embeddings else np.zeros(self.extractor.dim)

    def _init_vad(self) -> sherpa_onnx.VoiceActivityDetector:
        """Build a fully-configured VAD instance in one step.

        NOTE(review): appears unused — the pipeline uses _init_vad_config()
        plus create_vad_instance() instead; kept for backward compatibility.
        """
        vad_config = sherpa_onnx.VadModelConfig()
        vad_config.silero_vad.model = self.config["vad_model"]
        vad_config.silero_vad.min_silence_duration = self.config["min_silence_duration"]
        vad_config.silero_vad.min_speech_duration = self.config["min_speech_duration"]
        vad_config.silero_vad.max_speech_duration = self.config["max_speech_duration"]
        vad_config.sample_rate = self.config["sample_rate"]

        if not vad_config.validate():
            raise ValueError("Invalid VAD configuration")

        return sherpa_onnx.VoiceActivityDetector(
            config=vad_config,
            buffer_size_in_seconds=self.config["vad_buffer_seconds"],
        )

    def _init_vad_config(self) -> sherpa_onnx.VadModelConfig:
        """Build and validate the silero-VAD model configuration.

        Raises:
            ValueError: if the configuration fails validation.
        """
        vad_config = sherpa_onnx.VadModelConfig()
        vad_config.silero_vad.model = self.config["vad_model"]
        vad_config.silero_vad.min_silence_duration = self.config["min_silence_duration"]
        vad_config.silero_vad.min_speech_duration = self.config["min_speech_duration"]
        # Fix: previously omitted here (though set in _init_vad), so the
        # configured max_speech_duration was silently ignored by the pipeline.
        vad_config.silero_vad.max_speech_duration = self.config["max_speech_duration"]
        vad_config.sample_rate = self.config["sample_rate"]

        if not vad_config.validate():
            raise ValueError("Invalid VAD configuration")
        return vad_config

    def create_vad_instance(self):
        """Create a fresh VoiceActivityDetector from the stored config."""
        return sherpa_onnx.VoiceActivityDetector(
            config=self.vad_config,
            buffer_size_in_seconds=self.config["vad_buffer_seconds"]
        )

    @staticmethod
    def _load_audio(filename: str) -> Tuple[np.ndarray, int]:
        """Load an audio file as (mono float32 samples, sample_rate).

        Only the first channel of multi-channel audio is used.
        """
        data, sr = sf.read(filename, always_2d=True, dtype="float32")
        return np.ascontiguousarray(data[:, 0]), sr

    def _process_segment(self, samples: np.ndarray, idx: int) -> int:
        """Identify the speaker and transcribe one segment; log the result.

        Returns:
            The next segment index (``idx + 1``).
        """
        spk_embedding = self._get_speaker_embedding(samples)
        # NOTE(review): vad_threshold doubles as the speaker-similarity
        # threshold here — confirm that is intentional.
        speaker = self.speaker_db.search(spk_embedding, self.config["vad_threshold"]) or "unknown"
        text = self._transcribe_audio(samples)
        logging.info(f"{idx}-{speaker}: {text}")
        return idx + 1

    def _get_speaker_embedding(self, samples: np.ndarray) -> np.ndarray:
        """Compute the speaker-embedding vector for a chunk of samples."""
        stream = self.extractor.create_stream()
        stream.accept_waveform(self.config["sample_rate"], samples)
        stream.input_finished()
        return np.array(self.extractor.compute(stream))

    def _transcribe_audio(self, samples: np.ndarray) -> str:
        """Run offline ASR on a chunk of samples and return the raw text.

        Punctuation is restored later in batches by the caller, not here.
        """
        stream = self.recognizer.create_stream()
        stream.accept_waveform(self.config["sample_rate"], samples)
        self.recognizer.decode_stream(stream)
        return stream.result.text


class ModelLoaderThread(QThread):
    """Background thread that builds the ASRProcessor at application start-up."""

    finished = pyqtSignal(object)     # emits the ready ASRProcessor instance
    error_occurred = pyqtSignal(str)  # emits a human-readable failure message

    def __init__(self, shared_config):
        super().__init__()
        self.shared_config = shared_config

    def run(self):
        """Construct the processor and emit it, or report the failure."""
        try:
            self.finished.emit(ASRProcessor(shared_config=self.shared_config))
        except Exception as e:
            self.error_occurred.emit(f"模型加载失败: {str(e)}")


class AudioProcessorThread(QThread):
    """Audio-processing thread: streams audio through VAD, speaker ID and ASR.

    Uses a pre-loaded ASRProcessor; audio comes either from the microphone
    or from a file. Recognized text is buffered per speaker and emitted via
    ``result_ready`` after punctuation restoration.

    Fix: the original class body contained a stray ``logging.info`` call
    that executed once at import time (not when processing started); it has
    been removed.
    """

    processing_finished = pyqtSignal()
    result_ready = pyqtSignal(str, str)  # (speaker, punctuated_text)
    error_occurred = pyqtSignal(str)

    def __init__(self, config, input_type, file_path=None, processor=None):
        """
        Args:
            config: shared configuration (dict-like).
            input_type: "mic" for live capture; anything else reads file_path.
            file_path: audio file to process when not using the microphone.
            processor: a ready ASRProcessor instance (required before run()).
        """
        super().__init__()
        self.config = config
        self.input_type = input_type
        self.file_path = file_path
        self.processor = processor  # pre-loaded processor instance
        self._stop_flag = False
        # Per-speaker text buffering state.
        self.current_speaker = None
        self.speaker_buffer = []        # list of (speaker, text) pairs
        self.max_buffer_length = 5      # flush after this many sentences...
        self.max_sentence_length = 50   # ...or this many buffered characters

    def flush_buffer(self):
        """Punctuate and emit the buffered text, then clear the buffer."""
        if not self.speaker_buffer:
            return

        combined_text = "".join([text for _, text in self.speaker_buffer])
        try:
            punctuated_text = self.processor.punctuator.add_punctuation(combined_text)
            speaker = self.speaker_buffer[0][0]  # first speaker labels the whole batch
            self.result_ready.emit(speaker, punctuated_text)
            logging.info(f"处理缓存结果: {speaker}: {punctuated_text}")
        except Exception as e:
            self.error_occurred.emit(f"标点处理错误: {str(e)}")

        self.speaker_buffer = []

    def run(self):
        """Thread entry: pick the input generator and stream it through the pipeline."""
        try:
            if self.processor is None:
                raise ValueError("ASRProcessor未初始化")

            if self.input_type == "mic":
                generator = self.mic_input_generator()
            else:
                generator = self.file_input_generator()
            self.process_audio_stream(generator)
        except Exception as e:
            self.error_occurred.emit(f"处理错误: {str(e)}")

    def stop(self):
        """Request a stop, flush any buffered text and signal completion.

        NOTE(review): may be called from the GUI thread while run() is still
        active — confirm the cross-thread flush is acceptable.
        """
        self._stop_flag = True
        self.flush_buffer()
        self.processing_finished.emit()

    def mic_input_generator(self):
        """Yield ~0.1 s microphone chunks until stopped.

        Side effects: saves a temp WAV every ``temp_wav_length`` seconds,
        and on exit merges all temp files into one timestamped file under
        ``wav_dir``, then deletes the temp files.
        """
        temp_wav_files = []
        temp_wav_length = self.config["temp_wav_length"]
        import queue
        audio_queue = queue.Queue()
        wav_dir = self.config["wav_dir"]
        temp_dir = self.config["temp_dir"]
        temp_prefix = "temp_"
        os.makedirs(wav_dir, exist_ok=True)
        os.makedirs(temp_dir, exist_ok=True)

        def callback(indata, frames, time, status):
            # Runs on the sounddevice capture thread.
            if status:
                print(f"Sounddevice status: {status}", file=sys.stderr)
            if not self._stop_flag:
                audio_queue.put(indata.copy())

        block_size = int(self.config["sample_rate"] * 0.1)  # 0.1 s per read
        buffer = np.array([], dtype=np.float32)

        with sd.InputStream(
            samplerate=self.config["sample_rate"],
            channels=1,
            dtype="float32",
            blocksize=block_size,
            callback=callback
        ) as stream:
            while not self._stop_flag:
                try:
                    if self._stop_flag:
                        break
                    # Short timeout so the stop flag is checked frequently.
                    data = audio_queue.get(timeout=0.1)

                    buffer = np.concatenate([buffer, data.ravel()])
                    # Save a temp file once temp_wav_length seconds are buffered.
                    if len(buffer) >= self.config["sample_rate"] * temp_wav_length:
                        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                        temp_filename = os.path.join(temp_dir, f"{temp_prefix}{timestamp}.wav")

                        sf.write(
                            temp_filename,
                            buffer[:self.config["sample_rate"] * temp_wav_length],
                            self.config["sample_rate"],
                            subtype='PCM_16'
                        )
                        temp_wav_files.append(temp_filename)
                        logging.info(f"已保存临时音频文件: {temp_filename}")

                        # Keep the remainder in the buffer.
                        buffer = buffer[self.config["sample_rate"] * temp_wav_length:]
                    yield data.ravel()
                except queue.Empty:
                    if self._stop_flag:
                        break
                    continue
                except Exception as e:
                    self.error_occurred.emit(f"麦克风输入错误: {str(e)}")
                    break
            logging.info(f"退出录音,缓冲区buffer长度为: {len(buffer)}")
            if len(buffer) > 0:
                logging.info(f"stop_flag为true,退出录音,缓冲区buffer长度为: {len(buffer)}")
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                temp_filename = os.path.join(temp_dir, f"{temp_prefix}{timestamp}.wav")
                sf.write(
                    temp_filename,
                    buffer,
                    self.config["sample_rate"],
                    subtype='PCM_16'
                )
                temp_wav_files.append(temp_filename)
                logging.info(f"已保存最后一个临时音频文件: {temp_filename}")
            if len(temp_wav_files) > 0:
                # Merge all temp chunks into one final file under wav_dir.
                all_data = []
                for wav_file in temp_wav_files:
                    data, sr = sf.read(wav_file)
                    all_data.append(data)
                if len(all_data) > 0:
                    merged_data = np.concatenate(all_data)
                    time_duration = int(len(merged_data)) // self.config["sample_rate"]
                    logging.info(f"合并文件时长为{time_duration}秒")
                    # Recover the session start timestamp from the first temp filename.
                    start_time = temp_wav_files[0].split(temp_prefix)[1].split('.')[0]
                    final_filename = os.path.join(wav_dir, f"{start_time}--{time_duration}s.wav")
                    sf.write(
                        final_filename,
                        merged_data,
                        self.config["sample_rate"],
                        subtype='PCM_16'
                    )
                    logging.info(f"已合并为最终文件: {final_filename}")
                    # Best-effort temp-file cleanup (was a silent bare except).
                    for f in temp_wav_files:
                        try:
                            os.remove(f)
                        except OSError:
                            pass

    def file_input_generator(self):
        """Yield 0.1 s windows from ``file_path`` (first channel only).

        Appends 3 s of silence so the VAD flushes the trailing speech
        segment, then calls stop() when the file is exhausted.

        NOTE(review): windows are sized with the configured sample_rate even
        though the file's own rate may differ — confirm inputs are 16 kHz.
        """
        logging.info(f"从文件{self.file_path}中获取音频流进行处理")
        samples, sr = sf.read(self.file_path, dtype="float32", always_2d=True)

        # Pad the end with 3 seconds of silence.
        silence_duration = 3
        silence_samples = int(sr * silence_duration)
        silence = np.zeros((silence_samples, 1), dtype=np.float32)
        samples_with_silence = np.vstack([samples, silence])

        window_size = self.config["sample_rate"] // 10
        for i in range(0, len(samples_with_silence), window_size):
            if self._stop_flag:
                break
            yield samples_with_silence[i:i+window_size, 0].ravel()
        logging.info("音频文件处理完成")
        self.stop()

    def process_audio_stream(self, audio_generator):
        """Feed the stream through a fresh VAD and process each speech segment.

        Segments shorter than 0.5 s are discarded as too short to identify
        or transcribe reliably.
        """
        vad = self.processor.create_vad_instance()  # fresh VAD per run
        logging.info(f"VAD窗口大小:{vad.config.silero_vad.window_size}")
        buffer = np.array([], dtype=np.float32)
        for samples in audio_generator:
            if self._stop_flag:
                self.processing_finished.emit()
                break

            buffer = np.concatenate([buffer, samples])
            # Feed complete windows to the VAD.
            while len(buffer) >= vad.config.silero_vad.window_size:
                vad.accept_waveform(
                    buffer[:vad.config.silero_vad.window_size]
                )
                buffer = buffer[vad.config.silero_vad.window_size:]
                # Drain the detected speech segments.
                while not vad.empty():
                    if self._stop_flag:
                        break
                    segment = vad.front
                    if len(segment.samples) < 0.5 * self.config["sample_rate"]:
                        vad.pop()
                        continue
                    self.process_segment(segment.samples)
                    vad.pop()

    def process_segment(self, samples):
        """Identify the speaker, transcribe, and buffer the text for punctuation.

        The buffer is flushed before appending whenever the speaker changes
        or a buffer-size limit is reached.
        """
        try:
            spk_embedding = self.processor._get_speaker_embedding(samples)
            speaker = self.processor.speaker_db.search(
                spk_embedding, self.config["vad_threshold"]
            ) or "unknown"

            text = self.processor._transcribe_audio(samples)
            # Flush on speaker change or when the buffer is full.
            if (speaker != self.current_speaker or
                    len(self.speaker_buffer) >= self.max_buffer_length or
                    sum(len(t) for _, t in self.speaker_buffer) >= self.max_sentence_length):
                self.flush_buffer()
            # Track the current speaker and buffer the new text.
            self.current_speaker = speaker
            self.speaker_buffer.append((speaker, text))
            logging.debug(f"缓存文本: {speaker}: {text} (||||缓存长度: {len(self.speaker_buffer)})")
        except Exception as e:
            self.error_occurred.emit(f"处理片段错误: {str(e)}")

class SpeakerManager(QWidget):
    """Speaker-management UI: list, record, add and remove registered speakers.

    The registry is a plain text file (one ``name<TAB>audio_path`` per line)
    located at ``config["speaker_file"]``.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.speaker_file = config["speaker_file"]
        self.is_recording = False
        self.recording_data = []  # chunks accumulated by audio_callback
        self.init_ui()
        self.load_speakers()

    def init_ui(self):
        """Build the widget layout: speaker list, action buttons, status label."""
        layout = QVBoxLayout()

        # Registered-speaker list.
        self.speaker_list = QListWidget()
        layout.addWidget(QLabel("已注册说话人:"))
        layout.addWidget(self.speaker_list)

        # Action buttons.
        btn_layout = QHBoxLayout()
        self.add_btn = QPushButton("添加说话人")
        self.add_btn.clicked.connect(self.add_speaker)
        self.record_btn = QPushButton("录制音频")
        self.record_btn.clicked.connect(self.toggle_recording)
        self.remove_btn = QPushButton("删除说话人")
        self.remove_btn.clicked.connect(self.remove_speaker)
        btn_layout.addWidget(self.add_btn)
        btn_layout.addWidget(self.record_btn)
        btn_layout.addWidget(self.remove_btn)
        layout.addLayout(btn_layout)

        # Recording status display.
        self.record_status = QLabel("准备录音")
        self.record_status.setAlignment(Qt.AlignCenter)
        layout.addWidget(self.record_status)

        self.setLayout(layout)

    def load_speakers(self):
        """Reload the on-screen speaker list from the registry file."""
        self.speaker_list.clear()
        if not Path(self.speaker_file).exists():
            return

        with open(self.speaker_file, 'r') as f:
            for line in f:
                if line.strip() and not line.startswith("#"):
                    name = line.split(maxsplit=1)[0]
                    self.speaker_list.addItem(name)

    def toggle_recording(self):
        """Start or stop recording depending on the current state."""
        if self.is_recording:
            self.stop_recording()
        else:
            self.start_recording()

    def start_recording(self):
        """Open a mono input stream and begin collecting audio chunks."""
        self.is_recording = True
        self.recording_data = []
        self.record_btn.setText("停止录音")
        self.record_status.setText("录音中... (请说话)")

        self.sample_rate = self.config["sample_rate"]
        self.stream = sd.InputStream(
            samplerate=self.sample_rate,
            channels=1,
            dtype='float32',
            callback=self.audio_callback
        )
        self.stream.start()

    def stop_recording(self):
        """Stop the stream, save the capture and register the speaker.

        Fix: guards against an empty capture — ``np.concatenate([])`` raises
        ValueError, which previously crashed the dialog flow when no audio
        arrived before the user pressed stop.
        """
        self.is_recording = False
        self.stream.stop()
        self.stream.close()
        self.record_btn.setText("录制音频")
        self.record_status.setText("录音完成")

        if not self.recording_data:
            self.record_status.setText("未录到音频")
            return

        name, ok = QInputDialog.getText(self, "添加说话人", "请输入说话人姓名:")
        if ok and name:
            # Save the recording under speaker_dir with a timestamped name.
            os.makedirs(self.config["speaker_dir"], exist_ok=True)
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            file_path = os.path.join(self.config["speaker_dir"], f"{name}_{timestamp}.wav")
            sf.write(file_path, np.concatenate(self.recording_data), self.sample_rate)

            # Register the speaker and refresh the list.
            self.save_speaker(name, file_path)
            self.load_speakers()

    def audio_callback(self, indata, frames, time, status):
        """sounddevice callback: accumulate chunks while recording is active."""
        if self.is_recording:
            self.recording_data.append(indata.copy())

    def add_speaker(self):
        """Register a speaker from an existing audio file chosen via dialog."""
        name, ok = QInputDialog.getText(self, "添加说话人", "请输入说话人姓名:")
        if ok and name:
            file_path, _ = QFileDialog.getOpenFileName(
                self, "选择音频文件", "", "音频文件 (*.wav *.flac)"
            )
            if file_path:
                self.save_speaker(name, file_path)
                self.load_speakers()

    def save_speaker(self, name, file_path):
        """Append one name/path entry to the registry file."""
        with open(self.config["speaker_file"], "a") as f:
            f.write(f"{name}\t{file_path}\n")

    def remove_speaker(self):
        """Remove the selected speaker from the registry and refresh the list."""
        current_item = self.speaker_list.currentItem()
        if current_item:
            self.remove_speaker_from_file(current_item.text())
            self.load_speakers()

    def remove_speaker_from_file(self, name):
        """Drop every registry line whose first field equals ``name`` exactly.

        Fix: the original used ``line.startswith(name)``, which also deleted
        any speaker whose name merely begins with ``name`` (removing "Tom"
        would also remove "Tommy").
        """
        kept = []
        with open(self.speaker_file, "r") as f:
            for line in f:
                fields = line.split(maxsplit=1)
                if fields and fields[0] == name:
                    continue
                kept.append(line)

        with open(self.speaker_file, "w") as f:
            f.writelines(kept)


class MainWindow(QMainWindow):
    """Top-level window: model selection, mic/file input, and ASR control.

    Owns a SharedConfig (so tabs and worker threads observe configuration
    changes), a ModelLoaderThread for background model loading, and an
    AudioProcessorThread that performs the actual recognition.
    """

    def __init__(self):
        super().__init__()
        self.shared_config = SharedConfig(DEFAULT_CONFIG)
        self.shared_config.add_listener(self.on_config_changed)
        self.config = self.shared_config
        # self.config = DEFAULT_CONFIG
        self.processor_thread = None
        self.asr_processor = None
        self._init_logging()
        self.init_ui()
        self.init_recording()
        self.start_model_loading()  # load the ASR model in a background thread

    def on_config_changed(self, key, old_value, new_value):
        """SharedConfig listener: log changes to the active model."""
        if key == "current_model":
            logging.info(f"模型配置变化: {old_value} -> {new_value}")


    def start_model_loading(self):
        """Start the background thread that loads the ASR model."""
        self.status_label.setText("初始化中...")
        self.model_loader_thread = ModelLoaderThread(self.shared_config)
        self.model_loader_thread.finished.connect(self.on_model_loaded)
        self.model_loader_thread.error_occurred.connect(self.show_error)
        self.model_loader_thread.start()
    def on_model_loaded(self, processor):
        """Callback when model loading finishes: store processor, enable UI."""
        self.asr_processor = processor
        current_model = self.config["current_model"]
        logging.info(f"模型{current_model}加载完成")
        # Sync the combo box with the currently configured model.
        idx = self.model_combo.findData(self.config["current_model"])
        if idx >= 0:
            self.model_combo.setCurrentIndex(idx)
        self.status_label.setText("准备就绪")
        self.start_btn.setEnabled(True)
        

    def change_model(self, index):
        """Combo-box handler: switch the active ASR model.

        NOTE(review): the recognizer is reloaded *before* the user is asked
        whether to stop a running job, so answering "No" still leaves the new
        model loaded while the combo box is rolled back — confirm this
        ordering is intended.
        """
        prev_model = self.shared_config["current_model"]
        model_name = self.model_combo.itemData(index)
        logging.info(f"点击按钮切换模型为: {model_name}")
        self.shared_config["current_model"] = model_name

        if self.asr_processor is not None:
            # A processor instance already exists: re-initialize its recognizer.
            try:
                self.status_label.setText(f"正在加载{model_name}模型...")
                QApplication.processEvents()  # let the UI repaint immediately
                # reload model
                self.asr_processor.reload_recognizer()
                
                self.status_label.setText(f"{model_name}模型加载完成")

                # A running processing thread must be stopped after a switch.
                if self.processor_thread and self.processor_thread.isRunning():
                    reply = QMessageBox.question(
                        self, '确认', 
                        '模型切换需要停止当前处理，是否继续?',
                        QMessageBox.Yes | QMessageBox.No
                    )
                    if reply == QMessageBox.No:
                        # Restore the previous combo-box selection.
                        prev_idx = self.model_combo.findData(self.config["current_model"])
                        self.model_combo.setCurrentIndex(prev_idx)
                        return
                    self.stop_processing()
            except Exception as e:
                self.show_error(f"模型切换失败: {str(e)}")
                # Roll back to the previously selected model.
                self.config["current_model"] = prev_model
                idx = self.model_combo.findData(prev_model)
                if idx >= 0:
                    self.model_combo.setCurrentIndex(idx)

    def _init_logging(self):
        """Initialize root logging: rotating file handler plus console handler.

        NOTE(review): reads self.config["log_dir"], which is not among the
        DEFAULT_CONFIG keys visible in this chunk — confirm the key exists.
        """
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        log_dir = Path(self.config["log_dir"])
        try:
            log_dir.mkdir(exist_ok=True)
            logging.getLogger().debug(f"日志目录：{log_dir.absolute()}")
        except Exception as e:
            error_msg = f"无法创建日志目录：{e}"
            print(error_msg)
            QMessageBox.critical(self, "错误", error_msg)
            return
        # Rotating file handler: 10 MB per file, 5 backups, UTF-8 on disk.
        formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
        try:
            file_handler = RotatingFileHandler(
                log_dir / "app_ui.log",
                maxBytes=10*1024*1024,
                backupCount=5,
                encoding="utf-8"
            )
            file_handler.setFormatter(formatter)
        except Exception as e:
            error_msg = f"无法初始化文件日志：{e}"
            print(error_msg)
            QMessageBox.critical(self, "错误", error_msg)
            return
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        # Remove any pre-existing handlers before installing the new ones.
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)
        logging.info("日志系统初始化完成")
    def init_ui(self):
        """Build the main window: tabs, model selector, input mode, results."""
        logging.info("初始化UI...")
        self.setWindowTitle("智能语音识别系统")
        self.setGeometry(100, 100, 800, 600)

        # Main tab widget: recognition tab + speaker-management tab.
        self.tabs = QTabWidget()
        self.main_tab = QWidget()
        self.speaker_tab = SpeakerManager(self.config)
        self.tabs.addTab(self.main_tab, "语音识别")
        self.tabs.addTab(self.speaker_tab, "说话人管理")
        self.setCentralWidget(self.tabs)

        # Layout of the recognition tab.
        main_layout = QVBoxLayout()

        # Model selection row
        # (placed above the status display)
        model_layout = QHBoxLayout()
        model_layout.addWidget(QLabel("选择模型:"))
        self.model_combo = QComboBox()
        self.model_combo.addItem("Paraformer (默认)", "paraformer")
        self.model_combo.addItem("Fire Red ASR", "fire_red")
        self.model_combo.addItem("Sense Voice", "sense_voice")
        self.model_combo.addItem("Zipformer", "zipformer")
        self.model_combo.currentIndexChanged.connect(self.change_model)
        model_layout.addWidget(self.model_combo)
        main_layout.addLayout(model_layout)


        # Input mode selection (microphone vs. file).
        self.mode_layout = QHBoxLayout()
        self.mic_btn = QPushButton("麦克风输入")
        self.mic_btn.setCheckable(True)
        self.mic_btn.clicked.connect(self.switch_to_mic)
        self.file_btn = QPushButton("文件输入")
        self.file_btn.setCheckable(True)
        self.file_btn.clicked.connect(self.switch_to_file)
        self.mode_layout.addWidget(self.mic_btn)
        self.mode_layout.addWidget(self.file_btn)
        main_layout.addLayout(self.mode_layout)

        # Status display.
        self.status_label = QLabel("准备就绪")
        main_layout.addWidget(self.status_label)

        # Recognition results display (read-only).
        self.result_text = QTextEdit()
        self.result_text.setReadOnly(True)
        main_layout.addWidget(self.result_text)

        # Control buttons: start / stop / export.
        control_layout = QHBoxLayout()
        self.start_btn = QPushButton("开始")
        self.start_btn.clicked.connect(self.start_processing)
        self.stop_btn = QPushButton("停止")
        self.stop_btn.clicked.connect(self.stop_processing)
        self.export_btn = QPushButton("导出结果")
        self.export_btn.clicked.connect(self.export_results)
        control_layout.addWidget(self.start_btn)
        control_layout.addWidget(self.stop_btn)
        control_layout.addWidget(self.export_btn)
        main_layout.addLayout(control_layout)

        self.main_tab.setLayout(main_layout)
        logging.info("UI初始化完成")

    def init_recording(self):
        """Default the input source to the microphone."""
        self.input_type = "mic"
        self.file_path = None
        self.mic_btn.setChecked(True)
        logging.info(f"当前选择模式为{self.input_type}")

    def switch_to_mic(self):
        """Switch the input source to the microphone."""
        self.input_type = "mic"
        self.mic_btn.setChecked(True)
        self.file_btn.setChecked(False)
        logging.info(f"当前选择模式为{self.input_type}")

    def switch_to_file(self):
        """Switch the input source to an audio file chosen by the user.

        If the user cancels the dialog, the previous mode is kept and the
        file button is unchecked.
        """
        self.file_path, _ = QFileDialog.getOpenFileName(
            self, "选择音频文件", "", "音频文件 (*.wav *.flac)")
        if self.file_path:
            self.input_type = "file"
            self.file_btn.setChecked(True)
            self.mic_btn.setChecked(False)
            logging.info(f"当前选择文件为{self.file_path}")
        else:
            self.file_btn.setChecked(False)
        logging.info(f"当前选择模式为{self.input_type}")

    def start_processing(self):
        """Validate preconditions, then start an AudioProcessorThread."""
        logging.info(f"点击开始按钮，开始处理，当前模型: {self.config['current_model']}")
        if self.asr_processor is None:
            QMessageBox.warning(self, "警告", "模型尚未加载完成，请稍候！")
            return
        if self.processor_thread and self.processor_thread.isRunning():
            QMessageBox.warning(self, "警告", "已有任务正在运行!")
            return
        if not hasattr(self.asr_processor, 'recognizer') or self.asr_processor.recognizer is None:
            QMessageBox.warning(self, "警告", "语音识别模型未正确加载！")
            return
        self.result_text.clear()
        self.status_label.setText("处理中...")
        
        self.processor_thread = AudioProcessorThread(
            config=self.config, 
            input_type=self.input_type,
            file_path=self.file_path,
            processor=self.asr_processor  # reuse the pre-loaded processor instance
        )
        self.processor_thread.result_ready.connect(self.update_result)
        self.processor_thread.error_occurred.connect(self.show_error)
        self.processor_thread.processing_finished.connect(self.on_processing_finished)
        self.processor_thread.start()

    def on_processing_finished(self):
        """Slot: mark the UI as done when the worker thread finishes."""
        self.status_label.setText("处理完成")
    
    def stop_processing(self):
        """Stop the worker thread (blocking until it exits) and audio playback."""
        if self.processor_thread:
            self.processor_thread.stop()
            # self.processor_thread.quit()
            self.processor_thread.wait()
            sd.stop()
        self.status_label.setText("已停止")
        logging.info("已停止")

    def update_result(self, speaker, text):
        """Slot: append one recognized utterance to the result view."""
        self.result_text.append(f"[{speaker}]: {text}")

    def export_results(self):
        """Export the accumulated results to a UTF-8 text file."""
        file_path, _ = QFileDialog.getSaveFileName(
            self, "导出结果", "", "文本文件 (*.txt)")
        if file_path:
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(self.result_text.toPlainText())
            QMessageBox.information(self, "导出成功", "结果已导出到指定文件")

    def show_error(self, msg):
        """Log the error, show a dialog, and stop any running processing.

        NOTE(review): the status label always appends "..." even when msg is
        shorter than 50 characters.
        """
        logging.error(f"发生错误: {msg}")
        self.status_label.setText(f"错误: {msg[:50]}...")  # abbreviated status text
        QMessageBox.critical(self, "错误", msg)
        self.stop_processing()

if __name__ == "__main__":
    # Script entry point: create the Qt application, show the main window,
    # and hand control to the event loop until the user quits.
    app = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    exit_code = app.exec_()
    sys.exit(exit_code)
