import sys
import os
import threading
import queue
import pyaudio
import numpy as np
import webrtcvad
import torch
import time
from collections import deque
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, 
                            QTextEdit, QLabel, QFileDialog, QProgressBar, QGroupBox)
from PyQt5.QtCore import Qt, pyqtSignal, QObject
from PyQt5.QtGui import QFont, QTextCursor
from funasr import AutoModel  # FunASR speech-recognition library
# ========================================================
# Audio-processing core (from the original code)
# ========================================================

os.environ["TQDM_DISABLE"] = "1"  # silence tqdm progress bars from model tooling
os.environ['MODELSCOPE_ENVIRONMENT'] = 'production'

# Audio capture parameters
SAMPLE_RATE = 16000        # Hz; a rate supported by webrtcvad
CHUNK_SIZE = 480           # samples per frame -> 30 ms at 16 kHz
FORMAT = pyaudio.paInt16   # 16-bit signed PCM
CHANNELS = 1               # mono capture

# Voice-activity-detection parameters
SILENCE_THRESHOLD = 0.0003  # RMS energy used when judging energy swings
VAD_AGGRESSIVENESS = 3      # webrtcvad mode 0-3; 3 = most aggressive
MIN_ENERGY = 0.0002         # floor for the adaptive energy threshold

# Speech segmentation parameters (seconds)
MIN_SPEECH_DURATION = 0.2   # segments shorter than this are discarded
MAX_SPEECH_DURATION = 10.0  # force a split once a segment grows this long
MIN_PHRASE_DURATION = 0.4   # minimum gap between two phrase splits
MAX_PHRASE_DURATION = 5.0   # force a phrase split after this long

# Silence-detection parameters (seconds)
POST_SILENCE_TAIL = 0.25    # NOTE(review): not referenced in this file — confirm intended use
INTRA_PHRASE_SILENCE = 0.15  # pause length marking a phrase-internal boundary
INTER_PHRASE_SILENCE = 0.35  # pause length marking a boundary between phrases

# Model path configuration (example paths; edit for your installation)
AUDIO_MODE = "D:/AI/MODEL/fun_asr_pytorch"   # ASR model directory
VAD_MODEL = "D:/AI/MODEL/fsmn_vad_pytorch"   # VAD model directory
PUNC_MODEL = "D:/AI/MODEL/punc_ct_pytorch"   # punctuation model directory

class PhraseDetector:
    """Prosodic-pause detector: tracks energy and pitch to find natural phrase boundaries."""

    def __init__(self):
        # Rolling per-frame feature histories (20 frames ~ 0.6 s at 30 ms/frame).
        self.energy_history = deque(maxlen=20)
        self.pitch_history = deque(maxlen=20)
        # Count of consecutive low-energy frames; incremented/reset by AudioProcessor.
        self.silence_counter = 0
        # NOTE(review): never written or read in this file — confirm it is needed.
        self.phrase_markers = []

    def analyze_frame(self, audio_frame):
        """Analyze one float audio frame.

        Returns (rms_energy, pitch_hz); both values are also appended to the
        rolling histories.  Pitch is 0 when no periodicity is found.
        """
        if audio_frame.size == 0:
            return 0.0, 0.0

        # 1. Pre-emphasis to boost high frequencies.
        emphasized = np.append(audio_frame[0], audio_frame[1:] - 0.97 * audio_frame[:-1])

        # 2. Frame energy (root mean square).
        energy = np.sqrt(np.mean(emphasized**2))
        self.energy_history.append(energy)

        # 3. Pitch estimation via autocorrelation.
        pitch = 0
        try:
            autocorr = np.correlate(emphasized, emphasized, mode='full')
            autocorr = autocorr[len(autocorr)//2:]  # keep non-negative lags only
            peaks = np.where(autocorr > 0.5 * autocorr.max())[0]
            # BUGFIX: lag 0 is always the global maximum of an autocorrelation,
            # so the original `peaks[0]` was always 0 and pitch was never
            # detected.  Skip the zero-lag peak and use the first real candidate.
            peaks = peaks[peaks > 0]
            if len(peaks) > 0:
                fundamental = peaks[0]  # guaranteed > 0 after the filter above
                pitch = SAMPLE_RATE / fundamental
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any numeric failure just yields pitch 0.
            pass

        self.pitch_history.append(pitch)
        return energy, pitch

    def detect_phrase_boundary(self, current_time):
        """Return True when recent frames look like a phrase boundary.

        A boundary requires enough accumulated silence plus either a stable
        pitch or a noticeable energy swing over the last 5 frames.
        `current_time` is accepted for interface symmetry but unused here.
        """
        if not self.energy_history or not self.pitch_history:
            return False

        # Energy swing (max - min) across the last 5 frames.
        recent_energy = list(self.energy_history)[-5:]
        energy_decay = len(recent_energy) > 1 and (max(recent_energy) - min(recent_energy)) > SILENCE_THRESHOLD * 2

        # Pitch stability (variance) over the last 5 frames.
        recent_pitch = list(self.pitch_history)[-5:]
        pitch_var = np.var(recent_pitch) if len(recent_pitch) > 1 else 0
        pitch_stable = pitch_var < 50  # variance below 50 counts as stable

        # Silence must have lasted at least the intra-phrase pause threshold.
        silence_duration = self.silence_counter * CHUNK_SIZE / SAMPLE_RATE
        silence_ok = silence_duration > INTRA_PHRASE_SILENCE

        # Boundary: enough silence AND (stable pitch OR energy swing).
        return silence_ok and (pitch_stable or energy_decay)

class AudioProcessor:
    """Real-time frame analyzer combining WebRTC VAD with prosody features."""

    def __init__(self, rate=SAMPLE_RATE):
        self.rate = rate
        self.vad = webrtcvad.Vad(VAD_AGGRESSIVENESS)
        self.phrase_detector = PhraseDetector()
        self.last_phrase_time = 0           # timestamp of the most recent phrase split
        self.energy_threshold = MIN_ENERGY  # adaptive silence/energy threshold

    def process_frame(self, audio_np):
        """Analyze one float32 frame; returns (energy, pitch, is_speech)."""
        if audio_np.size == 0:
            return 0.0, 0.0, False

        # Prosody features (also updates the detector's rolling histories).
        energy, pitch = self.phrase_detector.analyze_frame(audio_np)

        # Track consecutive sub-threshold frames for silence-length estimates.
        if energy < self.energy_threshold:
            self.phrase_detector.silence_counter += 1
        else:
            self.phrase_detector.silence_counter = 0

        # Adapt the energy threshold by exponential smoothing, floored at MIN_ENERGY.
        if energy > MIN_ENERGY:
            self.energy_threshold = 0.9 * self.energy_threshold + 0.1 * max(MIN_ENERGY, energy * 0.5)

        # WebRTC VAD on an int16 rendering of the frame.
        # NOTE(review): webrtcvad only accepts 10/20/30 ms frames; the size>=30
        # guard is looser than that and relies on the except to swallow mismatches.
        is_speech = False
        if audio_np.size >= 30:
            try:
                audio_int16 = (audio_np * 32767).astype(np.int16)
                is_speech = self.vad.is_speech(audio_int16.tobytes(), self.rate)
            except Exception:
                pass

        return energy, pitch, is_speech

    def detect_phrase_boundary(self, current_time):
        """Return True when a phrase split should happen at `current_time`."""
        # Rate limit: at least MIN_PHRASE_DURATION between two splits.
        time_since_last = current_time - self.last_phrase_time
        if time_since_last < MIN_PHRASE_DURATION:
            return False

        # Prosodic boundary reported by the detector.
        if self.phrase_detector.detect_phrase_boundary(current_time):
            self.last_phrase_time = current_time
            return True

        # Max-duration guard: force a split after MAX_PHRASE_DURATION.
        if time_since_last >= MAX_PHRASE_DURATION:
            # BUGFIX: record the split time here too.  The original left
            # last_phrase_time stale on this path, so once MAX_PHRASE_DURATION
            # elapsed every subsequent call returned True, triggering
            # back-to-back splits on every frame.
            self.last_phrase_time = current_time
            return True

        return False

class SpeechBuffer:
    """Smart speech buffer: splits a continuous audio stream into phrase segments."""

    def __init__(self):
        self.buffer = deque()                       # buffered float32 chunks of the current utterance
        self.lock = threading.Lock()                # guards all buffer state (producer thread vs. none yet)
        self.transcription_queue = queue.Queue()    # (audio_segment, segment_id) handed to the ASR thread
        self.audio_processor = AudioProcessor()
        self.is_speaking = False                    # True while an utterance is being buffered
        self.last_detected_speech = 0               # wall-clock time of the last speech-like chunk
        self.speech_start_time = 0                  # wall-clock time the current utterance started
        self.phrase_start_index = 0                 # buffer index where the current phrase begins
        self.true_speech_end_index = -1             # buffer index of the last speech-like chunk
        self.split_counter = 0                      # monotonically increasing segment id

    def add_chunk(self, audio_np):
        """Feed one float32 audio chunk into the buffer and split when a boundary is found."""
        if audio_np.size == 0:
            return

        with self.lock:
            current_time = time.time()

            # 1. Speech-onset detection while idle.
            if not self.is_speaking:
                _, _, is_speech = self.audio_processor.process_frame(audio_np)
                if is_speech:
                    self._reset_buffer_state()
                    self.is_speaking = True
                    self.speech_start_time = current_time
                    # Seed the speech timestamp so silence_duration below starts from now.
                    self.last_detected_speech = current_time
                    self.buffer.append(audio_np)
                # BUGFIX: the original fell through to the in-progress branch
                # here, so idle silence was buffered forever (unbounded memory
                # growth) and every idle frame went through process_frame()
                # twice.  Drop non-speech frames while idle instead.
                return

            # 2. In-progress speech handling.
            self.buffer.append(audio_np)
            buffer_size = len(self.buffer)

            # Analyze the current frame.
            energy, _, is_speech = self.audio_processor.process_frame(audio_np)

            # Track the position of the last speech-like chunk.
            if is_speech or energy > self.audio_processor.energy_threshold:
                self.last_detected_speech = current_time
                self.true_speech_end_index = buffer_size - 1

            # 3. Segmentation conditions.
            silence_duration = current_time - self.last_detected_speech
            audio_duration = buffer_size * CHUNK_SIZE / SAMPLE_RATE

            # Split when: the utterance is too long, OR we saw enough inter-phrase
            # silence after a minimum utterance length, OR prosody says so.
            force_segment = audio_duration > MAX_SPEECH_DURATION
            boundary_ok = silence_duration > INTER_PHRASE_SILENCE
            min_duration_ok = current_time - self.speech_start_time > MIN_PHRASE_DURATION
            prosodic_boundary = self.audio_processor.detect_phrase_boundary(current_time)

            if force_segment or (boundary_ok and min_duration_ok) or prosodic_boundary:
                self._segment_phrase(current_time)

    def _reset_buffer_state(self):
        """Clear the buffer and rewind the segment bookkeeping indices."""
        self.buffer.clear()
        self.true_speech_end_index = 0
        self.phrase_start_index = 0

    def _segment_phrase(self, current_time):
        """Cut the buffered audio at the last speech position and queue it for ASR."""
        self.split_counter += 1

        # Segment end: last speech chunk plus a small tail margin.
        # NOTE(review): int(0.05 * SAMPLE_RATE / CHUNK_SIZE) == 1 chunk (~30 ms),
        # not the 50 ms the original comment claimed — confirm intended margin.
        end_index = min(
            len(self.buffer),
            self.true_speech_end_index + int(0.05 * SAMPLE_RATE / CHUNK_SIZE)
        )

        # Extract the segment's chunks.
        segment_list = []
        for i in range(self.phrase_start_index, end_index):
            if i < len(self.buffer):
                segment_list.append(self.buffer[i])

        if not segment_list:
            return

        try:
            audio_segment = np.concatenate(segment_list)
            segment_duration = len(audio_segment) / SAMPLE_RATE

            # Discard fragments shorter than the minimum speech duration.
            if segment_duration >= MIN_SPEECH_DURATION:
                # Hand the segment to the transcription thread.
                self.transcription_queue.put((audio_segment, self.split_counter))

                # Keep any chunks after the cut point as the start of the next phrase.
                remaining_items = list(self.buffer)[end_index:]
                if remaining_items:
                    self._reset_buffer_state()
                    self.buffer = deque(remaining_items)
                    self.speech_start_time = current_time
                    self.last_detected_speech = current_time
                else:
                    self.is_speaking = False
                    self._reset_buffer_state()
        except ValueError:
            # np.concatenate failed on malformed chunks — drop state and restart.
            self._reset_buffer_state()
            self.is_speaking = False

    def get_next_transcription(self):
        """Pop the next (audio_segment, segment_id) pair, or None if the queue is empty."""
        try:
            return self.transcription_queue.get(timeout=0.1)
        except queue.Empty:
            return None

# ========================================================
# ASR processor
# ========================================================

class SmartASRProcessor:
    """Speech-recognition front end built on the FunASR AutoModel pipeline."""

    def __init__(self, model_paths):
        # model_paths: dict with 'asr', 'vad' and 'punc' keys -> model directories.
        self.model_paths = model_paths
        self.model = None   # lazily created AutoModel instance
        self.cache = {}     # streaming decode cache carried across segments

    def load_model(self):
        """Load the FunASR model on first call; return True when a model is ready."""
        if self.model:
            return True

        try:
            print("加载语音识别模型...")
            device = "cuda" if torch.cuda.is_available() else "cpu"
            self.model = AutoModel(
                model=self.model_paths['asr'],
                vad_model=self.model_paths['vad'],
                punc_model=self.model_paths['punc'],
                model_revision="v2.0.4",
                device=device,
            )
        except Exception as e:
            print(f"模型加载失败: {str(e)}")
            return False

        print(f"模型加载完成，使用设备: {device}")
        return True

    def process_audio(self, audio_segment, segment_id):
        """Transcribe one segment; returns (text, duration_s, proc_time_s, segment_id)."""
        if not self.model or len(audio_segment) < int(0.05 * SAMPLE_RATE):
            return "", 0, 0, segment_id

        try:
            started = time.time()

            # Core recognition call.
            output = self.model.generate(
                input=audio_segment,
                cache=self.cache,
                is_final=True
            )

            segment_seconds = len(audio_segment) / SAMPLE_RATE
            elapsed = time.time() - started

            if output and output[0]["text"]:
                return output[0]["text"].strip(), segment_seconds, elapsed, segment_id
        except Exception as e:
            print(f"ASR处理失败: {str(e)}")
            self.cache = {}  # drop decode state after a failure

        return "", 0, 0, segment_id

    def process_audio_file(self, file_path):
        """Transcribe a whole audio file; returns (text, file_size_bytes, proc_time_s)."""
        if not self.model:
            return "模型未加载", 0, 0

        try:
            started = time.time()
            output = self.model.generate(input=file_path)
            elapsed = time.time() - started

            if output and output[0]["text"]:
                return output[0]["text"].strip(), os.path.getsize(file_path), elapsed
        except Exception as e:
            print(f"文件处理失败: {str(e)}")

        return "识别失败", 0, 0

# ========================================================
# Custom signal classes
# ========================================================

class RecognitionSignals(QObject):
    """Qt signal hub that lets worker threads deliver results to the GUI thread."""
    result_received = pyqtSignal(str, int)  # recognized text and segment id
    file_result_received = pyqtSignal(str)   # whole-file recognition result
    status_update = pyqtSignal(str)          # status-bar message
    progress_update = pyqtSignal(int)        # progress percentage

# ========================================================
# Worker thread classes
# ========================================================

class AudioCaptureThread(threading.Thread):
    """Microphone capture thread: pushes raw PCM chunks onto a shared queue."""

    def __init__(self, audio_queue, stop_event, status_signal):
        super().__init__()
        self.audio_queue = audio_queue      # destination for raw chunks
        self.stop_event = stop_event        # shared shutdown flag
        self.status_signal = status_signal  # pyqtSignal(str) for status messages
        self.daemon = True

    @staticmethod
    def _pick_input_device(pa):
        """Return the index of the first device with input channels, else None."""
        for idx in range(pa.get_device_count()):
            if pa.get_device_info_by_index(idx)["maxInputChannels"] > 0:
                return idx
        return None

    def run(self):
        pa = pyaudio.PyAudio()
        stream = None
        try:
            # Pick the first usable input device automatically.
            device_index = self._pick_input_device(pa)
            if device_index is None:
                self.status_signal.emit("错误: 未找到音频设备!")
                return

            # Open the capture stream.
            stream = pa.open(
                format=FORMAT,
                channels=CHANNELS,
                rate=SAMPLE_RATE,
                input=True,
                input_device_index=device_index,
                frames_per_buffer=CHUNK_SIZE
            )

            self.status_signal.emit(f"音频采集启动: 设备{device_index}")

            # Pump chunks until asked to stop.
            while not self.stop_event.is_set():
                try:
                    chunk = stream.read(CHUNK_SIZE, exception_on_overflow=False)
                    self.audio_queue.put(chunk)
                except Exception as e:
                    self.status_signal.emit(f"音频采集异常: {str(e)}")
                    break
        finally:
            # Always release audio resources.
            if stream:
                stream.stop_stream()
                stream.close()
            pa.terminate()
            self.status_signal.emit("音频采集已停止")

class VADProcessThread(threading.Thread):
    """VAD worker: converts raw PCM chunks to float32 and feeds the speech buffer."""

    def __init__(self, audio_queue, speech_buffer, stop_event):
        super().__init__()
        self.audio_queue = audio_queue      # raw chunks from the capture thread
        self.speech_buffer = speech_buffer  # SpeechBuffer that segments the stream
        self.stop_event = stop_event        # shared shutdown flag
        self.daemon = True

    def run(self):
        while not self.stop_event.is_set():
            try:
                # Pull one raw chunk and normalize int16 PCM to [-1, 1) float32.
                raw = self.audio_queue.get(timeout=0.05)
                samples = np.frombuffer(raw, dtype=np.int16).astype(np.float32) / 32768.0
                self.speech_buffer.add_chunk(samples)
            except queue.Empty:
                time.sleep(0.005)
            except Exception as e:
                print(f"VAD处理异常: {str(e)}")

class ASRProcessThread(threading.Thread):
    """ASR worker: pulls segmented audio from the speech buffer and emits results."""

    def __init__(self, speech_buffer, asr_processor, signals):
        super().__init__()
        self.speech_buffer = speech_buffer
        self.asr_processor = asr_processor
        self.signals = signals
        self.stop_event = threading.Event()  # this thread owns its own stop flag
        self.daemon = True
        self.last_segment_id = 0             # id of the most recently processed segment

    def run(self):
        self.asr_processor.load_model()
        self.signals.status_update.emit("ASR模型已加载，准备识别...")

        while not self.stop_event.is_set():
            result = self.speech_buffer.get_next_transcription()
            if not result:
                time.sleep(0.01)
                continue

            audio_segment, segment_id = result

            # Continuity check: reset the decode cache BEFORE transcribing a
            # non-consecutive segment.
            # BUGFIX: the original reset the cache only *after* process_audio()
            # had already decoded the segment with the stale cache.
            if segment_id != self.last_segment_id + 1 and self.last_segment_id > 0:
                self.asr_processor.cache = {}
            self.last_segment_id = segment_id

            text, duration, proc_time, _ = self.asr_processor.process_audio(audio_segment, segment_id)

            if text:
                # Emit the formatted recognition result to the GUI.
                result_str = f"分段#{segment_id} (时长: {duration:.2f}s):\n{text}\n"
                if duration > 0:
                    result_str += f"处理速度: {proc_time/duration:.2f}x实时\n"
                self.signals.result_received.emit(result_str, segment_id)

            time.sleep(0.005)

class FileProcessingThread(threading.Thread):
    """File transcription worker: runs one audio file through the ASR model."""

    def __init__(self, file_path, asr_processor, signals):
        super().__init__()
        self.file_path = file_path
        self.asr_processor = asr_processor
        self.signals = signals
        self.stop_event = threading.Event()  # set by the window on close
        self.daemon = True

    def run(self):
        self.signals.status_update.emit(f"开始处理文件: {os.path.basename(self.file_path)}")

        # Make sure the model is available before touching the file.
        if not self.asr_processor.load_model():
            self.signals.status_update.emit("模型加载失败，无法处理文件")
            return

        # Transcribe the whole file in one call.
        text, file_size, proc_time = self.asr_processor.process_audio_file(self.file_path)

        # BUGFIX: process_audio_file() signals failure with the truthy sentinel
        # string "识别失败", so the original `if text:` treated failures as
        # successes (displayed the sentinel as a result and reported 完成).
        if text and text != "识别失败":
            # Emit the formatted whole-file result.
            result_str = f"文件识别结果 ({os.path.basename(self.file_path)}):\n"
            result_str += f"处理时间: {proc_time:.2f}秒\n"
            result_str += f"文件大小: {file_size/1024:.2f}KB\n"
            result_str += "=" * 50 + "\n"
            result_str += text + "\n"
            self.signals.file_result_received.emit(result_str)
            self.signals.status_update.emit("文件处理完成")
        else:
            self.signals.status_update.emit("文件识别失败")

# ========================================================
# Main UI window
# ========================================================

class RealTimeASRApp(QMainWindow):
    """Main window: wires the capture/VAD/ASR worker threads to the PyQt5 GUI."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("智能语音识别系统")
        self.setGeometry(100, 100, 1000, 700)

        # Model directories passed to the ASR processor
        self.model_paths = {
            'asr': AUDIO_MODE,
            'vad': VAD_MODEL,
            'punc': PUNC_MODEL
        }

        # ASR processor (shared by the realtime and file-processing workers)
        self.asr_processor = SmartASRProcessor(self.model_paths)

        # Signal hub for worker-thread -> GUI communication
        self.signals = RecognitionSignals()

        # Worker-thread handles and shared pipeline state
        self.audio_capture_thread = None
        self.vad_process_thread = None
        self.asr_process_thread = None
        self.file_processing_thread = None
        self.stop_event = threading.Event()  # shared stop flag for capture + VAD threads
        self.speech_buffer = SpeechBuffer()
        self.audio_queue = queue.Queue()  # raw PCM chunks: capture thread -> VAD thread

        # Build the widget tree
        self.init_ui()

        # Connect cross-thread signals to GUI-thread slots
        self.signals.result_received.connect(self.append_result)
        self.signals.file_result_received.connect(self.display_file_result)
        self.signals.status_update.connect(self.update_status)
        self.signals.progress_update.connect(self.update_progress)

        # Eagerly load the model and show the outcome.
        # NOTE(review): load_model() runs on the GUI thread here and may block
        # window startup while the model loads — confirm this is acceptable.
        self.check_model_status()

    def init_ui(self):
        """Build the central widget: title, control panel, status panel, result view."""
        # Central widget and main layout
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        main_layout = QVBoxLayout(central_widget)

        # Title banner
        title_label = QLabel("智能语音识别系统")
        title_label.setFont(QFont("Arial", 16, QFont.Bold))
        title_label.setAlignment(Qt.AlignCenter)
        title_label.setStyleSheet("padding: 10px; background-color: #f0f0f0; border-radius: 5px;")
        main_layout.addWidget(title_label)

        # Control panel
        control_group = QGroupBox("控制面板")
        control_layout = QHBoxLayout(control_group)

        # Realtime-recording toggle button
        self.realtime_btn = QPushButton("开始实时录音")
        self.realtime_btn.setStyleSheet("background-color: #4CAF50; color: white; padding: 8px;")
        self.realtime_btn.clicked.connect(self.toggle_realtime_recording)
        control_layout.addWidget(self.realtime_btn)

        # File-selection button
        self.file_btn = QPushButton("选择音频文件")
        self.file_btn.setStyleSheet("background-color: #2196F3; color: white; padding: 8px;")
        self.file_btn.clicked.connect(self.select_audio_file)
        control_layout.addWidget(self.file_btn)

        # Model-status label
        self.model_status = QLabel("模型状态: 未加载")
        control_layout.addWidget(self.model_status)

        main_layout.addWidget(control_group)

        # Status panel
        status_group = QGroupBox("系统状态")
        status_layout = QVBoxLayout(status_group)

        # Status text
        self.status_label = QLabel("就绪")
        self.status_label.setStyleSheet("padding: 5px; background-color: #e9f7ef; border: 1px solid #d4d4d4;")
        status_layout.addWidget(self.status_label)

        # Progress bar (driven by the progress_update signal)
        self.progress_bar = QProgressBar()
        self.progress_bar.setRange(0, 100)
        self.progress_bar.setValue(0)
        self.progress_bar.setTextVisible(False)
        status_layout.addWidget(self.progress_bar)

        main_layout.addWidget(status_group)

        # Result display
        result_group = QGroupBox("识别结果")
        result_layout = QVBoxLayout(result_group)

        # Read-only result text box
        self.result_text = QTextEdit()
        self.result_text.setReadOnly(True)
        self.result_text.setFont(QFont("Consolas", 10))
        self.result_text.setStyleSheet("background-color: #f8f8f8; border: 1px solid #d4d4d4;")
        result_layout.addWidget(self.result_text)

        main_layout.addWidget(result_group, 1)

        # Bottom status bar
        self.statusBar().showMessage("就绪")

    def check_model_status(self):
        """Load the ASR model (if needed) and reflect the outcome in the status label."""
        if self.asr_processor.load_model():
            self.model_status.setText("模型状态: 已加载")
            self.model_status.setStyleSheet("color: green;")
        else:
            self.model_status.setText("模型状态: 加载失败")
            self.model_status.setStyleSheet("color: red;")

    def toggle_realtime_recording(self):
        """Start realtime recording if the capture thread is idle; stop it otherwise."""
        if not self.audio_capture_thread or not self.audio_capture_thread.is_alive():
            self.start_realtime_recording()
        else:
            self.stop_realtime_recording()

    def start_realtime_recording(self):
        """Spin up the capture, VAD and ASR worker threads and update the UI."""
        # Clear the shared stop flag from any previous session
        self.stop_event.clear()

        # Fresh speech buffer for the new session
        self.speech_buffer = SpeechBuffer()

        # Start the audio-capture thread
        self.audio_capture_thread = AudioCaptureThread(
            self.audio_queue, self.stop_event, self.signals.status_update
        )
        self.audio_capture_thread.start()

        # Start the VAD-processing thread
        self.vad_process_thread = VADProcessThread(
            self.audio_queue, self.speech_buffer, self.stop_event
        )
        self.vad_process_thread.start()

        # Start the ASR-processing thread (owns its own stop_event)
        self.asr_process_thread = ASRProcessThread(
            self.speech_buffer, self.asr_processor, self.signals
        )
        self.asr_process_thread.start()

        # Flip the button into "stop" state
        self.realtime_btn.setText("停止实时录音")
        self.realtime_btn.setStyleSheet("background-color: #f44336; color: white; padding: 8px;")
        self.signals.status_update.emit("实时录音已启动...")
        self.result_text.clear()

    def stop_realtime_recording(self):
        """Signal all realtime worker threads to stop and restore the UI."""
        # Stop the capture + VAD threads via the shared event
        self.stop_event.set()

        # Give the workers a moment to exit
        if self.audio_capture_thread and self.audio_capture_thread.is_alive():
            self.audio_capture_thread.join(timeout=1.0)

        if self.vad_process_thread and self.vad_process_thread.is_alive():
            self.vad_process_thread.join(timeout=1.0)

        # The ASR thread uses its own stop_event, set it separately
        if self.asr_process_thread:
            self.asr_process_thread.stop_event.set()
            if self.asr_process_thread.is_alive():
                self.asr_process_thread.join(timeout=1.0)

        # Flip the button back into "start" state
        self.realtime_btn.setText("开始实时录音")
        self.realtime_btn.setStyleSheet("background-color: #4CAF50; color: white; padding: 8px;")
        self.signals.status_update.emit("实时录音已停止")

    def select_audio_file(self):
        """Prompt for an audio file and hand it to a FileProcessingThread."""
        if self.file_processing_thread and self.file_processing_thread.is_alive():
            self.signals.status_update.emit("请等待当前文件处理完成")
            return

        options = QFileDialog.Options()
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择音频文件", "", 
            "音频文件 (*.wav *.mp3 *.flac);;所有文件 (*)", 
            options=options
        )

        if file_path:
            # Launch the file-processing worker
            self.file_processing_thread = FileProcessingThread(
                file_path, self.asr_processor, self.signals
            )
            self.file_processing_thread.start()

            # Clear previous results and announce the job
            self.result_text.clear()
            self.signals.status_update.emit(f"开始处理文件: {os.path.basename(file_path)}")

    def append_result(self, text, segment_id):
        """Append one realtime recognition result to the result view."""
        self.result_text.moveCursor(QTextCursor.End)
        self.result_text.insertPlainText(text)
        self.result_text.insertPlainText("\n" + "═" * 10 + "\n\n")
        self.result_text.moveCursor(QTextCursor.End)

    def display_file_result(self, text):
        """Replace the result view with a whole-file recognition result."""
        self.result_text.setPlainText(text)

    def update_status(self, message):
        """Show a status message in both the status label and the status bar."""
        self.status_label.setText(message)
        self.statusBar().showMessage(message)

    def update_progress(self, value):
        """Set the progress-bar value (0-100)."""
        self.progress_bar.setValue(value)

    def closeEvent(self, event):
        """Ensure all worker threads are stopped before the window closes."""
        self.stop_realtime_recording()

        if self.file_processing_thread and self.file_processing_thread.is_alive():
            self.file_processing_thread.stop_event.set()
            self.file_processing_thread.join(timeout=2.0)

        event.accept()

# ========================================================
# Application entry point
# ========================================================

if __name__ == "__main__":
    # Create the Qt application, show the main window, and run the event loop.
    application = QApplication(sys.argv)
    main_window = RealTimeASRApp()
    main_window.show()
    sys.exit(application.exec_())