# Set environment variables BEFORE any other imports so the libraries imported
# further down (transformers/tokenizers ecosystem) read them at import time.
import os
import platform

# os.environ['HF_HUB_DISABLE_PROGRESS_BARS'] = '1'  # optional: silence HF Hub progress bars
os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # disable tokenizers parallelism (avoids fork warnings)
os.environ['TRANSFORMERS_VERBOSITY'] = 'error'  # only show transformers errors

def check_dependencies():
    """Check that all required packages are importable.

    Prints pip install hints for every missing package and exits the process
    with status 1 when anything is missing; returns None when all are present.
    The required set is platform-dependent: macOS uses MLX-based Whisper,
    every other platform uses faster-whisper.
    """
    import sys

    is_mac = platform.system() == 'Darwin'
    missing_packages = []

    # Base dependencies, mapped to the pip command that installs each one.
    installation_guide = {
        'speech_recognition': 'pip install SpeechRecognition',
        'numpy': 'pip install numpy',
        'webrtcvad': 'pip install webrtcvad',
        'opencc': 'pip install opencc-python-reimplemented',
        'tqdm': 'pip install tqdm'
    }

    # MLX backends only exist on macOS.
    if is_mac:
        installation_guide.update({
            'mlx_whisper': 'pip install mlx-whisper',
            'mlx': 'pip install mlx'
        })
    else:
        # Windows/Linux environments use faster-whisper instead.
        installation_guide.update({
            'faster_whisper': 'pip install faster-whisper'
        })

    # Suppress warnings emitted while probing imports.  `warnings` is part of
    # the standard library, so no ImportError guard is needed here.
    import warnings
    warnings.filterwarnings("ignore", category=Warning)

    for package, install_cmd in installation_guide.items():
        try:
            # Probe the mlx.core submodule (the one actually used later)
            # rather than just the top-level 'mlx' package.
            if package == 'mlx':
                __import__('mlx.core')
            else:
                __import__(package)
        except ImportError:
            missing_packages.append((package, install_cmd))

    if missing_packages:
        print("\n❌ 缺少必要的依赖包，请安装以下包：")
        for package, cmd in missing_packages:
            print(f"\n📦 {package}:")
            print(f"   运行: {cmd}")
        print("\n💡 提示：建议按照上述顺序安装依赖包")
        print("🔧 如果安装过程中遇到问题，请确保您的 Python 版本 >= 3.8\n")
        # sys.exit instead of the builtin exit(): the latter is injected by the
        # `site` module and can be absent (e.g. when running with `python -S`).
        sys.exit(1)

# Verify dependencies up front (exits with status 1 and install hints if any
# are missing) so the imports below cannot fail with a raw ImportError.
check_dependencies()

# Remaining imports — safe now that check_dependencies has passed
import warnings
warnings.filterwarnings("ignore", category=Warning)

# Globally disable tqdm progress bars (libraries below create them internally)
from functools import partialmethod
from tqdm import tqdm
tqdm.__init__ = partialmethod(tqdm.__init__, disable=True)

import speech_recognition as sr
import numpy as np
import webrtcvad
import time
from opencc import OpenCC
from queue import Queue, Empty
from collections import deque
import threading

# Silence urllib3 warnings (e.g. during model downloads)
import urllib3
urllib3.disable_warnings()

# Platform-conditional backend: macOS uses MLX-accelerated Whisper,
# everything else uses faster-whisper.
is_mac = platform.system() == 'Darwin'
if is_mac:
    import mlx_whisper
    import mlx.core as mx
    mx.set_default_device(mx.gpu)  # run MLX ops on the GPU by default
else:
    from faster_whisper import WhisperModel

def check_gpu_availability():
    """Return True if hardware acceleration is usable on this machine.

    macOS: select the MLX GPU device and run a tiny smoke-test operation.
    Other platforms: report CUDA availability via torch.
    """
    if platform.system() == 'Darwin':
        try:
            # Try to select the GPU device, then run a trivial computation
            mx.set_default_device(mx.gpu)
            test_array = mx.array([1, 2, 3])
            test_array + test_array
            return True
        except Exception:
            return False
    else:
        # BUGFIX: the original imported torch unconditionally and would crash
        # with ImportError when torch is not installed (torch is not in the
        # check_dependencies list).  Treat a missing torch as "no GPU".
        try:
            import torch
        except ImportError:
            return False
        return torch.cuda.is_available()

class AudioTranscriber:
    """Real-time microphone transcriber: WebRTC-VAD segmentation feeding Whisper.

    A collector loop (vad_collector) reads 30 ms microphone frames into a ring
    buffer and cuts speech segments on pauses; a worker thread
    (transcription_worker) pops segments off a queue, transcribes them with
    MLX-Whisper (macOS) or faster-whisper (elsewhere), and prints the text.
    """

    # Segmentation control parameters (may be overridden on the class, see __main__)
    SHORT_PAUSE_THRESHOLD = 0.2  # short-pause threshold (seconds)
    MIN_PAUSES_FOR_SPLIT = 3     # minimum short pauses before a split is allowed
    LONG_PAUSE_THRESHOLD = 2.0    # a pause this long (seconds) splits immediately
    MAX_SEGMENT_DURATION = 30.0   # maximum speech-segment length (seconds)
    VERBOSE = False  # print debug info and timestamps when True

    def __init__(self):
        # VAD aggressiveness 2 (of 0-3): moderately permissive detection
        self.vad = webrtcvad.Vad(2)
        self.frame_duration = 30  # frame length in ms
        self.sample_rate = 16000
        # Number of consecutive silent frames that counts as a short pause,
        # derived from SHORT_PAUSE_THRESHOLD
        self.silence_threshold = int(self.SHORT_PAUSE_THRESHOLD * 1000 / self.frame_duration)
        self.min_speech_duration = 0.3  # minimum speech-segment length (seconds)

        # Ring buffer sized to hold the last 5 minutes of frames
        self.buffer_size = int((300.0 * 1000) / self.frame_duration)
        self.ring_buffer = deque(maxlen=self.buffer_size)
        self.current_segment_start = 0
        self.last_speech_end = 0  # end time of the previous speech segment
        self.potential_speech_start = None  # candidate speech start time
        self.speech_ongoing = False
        self.cc = OpenCC('t2s')  # Traditional -> Simplified Chinese converter
        self.last_processed_time = 0  # last processed time point
        self.transcription_queue = Queue()  # audio segments awaiting recognition
        self.is_running = True
        self.is_mac = platform.system() == 'Darwin'
        if self.is_mac:
            self.using_gpu = check_gpu_availability()
        else:
            self.model = WhisperModel("large-v3", device="cuda", compute_type="float16")
            # BUGFIX: transcribe() reads self.using_gpu on every platform, but
            # the original only assigned it on macOS, raising AttributeError
            # here.  The model above was created on CUDA, so report GPU in use.
            self.using_gpu = True

        # Audio source configuration
        self.mic = sr.Microphone(sample_rate=self.sample_rate)
        self.r = sr.Recognizer()
        self.r.energy_threshold = 4500  # energy threshold
        self.r.pause_threshold = 0.5  # more sensitive pause detection

        self.pause_count = 0  # short pauses seen inside the current segment

    def transcription_worker(self):
        """Worker thread: pop queued audio segments and transcribe them."""
        last_end_time = 0

        while self.is_running:
            try:
                # Blocking get with a timeout so the loop can notice shutdown
                try:
                    segment_data = self.transcription_queue.get(timeout=0.5)
                except Empty:
                    continue

                if not segment_data:
                    continue

                # Keep reported timestamps contiguous across segments
                if last_end_time > 0:
                    if segment_data['start_time'] > last_end_time + 0.1:
                        segment_data['start_time'] = last_end_time
                last_end_time = segment_data['end_time']

                # Wrap the raw bytes as AudioData (16-bit mono PCM)
                audio_data = sr.AudioData(
                    frame_data=segment_data['audio'],
                    sample_rate=self.sample_rate,
                    sample_width=2
                )

                try:
                    if self.is_mac:
                        # MLX-Whisper transcription; no_speech_threshold tuned
                        # to reduce spurious output on silence
                        result = mlx_whisper.transcribe(
                            np.frombuffer(audio_data.get_raw_data(), np.int16).astype(np.float32) / 32768.0,
                            path_or_hf_repo="mlx-community/whisper-large-v3-mlx",
                            language="zh",
                            task="transcribe",
                            temperature=0.0,  # low temperature for stable output
                            no_speech_threshold=0.6,
                            condition_on_previous_text=True,  # use prior context
                            initial_prompt="这是一段会议记录。"
                        )
                        text = result["text"]
                    else:
                        # faster-whisper transcription
                        audio_array = np.frombuffer(audio_data.get_raw_data(), np.int16).astype(np.float32) / 32768.0
                        segments, _ = self.model.transcribe(
                            audio_array,
                            language="zh",
                            beam_size=5,
                            temperature=0.0,  # low temperature for stable output
                            condition_on_previous_text=True,  # use prior context
                            no_speech_threshold=0.6
                        )
                        text = " ".join([segment.text for segment in segments])

                    # Post-process and print the result
                    if text.strip():
                        simplified = self.cc.convert(text)
                        # Drop a known Whisper hallucination phrase
                        if simplified == "请不吝点赞 订阅 转发 打赏支持明镜与点点栏目":
                            continue
                        duration = segment_data['end_time'] - segment_data['start_time']
                        if self.VERBOSE:
                            print(f"\033[92m[{segment_data['start_time']:.1f}s -> {segment_data['end_time']:.1f}s | {duration:.1f}s] {simplified}\033[0m")
                        else:
                            print(f"\033[92m{simplified}\033[0m")

                except Exception as e:
                    if self.VERBOSE:
                        print(f"❌ 识别错误: {str(e)}")
                    continue

            except Exception as e:
                # Unexpected error; Empty is already handled above, so the
                # original's isinstance(e, Empty) re-check was dead code.
                if self.VERBOSE:
                    print(f"❌ 处理错误: {str(e)}")
                continue

    def vad_collector(self, source):
        """Collector loop: read mic frames, run VAD, and emit speech segments."""
        try:
            frame_size = int(self.sample_rate * self.frame_duration / 1000)
            silence_counter = 0
            speech_counter = 0
            frame_count = 0
            current_pause_duration = 0

            # Debug banner only when verbose
            if self.VERBOSE:
                print("🎙️ 开始录音...")

            while self.is_running:
                chunk = source.stream.read(frame_size)
                # Skip truncated reads (16-bit samples -> 2 bytes per sample)
                if len(chunk) < frame_size * 2:
                    continue

                current_time = frame_count * (self.frame_duration / 1000.0)
                frame_count += 1

                self.ring_buffer.append((chunk, current_time))

                # VAD decision for this frame
                is_speech = self.vad.is_speech(chunk, self.sample_rate)

                if is_speech:
                    speech_counter += 1
                    # A long-enough silence run just ended: count it as a pause
                    if silence_counter >= self.silence_threshold:
                        self.pause_count += 1
                        if self.VERBOSE:
                            print(f"\n检测到停顿 #{self.pause_count}")
                    silence_counter = 0
                    current_pause_duration = 0
                    if self.VERBOSE:
                        print("\r" + " " * 50, end="\r")  # clear the status line

                    # Two consecutive speech frames mark the start of speech
                    if not self.speech_ongoing and speech_counter >= 2:
                        self.speech_ongoing = True
                        self.current_segment_start = self.last_speech_end or (current_time - 0.5)
                        if self.VERBOSE:
                            print("\n检测到语音开始")
                        speech_counter = 0
                else:
                    silence_counter += 1
                    speech_counter = 0
                    current_pause_duration = silence_counter * (self.frame_duration / 1000.0)

                    if self.speech_ongoing and self.VERBOSE:
                        print(f"\r当前停顿时长: {current_pause_duration:.1f}s", end="", flush=True)

                    if silence_counter >= self.silence_threshold and self.speech_ongoing:
                        speech_duration = current_time - self.current_segment_start
                        # (the original recomputed current_pause_duration here
                        # with identical operands — redundant, removed)

                        # Split when: enough short pauses accumulated, OR the
                        # pause is long, OR the segment hit its maximum length
                        if (self.pause_count >= self.MIN_PAUSES_FOR_SPLIT and
                            speech_duration >= self.min_speech_duration) or \
                           current_pause_duration >= self.LONG_PAUSE_THRESHOLD or \
                           (speech_duration >= self.MAX_SEGMENT_DURATION and
                            current_pause_duration >= self.SHORT_PAUSE_THRESHOLD):

                            if self.VERBOSE:
                                print(f"\n生成语音段 (停顿次数: {self.pause_count}, 持续时间: {speech_duration:.1f}s)")

                            self.speech_ongoing = False
                            segment_end_time = current_time + 0.3  # small tail padding
                            self.last_speech_end = current_time

                            speech_segment = self.generate_continuous_segment(
                                self.current_segment_start,
                                segment_end_time
                            )

                            if speech_segment:
                                segment_data = {
                                    'audio': speech_segment,
                                    'start_time': self.current_segment_start,
                                    'end_time': segment_end_time
                                }
                                self.transcription_queue.put(segment_data)
                                if self.VERBOSE:
                                    print("已将语音段加入识别队列")

                            self.pause_count = 0
                            current_pause_duration = 0

        except Exception as e:
            if self.VERBOSE:
                print(f"❌ 采集错误: {str(e)}")
            self.is_running = False

    def generate_continuous_segment(self, start_time, end_time):
        """Join ring-buffer chunks timestamped within [start_time, end_time].

        Returns the concatenated raw audio bytes, or None when no chunk
        falls inside the requested window.
        """
        segment_chunks = []
        has_valid_data = False

        for chunk, t in self.ring_buffer:
            if start_time <= t <= end_time:
                segment_chunks.append(chunk)
                has_valid_data = True

        if has_valid_data:
            return b''.join(segment_chunks)
        return None

    def transcribe(self):
        """Entry point: start the worker thread, then run the mic collector."""
        try:
            transcription_thread = threading.Thread(target=self.transcription_worker)
            transcription_thread.daemon = True
            transcription_thread.start()

            with self.mic as source:
                try:
                    self.r.adjust_for_ambient_noise(source, duration=2)

                    # Startup banner
                    if self.using_gpu:
                        print("🎤 开始转录（GPU 加速）...")
                    else:
                        print("🎤 开始转录...")

                    self.vad_collector(source)
                except AssertionError:
                    # speech_recognition raises AssertionError when the
                    # microphone stream cannot be opened
                    print("\n❌ 错误：无法访问音频输入设备")
                    print("💡 请检查：")
                    print("1. 是否已正确连接麦克风")
                    print("2. 是否已在系统设置中允许程序访问麦克风")
                    print("3. 是否已选择正确的音频输入设备")
                    self.is_running = False

        except KeyboardInterrupt:
            print("\n👋 转录已停止")
            self.is_running = False
            transcription_thread.join(timeout=1)

if __name__ == "__main__":
    import argparse

    # Command-line argument parsing.  The segmentation parameters that used
    # to be edit-the-source constants are exposed as options whose defaults
    # match the original hard-coded values, so default behavior is unchanged.
    parser = argparse.ArgumentParser(description='实时语音转录程序')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='显示详细的调试信息和时间戳')
    parser.add_argument('--short-pause', type=float, default=0.2,
                        help='短停顿阈值（秒），建议范围 0.1-0.5')
    parser.add_argument('--min-pauses', type=int, default=3,
                        help='触发分段所需的最少短停顿次数，建议范围 2-5')
    parser.add_argument('--long-pause', type=float, default=2.0,
                        help='长停顿阈值（秒），建议范围 1.0-3.0')
    parser.add_argument('--max-duration', type=float, default=30.0,
                        help='最大语音段长度（秒），建议范围 20.0-60.0')
    args = parser.parse_args()

    # ===== Global parameter configuration =====
    SHORT_PAUSE = args.short_pause
    MIN_PAUSES = args.min_pauses
    LONG_PAUSE = args.long_pause
    MAX_DURATION = args.max_duration

    # Push configuration onto the class before instantiation
    AudioTranscriber.SHORT_PAUSE_THRESHOLD = SHORT_PAUSE
    AudioTranscriber.MIN_PAUSES_FOR_SPLIT = MIN_PAUSES
    AudioTranscriber.LONG_PAUSE_THRESHOLD = LONG_PAUSE
    AudioTranscriber.MAX_SEGMENT_DURATION = MAX_DURATION
    AudioTranscriber.VERBOSE = args.verbose

    if args.verbose:
        print("\n=== 当前参数配置 ===")
        print(f"1. SHORT_PAUSE = {SHORT_PAUSE}  # 短停顿阈值（秒）")
        print(f"2. MIN_PAUSES = {MIN_PAUSES}    # 触发分段所需的最少短停顿次数")
        print(f"3. LONG_PAUSE = {LONG_PAUSE}    # 长停顿阈值（秒）")
        print(f"4. MAX_DURATION = {MAX_DURATION} # 最大语音段长度（秒）")
        print("\n可通过命令行参数（--short-pause / --min-pauses / --long-pause / --max-duration）修改以上配置\n")
    else:
        print("提示：使用 -v 或 --verbose 参数运行可显示详细信息和调试输出")

    # Start transcription
    AudioTranscriber().transcribe()