import threading
import queue
import time
import numpy as np
import torch
import pyaudio
import platform
import sys

# Windows系统音频捕获支持
if platform.system() == "Windows":
    try:
        import soundcard as sc

        WINDOWS_AUDIO_AVAILABLE = True
    except ImportError:
        print("⚠️ 缺少soundcard库，无法捕获系统音频输出")
        print("请运行: pip install soundcard")
        WINDOWS_AUDIO_AVAILABLE = False
else:
    WINDOWS_AUDIO_AVAILABLE = False

try:
    import librosa

    USE_LIBROSA = True
except ImportError:
    import torchaudio

    USE_LIBROSA = False

from faster_whisper import WhisperModel
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM


# === 音频流采集类（支持mic、文件和系统音频输出） ===
class AudioStream:
    """Audio chunk source for the pipeline (microphone, system loopback, or file).

    Yields mono float32 numpy arrays of ``chunk`` samples in [-1.0, 1.0] at
    ``rate`` Hz — the format the downstream Whisper model expects.
    """

    def __init__(self, source="mic", file_path=None, rate=16000, chunk_duration=2):
        """
        Args:
            source: "mic", "system" (Windows loopback capture), or "file".
            file_path: path to an audio file; required when source == "file".
            rate: target sample rate in Hz.
            chunk_duration: seconds of audio per yielded chunk.
        """
        self.rate = rate
        self.chunk = int(rate * chunk_duration)  # samples per yielded chunk
        self.source = source
        self.file_path = file_path

        if source == "mic":
            self.audio_interface = pyaudio.PyAudio()
        elif source == "system":
            # Fail fast on non-Windows instead of hitting an AttributeError
            # later in generator() because audio_interface was never created.
            if platform.system() != "Windows":
                raise ValueError("系统音频捕获目前仅支持Windows系统")
            self.audio_interface = pyaudio.PyAudio()

    @staticmethod
    def _pcm16_to_mono(data, channels=2):
        """Convert interleaved int16 PCM bytes to a mono float32 array in [-1, 1]."""
        audio = np.frombuffer(data, dtype=np.int16).astype(np.float32) / 32768.0
        if channels > 1:
            # Down-mix interleaved frames; Whisper expects a single channel.
            audio = audio.reshape(-1, channels).mean(axis=1)
        return audio

    def _init_system_audio(self):
        """Initialize system-audio (loopback) capture via the soundcard library."""
        try:
            # Default speaker == the device whose output we want to capture.
            self.default_speaker = sc.default_speaker()
            print(f"🔊 找到默认音频输出设备: {self.default_speaker.name}")

            # List every available output device so the user can verify.
            speakers = sc.all_speakers()
            print("📋 可用音频输出设备:")
            for i, speaker in enumerate(speakers):
                marker = " <- 当前使用" if speaker.name == self.default_speaker.name else ""
                print(f"   {i}: {speaker.name}{marker}")

            print("🔧 测试系统音频录制功能...")

        except Exception as e:
            print(f"❌ 初始化系统音频失败: {e}")
            raise

    def generator(self):
        """Yield successive mono float32 chunks from the configured source.

        Raises:
            ValueError: if the source is unsupported (or "file" without a path).
        """
        if self.source == "mic":
            stream = self.audio_interface.open(
                format=pyaudio.paInt16,
                channels=2,
                rate=self.rate,
                input=True,
                frames_per_buffer=self.chunk,
            )
            print("🎙️ 使用麦克风输入...")

            while True:
                data = stream.read(self.chunk, exception_on_overflow=False)
                # BUGFIX: the stream is stereo, so the raw buffer is interleaved
                # L/R samples (2x chunk length). Down-mix to mono before yielding;
                # previously the interleaved buffer was fed to the ASR model as-is.
                yield self._pcm16_to_mono(data)

        elif self.source == "system":
            # Query the default input device once (previously queried twice).
            device_info = self.audio_interface.get_default_input_device_info()
            input_device_index = device_info.get("index")
            input_device_name = device_info.get("name")
            print(f"🔊 开始捕获系统音频{input_device_name}输出...")

            stream = self.audio_interface.open(
                format=pyaudio.paInt16,
                channels=2,
                rate=self.rate,
                input=True,
                frames_per_buffer=self.chunk,
                input_device_index=input_device_index,
            )

            while True:
                data = stream.read(self.chunk, exception_on_overflow=False)
                # Same stereo down-mix fix as the mic branch.
                yield self._pcm16_to_mono(data)

        elif self.source == "file" and self.file_path:
            if USE_LIBROSA:
                # librosa resamples and down-mixes in one call.
                audio, sr = librosa.load(self.file_path, sr=self.rate, mono=True)
                audio = audio.astype(np.float32)
            else:
                waveform, sr = torchaudio.load(self.file_path)
                if sr != self.rate:
                    resampler = torchaudio.transforms.Resample(sr, self.rate)
                    waveform = resampler(waveform)
                audio = waveform.mean(dim=0).numpy()  # channel-average to mono

            print(f"📁 加载音频文件: {self.file_path}")
            print(f"   采样率: {sr} -> {self.rate}")
            print(f"   时长: {len(audio) / self.rate:.2f}秒")

            chunk_samples = self.chunk
            for start in range(0, audio.shape[0], chunk_samples):
                end = start + chunk_samples
                chunk = audio[start:end]
                if len(chunk) < chunk_samples:
                    # Zero-pad the final partial chunk to a constant length.
                    chunk = np.pad(chunk, (0, chunk_samples - len(chunk)), mode='constant')
                yield chunk.astype(np.float32)
                time.sleep(0.1)  # throttle so downstream queues aren't flooded
        else:
            raise ValueError("不支持的音频源")

    def _fallback_system_audio_capture(self):
        """Fallback system-audio capture using a PyAudio loopback/"stereo mix" device.

        Never raises to the caller: on total failure it degrades to an endless
        silent stream so the pipeline keeps running.
        """
        print("🔄 使用PyAudio WASAPI备用方法...")
        try:
            audio = pyaudio.PyAudio()

            # Scan all devices for one that exposes the system output as input.
            print("🔍 搜索可用的音频设备...")
            loopback_device = None

            for i in range(audio.get_device_count()):
                try:
                    dev_info = audio.get_device_info_by_index(i)
                    dev_name = dev_info['name'].lower()

                    # Names that typically indicate a loopback capture device.
                    keywords = ['loopback', 'stereo mix', 'what u hear', '立体声混音']
                    if any(keyword in dev_name for keyword in keywords):
                        if dev_info['maxInputChannels'] > 0:
                            loopback_device = dev_info
                            print(f"✅ 找到loopback设备: {dev_info['name']}")
                            break

                    # WASAPI input devices are an acceptable second choice.
                    if 'wasapi' in dev_name and dev_info['maxInputChannels'] > 0:
                        print(f"🔍 发现WASAPI设备: {dev_info['name']}")
                        if loopback_device is None:  # keep searching for a better match
                            loopback_device = dev_info

                except Exception:
                    continue  # unreadable device entry — skip it

            if loopback_device:
                print(f"🎯 使用设备: {loopback_device['name']}")

                try:
                    channels = min(int(loopback_device['maxInputChannels']), 2)  # cap at stereo
                    stream = audio.open(
                        format=pyaudio.paInt16,
                        channels=channels,
                        rate=self.rate,
                        input=True,
                        input_device_index=loopback_device['index'],
                        frames_per_buffer=self.chunk
                    )

                    print("✅ 音频流开启成功")

                    while True:
                        try:
                            data = stream.read(self.chunk, exception_on_overflow=False)
                            audio_np = self._pcm16_to_mono(data, channels=channels)

                            # Normalize length so consumers always get full chunks.
                            if len(audio_np) < self.chunk:
                                audio_np = np.pad(audio_np, (0, self.chunk - len(audio_np)), mode='constant')
                            elif len(audio_np) > self.chunk:
                                audio_np = audio_np[:self.chunk]

                            yield audio_np.astype(np.float32)

                        except Exception as e:
                            # Keep the stream alive: emit silence and retry.
                            print(f"⚠️ 读取音频数据错误: {e}")
                            yield np.zeros(self.chunk, dtype=np.float32)
                            time.sleep(0.1)

                except Exception as e:
                    print(f"❌ 无法打开音频流: {e}")
                    raise

            else:
                print("❌ 未找到合适的loopback设备")
                print("💡 提示: 您可能需要在系统中启用'立体声混音'设备")
                print("   或安装虚拟音频线缆软件如VB-Cable")
                raise RuntimeError("无可用的系统音频捕获设备")

        except Exception as e:
            print(f"❌ 备用方法失败: {e}")
            print("🔄 返回静音数据流...")
            # Degrade gracefully: silent chunks at real-time pace.
            while True:
                yield np.zeros(self.chunk, dtype=np.float32)
                time.sleep(self.chunk / self.rate)


# === 识别模型 ===
class SpeechRecognizer:
    """Chunked English speech recognition backed by faster-whisper."""

    def __init__(self, model_size="medium", device="cuda" if torch.cuda.is_available() else "cpu"):
        """Load the Whisper model on the given device (CUDA if available)."""
        print(f"🧠 识别模型采用设备: {device}")
        # int8 quantization keeps memory low on both CPU and GPU. The original
        # conditional `"int8" if device == "cuda" else "int8"` picked "int8"
        # in both branches, so the dead conditional was removed.
        self.model = WhisperModel(model_size, device=device, compute_type="int8")

    def transcribe(self, audio: np.ndarray) -> str:
        """Transcribe one mono float32 chunk; returns "" for near-silent input."""
        # Skip near-silent chunks to avoid wasted inference and hallucinated text.
        if np.max(np.abs(audio)) < 0.001:
            return ""

        segments, _ = self.model.transcribe(audio, language="en", beam_size=5)
        return " ".join(seg.text.strip() for seg in segments)


# === 翻译模型 ===
class Translator:
    """NLLB-based text translator (default: English -> Simplified Chinese)."""

    def __init__(self, model_name="facebook/nllb-200-distilled-600M", target_lang="zho_Hans"):
        """Load tokenizer and seq2seq model; resolve the target-language token id."""
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"🌍 翻译模型采用设备: {device}")
        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
        self.target_lang = target_lang

        self.target_lang_id = self._get_lang_id(target_lang)
        print(f"🔤 翻译目标语言: {target_lang} (ID: {self.target_lang_id})")

    def _get_lang_id(self, lang_code):
        """Resolve an NLLB language code (e.g. "zho_Hans") to its token id.

        Falls back to encoding the code, then to the Simplified-Chinese token.
        """
        try:
            return self.tokenizer.convert_tokens_to_ids(lang_code)
        except Exception:  # narrowed from a bare except
            try:
                tokens = self.tokenizer.encode(lang_code, add_special_tokens=False)
                return tokens[0] if tokens else None
            except Exception:  # narrowed from a bare except
                print(f"[警告] 无法找到语言代码 {lang_code}，使用默认中文")
                return self.tokenizer.convert_tokens_to_ids("zho_Hans")

    def translate(self, text: str) -> str:
        """Translate text to the target language.

        Returns "" for blank input, or an error-marker string on failure
        (the pipeline must not crash on a single bad chunk).
        """
        if not text.strip():
            return ""

        try:
            inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=512).to(self.model.device)

            generate_kwargs = {
                'max_length': 128,
                'num_beams': 2,
                'early_stopping': True,
                'do_sample': False
            }

            if self.target_lang_id is not None:
                # NLLB convention: force the decoder to start in the target language.
                generate_kwargs['forced_bos_token_id'] = self.target_lang_id

            # Inference only — skip autograd bookkeeping.
            with torch.no_grad():
                translated = self.model.generate(**inputs, **generate_kwargs)
            result = self.tokenizer.decode(translated[0], skip_special_tokens=True)

            return result.strip()

        except Exception as e:
            print(f"[翻译内部错误] {e}")
            return f"[翻译失败: {text}]"


# === 多线程实时处理器 ===
class RealTimeProcessorMT:
    """Three-thread real-time pipeline: audio capture -> ASR -> translation."""

    def __init__(self, input_source="mic", file_path=None):
        self.audio_stream = AudioStream(source=input_source, file_path=file_path)
        self.recognizer = SpeechRecognizer()
        self.translator = Translator()

        self.audio_queue = queue.Queue(maxsize=1024)  # raw audio chunks
        self.text_queue = queue.Queue(maxsize=1024)   # recognized English text
        self.running = True  # cooperative shutdown flag checked by all workers

    def audio_producer(self):
        """Capture thread: push audio chunks onto audio_queue."""
        try:
            for audio_chunk in self.audio_stream.generator():
                if not self.running:
                    break
                try:
                    self.audio_queue.put(audio_chunk, timeout=0.1)
                except queue.Full:
                    # Drop the oldest chunk so the stream stays near real time.
                    print("🔄 清空音频队列，保留最新数据")
                    try:
                        self.audio_queue.get_nowait()
                        self.audio_queue.put(audio_chunk, timeout=0.1)
                    except (queue.Empty, queue.Full):
                        # A consumer raced us; losing one chunk is acceptable.
                        # (Narrowed from a bare except that hid real errors.)
                        pass
        except Exception as e:
            print(f"[音频采集错误] {e}")
            self.running = False  # signal the other workers to stop

    def asr_worker(self):
        """Recognition thread: audio_queue -> text_queue."""
        while self.running:
            try:
                # Timeout keeps the loop responsive to the shutdown flag.
                audio_chunk = self.audio_queue.get(timeout=1)
            except queue.Empty:
                continue
            try:
                text = self.recognizer.transcribe(audio_chunk)
                if text.strip():
                    self.text_queue.put(text)
            except Exception as e:
                print(f"[识别错误] {e}")

    def translation_worker(self):
        """Translation thread: text_queue -> console output."""
        while self.running:
            try:
                text = self.text_queue.get(timeout=1)
            except queue.Empty:
                continue
            try:
                zh_text = self.translator.translate(text)
                print(f"[识别] {text}")
                print(f"[翻译] {zh_text}\n")
            except Exception as e:
                print(f"[翻译错误] {e}")

    def run(self):
        """Start all worker threads and block until Ctrl+C."""
        threads = [
            threading.Thread(target=self.audio_producer, daemon=True),
            threading.Thread(target=self.asr_worker, daemon=True),
            threading.Thread(target=self.translation_worker, daemon=True),
        ]

        for t in threads:
            t.start()

        print("🚀 多线程实时识别+翻译启动，按 Ctrl+C 停止...")

        try:
            while True:
                time.sleep(0.1)
        except KeyboardInterrupt:
            print("🛑 停止中...")
            self.running = False
            # Join with a timeout instead of a blind sleep: returns as soon as
            # the workers observe the flag, and never hangs on a stuck thread.
            for t in threads:
                t.join(timeout=1)


def check_dependencies():
    """Verify required third-party libraries are importable; exit(1) if not."""
    # (module to import, name to display when missing)
    required = [
        ("faster_whisper", "faster-whisper"),
        ("transformers", "transformers"),
        ("pyaudio", "pyaudio"),
    ]
    if platform.system() == "Windows":
        required.append(("soundcard", "soundcard (Windows系统音频捕获)"))

    missing_deps = []
    for module_name, display_name in required:
        try:
            __import__(module_name)
        except ImportError:
            missing_deps.append(display_name)

    if not missing_deps:
        return

    print("❌ 缺少以下依赖库:")
    for dep in missing_deps:
        print(f"   - {dep}")
    print("\n请安装缺少的依赖库:")
    print("pip install faster-whisper transformers pyaudio")
    if platform.system() == "Windows":
        print("pip install soundcard  # Windows系统音频捕获")
    sys.exit(1)


if __name__ == "__main__":
    import argparse

    # Verify required third-party libraries before loading any models.
    check_dependencies()

    parser = argparse.ArgumentParser(description="多线程实时语音识别+翻译")
    parser.add_argument("--source", choices=["mic", "file", "system"], default="mic",
                        help="音频输入源 (mic: 麦克风, file: 文件, system: 系统音频输出)")
    parser.add_argument("--file", type=str, help="音频文件路径，仅source=file时有效")
    args = parser.parse_args()

    # System-audio capture is only supported on Windows with soundcard installed.
    if args.source == "system":
        if platform.system() != "Windows":
            print("❌ 系统音频捕获目前仅支持Windows系统")
            sys.exit(1)
        if not WINDOWS_AUDIO_AVAILABLE:
            print("❌ 需要安装soundcard库以支持系统音频捕获")
            print("请运行: pip install soundcard")
            sys.exit(1)

    # Validate the audio file path before constructing the pipeline.
    if args.source == "file":
        if not args.file:
            print("❌ 使用文件输入时必须指定 --file 参数")
            sys.exit(1)
        import os

        if not os.path.exists(args.file):
            print(f"❌ 文件不存在: {args.file}")
            sys.exit(1)

    print(f"🎯 音频源: {args.source}")
    if args.source == "system":
        print("💡 提示: 程序将捕获您电脑的音频输出（如播放器、浏览器等发出的声音）")

    # args.file is None unless source == "file" (validated above).
    processor = RealTimeProcessorMT(input_source=args.source, file_path=args.file)
    processor.run()
