import sys
import os
import tkinter as tk
from tkinter import ttk
import threading
import time
from faster_whisper import WhisperModel
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
import speech_recognition as sr
import torch

# Dynamic path setup: when frozen by PyInstaller, resources live in the
# sys._MEIPASS extraction dir; otherwise resolve relative to this file.
BASE_DIR = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
MODEL_PATH = os.path.join(BASE_DIR, "faster-whisper-large-v3")
M2M_PATH = os.path.join(BASE_DIR, "m2m100_1.2B")
TEMP_DIR = os.path.join(BASE_DIR, "temp")
# exist_ok avoids the check-then-create race of an exists() guard.
os.makedirs(TEMP_DIR, exist_ok=True)

# Supported language codes mapped to their Chinese display names.
LANG_CODES = {
    "en": "英语", "de": "德语", "id": "印尼语", "ja": "日语", "zh": "汉语",
    "th": "泰语", "fr": "法语", "ru": "俄语", "bn": "孟加拉语", "ms": "马来语",
    "es": "西班牙语", "it": "意大利语", "pt": "葡萄牙语", "hi": "印地语", "ar": "阿拉伯语",
    "ko": "韩语", "vi": "越南语", "tr": "土耳其语", "pl": "波兰语", "nl": "荷兰语",
}

class BilingualSubtitleGUI:
    """Always-on-top subtitle bar: transcribes microphone audio with
    faster-whisper and translates it with M2M100, appending timestamped
    original/translated pairs to a scrolling text area.
    """

    def __init__(self, root):
        """Build the widgets and load both models.

        Args:
            root: the tk.Tk root window to attach the UI to.

        Raises:
            FileNotFoundError: if the Whisper or M2M100 model directory is
                missing, or a required Whisper file is absent.
        """
        self.root = root
        # Center the window horizontally and sit it near the bottom of the
        # screen, like a subtitle bar.
        screen_width = root.winfo_screenwidth()
        screen_height = root.winfo_screenheight()
        window_width = 900
        window_height = 200
        x = (screen_width - window_width) // 2
        y = screen_height - window_height - 50  # 50 px above the bottom edge
        self.root.geometry(f"{window_width}x{window_height}+{x}+{y}")
        self.root.attributes("-topmost", True)  # always on top
        self.root.attributes("-alpha", 0.8)  # semi-transparent, 80% opaque

        # ttk styling. Custom.TButton is defined up front so the ON/OFF
        # button can be created with its final style directly (the original
        # configured the button twice and added <Enter>/<Leave> bindings
        # that re-applied the same style — both no-ops, removed).
        self.style = ttk.Style()
        self.style.theme_use("clam")
        self.style.configure("TLabel", background="#FFFFFF", font=("Helvetica", 14))
        self.style.configure("TCombobox", fieldbackground="#FFFFFF", background="#FFFFFF", padding=5)
        self.style.configure("TButton", padding=5, font=("Helvetica", 14, "bold"))
        self.style.configure("Custom.TButton", background="#FF6B6B", foreground="#FFFFFF")
        self.style.map(
            "Custom.TButton",
            background=[("active", "#FF8787"), ("!active", "#FF6B6B")],
            foreground=[("active", "#FFFFFF"), ("!active", "#FFFFFF")],
        )

        # Main container.
        self.main_frame = tk.Frame(self.root, bg="#FFFFFF")
        self.main_frame.pack(fill="x", padx=10, pady=5)

        # Control row: ON/OFF button + microphone / source / target selectors.
        control_frame = tk.Frame(self.main_frame, bg="#FFFFFF")
        control_frame.pack(fill="x")

        # Recognition on/off toggle. self.status mirrors self.running and is
        # kept for backward compatibility with the original attribute.
        self.status = False
        self.status_button = ttk.Button(
            control_frame,
            text="OFF",
            command=self.toggle_status,
            style="Custom.TButton",
        )
        self.status_button.pack(side="left", padx=5)

        # Microphone device-index selector (indices 0-9).
        self.mic_label = ttk.Label(control_frame, text="麦克风:")
        self.mic_label.pack(side="left", padx=5)
        self.mic_var = tk.StringVar(value="0")
        self.mic_dropdown = ttk.Combobox(
            control_frame, textvariable=self.mic_var, values=[str(i) for i in range(10)],
            width=5, state="readonly"
        )
        self.mic_dropdown.pack(side="left", padx=5)

        # Source-language selector; entries render as "code - name".
        self.src_lang_label = ttk.Label(control_frame, text="源语言:")
        self.src_lang_label.pack(side="left", padx=5)
        self.src_lang_var = tk.StringVar(value="en")
        self.src_lang_dropdown = ttk.Combobox(
            control_frame, textvariable=self.src_lang_var,
            values=[f"{code} - {name}" for code, name in LANG_CODES.items()],
            width=15, state="readonly"
        )
        self.src_lang_dropdown.pack(side="left", padx=5)

        # Target-language selector.
        self.tgt_lang_label = ttk.Label(control_frame, text="目的语言:")
        self.tgt_lang_label.pack(side="left", padx=5)
        self.tgt_lang_var = tk.StringVar(value="zh")
        self.tgt_lang_dropdown = ttk.Combobox(
            control_frame, textvariable=self.tgt_lang_var,
            values=[f"{code} - {name}" for code, name in LANG_CODES.items()],
            width=15, state="readonly"
        )
        self.tgt_lang_dropdown.pack(side="left", padx=5)

        # Scrolling subtitle display area.
        self.subtitle_frame = tk.Frame(self.main_frame, bg="#333333", bd=0)
        self.subtitle_frame.pack(fill="both", expand=True, padx=10, pady=5)
        self.subtitle_text = tk.Text(
            self.subtitle_frame, height=48, width=100, bg="#FFFFFF", fg="#333333",
            font=("Helvetica", 16), bd=0, wrap="word", spacing1=2, spacing2=2
        )
        self.subtitle_text.pack(fill="both", expand=True)
        scrollbar = ttk.Scrollbar(self.subtitle_frame, orient="vertical", command=self.subtitle_text.yview)
        scrollbar.pack(side="right", fill="y")
        self.subtitle_text.config(yscrollcommand=scrollbar.set)

        # Recognition state and models.
        self.running = False
        self.recognizer = sr.Recognizer()
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        # Validate the faster-whisper model directory before loading
        # (isdir already implies exists).
        whisper_model_path = MODEL_PATH
        required_files = ["model.bin", "config.json", "vocabulary.json"]
        if not os.path.isdir(whisper_model_path):
            raise FileNotFoundError(f"Whisper模型目录不存在: {whisper_model_path}")
        for file in required_files:
            if not os.path.exists(os.path.join(whisper_model_path, file)):
                raise FileNotFoundError(f"Whisper模型缺少文件: {os.path.join(whisper_model_path, file)}")
        # float16 is GPU-only in CTranslate2; the original hard-coded
        # float16 and would fail on a CPU-only machine. int8 is the
        # standard CPU fallback.
        self.whisper_model = WhisperModel(
            whisper_model_path,
            device=self.device,
            compute_type="float16" if self.device == "cuda" else "int8",
        )

        # Validate and load the M2M100 translation model. Plain
        # from_pretrained + .to() replaces the original's accelerate-only
        # device_map kwarg and its redundant second .to("cuda").
        if not os.path.isdir(M2M_PATH):
            raise FileNotFoundError(f"M2M100模型目录不存在: {M2M_PATH}")
        self.translation_model = M2M100ForConditionalGeneration.from_pretrained(M2M_PATH).to(self.device)
        self.translation_tokenizer = M2M100Tokenizer.from_pretrained(M2M_PATH)

    def toggle_status(self):
        """Flip recognition on/off and update the button label.

        Turning on spawns a daemon worker running run_recognition; turning
        off just clears the flag — the worker notices and exits its loop.
        """
        if self.running:
            self.running = False
            self.status = False
            self.status_button.configure(text="OFF", style="Custom.TButton")
        else:
            self.running = True
            self.status = True
            self.status_button.configure(text="ON", style="Custom.TButton")
            threading.Thread(target=self.run_recognition, daemon=True).start()

    def run_recognition(self):
        """Worker loop: capture mic audio, transcribe, translate, display.

        Runs on a background thread. tkinter widgets are not thread-safe,
        so every GUI update is marshalled to the main loop via root.after
        (the original mutated widgets directly from this thread).
        """
        mic_index = int(self.mic_var.get())
        # Dropdown entries look like "en - 英语" while the initial defaults
        # are bare codes; splitting on " - " handles both forms.
        src_lang = self.src_lang_var.get().split(" - ")[0]
        tgt_lang = self.tgt_lang_var.get().split(" - ")[0]
        temp_audio_path = os.path.join(TEMP_DIR, "temp_audio.wav")

        with sr.Microphone(device_index=mic_index) as source:
            while self.running:
                try:
                    # Short timeout so a stop request is noticed quickly.
                    audio = self.recognizer.listen(source, timeout=1)
                    with open(temp_audio_path, "wb") as f:
                        f.write(audio.get_wav_data())
                    segments, _ = self.whisper_model.transcribe(temp_audio_path, language=src_lang)
                    text = " ".join(seg.text for seg in segments)
                    translated_text = self.translate_text(text, src_lang, tgt_lang)
                    self.root.after(0, self.update_subtitle, text, translated_text)
                except sr.WaitTimeoutError:
                    continue
                except Exception as e:
                    # Any other failure stops recognition; the button reset
                    # happens on the main thread.
                    print(f"发生错误: {e}")
                    self.running = False
                    self.status = False
                    self.root.after(
                        0,
                        lambda: self.status_button.configure(text="OFF", style="Custom.TButton"),
                    )
                    break
                finally:
                    # Always clean up the temp wav, even when transcription
                    # or translation raised (the original leaked it then).
                    if os.path.exists(temp_audio_path):
                        os.remove(temp_audio_path)

    def translate_text(self, text, src_lang, tgt_lang):
        """Translate *text* from src_lang to tgt_lang with M2M100.

        Returns "" for blank input. Input tensors are moved to the same
        device the model was loaded on in __init__.
        """
        if not text.strip():
            return ""
        self.translation_tokenizer.src_lang = src_lang
        # BatchEncoding.to moves every tensor in the encoding at once.
        encoded = self.translation_tokenizer(text, return_tensors="pt").to(self.device)
        generated_tokens = self.translation_model.generate(
            **encoded,
            forced_bos_token_id=self.translation_tokenizer.get_lang_id(tgt_lang),
        )
        return self.translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

    def update_subtitle(self, original, translated):
        """Append a timestamped original/translation pair and autoscroll.

        Must run on the main (tkinter) thread; run_recognition schedules
        calls here via root.after.
        """
        timestamp = time.strftime("%H:%M:%S", time.localtime())
        subtitle = f"[{timestamp}] 原文: {original}\n翻译: {translated}\n\n"
        self.subtitle_text.insert(tk.END, subtitle)
        self.subtitle_text.see(tk.END)

def _launch():
    """Create the root window, attach the GUI, and enter the event loop."""
    root = tk.Tk()
    BilingualSubtitleGUI(root)
    root.mainloop()


if __name__ == "__main__":
    _launch()