# web_ui.py
# -------------
# Author: Zako888y
#
# This file is dedicated to the public domain under the Creative Commons CC0 1.0 Universal (CC0 1.0) Public Domain Dedication.
# You may use, copy, modify, distribute, and/or sell this code for any purpose, without restriction. No warranty is provided. The author waives all rights and claims to this work worldwide.
# To view a copy of this dedication, visit https://creativecommons.org/publicdomain/zero/1.0/

import argparse
import asyncio
import json
import logging
import os
import queue
import time
import tkinter as tk
import threading

from collections import deque

import gradio as gr
import gradio.blocks

from blive_danmu import DanmuClient, OnDanmuListener
from tts import TTSDeviced
from stt_listener import STTListener, SpeechSegmenter

# Module-level logger; configured via logging.basicConfig() in the entry point.
logger = logging.getLogger(__name__)

# Application version shown on the "About" tab.
VERSION = "0.1.3"


class Default:
    """Fallback values for the command-line options."""

    # Where the JSON configuration is read from and written to.
    CONFIG_PATH = "config/config.json"
    # Local port the Gradio web server binds to.
    PORT = 7672


class App:

    def __init__(self, config_path=Default.CONFIG_PATH, port=Default.PORT):

        self.config_path = config_path
        self.port = port

        self.load_config()

        # 状态变量
        self.previous_text = ""
        self.current_text = ""
        self.text_lock = threading.Lock()
        self.danmu_running = False
        self.danmu_thread = None
        self.danmu_client = None
        self.danmu_queue = queue.Queue(maxsize=128)
        self.danmu_list = deque(maxlen=self.danmu_maxlen)
        self.danmu_stop_event = threading.Event()
        self.tts_deviced = TTSDeviced()
        self.window_visible = False
        self.recognition_running = False
        self.tk_root = None
        self.segmenter = None

    def tk_window(self):
        """字幕窗口"""
        root = tk.Tk()
        self.tk_root = root
        root.title("JSNEV 字幕")
        root.geometry("800x160+300+200")
        root.wm_attributes("-topmost", True)
        root.wm_attributes("-alpha", 1.0)
        root.config(bg=self.bg_color)
        root.wm_attributes("-transparentcolor", self.bg_color)
        label_prev = tk.Label(
            root,
            text=self.previous_text,
            font=(self.font_family, self.font_size - 4),
            fg=self.prev_color,
            bg=self.bg_color,
            anchor="w",
            justify="left",
        )
        label_curr = tk.Label(
            root,
            text=self.current_text,
            font=(self.font_family, self.font_size),
            fg=self.latest_color,
            bg=self.bg_color,
            anchor="w",
            justify="left",
        )
        label_prev.pack(fill="x", padx=30, pady=(20, 5))
        label_curr.pack(fill="x", padx=30, pady=(5, 20))

        def poll():
            with self.text_lock:
                new_curr = self.current_text
                new_prev = self.previous_text
            if label_curr.cget("text") != new_curr:
                label_curr.config(text=new_curr)
            if label_prev.cget("text") != new_prev:
                label_prev.config(text=new_prev)
            root.after(100, poll)

        poll()
        root.mainloop()

    def open_window(self):
        if not self.window_visible:
            self.window_visible = True
            if self.tk_root is None:
                threading.Thread(target=self.tk_window, daemon=True).start()
            else:
                try:
                    self.tk_root.deiconify()
                except Exception:
                    pass
        return "窗口已打开"

    def close_window(self):
        self.window_visible = False
        if self.tk_root:
            try:
                self.tk_root.withdraw()
            except Exception:
                pass
        self.stop_recognition()
        return "窗口已关闭"

    def on_stt_text(self, text, idx):
        with self.text_lock:
            self.previous_text = self.current_text
            self.current_text = text

    def set_subtitle_text(self, text):
        with self.text_lock:
            self.previous_text = self.current_text
            self.current_text = text

    def start_recognition(self):
        if not self.recognition_running:
            self.recognition_running = True
            if self.segmenter is None:
                listener = STTListener(on_text=self.on_stt_text)
                self.segmenter = SpeechSegmenter(listener=listener)
            self.segmenter.start()

    def stop_recognition(self):
        if self.recognition_running:
            self.recognition_running = False
            if self.segmenter:
                try:
                    self.segmenter.stop()
                except Exception as err:
                    logger.exception(err)

    def start_danmu_listener(self):
        """Start listening to the live room's danmu stream in a daemon thread.

        Every message is queued for the UI textbox; messages from the filtered
        user are pushed to the subtitle window and optionally spoken via TTS.
        """
        self.danmu_stop_event.clear()

        app = self

        class AppDanmuListener(OnDanmuListener):

            def onDanmu(self, uname, text):
                msg = f"{uname}: {text}"
                try:
                    app.danmu_queue.put(msg, timeout=0.1)
                except queue.Full:
                    # UI consumer is behind; drop the message rather than block.
                    pass
                tts_speak = False
                if app.uname_filtered and uname == app.uname_filtered:
                    app.set_subtitle_text(text)
                    if app.tts_enabled:
                        tts_speak = True
                        app.tts_speak(text)
                # Lazy %-formatting; also avoids the nested same-quote f-string
                # of the original, which is a SyntaxError before Python 3.12.
                logger.info("%s%s", "(TTS) " if tts_speak else "", msg)

        def run_client():
            uid = app.uid
            room_id = app.room_id
            if not (uid.isdigit() and room_id.isdigit()):
                # Invalid credentials: refuse to connect (silently, as before).
                return
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            listener = AppDanmuListener()
            client = DanmuClient(int(uid), int(room_id), app.token, listener)
            app.danmu_client = client

            async def main():
                try:
                    await client.start()
                except Exception:
                    pass

            task = loop.create_task(main())
            # Poll the stop event while letting the loop make progress.
            while not app.danmu_stop_event.is_set():
                loop.run_until_complete(asyncio.sleep(0.2))
            client.stop()
            # Cancel the client task before closing the loop so shutdown is
            # clean (avoids "Task was destroyed but it is pending" warnings).
            task.cancel()
            try:
                loop.run_until_complete(task)
            except asyncio.CancelledError:
                pass
            finally:
                loop.close()

        self.danmu_thread = threading.Thread(target=run_client, daemon=True)
        self.danmu_thread.start()
        self.danmu_running = True

    def tts_speak(self, text: str) -> None:
        """Speak *text* on the selected output device (text-to-speech).

        Volume and rate are re-applied from the current settings on each call,
        so UI changes take effect on the very next message.
        """
        tts_deviced = self.tts_deviced
        tts_deviced.volume = self.tts_volume
        tts_deviced.rate = self.tts_rate
        tts_deviced.tts_to_device(text, device=self.device_selection)

    def stop_danmu_listener(self):
        self.danmu_running = False
        self.danmu_stop_event.set()
        if self.danmu_client:
            try:
                self.danmu_client.stop()
            except Exception:
                pass
        self.danmu_client = None
        self.danmu_thread = None

    def get_danmu_texts(self):
        items = self.danmu_list
        while not self.danmu_queue.empty():
            items.append(self.danmu_queue.get())
        return items

    @staticmethod
    def safe_select_device(index: int) -> int:
        """Return *index* if it addresses an existing device, otherwise 1."""
        device_count = len(TTSDeviced.list_devices())
        return index if 0 <= index < device_count else 1

    @property
    def device_selection(self) -> int:
        """Validated index of the current TTS output device."""
        # Re-validate on every read: the device list may have changed.
        return self.safe_select_device(self._device_selection)

    @device_selection.setter
    def device_selection(self, index: int) -> None:
        # Sanitize before storing so the saved index is always usable.
        self._device_selection = self.safe_select_device(index)

    @property
    def device_selection_name(self) -> str:
        """获取输出设备名"""
        return TTSDeviced.list_devices()[self.device_selection]

    def load_config(self):
        """加载配置"""
        try:
            with open(self.config_path, "r", encoding="utf-8") as f:
                config = json.load(f)
        except Exception:
            config = {}

        # 用户可变参数
        self.uid = config.get("uid", "")
        self.room_id = config.get("room_id", "")
        self.token = config.get("token", "")
        self.uname_filtered = config.get("filter_uname", "")
        self.tts_enabled = config.get("tts_enabled", False)
        self.tts_volume = config.get("tts_volume", 1.0)
        self.tts_rate = config.get("tts_rate", 150)
        self.device_selection = config.get("tts_device", 1)

        # 高级参数
        self.danmu_maxlen = config.get("danmu_maxlen", App.Default.DANMU_MAXLEN)

        self.bg_color = config.get("background_color", App.Default.BACKGORUND_COLOR)
        self.latest_color = config.get(
            "latest_text_color", App.Default.LATEST_TEXT_COLOR
        )
        self.prev_color = config.get(
            "previous_text_color", App.Default.PREVIOUS_TEXT_COLOR
        )
        self.font_size = config.get("font_size", App.Default.FONT_SIZE)
        self.font_family = config.get("font_family", App.Default.FONT_FAMILY)

    def save_config(self):
        try:
            config_path = self.config_path
            if not os.path.exists(config_path):
                dir_path = os.path.dirname(config_path)
                os.makedirs(dir_path)
            with open(config_path, "r", encoding="utf-8") as f:
                config = json.load(f)
        except Exception:
            config = {}

        config["uid"] = self.uid
        config["room_id"] = self.room_id
        config["token"] = self.token
        config["filter_uname"] = self.uname_filtered
        config["tts_enabled"] = self.tts_enabled
        config["tts_volume"] = self.tts_volume
        config["tts_rate"] = self.tts_rate
        config["tts_device"] = self.device_selection

        with open(self.config_path, "w", encoding="utf-8") as f:
            json.dump(config, f, ensure_ascii=False, indent=4)

    def main(self):

        with gr.Blocks(title="JsNev") as demo:
            gr.Markdown("# 字幕窗口 & 弹幕监听控制台")

            with gr.Tab("监听直播弹幕"):
                self.tab_danmu()

            with gr.Tab("字幕窗口"):
                self.tab_subtitle()

            with gr.Tab("设备列表"):

                self.tab_device_list()

            with gr.Tab("关于"):

                self.tab_about()

            with gr.Column():

                def save_config(_):
                    self.save_config()
                    now = time.ctime()
                    return f"已保存 {now}"

                gr.Interface(
                    fn=save_config,
                    inputs=[gr.Textbox(visible=False, interactive=False)],
                    outputs=gr.Textbox(label="结果", value=""),
                    title="保存选项 (部分)",
                    description="保存到 config.json",
                    allow_flagging="never",
                    clear_btn=None,
                    submit_btn="保存",
                )

        demo.launch(server_name="127.0.0.1", server_port=self.port)

    def tab_danmu(self) -> gradio.blocks.Block:
        with gr.Column() as blk:
            gr.Markdown("## 监听B站直播弹幕")

            with gr.Row():

                with gr.Column():

                    markdown_entrance_status = gr.Markdown("# 连接直播间")

                    with gr.Group() as room_entrance:

                        # 访客 UID
                        uid_in = gr.Textbox(
                            label="访客 UID", value=self.uid, placeholder="你的 UID"
                        )

                        def set_uid(uid: str):
                            self.uid = uid

                        uid_in.change(set_uid, inputs=[uid_in])

                        # 主播直播间号
                        room_id_in = gr.Textbox(
                            label="主播直播间号",
                            value=self.room_id,
                            placeholder="主播的直播间号",
                        )

                        def set_room_id(room_id: str):
                            self.room_id = room_id

                        room_id_in.change(set_room_id, inputs=[room_id_in])

                        # Token
                        token_in = gr.Textbox(
                            label="Token",
                            value=self.token,
                            placeholder="登录获取的 token",
                        )

                        def set_token(token: str):
                            self.token = token

                        token_in.change(set_token, inputs=[token_in])

                    gr.Markdown("---")

                    # 监听弹幕
                    checkbox_danmu = gr.Checkbox(label="监听弹幕", value=False)

                    gr.Markdown('终端输出无 "验证失败" 或新弹幕正常接收, 即为认证成功')

                    def toggle_danmu(value: bool):
                        if value:
                            self.start_danmu_listener()
                            entrance_status_content = "直播间已连接"
                            logger.info("开始监听弹幕")
                        else:
                            self.stop_danmu_listener()
                            entrance_status_content = "连接直播间"
                            logger.info("暂停弹幕监听")
                        entrance_visble = not value
                        return {
                            markdown_entrance_status: f"# {entrance_status_content}",
                            room_entrance: gr.update(visible=entrance_visble),
                        }

                    checkbox_danmu.change(
                        toggle_danmu,
                        inputs=[checkbox_danmu],
                        outputs=[markdown_entrance_status, room_entrance],
                    )

                    # 弹幕输出
                    textbox_danmu_output = gr.Textbox(
                        label="弹幕输出", lines=self.danmu_maxlen
                    )

                    timer = gr.Timer(1.0)

                    def refresh_danmu_output():
                        items = self.get_danmu_texts()
                        text = "\n".join(items)
                        return text

                    timer.tick(refresh_danmu_output, outputs=[textbox_danmu_output])

                with gr.Column():

                    gr.Markdown("## 弹幕处理")

                    # 筛选用户名
                    textbox_uname_to_filter = gr.Textbox(
                        label="筛选用户名",
                        value=self.uname_filtered,
                        placeholder="需要筛选的用户名",
                    )
                    textbox_filter_uname = gr.Textbox(
                        label="正在筛选",
                        value=self.uname_filtered,
                    )

                    def set_uname_filtered(filter_uname: str):
                        self.uname_filtered = filter_uname

                    gr.Interface(
                        set_uname_filtered,
                        inputs=[textbox_uname_to_filter],
                        outputs=[textbox_filter_uname],
                        clear_btn=None,
                        allow_flagging="never",
                        submit_btn="筛选",
                    )

                    gr.Markdown("### 筛选弹幕转语音 (TTS)")

                    # TTS 开关
                    if tts_enabled := self.tts_enabled:
                        logger.info("启动 TTS 引擎")
                        self.tts_deviced.start()

                    checkbox_tts = gr.Checkbox(label="TTS 开关", value=tts_enabled)

                    def toggle_tts(value: bool):
                        if not self.tts_enabled and value:
                            logger.info("启动 TTS 引擎")
                            self.tts_deviced.start()
                        elif self.tts_enabled and not value:
                            logger.info("停止 TTS 引擎")
                            self.tts_deviced.stop()

                    checkbox_tts.change(toggle_tts, inputs=[checkbox_tts])

                    # TTS 选项
                    with gr.Row():

                        # 音量
                        number_tts_volume = gr.Number(
                            label="音量",
                            value=self.tts_volume,
                            step=0.1,
                            minimum=0.0,
                            maximum=1.0,
                        )

                        def set_tts_volume(value: float):
                            self.tts_volume = value

                        number_tts_volume.change(
                            set_tts_volume, inputs=[number_tts_volume]
                        )

                        # 语速
                        number_tts_rate = gr.Number(
                            label="语速",
                            value=self.tts_rate,
                            step=20,
                            minimum=20,
                            maximum=400,
                        )

                        def set_tts_rate(value: int):
                            self.tts_rate = value

                        number_tts_rate.change(set_tts_rate, inputs=[number_tts_rate])

                    # TTS 输出
                    with gr.Row():

                        number_device_to_select = gr.Number(
                            label="音频输出设备编号",
                            value=self.device_selection,
                            interactive=True,
                            precision=0,
                        )

                        def select_device(index: int):
                            self.device_selection = index
                            logger.info(f"选择设备 {self.device_selection_name}")
                            return gr.update(value=self.device_selection_name)

                        textbox_device_selected = gr.Textbox(
                            label="已选择设备",
                            value=self.device_selection_name,
                        )

                        number_device_to_select.change(
                            select_device,
                            inputs=[number_device_to_select],
                            outputs=[textbox_device_selected],
                        )

        return blk

    def tab_subtitle(self) -> gradio.blocks.Block:
        with gr.Column() as blk:
            gr.Markdown("## 字幕窗口控制")

            checkbox_window = gr.Checkbox(label="字幕窗口", value=self.window_visible)

            def toggle_window(value: bool):
                if value:
                    self.open_window()
                else:
                    self.close_window()
                return gr.update(value=False, visible=value)

            checkbox_stt = gr.Checkbox(label="语音识别", visible=self.window_visible)

            def toggle_stt(value: bool):
                if value:
                    self.start_recognition()
                else:
                    self.stop_recognition()

            checkbox_stt.change(toggle_stt, inputs=[checkbox_stt])

            checkbox_window.change(
                toggle_window, inputs=[checkbox_window], outputs=[checkbox_stt]
            )

        return blk

    def tab_device_list(self) -> gradio.blocks.Block:
        with gr.Column() as blk:
            gr.Markdown("# 所有音频设备（输入与输出）")

            btn_refresh = gr.Button("刷新")

            def format_device_list():
                devices = TTSDeviced.list_devices()
                if devices:
                    cnt = len(devices)
                    index_maxlen = len(str(cnt))
                    text = "\n".join(
                        [
                            f"{str(i).zfill(index_maxlen)}: {name}"
                            for i, name in enumerate(devices)
                        ]
                    )
                else:
                    text = "无可用音频设备"
                    logger.warning("无可用音频设备")
                return text

            device_list = gr.Textbox(
                label="编号: 名称",
                value=format_device_list(),
            )

            btn_refresh.click(
                format_device_list,
                outputs=[device_list],
            )
        return blk

    def tab_about(self) -> gradio.blocks.Block:
        """Build the "About" tab: version number and feature notes."""
        with gr.Column() as blk:
            gr.Markdown("## JsNev Repeater")
            gr.Markdown(f"版本 {VERSION}")

            gr.Markdown("---")

            gr.Markdown(
                """
                哔哩哔哩直播辅助软件。
                
                1. 监听直播弹幕流，筛选主播弹幕并 TTS 为音频输出。
                建议配合 RVC 实时变声器使用。
                为了简化开发，目前使用 pyttsx3，代价是只能输出纯正的机械音。

                2. 将麦克风内容 STT 到字幕窗口，从而防止观众错过部分词语。
                简化安装考虑，使用 pip 的 openai-whisper CPU 版，
                代价是 base 或以下级别效果非常差——
                尤其是在主播离麦克风较远时，它完全听不清楚，
                而 small 或以上级别则对 CPU 有一定压力。
                VS Code 的 Github Copilot 的语音识别同样是本地运行，效果却比这好得多。
                可能是用了更好的模型或者选择用 GPU 来 STT 了？
                虽然 VS Code 是开源的，但其源代码难度太高，我看不懂，
                不知道微软是怎么实现这么强大的 VS Code Speech 的。

                3. 获取音频设备列表。用于获取音频设备的 ID。输入输出全在一个表，注意不要选错了。
                """
            )
        return blk

    class Default:
        """Fallback values for config-file options (used by load_config)."""

        # Maximum number of danmu messages kept in the on-screen history.
        DANMU_MAXLEN = 8

        # Subtitle-window appearance. BACKGORUND_COLOR keeps its historical
        # misspelling because load_config references it by this exact name.
        BACKGORUND_COLOR = "#010101"
        LATEST_TEXT_COLOR = "#FFFFFF"
        PREVIOUS_TEXT_COLOR = "#F0F0F0"
        FONT_SIZE = 18
        FONT_FAMILY = "微软雅黑"


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config",
        type=str,
        default=Default.CONFIG_PATH,
        help=f"配置文件路径，默认值 {Default.CONFIG_PATH}",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=Default.PORT,
        help=f"Gradio 服务器端口，默认值 {Default.PORT}",
    )
    # BUGFIX: the old code built the parser but never called parse_args(),
    # so --config/--port (and even --help) were silently ignored and App()
    # always ran with the defaults.
    args = parser.parse_args()

    # Entry point
    logging.basicConfig(level=logging.INFO)
    App(config_path=args.config, port=args.port).main()
