import time
import threading
from fastapi import APIRouter, HTTPException
# from util import pyttsX
from data import use_sound, use_faster_whisper
import config
from api import svc_api, digital_human
from db.Do import BaseReq, we_library

# Router exposing the real-time voice-listener endpoints (/real_start, /real_stop, /real_status).
router = APIRouter()


# 线程安全的状态管理
class ListenerState:
    """Thread-safe lifecycle manager for the background audio listener.

    Owns at most one daemon thread running :meth:`audio_listener`, plus a
    :class:`threading.Event` used for cooperative shutdown.  All lifecycle
    transitions (start / stop / status) are serialized by ``self.lock``.
    """

    def __init__(self):
        self.lock = threading.Lock()          # serializes start/stop/status transitions
        self.thread = None                    # the listener thread, or None if never started
        self.stop_event = threading.Event()   # set() to ask the listener loop to exit
        self.current_role = None              # role id used for chat and TTS voice lookup
        self.conversation_timeout = 180       # seconds of silence before leaving conversation mode

    def start(self, role):
        """Start (or restart) the listener thread for *role*.

        Raises:
            RuntimeError: if a previous listener thread refuses to stop.
                In that case no new thread is started and the stop event is
                left set, so the old thread will still exit as soon as it
                re-checks the event.
        """
        with self.lock:
            if self.thread and self.thread.is_alive():
                self.stop_event.set()
                # The loop blocks for up to ~5s inside use_sound.recording(5)
                # before it re-checks stop_event, so wait slightly longer than
                # that (the original 1.0s join almost always timed out).
                self.thread.join(timeout=6.0)
                if self.thread.is_alive():
                    # BUGFIX: previously the stop event was cleared and a second
                    # thread was spawned even when the old one was still alive,
                    # which both "un-stopped" the old thread and left two
                    # listeners recording concurrently.
                    raise RuntimeError("previous audio listener did not stop")

            self.stop_event.clear()
            self.current_role = role
            self.thread = threading.Thread(target=self.audio_listener)
            self.thread.daemon = True  # don't block interpreter shutdown
            self.thread.start()

    def stop(self):
        """Signal the listener thread to stop and wait briefly for it.

        Returns:
            bool: True if the thread exited within the grace period; False if
            no thread was running or it is still alive after the timeout.
        """
        with self.lock:
            if not (self.thread and self.thread.is_alive()):
                return False

            self.stop_event.set()
            self.thread.join(timeout=2.0)
            return not self.thread.is_alive()

    @property
    def status(self):
        """Return ``"running"`` or ``"stopped"`` as a lock-protected snapshot."""
        with self.lock:
            return "running" if self.thread and self.thread.is_alive() else "stopped"

    def audio_listener(self):
        """Core listening loop: wake-word detection, then a conversation loop.

        Outer loop: record 1s chunks until a wake word is heard.
        Inner loop: record 5s chunks, run ASR -> LLM -> TTS -> playback, until
        ``conversation_timeout`` seconds pass without speech or the stop event
        is set.  All failures are logged and the loop keeps going (best-effort).
        """
        id_counter = 0  # conversation id threaded through the LLM API
        # Homophone variants the ASR may produce for the wake word.
        wake_words = {"小C", "小夕", "小溪", "小西", "小希", "小心"}

        while not self.stop_event.is_set():
            try:
                # 1. Wake-word stage: short 1s recordings, cheap VAD gate first.
                audio_data = use_sound.recording(1)
                if self.stop_event.is_set():
                    break

                if not use_sound.is_speak(audio_data):
                    time.sleep(0.1)
                    continue

                # 2. Speech recognition on the candidate chunk.
                recognized_text = use_faster_whisper.transcription(audio_data, "zh")
                print(f"检测到语音: {recognized_text}")

                # 3. Exact first-word wake match.
                # BUGFIX: whitespace-only transcriptions used to raise
                # IndexError here (strip().split()[0] on an empty list).
                words = recognized_text.strip().split() if recognized_text else []
                first_word = words[0] if words else ""
                if first_word not in wake_words:
                    continue

                print(f"唤醒词[{first_word}]触发，进入对话模式")
                # Acknowledge the wake word with a canned response clip.
                access_url_path = config.ROOT_DIR_WIN / config.source_audios_dir / "在呢.wav"
                use_sound.play_audio(access_url_path)

                # 4. Conversation loop.
                last_active = time.time()
                while not self.stop_event.is_set():
                    # Drop back to wake-word stage after prolonged silence.
                    if time.time() - last_active > self.conversation_timeout:
                        print("对话超时，返回唤醒状态")
                        break

                    # Longer recording window for full utterances.
                    audio_data = use_sound.recording(5)
                    if self.stop_event.is_set():
                        break

                    # Skip silent chunks without resetting the timeout clock.
                    if not use_sound.is_speak(audio_data):
                        time.sleep(0.3)
                        continue

                    # Speech detected: refresh the inactivity clock.
                    last_active = time.time()

                    # Transcribe the user's utterance.
                    user_input = use_faster_whisper.transcription(audio_data, "zh")
                    print(f"用户输入: {user_input}")

                    # 5. LLM call; on failure, fall back to a canned reply so
                    # the conversation keeps going.
                    req_data = {
                        "currentRole": self.current_role,
                        "id": id_counter,
                        "prompt": user_input
                    }
                    try:
                        response = digital_human.chat(BaseReq(**req_data))
                        model_response = response.get('resp', "抱歉，我没有理解您的意思")
                        print(f"模型回复: {model_response}")
                        id_counter = response.get('id', id_counter)
                    except Exception as e:
                        print(f"大模型调用失败: {e}")
                        model_response = "网络连接不稳定，请稍后再试"

                    # 6. Text-to-speech and playback (best-effort: any failure
                    # is logged and the conversation continues).
                    try:
                        role_info = we_library.fetch_one(
                            "SELECT role_sound_ray FROM chat_role WHERE id=?;",
                            (self.current_role,)
                        )
                        ref_wav_path = config.ROOT_DIR_WIN / config.source_audios_dir / role_info.get("role_sound_ray")

                        # NOTE(review): prompt_text is set to the reference wav
                        # filename, not a transcript — looks suspicious; confirm
                        # against svc_api.tts_endpoint's expectations.
                        tts_req_data = {
                            "ref_wav_path": str(ref_wav_path),
                            "text": model_response,
                            "prompt_text": role_info.get("role_sound_ray", "")
                        }
                        tts_response = svc_api.tts_endpoint(BaseReq(**tts_req_data))
                        audio_url = tts_response.get('vitsV4Url')

                        if audio_url:
                            use_sound.play_audio(audio_url)  # non-blocking playback
                        else:
                            print("TTS服务返回空音频URL")
                    except Exception as e:
                        print(f"语音合成失败: {e}")

            except KeyboardInterrupt:
                break
            except Exception as e:
                # Top-level guard: log, back off briefly, keep listening.
                print(f"监听异常: {e}")
                time.sleep(1)

# Module-level singleton shared by all endpoints below.
listener_state = ListenerState()

# NOTE(review): redundant — use_faster_whisper is already imported at the top
# of this file via `from data import use_sound, use_faster_whisper`; this
# re-import is a harmless no-op and can be removed.
from data import use_faster_whisper


@router.post("/real_start")
def start_listener(req: BaseReq):
    """Start the background audio listener for the requested role.

    Raises:
        HTTPException: 400 when no valid role is selected.
    """
    # BUGFIX: a missing/placeholder role is a client error, not a server
    # fault — was incorrectly reported as HTTP 500.
    if not req.currentRole or req.currentRole == "请选择角色":
        raise HTTPException(status_code=400, detail="请选择有效角色")
    # Load the ASR model up front so the listener thread doesn't pay the
    # initialization cost on its first transcription.
    use_faster_whisper.init_model()
    listener_state.start(req.currentRole)
    return {"status": "success", "message": "音频监听已启动"}


@router.get("/real_stop")
def stop_listener():
    """Ask the background audio listener to shut down and report the outcome."""
    stopped = listener_state.stop()
    if not stopped:
        return {"status": "error", "message": "停止监听失败或未运行"}
    return {"status": "success", "message": "音频监听已停止"}


@router.get("/real_status")
def get_status():
    """Report the listener's current state and the active role."""
    # BUGFIX: snapshot the status once. The property takes the lock on every
    # read, and the thread can die between two reads, so reading it twice
    # could make "status" and "message" contradict each other.
    current_status = listener_state.status
    return {
        "status": current_status,
        "current_role": listener_state.current_role,
        "message": "监听正在运行" if current_status == "running" else "监听已停止"
    }