# services/chat_service.py
import asyncio
import json
import random
import re
import time
import wave
from io import BytesIO

import numpy as np

from crud.database import Database
from model.llm_api import LangChainModel
from common.request_class import BaseChat,ChatAndSpeechRequest
from config import SYSTEM_PROMPT_MAP, MODEL_NAME, CHARACTER_MAP,AUDIO_CONFIG_MAP
from model.voice_v2pro1.local_api import pack_audio


class ChatService:
    def __init__(self, db: Database, model: LangChainModel,tts_model_manager,connection_manager):
        """Wire the chat service up with its storage, LLM, TTS and connection collaborators."""
        # Persistence layer for chat sessions / message history.
        self.db = db
        # LLM wrapper; chat() is consumed as an async stream of text chunks.
        self.model = model
        # Factory providing per-character TTS model instances (used by stream_chat_and_speech).
        self.tts_model_manager = tts_model_manager
        # WebSocket manager used to push synthesized audio chunks to the client.
        self.connection_manager = connection_manager

    def get_chat_list(self) -> list:
        """Return the chat session list with character ids mapped to display names.

        Each DB row is reshaped into the camelCase payload shape the frontend
        expects. Unknown character ids yield ``None`` (``dict.get``).
        """
        return [
            {
                "id": item["id"],
                # CHARACTER_MAP is keyed by string ids, hence the str() cast.
                "characterName": CHARACTER_MAP.get(str(item["character_id"])),
                "modelName": item["model_name"],
                "createTime": item["create_time"],
            }
            for item in self.db.select_chat_list()
        ]

    def get_character_list(self) -> list:
        """Return the static list of character descriptors."""
        # Local import keeps config loading lazy, matching the original design.
        from config import CHARACTER_INFO_LIST as character_info

        return character_info

    async def chat_stream(self, request: BaseChat):
        """Stream an LLM reply for *request*, persisting both sides of the turn.

        Yields each non-empty text chunk as it arrives. On the first turn for a
        chat_id a new session row is created carrying the character's system
        prompt. After the stream finishes, the concatenated reply is stored as
        the assistant message.
        """
        chat_id = request.chat_id

        # 1. Resolve the character's system prompt (None if the id is unknown).
        system_prompt = SYSTEM_PROMPT_MAP.get(request.character_id)

        # 2. First turn: create the session row so the history query below
        #    succeeds. (The original also built a throwaway in-memory history
        #    list here; it was dead code since the history is refetched below.)
        if not self.db.select_chat_detail(chat_id):
            self.db.insert_session(chat_id, request.character_id, request.message, system_prompt, MODEL_NAME)

        # 3. Persist the user's message before generating, so the refetched
        #    history passed to the model includes it.
        self.db.update_session_info(chat_id, "user", request.message)

        # 4. Re-read the history so the model sees the just-saved user message.
        current_history = self.db.select_chat_detail(chat_id)

        full_response = ""
        async for chunk_content in self.model.chat(current_history, request.message):
            if chunk_content:  # skip empty/None chunks
                full_response += chunk_content
                yield chunk_content

        # 5. Stream finished: store the complete assistant reply.
        self.db.update_session_info(chat_id, 'assistant', full_response)

    async def stream_chat_and_speech(self, request: BaseChat):
        """Stream the LLM text reply while synthesizing speech in parallel.

        This method is an async generator producing the SSE text stream.
        Complete sentences are pushed onto an asyncio queue consumed by a
        background TTS task, which sends audio chunks over the WebSocket
        connection manager.
        """
        # Queue handing finished sentences from the LLM producer to the TTS consumer.
        sentence_queue_for_tts = asyncio.Queue()

        chat_id = request.chat_id
        audio_config = AUDIO_CONFIG_MAP[request.character_id]

        # 1. TTS consumer (inner coroutine so it can close over request/self).
        async def tts_consumer():
            # Capture the running loop here so the worker thread below can
            # schedule coroutines back onto it thread-safely.
            main_loop = asyncio.get_running_loop()

            while True:
                sentence = await sentence_queue_for_tts.get()
                if sentence is None:  # sentinel: producer is done
                    break

                try:
                    tts_model_instance = self.tts_model_manager.get_tts_model(request.character_id)

                    tts_params = {
                        "text": sentence,
                        "text_lang": "zh",
                        "ref_audio_path": audio_config["audio_path"],
                        "prompt_text": audio_config["prompt_text"],
                        "prompt_lang": "zh",
                        "text_split_method": "cut5",
                        "save_path": "1.wav"
                    }

                    tts_generator = tts_model_instance.tts_handle(**tts_params)

                    def run_tts_in_thread(loop):
                        """Drain the blocking TTS generator; runs in a worker thread."""
                        sample_rate = None
                        try:
                            for sr, audio_chunk in tts_generator:
                                if sample_rate is None:
                                    sample_rate = sr
                                else:
                                    assert sample_rate == sr, "音频采样率不一致"
                                audio_data = audio_chunk.tobytes()

                                # run_coroutine_threadsafe is the only safe way
                                # to call into the event loop from this thread.
                                asyncio.run_coroutine_threadsafe(
                                    self.connection_manager.send_audio_chunk(self.connection_manager.client_id, audio_data),
                                    loop
                                )
                        except Exception as e:
                            print(f"TTS处理出错: {e}")

                    # Run the blocking generator off the event loop.
                    await asyncio.to_thread(run_tts_in_thread, main_loop)

                except Exception as e:
                    print(f"TTS处理出错: {e}")

        # 2. Start the TTS consumer task in the background.
        consumer_task = asyncio.create_task(tts_consumer())

        # 3. Producer: stream LLM text, cutting it into sentences for TTS.
        buffer = ""
        sentence_delimiters = re.compile(r'([。！,，？.!?\n])')
        # Strips stage directions like （...） which should not be spoken.
        non_speech_pattern = re.compile(r"（.*?）")
        full_response = ""
        system_prompt = SYSTEM_PROMPT_MAP.get(request.character_id)

        try:
            # Fetch history; create the session row on the first turn.
            message_list = self.db.select_chat_detail(request.chat_id) or []
            if not message_list:
                self.db.insert_session(chat_id, request.character_id, request.message, system_prompt, MODEL_NAME)
                message_list = [{"id": 1, "chat_id": chat_id, "message_type": "system", "content": system_prompt}]
            self.db.update_session_info(request.chat_id, "user", request.message)

            # NOTE(review): unlike chat_stream, the history is NOT refetched
            # after saving the user message — confirm self.model.chat expects
            # the pre-save history plus the message passed separately.
            llm_stream = self.model.chat(message_list, request.message)

            async for text_chunk in llm_stream:
                # Guard covers buffering too: the original appended to `buffer`
                # outside this check, so a None chunk raised TypeError.
                if not text_chunk:
                    continue
                full_response += text_chunk
                yield text_chunk

                # Buffer text and split out complete sentences for TTS.
                buffer += text_chunk
                while True:
                    match = sentence_delimiters.search(buffer)
                    if not match:
                        break
                    sentence = buffer[:match.end()].strip()
                    buffer = buffer[match.end():]
                    cleaned_sentence = non_speech_pattern.sub("", sentence).strip()
                    if cleaned_sentence:  # skip sentences emptied by cleaning
                        await sentence_queue_for_tts.put(cleaned_sentence)

            # Flush whatever remains in the buffer as a final sentence.
            if buffer.strip():
                cleaned_sentence = non_speech_pattern.sub("", buffer.strip()).strip()
                if cleaned_sentence:
                    await sentence_queue_for_tts.put(cleaned_sentence)

        finally:
            # Signal end-of-stream, persist the reply, then wait for the TTS
            # consumer to drain the queue before finishing the SSE stream.
            await sentence_queue_for_tts.put(None)
            self.db.update_session_info(chat_id, 'assistant', full_response)
            await consumer_task






    def get_chat_history(self, chat_id: str):
        """Fetch the stored message history for one chat session."""
        history = self.db.select_chat_detail(chat_id)
        return history

    def delete_chat(self, chat_id: str):
        """Delete the chat session identified by *chat_id* and return the DB result."""
        result = self.db.delete_chat(chat_id)
        return result