import logging
from typing import Optional
import asyncio

from models.chat_models import ChatRequest, ChatResponse, AnimationType
from services.dashscope_service import DashScopeService
from backend.config.poets_knowledge import get_poet_system_prompt, get_poet_knowledge

# Module-level logger named after this module, per the standard logging convention.
logger = logging.getLogger(__name__)

class ChatService:
    """Chat service combining AI dialogue generation with speech synthesis.

    Tracks the currently selected poet persona, builds the matching system
    prompt, and exposes a request/response API (``process_chat``) as well as
    a streaming API (``process_chat_stream``).
    """

    # Generic fallback system prompt used when no poet persona is active.
    _DEFAULT_SYSTEM_PROMPT = """你是一个友好、智能的AI助手，正在为Vision Pro的沉浸式3D环境提供服务。
你的特点：
1. 性格友好、热情，善于交流
2. 回答简洁明了，通常控制在50字以内
3. 能够理解用户的情绪并给出合适的回应
4. 喜欢使用生动的表达方式
5. 会根据对话内容表现出不同的情绪（开心、惊讶、思考等）

请用自然、亲切的语气与用户对话，让用户感受到在3D世界中与真实朋友交流的体验。"""

    def __init__(self) -> None:
        """Initialize the chat service with the default poet persona."""
        self.dashscope_service = DashScopeService()
        # Li Qingzhao is the default poet persona.
        self.current_poet: Optional[str] = "liqingzhao"
        self.system_prompt = self._get_system_prompt()

    def _get_system_prompt(self) -> str:
        """Return the system prompt for the active poet, or the generic one."""
        if self.current_poet:
            return get_poet_system_prompt(self.current_poet)
        return self._DEFAULT_SYSTEM_PROMPT

    def set_poet(self, poet_id: str) -> None:
        """Switch the active poet persona and rebuild the system prompt.

        Args:
            poet_id: Identifier of the poet to activate.
        """
        self.current_poet = poet_id
        self.system_prompt = self._get_system_prompt()
        logger.info("切换到诗人角色: %s", poet_id)

    def get_current_poet(self) -> Optional[str]:
        """Return the id of the active poet persona (None if unset)."""
        return self.current_poet

    async def _synthesize_current_voice(self, text: str):
        """Synthesize *text* using the active poet's voice.

        Shared by ``process_chat`` and ``process_chat_stream`` so the
        voice-selection logic lives in one place.

        Args:
            text: The reply text to convert to speech.

        Returns:
            tuple: ``(audio_url, duration, timestamps)`` from the TTS backend.

        Raises:
            Exception: Propagated unchanged from the DashScope TTS call;
                callers decide how to degrade.
        """
        # The poet id doubles as the TTS voice identifier.
        return await self.dashscope_service.synthesize_speech(
            text,
            voice=self.get_current_poet(),
        )

    async def process_chat(self, request: ChatRequest) -> ChatResponse:
        """Handle one chat request end to end.

        Generates the AI reply, derives emotion/animation from it, and
        attempts speech synthesis.  TTS failure is non-fatal: the response
        is returned as text-only.

        Args:
            request: The incoming chat request.

        Returns:
            ChatResponse: Reply text plus optional audio metadata.  On any
            unexpected error a canned fallback response is returned instead
            of raising, so callers always receive a ChatResponse.
        """
        try:
            logger.info("处理聊天请求: %s", request.message)

            # 1. Generate the AI reply text.
            reply_text = await self.dashscope_service.generate_response(
                message=request.message,
                system_prompt=self.system_prompt,
            )

            # 2. Derive emotion and recommended animation from the reply.
            emotion, animation = self.dashscope_service.analyze_emotion_and_action(reply_text)

            # 3. Synthesize speech; on failure degrade to a text-only reply.
            audio_url = duration = timestamps = None
            audio_error: Optional[str] = None
            try:
                audio_url, duration, timestamps = await self._synthesize_current_voice(reply_text)
                logger.info("语音合成成功")
            except Exception as e:
                audio_error = str(e)
                logger.warning("语音合成失败，将返回纯文本回复: %s", audio_error)

            # 4. Build the response object.
            response = ChatResponse(
                reply=reply_text,
                audio_url=audio_url,
                animation=AnimationType(animation),
                emotion=emotion,
                duration=duration,
                timestamps=timestamps,
            )

            # A missing audio track is logged but does not affect the reply.
            if audio_error:
                logger.info("聊天处理完成（无音频）: %s...", response.reply[:50])
            else:
                logger.info("聊天处理完成（含音频）: %s...", response.reply[:50])

            return response

        except Exception as e:
            # logger.exception records the traceback for diagnosis.
            logger.exception("聊天处理失败: %s", e)
            # Fall back to a canned apology so callers always get a response.
            return ChatResponse(
                reply="抱歉，我现在有点困惑，请稍后再试。",
                animation=AnimationType.THINKING,
                emotion="confused",
            )

    async def process_chat_stream(self, request: ChatRequest):
        """Stream chat processing as a sequence of event dicts.

        Args:
            request: The incoming chat request.

        Yields:
            dict: Events keyed by "type": "start", then "text", then either
            "audio" or "error" (on TTS failure), then "complete".  An
            unexpected error yields a single terminal "error" event.
        """
        try:
            # Tell the client we have started working on the request.
            yield {
                "type": "start",
                "message": "正在思考中...",
                "animation": "thinking",
            }

            # Generate the AI reply text.
            reply_text = await self.dashscope_service.generate_response(
                message=request.message,
                system_prompt=self.system_prompt,
            )

            # Derive emotion and recommended animation from the reply.
            emotion, animation = self.dashscope_service.analyze_emotion_and_action(reply_text)

            # Deliver the text reply as soon as it is available.
            yield {
                "type": "text",
                "reply": reply_text,
                "emotion": emotion,
                "animation": animation,
            }

            # Speech synthesis runs after the text event so a TTS failure
            # never blocks the textual reply.
            try:
                audio_url, duration, timestamps = await self._synthesize_current_voice(reply_text)
                yield {
                    "type": "audio",
                    "audio_url": audio_url,
                    "duration": duration,
                    "timestamps": timestamps,
                }
            except Exception as e:
                logger.warning("语音合成失败: %s", e)
                yield {
                    "type": "error",
                    "message": "语音合成失败",
                }

            # Signal the end of the stream.
            yield {
                "type": "complete"
            }

        except Exception as e:
            logger.exception("流式聊天处理失败: %s", e)
            yield {
                "type": "error",
                "message": f"处理失败: {str(e)}",
            }