"""
角色扮演API
提供角色选择和对话的API接口，支持流式响应和知识库文件上传
支持自动生成角色提示词功能
"""

from fastapi import APIRouter, HTTPException, Depends, File, UploadFile, Form, WebSocket, WebSocketDisconnect
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel
from typing import Dict, Any, Optional, AsyncGenerator, List
import base64
import json
import asyncio
import os
import tempfile
import shutil
import logging

# 配置日志
logger = logging.getLogger(__name__)

from api.agent.role_chain_agent import RoleChainAgent
from api.models.llm_client import LLMClient
from api.models.knowledge_base import KnowledgeBase
from api.models.web_search import WebSearchTool
# 定义API模型
class RoleInfo(BaseModel):
    """Role metadata sent by the client (new-style request format)."""
    roleId: str
    voiceId: str
    roleName: str
    systemPrompt: str

class RoleChatRequest(BaseModel):
    """Chat request body; supports both the legacy and the new parameter format."""
    message: Optional[str] = None
    role_id: Optional[str] = None
    session_id: Optional[str] = None
    messages: Optional[list] = []  # conversation history messages (legacy format)
    websh: Optional[bool] = False  # enable web search when True
    
    # New-style parameter format (takes precedence in /chat/stream when set)
    roleInfo: Optional[RoleInfo] = None
    history: Optional[list] = None
    userInput: Optional[str] = None
    
class GeneratePromptRequest(BaseModel):
    """Request body for automatic role-prompt generation."""
    role_name: str
    search_depth: int = 3  # number of web-search results to gather
    language: str = "zh"  # prompt language, "zh" (Chinese, default) or other

class RoleResponse(BaseModel):
    """A selectable role as returned by the roles/select endpoints."""
    id: str
    name: str
    description: str
    voice_id: str

class ChatResponse(BaseModel):
    """Response body of the non-streaming /chat endpoint."""
    success: bool
    role: Optional[Dict[str, Any]] = None
    text: Optional[str] = None
    audio: Optional[str] = None  # Base64-encoded audio data
    error: Optional[str] = None
    
class PromptGenerationResponse(BaseModel):
    """Result of automatic role-prompt generation."""
    success: bool
    role_name: str
    system_prompt: Optional[str] = None
    search_results: Optional[List[str]] = None
    error: Optional[str] = None

# 创建路由
router = APIRouter(prefix="/role-play", tags=["角色扮演"])

# 依赖注入
def get_role_play_agent() -> RoleChainAgent:
    """FastAPI dependency: build a fresh RoleChainAgent per request."""
    return RoleChainAgent()

def get_knowledge_base() -> KnowledgeBase:
    """FastAPI dependency: build a fresh KnowledgeBase per request."""
    return KnowledgeBase()

def get_web_search_tool() -> WebSearchTool:
    """FastAPI dependency: build a fresh WebSearchTool per request."""
    return WebSearchTool()

def get_llm_client() -> LLMClient:
    """FastAPI dependency: build a fresh LLMClient per request."""
    return LLMClient()

# 角色提示词自动生成功能
async def generate_role_prompt(role_name: str, search_depth: int = 3, language: str = "zh") -> Dict[str, Any]:
    """
    Automatically generate a role-play system prompt for a role name.

    Searches the web for background information about the role, then asks
    the LLM to condense that information into a detailed system prompt.

    Args:
        role_name: Name of the role/character.
        search_depth: Number of web-search results to gather.
        language: Language of the generated prompt; "zh" (default) yields
            Chinese, anything else yields English.

    Returns:
        Dict with keys ``success`` and ``role_name``; on success also
        ``system_prompt`` and ``search_results``, on failure ``error``.
    """
    try:
        # Build our own tool instances so this helper works outside of a
        # FastAPI request context as well.
        web_search = WebSearchTool()
        llm_client = LLMClient()
        
        # Query aimed at biography / traits / background of the character.
        search_query = f"{role_name} 人物介绍 特点 背景"
        
        logger.info(f"开始搜索角色信息: {search_query}")
        search_results = []
        
        async with web_search:
            # Use Baidu AI search to collect information about the role.
            results = await web_search.search_baidu_ai(search_query, max_results=search_depth)
            logger.info(f"百度搜索的信息： {results} ")
            
            # Keep only results that actually carry text content.
            if results:
                for result in results:
                    if result.get("content"):
                        search_results.append(result["content"])
        
        if not search_results:
            return {
                "success": False,
                "error": "未找到角色相关信息",
                "role_name": role_name
            }
        
        # Merge all snippets into one context block for the LLM.
        combined_info = "\n\n".join(search_results)
        
        # System prompt instructing the LLM how to write the role prompt.
        system_prompt = """
        你是一个专业的角色提示词生成器。根据提供的角色信息，生成一个详细的角色扮演系统提示词。
        提示词应该包含以下部分：
        1. 角色身份与背景
        2. 角色说话风格
        3. 角色回答方式
        4. 限制
        
        生成的提示词应该详细、具体，能够指导AI模型准确扮演该角色。
        直接输出提示词内容，不要包含额外的解释或格式。
        """
        
        # User prompt carrying the gathered background information.
        lang_prompt = "用中文回答" if language == "zh" else "Answer in English"
        user_prompt = f"""
        根据以下关于"{role_name}"的信息，生成一个详细的角色扮演系统提示词：
        
        {combined_info}
        
        {lang_prompt}
        """
        
        logger.info(f"开始生成角色提示词: {role_name}")
        response = await llm_client.chat_completion(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ],
            temperature=0.7
        )
        
        if not response or "choices" not in response:
            return {
                "success": False,
                "error": "生成提示词失败",
                "role_name": role_name,
                "search_results": search_results
            }
        
        # Extract the generated prompt text from the completion response.
        generated_prompt = response["choices"][0]["message"]["content"]
        
        return {
            "success": True,
            "role_name": role_name,
            # FIX: the generated prompt was previously computed but never
            # returned, so callers always received system_prompt=None.
            "system_prompt": generated_prompt,
            "search_results": search_results
        }
        
    except Exception as e:
        logger.error(f"生成角色提示词时发生错误: {str(e)}")
        return {
            "success": False,
            "error": str(e),
            "role_name": role_name
        }

@router.post("/generate-prompt", response_model=PromptGenerationResponse)
async def generate_prompt(request: GeneratePromptRequest):
    """
    Automatically generate a role system prompt from a role name.

    Delegates entirely to :func:`generate_role_prompt`. The previously
    injected ``WebSearchTool``/``LLMClient`` dependencies were instantiated
    on every request but never used, so they have been removed.
    """
    result = await generate_role_prompt(
        role_name=request.role_name,
        search_depth=request.search_depth,
        language=request.language
    )
    return PromptGenerationResponse(**result)

@router.get("/roles", response_model=list[RoleResponse])
async def get_all_roles(agent: RoleChainAgent = Depends(get_role_play_agent)):
    """Return every role currently configured on the agent."""
    return agent.roles

# 知识库相关API
class KnowledgeAddRequest(BaseModel):
    """Request body for adding a text document to the knowledge base."""
    content: str
    role_id: Optional[str] = "general"

class KnowledgeSearchRequest(BaseModel):
    """Request body for searching the knowledge base."""
    query: str
    role_id: Optional[str] = None
    top_k: Optional[int] = 5

class KnowledgeResponse(BaseModel):
    """Generic response envelope for the knowledge-base endpoints."""
    success: bool
    message: str
    data: Optional[List[Dict[str, Any]]] = None

@router.post("/knowledge/add", response_model=KnowledgeResponse)
async def add_knowledge(request: KnowledgeAddRequest, kb: KnowledgeBase = Depends(get_knowledge_base)):
    """Add a piece of knowledge text to the knowledge base."""
    try:
        added = kb.add_document(request.content, role_id=request.role_id)
    except Exception as e:
        return {"success": False, "message": f"知识添加失败: {str(e)}", "data": None}
    if not added:
        return {"success": False, "message": "知识添加失败", "data": None}
    return {"success": True, "message": "知识添加成功", "data": None}

@router.post("/knowledge/search", response_model=KnowledgeResponse)
async def search_knowledge(request: KnowledgeSearchRequest, kb: KnowledgeBase = Depends(get_knowledge_base)):
    """Search the knowledge base and return the matching entries."""
    try:
        hits = kb.search(request.query, role_id=request.role_id, top_k=request.top_k)
        return {"success": True, "message": f"找到 {len(hits)} 条结果", "data": hits}
    except Exception as e:
        return {"success": False, "message": f"搜索失败: {str(e)}", "data": None}

@router.post("/knowledge/upload")
async def upload_knowledge_file(
    file: UploadFile = File(...),
    role_id: str = Form("general"),
    kb: KnowledgeBase = Depends(get_knowledge_base)
):
    """
    Upload a file into the knowledge base.

    The upload is spooled to a temporary file, handed to the knowledge
    base, and the temporary file is always removed afterwards (fixes a
    temp-file leak when ``kb.upload_file`` raised).
    """
    temp_path = None
    try:
        # Preserve the original extension so the KB can detect the file
        # type; guard against a missing filename (previously crashed).
        suffix = os.path.splitext(file.filename or "")[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_file:
            shutil.copyfileobj(file.file, temp_file)
            temp_path = temp_file.name
        
        # Hand the spooled file to the knowledge base.
        success = kb.upload_file(temp_path, role_id=role_id, file_name=file.filename)
        
        if success:
            return {"success": True, "message": f"文件 {file.filename} 上传成功", "role_id": role_id}
        return {"success": False, "message": f"文件 {file.filename} 上传失败", "role_id": role_id}
    except Exception as e:
        return {"success": False, "message": f"文件上传失败: {str(e)}", "role_id": role_id}
    finally:
        # Always clean up the spool file, even when upload_file raises.
        if temp_path and os.path.exists(temp_path):
            os.unlink(temp_path)

@router.get("/select/{role_id}", response_model=RoleResponse)
async def select_role(
    role_id: str,
    agent: RoleChainAgent = Depends(get_role_play_agent)
):
    """Select a role by id; respond 404 when the id is unknown."""
    selected = agent.select_role(role_id)
    if not selected:
        raise HTTPException(status_code=404, detail=f"未找到ID为 {role_id} 的角色")
    return selected

@router.post("/chat", response_model=ChatResponse)
async def chat_with_role(
    request: RoleChatRequest,
    agent: RoleChainAgent = Depends(get_role_play_agent)
):
    """Chat with a role (multi-turn conversation supported)."""
    # Switch to the requested role first, if one was given.
    if request.role_id:
        if not agent.select_role(request.role_id):
            raise HTTPException(status_code=404, detail=f"未找到ID为{request.role_id}的角色")
    
    # Forward the message together with the conversation history.
    reply = await agent.chat(request.message, messages=request.messages)
    
    if not reply.get("success", False):
        raise HTTPException(status_code=400, detail=reply.get("error", "未知错误"))
    
    return JSONResponse(content=reply)

@router.post("/chat/stream")
async def chat_with_role_stream(
    request: RoleChatRequest,
    agent: RoleChainAgent = Depends(get_role_play_agent)
):
    """
    Stream a chat with a role as server-sent events (multi-turn supported).

    Uses the injected ``agent``; the previous version redundantly
    re-imported and re-instantiated RoleChainAgent while ignoring the
    injected instance.
    """
    # Resolve legacy vs. new-style parameters: userInput/history take
    # precedence over message/messages when provided.
    message = request.userInput if request.userInput is not None else request.message
    messages = request.history if request.history is not None else request.messages
    role_info = request.roleInfo
    websh = request.websh
    
    async def generate_stream():
        # Keep only dict-shaped history entries, as the agent expects.
        history_messages = [m for m in (messages or []) if isinstance(m, dict)]
        
        accumulated = ""  # full text produced so far
        async for chunk in agent.process_custom_role(
            role_info=role_info,
            user_input=message,
            history=history_messages,
            websh=websh
        ):
            accumulated += chunk
            # Each SSE event carries the whole accumulated text.
            payload = {
                "success": True,
                "text": accumulated,
                "is_final": False
            }
            yield f"data: {json.dumps(payload)}\n\n"
        
        # Final event: complete text, flagged as final.
        final_payload = {
            "success": True,
            "text": accumulated,
            "is_final": True
        }
        yield f"data: {json.dumps(final_payload)}\n\n"
    
    return StreamingResponse(
        generate_stream(),
        media_type="text/event-stream"
    )

@router.websocket("/ws/voice")
async def chat_with_role_websocket(
    websocket: WebSocket,
    agent: RoleChainAgent = Depends(get_role_play_agent)
):
    """
    WebSocket endpoint: receive text plus a voice id and reply with frames
    containing the text and ChatTTS-generated audio (hex-encoded).

    Fixes several NameErrors in the previous version: ``voice_ID``,
    ``current_text``, ``last_tts_text`` and ``complete_sentence`` were all
    referenced without ever being defined, so any connection crashed at
    runtime. Also removes the unused local RoleChainAgent import/instance.
    """
    await websocket.accept()
    logger.info("WebSocket连接已接受")
    
    try:
        # Receive the client request.
        request_data = await websocket.receive_json()
        logger.info(f"接收到客户端消息: {request_data}")
        
        # NOTE: "vioiceID" is the key the client actually sends (typo kept
        # for wire compatibility).
        text = request_data.get("text", "")
        vioiceID = request_data.get("vioiceID", [])
        
        current_text = text   # text we are converting to speech
        last_tts_text = ""    # portion of text already spoken (highlight anchor)
        audio_sent = False    # whether an audio-bearing frame was sent
        
        try:
            # Import ChatTTS lazily so the module loads without it installed.
            from api.models.chattts import ChatTTSClient
            chattts_client = ChatTTSClient()
        except Exception as e:
            logger.error(f"初始化ChatTTS客户端失败: {str(e)}")
            await websocket.send_json({
                "success": False,
                "error": str(e),
                "is_final": True
            })
            return
        
        try:
            # Strip characters that should not be spoken.
            clean_sentence = chattts_client.clean_text(text)
            
            if clean_sentence:
                logger.info(f"生成语音文本: {clean_sentence}  音色：{vioiceID}")
                
                # Generate speech with ChatTTS.
                audio_path = await chattts_client.generate_speech_streaming(
                    text=clean_sentence,
                    voice=vioiceID
                )
                
                # Read the generated audio file into memory.
                with open(audio_path, "rb") as f:
                    audio_data = f.read()
                
                logger.info(f"成功使用ChatTTS生成语音，文件路径: {audio_path}")
                
                last_tts_text = current_text  # everything spoken so far
                logger.info(f"成功生成语音，长度: {len(audio_data)} 字节")
                
                # Frame carrying text plus hex-encoded audio.
                response = {
                    "success": True,
                    "text": current_text,
                    "is_final": False,
                    "highlight_position": len(last_tts_text),
                    "audio_hex": audio_data.hex(),
                    "audio": audio_data.hex()  # kept for backward compatibility
                }
                logger.info(f"添加音频数据到响应，十六进制长度: {len(response['audio_hex'])}")
                
                await websocket.send_json(response)
                audio_sent = True
        except Exception as e:
            logger.error(f"生成语音失败: {str(e)}")
        
        if not audio_sent:
            # No audio was produced -- still push a text-only update so the
            # client can render the text.
            await websocket.send_json({
                "success": True,
                "text": current_text,
                "is_final": False,
                "highlight_position": len(last_tts_text)
            })
        
        # Small pause so the client is not flooded with frames.
        await asyncio.sleep(0.05)
        
        # Final frame: full text, highlight at the end.
        await websocket.send_json({
            "success": True,
            "text": current_text,
            "is_final": True,
            "highlight_position": len(current_text)
        })
    
    except WebSocketDisconnect:
        logger.warning("WebSocket连接已断开")
    except Exception as e:
        logger.error(f"WebSocket处理过程中发生错误: {str(e)}")
        # Best-effort error notification; the socket may already be closed.
        try:
            await websocket.send_json({
                "success": False,
                "error": str(e),
                "is_final": True
            })
        except Exception:
            pass