from tools.chat_tools import get_chat_tools
from models.json_response import JsonData
from typing import List, AsyncIterator, Dict, AsyncGenerator
from langchain.agents import create_openai_functions_agent, AgentExecutor
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder 
from langchain.tools import Tool
from core.llm import get_default_llm
import asyncio
from service.chat_service import ChatService
import logging
import os
from docx2txt import process
import PyPDF2
import io

logger = logging.getLogger(__name__)

def create_chat_agent(tools: List[Tool]):
    """Build an OpenAI-functions agent executor wired with *tools*.

    The prompt injects a prior-conversation summary and retrieved
    reference material ("{summary}" / "{information}") alongside the
    user input, so the agent answers primarily from the supplied
    material and may call the given tools (e.g. search) when needed.
    """
    system_prompt = """你是一个教学助手，你可以：
    1. 进行日常对话和问答
    2. 使用搜索工具获取最新消息
    3. 记住与用户的对话历史
    4. 主要根据资料回答
    请保持专业回答，友好且准确，如果用户问题有最新信息，请使用搜索工具，请结合资料回答。"""
    chat_prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        ("system", "以下是之前的对话摘要：{summary}"),
        ("system", "以下是用户问题的相关资料：{information}"),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])
    functions_agent = create_openai_functions_agent(
        llm=get_default_llm(),
        tools=tools,
        prompt=chat_prompt,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=functions_agent,
        tools=tools,
        verbose=False,            # keep log output quiet
        max_iterations=2,         # cap tool-call rounds to reduce latency
        handle_parsing_errors=True,
    )

async def read_file_content(file_path: str) -> str:
    """Read a file's textual content, supporting several formats.

    Supported formats: plain-text extensions (.txt/.md/.py/.js/.html/
    .css/.json, assumed UTF-8), .docx (via docx2txt) and .pdf (via
    PyPDF2). Errors are reported as human-readable strings rather than
    raised, so callers can embed the result directly into a prompt.

    Args:
        file_path: Path of the file to read.

    Returns:
        str: The file content, or a descriptive error message.
    """
    try:
        if not os.path.exists(file_path):
            return f"文件不存在: {file_path}"

        file_extension = os.path.splitext(file_path)[1].lower()

        if file_extension in ['.txt', '.md', '.py', '.js', '.html', '.css', '.json']:
            # Plain-text file; assumed UTF-8 encoded.
            with open(file_path, 'r', encoding='utf-8') as f:
                return f.read()

        elif file_extension == '.docx':
            # Word document.
            return process(file_path)

        elif file_extension == '.pdf':
            # PDF: concatenate the text of all pages. extract_text()
            # may return None for image-only pages, so guard with
            # `or ""` to avoid a TypeError on concatenation.
            text = ""
            with open(file_path, 'rb') as file:
                pdf_reader = PyPDF2.PdfReader(file)
                for page in pdf_reader.pages:
                    text += (page.extract_text() or "") + "\n"
            return text

        else:
            return f"不支持的文件格式: {file_extension}"

    except Exception as e:
        logger.error(f"读取文件失败 {file_path}: {e}")
        return f"读取文件失败: {str(e)}"

async def get_files_content(files: List[dict]) -> str:
    """Concatenate the contents of all uploaded files.

    Each file is looked up by name under the fixed upload directory
    ``./file``. Unreadable or missing files contribute their error
    message instead of content, so the caller still sees which file
    failed.

    Args:
        files: List of file-info dicts; each should carry a 'name' key.

    Returns:
        str: All file sections joined by blank lines, or "" when
        *files* is empty.
    """
    if not files:
        return ""

    upload_dir = "./file"
    file_contents = []

    for file_info in files:
        file_name = file_info.get('name', '')
        file_path = os.path.join(upload_dir, file_name)

        # read_file_content reports errors as strings, so every file
        # (readable or not) yields a section. The original branched on
        # the content prefix here, but both branches appended the exact
        # same text — the dead conditional has been removed.
        content = await read_file_content(file_path)
        file_contents.append(f"=== 文件: {file_name} ===\n{content}\n")

    return "\n".join(file_contents)

# 简化的聊天函数，直接调用LLM
async def simple_chat_with_llm(
    chat_service: ChatService,
    account_id: str,
    input_text: str,
    course_id: str,
    files: List[dict] = None):
    """Answer a user question with a single direct LLM call.

    When *files* are supplied, their contents are read and inlined into
    the prompt; when the files cannot be read, the prompt says so
    instead. Any failure is logged and swallowed, returning a friendly
    fallback message. ``chat_service``, ``account_id`` and ``course_id``
    are accepted for interface compatibility but not used here.
    """
    try:
        llm = get_default_llm()

        if not files:
            # No attachments: forward the question directly.
            prompt = f"""你是一个教学助手。请回答用户的问题：{input_text}
            
            请保持专业、友好且准确的回答。"""
        else:
            names = [f.get('name', '未知文件') for f in files]
            combined = await get_files_content(files)

            if combined:
                prompt = f"""你是一个教学助手。用户上传了以下文件：{', '.join(names)}

文件内容如下：
{combined}

用户问题：{input_text}

请根据文件内容回答用户的问题。如果文件内容与问题相关，请详细分析文件内容并给出专业回答。如果文件内容与问题无关，请直接回答用户的问题。

请保持专业、友好且准确的回答。"""
            else:
                prompt = f"""你是一个教学助手。用户上传了文件：{', '.join(names)}，但无法读取文件内容。

用户问题：{input_text}

请直接回答用户的问题，并说明无法读取文件内容的原因。

请保持专业、友好且准确的回答。"""

        reply = await llm.ainvoke(prompt)
        return reply.content

    except Exception as e:
        logger.error(f"简单聊天失败: {e}")
        return "抱歉，我现在无法回答您的问题，请稍后再试。"

async def chat_with_agent(
    agent_executor: AgentExecutor,
    chat_service: ChatService,
    account_id: str,
    input_text: str,
    course_id: str):
    """Stream an agent-generated answer one character at a time.

    Fetches the conversation summary and course reference material
    concurrently, feeds them to the agent executor, then yields the
    agent's final output character by character while persisting the
    exchange in the background. Any exception is logged and the stream
    simply ends (no error token is yielded to the consumer).
    """
    try:
        # Fetch the summary and the reference material concurrently.
        summary_task = asyncio.create_task(chat_service.generate_summary(account_id))
        information_task = asyncio.create_task(chat_service.get_information_async(input_text, course_id))
        
        summary, information = await asyncio.gather(summary_task, information_task)
        
        async for chunk in agent_executor.astream({"input": input_text, "summary": summary, "information": information}):
            if "output" in chunk:
                response = chunk["output"]
                
                # Persist the chat record asynchronously so streaming is not blocked.
                asyncio.create_task(chat_service.save_chat_message_async(account_id, input_text, response))
                
                # Emit one character at a time with a 0.05 s (50 ms) pause
                # per character to simulate a typing effect.
                for token in response:
                    yield token
                    await asyncio.sleep(0.05)
    except Exception as e:
        logger.error(e)
        
async def generate_stream_response(
    chat_service: ChatService,
    account_id: str,
    message: str,
    course_id: str,
    files: List[dict] = None) -> AsyncIterator:
    """Yield a server-sent-event stream answering *message*.

    The full answer is produced by simple_chat_with_llm and then
    re-chunked on punctuation/newlines (or after 10+ buffered chars) so
    the client receives a simulated token stream. On failure a single
    apology event is emitted instead.
    """
    # Single characters that flush the pending buffer immediately.
    # ("\r\n" can never match a single character, kept for parity.)
    breakers = {"\n", "\r\n", "。", ".", "，", "！", "？"}

    try:
        answer = await simple_chat_with_llm(chat_service, account_id, message, course_id, files)

        buffer = ""
        for ch in answer:
            buffer += ch
            if ch in breakers or len(buffer) > 10:
                payload = JsonData.stream_data(data=buffer)
                yield f"data: {payload.model_dump_json()}\n\n"
                buffer = ""
                await asyncio.sleep(0.02)  # brief pause for a streaming feel

        # Flush whatever remains after the last breaker.
        if buffer:
            payload = JsonData.stream_data(data=buffer)
            yield f"data: {payload.model_dump_json()}\n\n"

    except Exception as e:
        logger.error(f"生成流式响应失败: {e}")
        error_data = JsonData.stream_data(data="抱歉，服务暂时不可用，请稍后再试。")
        yield f"data: {error_data.model_dump_json()}\n\n"

