# main.py
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import List, Dict, Any, Optional, Union
from enum import Enum
from fastapi.responses import StreamingResponse
from fastapi.encoders import jsonable_encoder
import json
import time
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_core.runnables import RunnableConfig

from graph import graph, persistent_memory

app = FastAPI()

# Enable CORS so browser frontends on any origin can reach the API.
# NOTE(review): browsers reject credentialed requests when
# allow_origins=['*'] is combined with allow_credentials=True — confirm
# whether credentials are actually needed, or list explicit origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=['*'],
    allow_headers=['*'],
)

# LangGraph SQLite memory storage has replaced manual history management.

def _convert_message_to_dict(message):
    """将LangChain消息转换为字典格式"""
    if hasattr(message, 'type'):
        msg_type = message.type
    elif isinstance(message, HumanMessage):
        msg_type = 'human'
    elif isinstance(message, AIMessage):
        msg_type = 'ai'
    elif isinstance(message, SystemMessage):
        msg_type = 'system'
    else:
        msg_type = 'unknown'
    
    # 确保content是正确的格式
    content = message.content if hasattr(message, 'content') else str(message)
    if isinstance(content, str):
        content = [{'type': 'text', 'text': content}]
    elif not isinstance(content, list):
        content = [{'type': 'text', 'text': str(content)}]
    
    return {
        'id': getattr(message, 'id', f'msg_{int(time.time() * 1000)}'),
        'type': msg_type,
        'content': content
    }

# LangGraph standard message format
class MessageContent(BaseModel):
    """One content part of a message; only 'text' parts are consumed."""
    type: str = 'text'
    text: str

class Message(BaseModel):
    """A chat message in LangGraph wire format."""
    id: str
    type: str  # 'human', 'assistant' or 'system'
    content: List[MessageContent]

class LangGraphInput(BaseModel):
    """The ``input`` payload of a LangGraph run request."""
    messages: List[Message]

# LangGraph standard request format - strictly required
class LangGraphConfig(BaseModel):
    """Run configuration; ``configurable`` may carry e.g. ``thread_id``."""
    configurable: Optional[Dict[str, Any]] = None

class LangGraphRequest(BaseModel):
    """Top-level body of a LangGraph run request."""
    input: LangGraphInput
    config: Optional[LangGraphConfig] = None
    stream_mode: Optional[List[str]] = None
    assistant_id: Optional[str] = None
    on_disconnect: Optional[str] = None

# Limit-only request format (sent by SDK history polls)
class LimitRequest(BaseModel):
    """Body of a limit-only request such as ``{"limit": N}``."""
    limit: Optional[int] = 1000

# Service information endpoint
@app.get('/info')
async def get_info():
    """Report the service name, version, status and main endpoints."""
    endpoints = {
        'chat': '/run_agent',
        'runs': '/runs/stream',
    }
    return {
        'name': 'LangGraph Chatbot Agent',
        'version': '1.0.0',
        'status': 'running',
        'endpoints': endpoints,
    }

# Thread management endpoint (placeholder)
@app.options('/threads')
@app.post('/threads')
@app.get('/threads')
async def manage_threads(request: Optional[LimitRequest] = None):
    """Placeholder thread management; echoes the requested limit."""
    effective_limit = request.limit if request else 1000
    return {
        'threads': [],
        'message': 'Thread management not implemented',
        'limit': effective_limit,
    }

# Thread search endpoint (placeholder)
@app.options('/threads/search')
@app.get('/threads/search')
@app.post('/threads/search')
async def search_threads(request: Optional[LimitRequest] = None):
    """Placeholder thread search; echoes the requested limit."""
    effective_limit = request.limit if request else 1000
    return {
        'threads': [],
        'message': 'Thread search not implemented',
        'limit': effective_limit,
    }

# In-memory cache of the most recently normalized messages per thread, so
# that "limit-only" history polls from the SDK do not wipe the frontend view.
# BUGFIX: THREAD_HISTORY and _normalize_messages_from_result were referenced
# below but never defined anywhere in this module (NameError at runtime).
THREAD_HISTORY: Dict[str, List[Dict[str, Any]]] = {}


def _normalize_messages_from_result(result):
    """Extract ``messages`` from a graph result and convert each entry to the
    plain dict format the frontend renders (see _convert_message_to_dict)."""
    messages = result.get('messages', []) if isinstance(result, dict) else []
    return [_convert_message_to_dict(msg) for msg in messages]


# Thread history endpoint - strict LangGraph standard (returns a normalized
# message array).
@app.options('/threads/{thread_id}/history')
@app.post('/threads/{thread_id}/history')
async def thread_history(thread_id: str, request: Optional[Union[LangGraphRequest, LimitRequest]] = None):
    """Return normalized message history for *thread_id*.

    The SDK may POST either a bare ``{"limit": N}`` body (poll) or a full
    LangGraph request containing ``input.messages`` (new turn).
    """
    try:
        # The SDK sometimes sends {"limit": N} with no input; serve the cache
        # so the frontend is not cleared.
        if request is None or isinstance(request, LimitRequest):
            limit = getattr(request, 'limit', 1000) if request else 1000
            print(f'Thread {thread_id} history limit-only request: limit={limit}')
            return THREAD_HISTORY.get(thread_id, [])

        # Request contains input: run the graph on the supplied conversation.
        # LangGraph 'human' messages map to the 'user' role; unknown types
        # are skipped.
        role_map = {'human': 'user', 'assistant': 'assistant', 'system': 'system'}
        messages = []
        for msg in request.input.messages:  # type: ignore[attr-defined]
            role = role_map.get(msg.type)
            if role is None:
                continue
            text_content = ''.join(part.text for part in msg.content if part.type == 'text')
            messages.append((role, text_content))

        print(f'Thread {thread_id} history request messages:', messages)
        result = graph.invoke({'messages': messages})
        normalized = _normalize_messages_from_result(result)
        # Cache for subsequent limit-only polls.
        THREAD_HISTORY[thread_id] = normalized
        # Return the message array directly for easy frontend rendering.
        return normalized
    except Exception as e:
        print(f'Error in thread_history: {e}')
        raise HTTPException(status_code=500, detail=str(e))

# Thread streaming endpoint - strict LangGraph standard (SSE)
@app.options('/threads/{thread_id}/runs/stream')
@app.post('/threads/{thread_id}/runs/stream')
async def thread_runs_stream(thread_id: str, request: LangGraphRequest):
    """Run the graph for *thread_id* and stream results as Server-Sent Events.

    Rebuilds the conversation context from the persistent store plus the
    request's messages, streams the graph, persists user and AI messages,
    and emits ``metadata``, ``values`` and ``end`` events (``error`` on
    failure).
    """
    try:
        print(f"=== 开始处理线程流式请求 {thread_id} ===")
        print(f"请求体: {request}")

        # Rebuild the full context from the persisted conversation history.
        history = persistent_memory.get_conversation_history(thread_id)

        # Convert stored history entries into LangChain message objects.
        langchain_messages = []
        for hist_msg in history:
            if hist_msg['type'] == 'user':
                langchain_messages.append(HumanMessage(content=hist_msg['content']))
            elif hist_msg['type'] == 'assistant':
                langchain_messages.append(AIMessage(content=hist_msg['content']))

        # Append the messages carried by the current request.
        for msg in request.input.messages:
            text_content = ''
            for content in msg.content:
                if content.type == 'text':
                    text_content += content.text

            if msg.type == 'human':
                langchain_messages.append(HumanMessage(content=text_content))
                # Persist the user's message immediately.
                persistent_memory.save_message(thread_id, 'user', text_content)
            elif msg.type == 'assistant':
                langchain_messages.append(AIMessage(content=text_content))
            elif msg.type == 'system':
                langchain_messages.append(SystemMessage(content=text_content))

        print(f'Thread {thread_id} 完整对话上下文 ({len(langchain_messages)} 条):', [type(msg).__name__ for msg in langchain_messages])

        # Use LangGraph's native streaming, keyed by thread_id.
        config = RunnableConfig(configurable={"thread_id": thread_id})
        print(f"✅ 使用LangGraph流式调用，配置: {config}")

        def event_generator():
            try:
                print("🚀 开始LangGraph流式处理...")

                # BUGFIX: the previous "'metadata' not in locals()" membership
                # test was fragile (breaks as soon as any local named
                # ``metadata`` exists); use an explicit flag instead.
                metadata_sent = False

                # Stream with LangGraph's synchronous stream() API (this is a
                # sync generator, so stream(), not astream(), is correct).
                for chunk in graph.stream({"messages": langchain_messages}, config=config):
                    print(f"📦 收到chunk: {chunk}")

                    # Emit the metadata event once, before the first values event.
                    if not metadata_sent:
                        metadata_event = {
                            'event': 'metadata',
                            'data': {
                                'run_id': f'run_{int(time.time() * 1000)}',
                                'thread_id': thread_id,
                            }
                        }
                        yield f"event: metadata\ndata: {json.dumps(metadata_event['data'], ensure_ascii=False)}\n\n"
                        metadata_sent = True

                    # Forward message chunks produced by the 'agent' node.
                    if 'agent' in chunk and 'messages' in chunk['agent']:
                        messages = chunk['agent']['messages']
                        print(f"🔄 处理消息chunk: {len(messages)} 条消息")

                        # Persist every AI reply.
                        for msg in messages:
                            if isinstance(msg, AIMessage):
                                persistent_memory.save_message(thread_id, 'assistant', msg.content)
                                print(f"💾 保存AI回复到持久化存储")

                        # Normalize messages into plain dicts for the frontend.
                        converted_messages = [_convert_message_to_dict(msg) for msg in messages]
                        print(f"🔄 全局转换后的消息: {converted_messages}")

                        # Build the values event payload.
                        values_event = {
                            'values': {
                                'messages': converted_messages
                            },
                            'metadata': {
                                'step': 1,
                                'source': 'loop',
                                'writes': None,
                                'thread_id': thread_id,
                            },
                            'next': [],
                        }

                        print(f"🔧 全局构建的values事件: {values_event}")

                        sse_data = f"event: values\ndata: {json.dumps(values_event, ensure_ascii=False)}\n\n"
                        print(f"📤 发送values事件")
                        yield sse_data

                # Signal end-of-stream to the client.
                end_data = f"event: end\ndata: {json.dumps({'event': 'end', 'type': 'end'})}\n\n"
                print(f"📤 发送end事件")
                yield end_data

                print("✅ LangGraph流式处理完成")
            except Exception as e:
                print(f"❌ LangGraph流式处理错误: {e}")
                import traceback
                traceback.print_exc()
                error_data = f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n"
                yield error_data

        print("返回StreamingResponse...")
        return StreamingResponse(
            event_generator(),
            media_type='text/event-stream',
            headers={
                'Cache-Control': 'no-cache, no-transform',
                'Connection': 'keep-alive',
                'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
                'Access-Control-Allow-Headers': 'Content-Type, Authorization',
            }
        )
    except Exception as e:
        print(f'Error in thread_runs_stream: {e}')
        import traceback
        print(f'Traceback: {traceback.format_exc()}')
        raise HTTPException(status_code=500, detail=str(e))

# Standard LangGraph endpoint - strict LangGraph standard
@app.post('/run_agent')
async def run_agent(request: LangGraphRequest):
    """Run the graph once on the request's messages and return the result.

    LangGraph 'human' messages map to the 'user' role; 'assistant' and
    'system' keep their names; unknown message types are skipped (matching
    the previous behavior). Only 'text' content parts are concatenated.
    """
    # Refactor: the three per-type branches were identical except for the
    # role string; collapse them through a role map.
    role_map = {'human': 'user', 'assistant': 'assistant', 'system': 'system'}
    try:
        messages = []
        for msg in request.input.messages:
            role = role_map.get(msg.type)
            if role is None:
                continue  # ignore unknown message types
            text_content = ''.join(part.text for part in msg.content if part.type == 'text')
            messages.append((role, text_content))

        print('LangGraph request messages:', messages)
        result = graph.invoke({'messages': messages})

        return {
            'response': result,
            'assistant_id': request.assistant_id or 'chatbot',
            'status': 'completed'
        }
    except Exception as e:
        print(f'Error in run_agent: {e}')
        raise HTTPException(status_code=500, detail=str(e))

# Global streaming endpoint - strict LangGraph standard (SSE)
@app.options('/runs/stream')
@app.post('/runs/stream')
async def runs_stream(request: LangGraphRequest):
    """Run the graph and stream results as Server-Sent Events.

    Resolves (or generates) a ``thread_id`` from the request config, rebuilds
    the conversation context from the persistent store plus the request's
    messages, streams the graph, persists user and AI messages, and emits
    ``metadata``, ``values`` and ``end`` events (``error`` on failure).
    """
    try:
        print(f"=== 开始处理全局流式请求 ===")
        print(f"请求体: {request}")

        # Take thread_id from the request config; generate one if absent.
        thread_id = None
        if request.config and request.config.configurable:
            thread_id = request.config.configurable.get('thread_id')

        if not thread_id:
            thread_id = f"thread_{int(time.time() * 1000)}"

        print(f"✅ 使用thread_id: {thread_id}")
        print(f"📋 请求config: {request.config}")

        # Rebuild the full context from the persisted conversation history.
        history = persistent_memory.get_conversation_history(thread_id)

        # Convert stored history entries into LangChain message objects.
        langchain_messages = []
        for hist_msg in history:
            if hist_msg['type'] == 'user':
                langchain_messages.append(HumanMessage(content=hist_msg['content']))
            elif hist_msg['type'] == 'assistant':
                langchain_messages.append(AIMessage(content=hist_msg['content']))

        # Append the messages carried by the current request.
        for msg in request.input.messages:
            text_content = ''
            for content in msg.content:
                if content.type == 'text':
                    text_content += content.text

            if msg.type == 'human':
                langchain_messages.append(HumanMessage(content=text_content))
                # Persist the user's message immediately.
                persistent_memory.save_message(thread_id, 'user', text_content)
            elif msg.type == 'assistant':
                langchain_messages.append(AIMessage(content=text_content))
            elif msg.type == 'system':
                langchain_messages.append(SystemMessage(content=text_content))

        print(f'全局流式请求完整对话上下文 ({len(langchain_messages)} 条):', [type(msg).__name__ for msg in langchain_messages])

        # Use LangGraph's native streaming, keyed by thread_id.
        config = RunnableConfig(configurable={"thread_id": thread_id})
        print(f"✅ 使用LangGraph流式调用，配置: {config}")

        def event_generator():
            try:
                print("🚀 开始LangGraph流式处理...")

                # BUGFIX: the previous "'metadata' not in locals()" membership
                # test was fragile (breaks as soon as any local named
                # ``metadata`` exists); use an explicit flag instead.
                metadata_sent = False

                # Stream with LangGraph's synchronous stream() API.
                chunk_count = 0
                for chunk in graph.stream({"messages": langchain_messages}, config=config):
                    chunk_count += 1
                    print(f"📦 收到chunk #{chunk_count}: {chunk}")
                    print(f"🔍 chunk类型: {type(chunk)}, 键: {list(chunk.keys()) if isinstance(chunk, dict) else 'N/A'}")

                    # Emit the metadata event once, before the first values event.
                    if not metadata_sent:
                        metadata_event = {
                            'event': 'metadata',
                            'data': {
                                'run_id': f'run_{int(time.time() * 1000)}',
                                'thread_id': thread_id,
                            }
                        }
                        yield f"event: metadata\ndata: {json.dumps(metadata_event['data'], ensure_ascii=False)}\n\n"
                        metadata_sent = True

                    # Forward message chunks produced by the 'agent' node.
                    if 'agent' in chunk and 'messages' in chunk['agent']:
                        messages = chunk['agent']['messages']
                        print(f"🔄 处理消息chunk: {len(messages)} 条消息")
                        print(f"📋 消息详情: {[type(msg).__name__ for msg in messages]}")

                        # Persist every AI reply.
                        for msg in messages:
                            print(f"📝 检查消息: {type(msg).__name__} - {getattr(msg, 'content', 'no content')}")
                            if isinstance(msg, AIMessage):
                                persistent_memory.save_message(thread_id, 'assistant', msg.content)
                                print(f"💾 保存AI回复到持久化存储: {msg.content}")

                        # Normalize messages into plain dicts for the frontend.
                        converted_messages = [_convert_message_to_dict(msg) for msg in messages]
                        print(f"🔄 转换后的消息: {converted_messages}")

                        # Build the values event payload.
                        values_event = {
                            'values': {
                                'messages': converted_messages
                            },
                            'metadata': {
                                'step': 1,
                                'source': 'loop',
                                'writes': None,
                                'thread_id': thread_id,
                            },
                            'next': [],
                        }

                        print(f"🔧 构建的values事件: {values_event}")

                        sse_data = f"event: values\ndata: {json.dumps(values_event, ensure_ascii=False)}\n\n"
                        print(f"📤 发送values事件: {sse_data[:200]}...")
                        yield sse_data

                # Signal end-of-stream to the client.
                end_data = f"event: end\ndata: {json.dumps({'event': 'end', 'type': 'end'})}\n\n"
                print(f"📤 发送end事件")
                yield end_data

                print("✅ LangGraph流式处理完成")
            except Exception as e:
                print(f"❌ LangGraph流式处理错误: {e}")
                import traceback
                traceback.print_exc()
                error_data = f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n"
                yield error_data

        print("返回StreamingResponse...")
        return StreamingResponse(
            event_generator(),
            media_type='text/event-stream',
            headers={
                'Cache-Control': 'no-cache, no-transform',
                'Connection': 'keep-alive',
                'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
                'Access-Control-Allow-Headers': 'Content-Type, Authorization',
            }
        )
    except Exception as e:
        print(f'Error in runs_stream: {e}')
        import traceback
        print(f'Traceback: {traceback.format_exc()}')
        raise HTTPException(status_code=500, detail=str(e))

# Memory database info endpoint
@app.get('/memory/info')
async def get_memory_info():
    """Report the memory database path, existence and size in bytes."""
    try:
        import os
        path = persistent_memory.db_path
        exists = os.path.exists(path)
        size = os.path.getsize(path) if exists else 0
        return {
            "database_path": path,
            "database_exists": exists,
            "database_size_bytes": size,
            "storage_type": "Custom SQLite",
        }
    except Exception as e:
        print(f'Error in get_memory_info: {e}')
        raise HTTPException(status_code=500, detail=str(e))

# Fetch the conversation history of a given thread
@app.get('/threads/{thread_id}/history')
async def get_thread_history_api(thread_id: str):
    """Return the persisted conversation history of *thread_id*."""
    try:
        stored = persistent_memory.get_conversation_history(thread_id)
        return {
            "thread_id": thread_id,
            "messages": stored,
            "count": len(stored),
        }
    except Exception as e:
        print(f'Error in get_thread_history_api: {e}')
        raise HTTPException(status_code=500, detail=str(e))

# Clear the history of a given thread
@app.delete('/threads/{thread_id}/history')
async def clear_thread_history_api(thread_id: str):
    """Delete all persisted messages of *thread_id*."""
    try:
        persistent_memory.clear_conversation(thread_id)
    except Exception as e:
        print(f'Error in clear_thread_history_api: {e}')
        raise HTTPException(status_code=500, detail=str(e))
    return {"thread_id": thread_id, "message": "对话历史已清空"}

# Wipe the entire memory database
@app.delete('/memory/clear')
async def clear_all_memory():
    """Delete the memory database file and re-initialize an empty one."""
    try:
        import os
        path = persistent_memory.db_path
        if not os.path.exists(path):
            return {"message": "记忆数据库不存在", "database_path": path}
        os.remove(path)
        # Recreate an empty database so later requests keep working.
        persistent_memory.init_database()
        return {"message": "记忆数据库已清空并重新初始化", "database_path": path}
    except Exception as e:
        print(f'Error in clear_all_memory: {e}')
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == '__main__':
    import uvicorn
    # BUGFIX: reload=True only works when the app is given as an import
    # string; passing the app object makes uvicorn emit a warning and
    # silently disables auto-reload. The file is main.py (see header).
    uvicorn.run('main:app', host='0.0.0.0', port=2024, reload=True)
