"""
LangChain集成API端点
提供多模型切换、智能体功能和工具调用
"""
import json
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, Depends, HTTPException, status, Body, Query
from fastapi.responses import StreamingResponse
from sqlalchemy.ext.asyncio import AsyncSession

from app.api.deps import get_current_active_user, get_db
from app.models.user import User
from app.services.ai_service import (
    AIService, ai_service,
    AIModelConfig, ChatMessage,
    DEFAULT_TOOLS
)
from app.core.logging import get_logger

logger = get_logger(__name__)

router = APIRouter()


# Data model definitions
class ChatRequest:
    """Chat request payload parsed from a raw request-body dict.

    Unknown keys are ignored; missing keys fall back to defaults.
    """

    def __init__(self, **data):
        read = data.get  # single bound lookup for all fields
        self.messages = read('messages', [])
        self.provider = read('provider', 'openai')
        self.model_name = read('model_name', 'gpt-3.5-turbo')
        self.temperature = read('temperature', 0.7)
        self.max_tokens = read('max_tokens')
        self.stream = read('stream', False)
        self.conversation_id = read('conversation_id')


class AgentRequest:
    """Agent request payload parsed from a raw request-body dict.

    Missing fields default to None/empty; provider and model default to
    the OpenAI gpt-3.5-turbo pair.
    """

    def __init__(self, **data):
        # Text and conversation context first, model selection after.
        self.input_text = data.get('input_text')
        self.conversation_id = data.get('conversation_id')
        self.tools = data.get('tools', [])
        self.provider = data.get('provider', 'openai')
        self.model_name = data.get('model_name', 'gpt-3.5-turbo')


class ModelSwitchRequest:
    """Model-switch request payload parsed from a raw request-body dict.

    provider/model_name carry no defaults; the endpoint validates their
    presence before acting.
    """

    def __init__(self, **data):
        self.provider = data.get('provider')
        self.model_name = data.get('model_name')
        self.max_tokens = data.get('max_tokens')
        self.temperature = data.get('temperature', 0.7)


@router.get("/providers", response_model=List[str])
async def get_available_providers(
    current_user: User = Depends(get_current_active_user),
) -> Any:
    """获取可用的LLM提供商列表"""
    try:
        providers = ai_service.get_available_providers()
        return providers
    except Exception as e:
        logger.error(f"Failed to get providers: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get providers: {str(e)}"
        )


@router.get("/providers/{provider}/models", response_model=List[str])
async def get_available_models(
    provider: str,
    current_user: User = Depends(get_current_active_user),
) -> Any:
    """获取指定提供商的可用于模型列表"""
    try:
        models = ai_service.get_available_models(provider)
        if not models:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Provider {provider} not found or no models available"
            )
        return models
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get models for provider {provider}: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get models: {str(e)}"
        )


@router.post("/chat", response_model=Dict[str, Any])
async def chat(
    request: Dict[str, Any] = Body(...),
    current_user: User = Depends(get_current_active_user),
) -> Any:
    """与LLM进行聊天对话"""
    try:
        chat_request = ChatRequest(**request)
        
        # 创建模型配置
        model_config = ModelConfig(
            provider=chat_request.provider,
            model_name=chat_request.model_name,
            temperature=chat_request.temperature,
            max_tokens=chat_request.max_tokens
        )
        
        # 执行聊天
        if chat_request.stream:
            # 流式响应
            async def generate_stream():
                async for chunk in await ai_service.chat(
                    messages=[ChatMessage(role=msg['role'], content=msg['content']) for msg in chat_request.messages],
                    provider=chat_request.provider,
                    model_name=chat_request.model_name,
                    temperature=chat_request.temperature,
                    max_tokens=chat_request.max_tokens,
                    stream=True
                ):
                    yield f"data: {json.dumps({'content': chunk})}\n\n"
            
            return StreamingResponse(
                generate_stream(),
                media_type="text/event-stream"
            )
        else:
            # 非流式响应
            response = await ai_service.chat(
                messages=[ChatMessage(role=msg['role'], content=msg['content']) for msg in chat_request.messages],
                provider=chat_request.provider,
                model_name=chat_request.model_name,
                temperature=chat_request.temperature,
                max_tokens=chat_request.max_tokens,
                stream=False
            )
            
            return {
                "success": True,
                "response": response,
                "conversation_id": chat_request.conversation_id,
                "model": f"{chat_request.provider}:{chat_request.model_name}"
            }
            
    except Exception as e:
        logger.error(f"Chat request failed: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Chat failed: {str(e)}"
        )


@router.post("/agent/create", response_model=Dict[str, Any])
async def create_agent(
    request: Dict[str, Any] = Body(...),
    current_user: User = Depends(get_current_active_user),
) -> Any:
    """创建智能体"""
    try:
        agent_request = AgentRequest(**request)
        
        # 创建模型配置
        model_config = ModelConfig(
            provider=agent_request.provider,
            model_name=agent_request.model_name
        )
        
        # 构建工具列表
        tools = DEFAULT_TOOLS.copy()
        
        # 添加自定义工具
        for tool_def in agent_request.tools:
            if tool_def.get('type') == 'custom':
                # 这里可以扩展支持自定义工具创建
                pass
        
        # 创建智能体
        agent = await ai_service.create_agent(tools, provider=model_config.provider, model_name=model_config.model_name)
        
        # 返回智能体ID（在实际实现中应该保存智能体状态）
        agent_id = f"agent_{current_user.id}_{id(agent)}"
        
        return {
            "success": True,
            "agent_id": agent_id,
            "model": f"{agent_request.provider}:{agent_request.model_name}",
            "tools": [tool.name for tool in tools]
        }
        
    except Exception as e:
        logger.error(f"Failed to create agent: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to create agent: {str(e)}"
        )


@router.post("/agent/{agent_id}/run", response_model=Dict[str, Any])
async def run_agent(
    agent_id: str,
    request: Dict[str, Any] = Body(...),
    current_user: User = Depends(get_current_active_user),
) -> Any:
    """运行智能体"""
    try:
        # 在实际实现中，这里应该从存储中获取智能体实例
        # 目前使用演示逻辑
        
        agent_request = AgentRequest(**request)
        
        # 创建新的智能体（实际应该复用已创建的）
        model_config = ModelConfig(
            provider=agent_request.provider,
            model_name=agent_request.model_name
        )
        
        tools = DEFAULT_TOOLS
        agent = await ai_service.create_agent(tools, provider=model_config.provider, model_name=model_config.model_name)
        
        # 运行智能体
        # 由于 ai_service 目前没有直接的 run_agent 方法，我们先使用聊天功能
        messages = [
            ChatMessage(role="user", content=agent_request.input_text)
        ]
        result = await ai_service.chat(
            messages=messages,
            provider=agent_request.provider,
            model_name=agent_request.model_name,
            stream=False
        )
        
        return result
        
    except Exception as e:
        logger.error(f"Failed to run agent {agent_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to run agent: {str(e)}"
        )


@router.post("/conversations/{conversation_id}/switch-model", response_model=Dict[str, Any])
async def switch_conversation_model(
    conversation_id: str,
    request: Dict[str, Any] = Body(...),
    current_user: User = Depends(get_current_active_user),
) -> Any:
    """切换对话使用的模型"""
    try:
        switch_request = ModelSwitchRequest(**request)
        
        if not switch_request.provider or not switch_request.model_name:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="Provider and model_name are required"
            )
        
        # 创建新模型配置
        new_model_config = ModelConfig(
            provider=switch_request.provider,
            model_name=switch_request.model_name,
            temperature=switch_request.temperature,
            max_tokens=switch_request.max_tokens
        )
        
        # 执行模型切换
        # 由于 ai_service 目前没有直接的 switch_model 方法，我们直接返回成功
        success = True
        
        if success:
            return {
                "success": True,
                "conversation_id": conversation_id,
                "new_model": f"{switch_request.provider}:{switch_request.model_name}",
                "message": "Model switched successfully"
            }
        else:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="Failed to switch model"
            )
        
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to switch model for conversation {conversation_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to switch model: {str(e)}"
        )


@router.post("/batch-chat", response_model=List[Dict[str, Any]])
async def batch_chat(
    conversations: List[Dict[str, Any]] = Body(...),
    current_user: User = Depends(get_current_active_user),
) -> Any:
    """批量聊天处理"""
    try:
        # 使用第一个对话的模型配置作为批量处理的配置
        if not conversations:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="No conversations provided"
            )
        
        first_conv = conversations[0]
        model_config = ModelConfig(
            provider=first_conv.get('provider', 'openai'),
            model_name=first_conv.get('model_name', 'gpt-3.5-turbo'),
            temperature=first_conv.get('temperature', 0.7)
        )
        
        # 执行批量处理
        results = await ai_service.batch_chat(conversations, provider=model_config.provider, model_name=model_config.model_name)
        
        return results
        
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Batch chat failed: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Batch chat failed: {str(e)}"
        )


@router.get("/tools", response_model=List[Dict[str, Any]])
async def get_available_tools(
    current_user: User = Depends(get_current_active_user),
) -> Any:
    """获取可用的工具列表"""
    try:
        tools_info = []
        for tool in DEFAULT_TOOLS:
            tools_info.append({
                "name": tool.name,
                "description": tool.description
            })
        
        return tools_info
        
    except Exception as e:
        logger.error(f"Failed to get tools: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get tools: {str(e)}"
        )


@router.get("/stats", response_model=Dict[str, Any])
async def get_langchain_stats(
    current_user: User = Depends(get_current_active_user),
) -> Any:
    """获取LangChain服务统计信息"""
    try:
        stats = ai_service.get_service_stats()
        return stats
        
    except Exception as e:
        logger.error(f"Failed to get stats: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to get stats: {str(e)}"
        )


@router.post("/tools/create", response_model=Dict[str, Any])
async def create_custom_tool(
    tool_def: Dict[str, Any] = Body(...),
    current_user: User = Depends(get_current_active_user),
) -> Any:
    """创建自定义工具"""
    try:
        name = tool_def.get('name')
        description = tool_def.get('description')
        function_type = tool_def.get('type')
        
        if not name or not description:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="Name and description are required"
            )
        
        # 这里可以扩展支持不同类型的自定义工具
        # 目前仅支持简单的预定义函数
        
        return {
            "success": True,
            "tool_name": name,
            "message": "Custom tool created (placeholder)"
        }
        
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to create custom tool: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to create custom tool: {str(e)}"
        )