"""
LLM API端点
"""
import os
from typing import List, Optional
from datetime import datetime
from fastapi import APIRouter, Depends, HTTPException, Query
from fastapi.responses import StreamingResponse
from sqlalchemy.orm import Session
from app.api.deps import get_db, get_current_user
from app.models import ChatMessage
from app.models.user import User
from app.services.ai_service import AIService, ChatMessage as AIChatMessage
from app.services.llm_service import LLMService
from app.schemas.llm import (
    LLMModelCreate,
    LLMModelResponse,
    ChatConversationCreate,
    ChatConversationResponse,
    ChatRequest,
    ChatResponse,
    ChatStreamResponse,
    ConversationHistoryResponse,
    LLMProviderInfo,
    TokenUsageResponse,
    ChatMessageResponse
)
from pydantic import BaseModel, Field
import json
from dotenv import load_dotenv
from app.core.logging import get_logger

load_dotenv()

logger = get_logger(__name__)
router = APIRouter()


@router.get("/providers", response_model=List[LLMProviderInfo])
async def get_llm_providers():
    """List the available LLM providers.

    Queries the unified AI service for its registered providers and, for
    each one, the models it exposes.
    """
    ai_service = AIService()
    # NOTE: availability is hard-coded True here; a real check would
    # validate the provider's API key.
    return [
        LLMProviderInfo(
            provider=provider_name,
            name=provider_name.capitalize(),
            description=f"{provider_name.capitalize()} AI模型服务",
            models=ai_service.get_available_models(provider_name),
            is_available=True
        )
        for provider_name in ai_service.get_available_providers()
    ]


@router.post("/models", response_model=LLMModelResponse)
async def create_llm_model(
        model: LLMModelCreate,
        db: Session = Depends(get_db),
        current_user: User = Depends(get_current_user)
):
    """Create an LLM model configuration.

    Validates the provider, falls back to environment variables for a
    missing API key and to provider defaults for a missing base URL,
    then persists and returns the configuration.

    Raises:
        HTTPException 400: unsupported provider, or no API key supplied
            and none found in the environment.
    """
    # Fix: the original instantiated LLMService() here but never used it;
    # the dead construction has been removed.

    # Only these providers are supported by the backing services.
    supported_providers = ["openai", "anthropic", "deepseek", "kimi", "doubao"]
    if model.provider not in supported_providers:
        raise HTTPException(
            status_code=400,
            detail=f"不支持的提供商: {model.provider}"
        )

    # If no API key was supplied, fall back to the provider's env variable.
    if not model.api_key:
        env_key_map = {
            "openai": "OPENAI_API_KEY",
            "anthropic": "ANTHROPIC_API_KEY",
            "deepseek": "DEEPSEEK_API_KEY",
            "kimi": "KIMI_API_KEY",
            "doubao": "DOUBAO_API_KEY"
        }
        provider_key = model.provider
        env_key_name = env_key_map.get(provider_key)
        env_key = os.getenv(env_key_name) if env_key_name else None
        if env_key:
            model.api_key = env_key
        else:
            raise HTTPException(
                status_code=400,
                detail=f"未提供 API Key 且环境变量中也未找到 {model.provider} 的配置"
            )

    # If no base_url was supplied, use the provider's default endpoint.
    if not model.base_url:
        default_base_urls = {
            "openai": "https://api.openai.com/v1",
            "anthropic": "https://api.anthropic.com",
            "deepseek": "https://api.deepseek.com/v1",
            "kimi": "https://api.moonshot.cn/v1",
            "doubao": os.getenv("DOUBAO_ENDPOINT", "https://ark.cn-beijing.volces.com/api/v3")
        }
        model.base_url = default_base_urls.get(model.provider)

    # Persist the model record. Note: temperature is stored as a JSON
    # object with default/min/max, not a bare float.
    from app.models.llm_model import LLMModel
    db_model = LLMModel(
        name=model.name,
        provider=model.provider,
        model_name=model.llm_model_id,
        api_key=model.api_key,
        base_url=model.base_url,
        max_tokens=model.max_tokens,
        temperature={"default": model.temperature, "min": 0, "max": 2},
        is_active=model.is_active
    )
    db.add(db_model)
    db.commit()
    db.refresh(db_model)

    return LLMModelResponse(
        id=int(db_model.id) if db_model.id else 0,  # type: ignore
        name=str(db_model.name) if db_model.name else "",  # type: ignore
        provider=str(db_model.provider) if db_model.provider else "",  # type: ignore
        llm_model_id=str(db_model.model_name) if db_model.model_name else "",  # type: ignore
        max_tokens=int(db_model.max_tokens) if db_model.max_tokens else 0,  # type: ignore
        temperature=float(db_model.temperature.get('default', 0.7)) if isinstance(db_model.temperature, dict) else 0.7,  # type: ignore
        is_active=bool(db_model.is_active) if db_model.is_active is not None else False,  # type: ignore
        created_at=db_model.created_at,  # type: ignore
        updated_at=db_model.updated_at  # type: ignore
    )


@router.post("/models/auto-create-from-env")
async def auto_create_models_from_env(
        db: Session = Depends(get_db),
        current_user: User = Depends(get_current_user)
):
    """Auto-create model configurations from environment variables.

    For each known provider whose API-key env variable is set, creates a
    model record unless an identical provider/model pair already exists.
    """
    from app.models.llm_model import LLMModel

    # Static catalog of providers we know how to configure from the env.
    provider_configs = [
        {
            "provider": "openai",
            "name": "OpenAI GPT-3.5",
            "env_key": "OPENAI_API_KEY",
            "model_id": os.getenv("OPENAI_MODEL", "gpt-3.5-turbo"),
            "base_url": os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
        },
        {
            "provider": "deepseek",
            "name": "DeepSeek Chat",
            "env_key": "DEEPSEEK_API_KEY",
            "model_id": os.getenv("DEEPSEEK_MODEL", "deepseek-chat"),
            "base_url": "https://api.deepseek.com/v1"
        },
        {
            "provider": "anthropic",
            "name": "Claude 3 Haiku",
            "env_key": "ANTHROPIC_API_KEY",
            "model_id": os.getenv("ANTHROPIC_MODEL", "claude-3-haiku-20240307"),
            "base_url": "https://api.anthropic.com"
        },
        {
            "provider": "kimi",
            "name": "Kimi 8K",
            "env_key": "KIMI_API_KEY",
            "model_id": os.getenv("KIMI_MODEL", "moonshot-v1-8k"),
            "base_url": "https://api.moonshot.cn/v1"
        },
        {
            "provider": "doubao",
            "name": "Doubao Lite",
            "env_key": "DOUBAO_API_KEY",
            "model_id": os.getenv("DOUBAO_MODEL", "doubao-lite-4k"),
            "base_url": os.getenv("DOUBAO_ENDPOINT", "https://ark.cn-beijing.volces.com/api/v3")
        }
    ]

    created_models = []
    for cfg in provider_configs:
        api_key = os.getenv(cfg["env_key"])
        if not api_key:
            # Provider not configured in this environment; skip.
            continue

        duplicate = db.query(LLMModel).filter(
            LLMModel.provider == cfg["provider"],
            LLMModel.model_name == cfg["model_id"]
        ).first()
        if duplicate:
            continue

        env_prefix = cfg["provider"].upper()
        db.add(LLMModel(
            name=cfg["name"],
            provider=cfg["provider"],
            model_name=cfg["model_id"],
            api_key=api_key,
            base_url=cfg["base_url"],
            max_tokens=int(os.getenv(f"{env_prefix}_MAX_TOKENS", "4000")),
            temperature={
                "default": float(os.getenv(f"{env_prefix}_TEMPERATURE", "0.7")),
                "min": 0,
                "max": 2
            },
            is_active=True
        ))
        created_models.append(cfg["name"])

    db.commit()

    return {
        "message": f"成功创建 {len(created_models)} 个模型配置",
        "created_models": created_models
    }


@router.get("/models", response_model=List[LLMModelResponse])
async def get_llm_models(
        db: Session = Depends(get_db),
        current_user: User = Depends(get_current_user)
):
    """List all active LLM model configurations."""
    from app.models.llm_model import LLMModel
    active_models = db.query(LLMModel).filter(LLMModel.is_active == True).all()

    responses = []
    for m in active_models:
        # temperature is stored either as a {"default": ...} dict or a
        # bare number; normalize to a float with 0.7 as the fallback.
        if isinstance(m.temperature, dict):
            temp = float(m.temperature.get('default', 0.7))  # type: ignore
        elif m.temperature:
            temp = float(m.temperature)  # type: ignore
        else:
            temp = 0.7
        responses.append(LLMModelResponse(
            id=int(m.id) if m.id else 0,  # type: ignore
            name=str(m.name) if m.name else "",  # type: ignore
            provider=str(m.provider) if m.provider else "",  # type: ignore
            llm_model_id=str(m.model_name) if m.model_name else "",  # type: ignore
            max_tokens=int(m.max_tokens) if m.max_tokens else 0,  # type: ignore
            temperature=temp,
            is_active=bool(m.is_active) if m.is_active is not None else False,  # type: ignore
            created_at=m.created_at,  # type: ignore
            updated_at=m.updated_at  # type: ignore
        ))
    return responses


@router.post("/conversations", response_model=ChatConversationResponse)
async def create_conversation(
        conversation: ChatConversationCreate,
        db: Session = Depends(get_db),
        current_user: User = Depends(get_current_user)
):
    """Create a new chat conversation for the current user."""
    service = LLMService()

    try:
        conv = await service.create_conversation(
            db=db,
            user_id=int(current_user.id),  # type: ignore
            title=conversation.title,
            model_id=conversation.llm_model_id
        )

        # Look up the model so the response can carry its display name
        # and provider.
        from app.models.llm_model import LLMModel
        model = db.query(LLMModel).filter(LLMModel.id == conversation.llm_model_id).first()

        return ChatConversationResponse(
            id=int(conv.id) if conv.id else 0,  # type: ignore
            user_id=int(conv.user_id) if conv.user_id else 0,  # type: ignore
            title=str(conv.title) if conv.title else "",  # type: ignore
            llm_model_id=int(conv.model_id) if conv.model_id else 0,  # type: ignore
            llm_model_name=str(getattr(model, 'name', '')) if model else "未知",
            provider=str(getattr(model, 'provider', '')) if model else "未知",
            is_active=bool(conv.is_active) if conv.is_active is not None else False,  # type: ignore
            created_at=conv.created_at,  # type: ignore
            updated_at=conv.updated_at,  # type: ignore
            message_count=0  # freshly created, no messages yet
        )

    except Exception as e:
        logger.error(f"创建对话失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/conversations", response_model=List[ChatConversationResponse])
async def get_conversations(
    skip: int = Query(0, ge=0),
    limit: int = Query(20, ge=1, le=100),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """List the current user's active conversations, newest first."""
    from app.models.chat_conversation import ChatConversation
    from app.models.llm_model import LLMModel

    page = db.query(ChatConversation).filter(
        ChatConversation.user_id == current_user.id,
        ChatConversation.is_active == True
    ).order_by(ChatConversation.updated_at.desc()).offset(skip).limit(limit).all()

    # NOTE(review): one model lookup + one count query per conversation
    # (N+1 pattern); acceptable at page size <= 100.
    responses = []
    for conv in page:
        model = db.query(LLMModel).filter(LLMModel.id == conv.model_id).first()
        message_count = db.query(ChatMessage).filter(
            ChatMessage.conversation_id == conv.id
        ).count()

        has_name = model and hasattr(model, 'name') and model.name is not None
        has_provider = model and hasattr(model, 'provider') and model.provider is not None
        responses.append(ChatConversationResponse(
            id=int(conv.id) if conv.id else 0,  # type: ignore
            user_id=int(conv.user_id) if conv.user_id else 0,  # type: ignore
            title=str(conv.title) if conv.title else "",  # type: ignore
            llm_model_id=int(conv.model_id) if conv.model_id else 0,  # type: ignore
            llm_model_name=str(model.name) if has_name else "未知",
            provider=str(model.provider) if has_provider else "未知",
            is_active=bool(conv.is_active) if conv.is_active is not None else False,  # type: ignore
            created_at=conv.created_at,  # type: ignore
            updated_at=conv.updated_at,  # type: ignore
            message_count=message_count
        ))

    return responses


@router.get("/conversations/{conversation_id}", response_model=ConversationHistoryResponse)
async def get_conversation_history(
    conversation_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Return a conversation's metadata plus its non-deleted messages.

    Raises:
        HTTPException 404: conversation missing or not owned by the user.
    """
    from app.models.chat_conversation import ChatConversation
    from app.models.llm_model import LLMModel
    from app.models.chat_message import ChatMessage

    conversation = db.query(ChatConversation).filter(
        ChatConversation.id == conversation_id,
        ChatConversation.user_id == current_user.id
    ).first()

    if not conversation:
        raise HTTPException(status_code=404, detail="对话不存在")

    model = db.query(LLMModel).filter(LLMModel.id == conversation.model_id).first()
    messages = db.query(ChatMessage).filter(
        ChatMessage.conversation_id == conversation_id,
        ChatMessage.is_deleted == False
    ).order_by(ChatMessage.created_at.asc()).all()

    # Labels fall back to "未知" when the model record is gone.
    model_label = str(model.name) if model and model.name else "未知"  # type: ignore
    provider_label = str(model.provider) if model and model.provider else "未知"  # type: ignore

    history = [
        ChatMessageResponse(
            id=int(getattr(msg, 'id', 0)),
            conversation_id=int(getattr(msg, 'conversation_id', 0)),
            role=str(getattr(msg, 'role', '')),
            content=str(getattr(msg, 'content', '')),
            created_at=getattr(msg, 'created_at', datetime.now())
        )
        for msg in messages
    ]

    conversation_info = ChatConversationResponse(
        id=int(conversation.id) if conversation.id else 0,  # type: ignore
        user_id=int(conversation.user_id) if conversation.user_id else 0,  # type: ignore
        title=str(conversation.title) if conversation.title else "",  # type: ignore
        llm_model_id=int(conversation.model_id) if conversation.model_id else 0,  # type: ignore
        llm_model_name=model_label,
        provider=provider_label,
        is_active=bool(conversation.is_active) if conversation.is_active is not None else False,  # type: ignore
        created_at=conversation.created_at,  # type: ignore
        updated_at=conversation.updated_at,  # type: ignore
        message_count=len(history)
    )

    return ConversationHistoryResponse(conversation=conversation_info, messages=history)


@router.post("/chat", response_model=ChatResponse)
async def chat(
    request: ChatRequest,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Send a chat message and return the model's full (non-streamed) reply.

    Raises:
        HTTPException 404: conversation missing or not owned by the user.
        HTTPException 500: any other failure while generating the reply.
    """
    service = LLMService()

    try:
        from app.models.chat_conversation import ChatConversation
        conversation = db.query(ChatConversation).filter(
            ChatConversation.id == request.conversation_id,
            ChatConversation.user_id == current_user.id
        ).first()

        if not conversation:
            raise HTTPException(status_code=404, detail="对话不存在")

        response = await service.generate_chat_response(
            db=db,
            conversation_id=request.conversation_id,
            user_message=request.message,
            stream=False
        )

        # The assistant reply was persisted by the service; fetch the
        # newest message to report its id back to the client.
        from app.models.chat_message import ChatMessage
        last_message = db.query(ChatMessage).filter(
            ChatMessage.conversation_id == request.conversation_id
        ).order_by(ChatMessage.created_at.desc()).first()

        # Ensure the response is a plain string (a stream should not
        # occur with stream=False, but guard anyway).
        response_str = str(response) if not hasattr(response, '__aiter__') else "Stream response completed"

        return ChatResponse(
            message=response_str,
            conversation_id=request.conversation_id,
            message_id=int(getattr(last_message, 'id', 0)) if last_message else 0
        )

    except HTTPException:
        # Bug fix: the broad handler below used to swallow the 404 above
        # and re-raise it as a 500. Propagate HTTP errors unchanged.
        raise
    except Exception as e:
        logger.error(f"聊天失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/chat/stream")
async def chat_stream(
    request: ChatRequest,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Send a chat message and stream the reply as Server-Sent Events.

    Each SSE frame is a JSON-encoded ChatStreamResponse; the final frame
    has is_complete=True.

    Raises:
        HTTPException 404: conversation missing or not owned by the user.
        HTTPException 500: any other failure while starting the stream.
    """
    service = LLMService()

    try:
        from app.models.chat_conversation import ChatConversation
        conversation = db.query(ChatConversation).filter(
            ChatConversation.id == request.conversation_id,
            ChatConversation.user_id == current_user.id
        ).first()

        if not conversation:
            raise HTTPException(status_code=404, detail="对话不存在")

        response_stream = await service.generate_chat_response(
            db=db,
            conversation_id=request.conversation_id,
            user_message=request.message,
            stream=True
        )

        async def generate_stream():
            """Adapt whatever the service returned into SSE frames."""
            try:
                if callable(response_stream):
                    # The service returned a generator *function*; call it
                    # to obtain the actual async generator.
                    actual_generator = response_stream()
                    if hasattr(actual_generator, '__aiter__'):
                        async for chunk in actual_generator:
                            if isinstance(chunk, str):
                                data = ChatStreamResponse(
                                    content=chunk,
                                    is_complete=False
                                )
                                yield f"data: {json.dumps(data.dict())}\n\n"
                    else:
                        # Not a generator after all; emit its value once.
                        resp_str = str(actual_generator)
                        data = ChatStreamResponse(
                            content=resp_str,
                            is_complete=False
                        )
                        yield f"data: {json.dumps(data.dict())}\n\n"
                elif hasattr(response_stream, '__aiter__') and callable(getattr(response_stream, '__aiter__', None)):
                    async for chunk in response_stream:  # type: ignore
                        if isinstance(chunk, str):
                            data = ChatStreamResponse(
                                content=chunk,
                                is_complete=False
                            )
                            yield f"data: {json.dumps(data.dict())}\n\n"
                else:
                    # Plain (non-streaming) return value; emit it once.
                    resp_str = str(response_stream)
                    data = ChatStreamResponse(
                        content=resp_str,
                        is_complete=False
                    )
                    yield f"data: {json.dumps(data.dict())}\n\n"
            except Exception as e:
                logger.error(f"流式响应处理错误: {str(e)}")
                error_data = ChatStreamResponse(
                    content=f"流式响应处理错误: {str(e)}",
                    is_complete=True
                )
                yield f"data: {json.dumps(error_data.dict())}\n\n"
                # Bug fix: the error frame above already carries
                # is_complete=True; stop here instead of falling through
                # and emitting a second completion frame. A bare return
                # is valid inside an async generator.
                return

            # Success path: signal completion with a final empty frame.
            data = ChatStreamResponse(
                content="",
                is_complete=True
            )
            yield f"data: {json.dumps(data.dict())}\n\n"

        return StreamingResponse(
            generate_stream(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
            }
        )

    except HTTPException:
        # Bug fix: the broad handler below used to swallow the 404 above
        # and re-raise it as a 500. Propagate HTTP errors unchanged.
        raise
    except Exception as e:
        logger.error(f"流式聊天失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/conversations/{conversation_id}/tokens", response_model=TokenUsageResponse)
async def get_conversation_tokens(
    conversation_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Report token usage and a rough cost estimate for a conversation.

    Raises:
        HTTPException 404: conversation missing or not owned by the user.
        HTTPException 500: token counting failed.
    """
    from app.models.chat_conversation import ChatConversation
    service = LLMService()

    conversation = db.query(ChatConversation).filter(
        ChatConversation.id == conversation_id,
        ChatConversation.user_id == current_user.id
    ).first()

    if not conversation:
        raise HTTPException(status_code=404, detail="对话不存在")

    try:
        total_tokens = await service.count_conversation_tokens(db, conversation_id)

        from app.models.llm_model import LLMModel
        model = db.query(LLMModel).filter(LLMModel.id == conversation.model_id).first()

        # Simplified per-1k-token pricing table; real billing would use
        # each provider's current price sheet.
        cost_per_1k_tokens = {
            "openai": {"gpt-3.5-turbo": 0.002, "gpt-4": 0.03},
            "anthropic": {"claude-3-haiku": 0.0025, "claude-3-sonnet": 0.015, "claude-3-opus": 0.075},
            "deepseek": {"deepseek-chat": 0.001, "deepseek-coder": 0.001},
            "kimi": {"moonshot-v1-8k": 0.012, "moonshot-v1-32k": 0.024, "moonshot-v1-128k": 0.06},
            "doubao": {"doubao-lite-4k": 0.0008, "doubao-pro-4k": 0.0012, "doubao-pro-32k": 0.005}
        }

        estimated_cost = 0.0
        if model:
            provider_name = str(getattr(model, 'provider', ''))
            model_name = str(getattr(model, 'model_name', ''))
            provider_costs = cost_per_1k_tokens.get(provider_name)
            if provider_costs is not None:
                # Unknown models within a known provider fall back to a
                # generic 0.001 / 1k rate.
                estimated_cost = (total_tokens / 1000) * provider_costs.get(model_name, 0.001)

        # prompt/completion split is an estimate (60/40), not measured.
        return TokenUsageResponse(
            conversation_id=conversation_id,
            total_tokens=total_tokens,
            prompt_tokens=int(total_tokens * 0.6),
            completion_tokens=int(total_tokens * 0.4),
            estimated_cost=round(estimated_cost, 4)
        )

    except Exception as e:
        logger.error(f"计算token失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.delete("/conversations/{conversation_id}")
async def delete_conversation(
    conversation_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Soft-delete a conversation by marking it inactive.

    Raises:
        HTTPException 404: conversation missing or not owned by the user.
    """
    from app.models.chat_conversation import ChatConversation

    conversation = db.query(ChatConversation).filter(
        ChatConversation.id == conversation_id,
        ChatConversation.user_id == current_user.id
    ).first()

    if not conversation:
        raise HTTPException(status_code=404, detail="对话不存在")

    # Soft delete: keep the row, flip the active flag.
    conversation.is_active = False
    db.commit()

    return {"message": "对话已删除"}


@router.put("/conversations/{conversation_id}")
async def update_conversation(
    conversation_id: int,
    title: str,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user)
):
    """Rename a conversation owned by the current user.

    Raises:
        HTTPException 404: conversation missing or not owned by the user.
    """
    from app.models.chat_conversation import ChatConversation

    conversation = db.query(ChatConversation).filter(
        ChatConversation.id == conversation_id,
        ChatConversation.user_id == current_user.id
    ).first()

    if not conversation:
        raise HTTPException(status_code=404, detail="对话不存在")

    conversation.title = title
    db.commit()

    return {"message": "对话标题已更新"}


class LLMModelUpdate(BaseModel):
    """Partial-update payload for an LLM model configuration.

    All fields are optional; only fields explicitly provided by the client
    are applied (the update endpoint reads this with
    ``dict(exclude_unset=True)`` and additionally skips ``None`` values).
    """
    name: Optional[str] = Field(None, description="模型名称")
    provider: Optional[str] = Field(None, description="提供商")
    # Maps to the LLMModel.model_name column, not its primary key.
    llm_model_id: Optional[str] = Field(None, description="模型标识符")
    api_key: Optional[str] = Field(None, description="API密钥")
    base_url: Optional[str] = Field(None, description="基础URL")
    max_tokens: Optional[int] = Field(None, description="最大token数")
    # Stored on the model as {"default": value, "min": 0, "max": 2}.
    temperature: Optional[float] = Field(None, description="温度参数", ge=0, le=2)
    is_active: Optional[bool] = Field(None, description="是否激活")


@router.put("/models/{model_id}", response_model=LLMModelResponse)
async def update_llm_model(
        model_id: int,
        model_update: LLMModelUpdate,
        db: Session = Depends(get_db),
        current_user: User = Depends(get_current_user)
):
    """Apply a partial update to an LLM model configuration.

    Only fields explicitly set (and non-None) in the payload are written.

    Raises:
        HTTPException 404: no model with the given id.
    """
    from app.models.llm_model import LLMModel

    db_model = db.query(LLMModel).filter(LLMModel.id == model_id).first()
    if not db_model:
        raise HTTPException(status_code=404, detail="模型不存在")

    changes = model_update.dict(exclude_unset=True)
    for field_name, new_value in changes.items():
        if new_value is None:
            continue
        if field_name == "llm_model_id":
            # Schema field llm_model_id maps to the model_name column.
            db_model.model_name = new_value
        elif field_name == "temperature":
            # Temperature is persisted as a JSON object, not a bare float.
            db_model.temperature = {"default": new_value, "min": 0, "max": 2}
        else:
            setattr(db_model, field_name, new_value)

    db.commit()
    db.refresh(db_model)

    return LLMModelResponse(
        id=int(db_model.id) if db_model.id else 0,  # type: ignore
        name=str(db_model.name) if db_model.name else "",  # type: ignore
        provider=str(db_model.provider) if db_model.provider else "",  # type: ignore
        llm_model_id=str(db_model.model_name) if db_model.model_name else "",  # type: ignore
        max_tokens=int(db_model.max_tokens) if db_model.max_tokens else 0,  # type: ignore
        temperature=float(db_model.temperature.get('default', 0.7)) if isinstance(db_model.temperature, dict) else 0.7,  # type: ignore
        is_active=bool(db_model.is_active) if db_model.is_active is not None else False,  # type: ignore
        created_at=db_model.created_at,  # type: ignore
        updated_at=db_model.updated_at  # type: ignore
    )
