import json
import logging
import urllib.error
import urllib.request
from datetime import datetime
from typing import Optional, Dict, Any

from django.conf import settings
from django.http import HttpRequest
from ninja import NinjaAPI, Router

from . import services
from .models import APIKey
from .schemas import LoginIn, LoginOut, ChatIn, ChatOut, HistoryOut, ErrorResponse
from .services import (
    get_or_create_session,
    deepseek_r1_api_call,
    deepseek_chat_api_call,
    call_llm_with_tools,
    get_cached_reply,
    set_cached_reply,
    llm_service,
)

logger = logging.getLogger(__name__)

api = NinjaAPI(title="DeepSeek-KAI API", version="0.0.1")

def api_key_auth(request):
    """验证请求头中的API Key"""
    auth_header = request.headers.get("Authorization")
    if not auth_header:
        return None

    try:
        scheme, key = auth_header.split()
        if scheme.lower() != "bearer":
            return None

        api_key = APIKey.objects.get(key=key)
        return api_key
    except (ValueError, APIKey.DoesNotExist):
        return None

router = Router(auth=api_key_auth)

@api.post("/login", response={200: LoginOut, 400: ErrorResponse, 403: ErrorResponse})
def login(request, data: LoginIn):
    """登录接口"""
    username = data.username.strip()
    password = data.password.strip()
    
    if not username or not password:
        return 400, {"error": "用户名和密码不能为空"}
    
    if password != 'secret':
        return 403, {"error": "密码错误"}
    
    key = services.create_api_key(username)
    return {"api_key": key, "expiry": settings.TOKEN_EXPIRY_SECONDS}

@router.post("/chat", response={200: ChatOut, 401: ErrorResponse})
def chat(request, data: ChatIn):
    """对话接口 - 支持多种大模型"""
    if not request.auth:
        return 401, {"error": "请先登录获取API Key"}
    
    session_id = data.session_id.strip() or "default_session"
    user_input = data.user_input.strip()
    model_type = getattr(data, 'model_type', 'r1')  # 'r1' 或 'chat'
    
    if not user_input:
        return 400, {"error": "请输入消息内容"}
    
    user = request.auth
    session = get_or_create_session(session_id, user)
    
    # 拼接上下文
    pure_context = session.context
    prompt = pure_context + f"用户：{user_input}\n回复："
    logger.info(f"传递给大模型的prompt：\n{prompt}")
    
    # 根据模型类型选择调用方式
    cached_reply = get_cached_reply(prompt, session_id, user)
    if cached_reply:
        reply = cached_reply
    else:
        if model_type == 'chat':
            reply = deepseek_chat_api_call(prompt)
        else:
            reply = deepseek_r1_api_call(prompt)
        set_cached_reply(prompt, reply, session_id, user)
    
    # 保存上下文
    session.context += f"用户：{user_input}\n回复：{reply}\n"
    session.save()

    return {
        "reply": reply,
        "timestamp": datetime.now().strftime("%H:%M:%S"),
        "model_used": model_type
    }

@router.post("/chat/advanced", response={200: Dict[str, Any], 401: ErrorResponse})
def advanced_chat(request, data: Dict[str, Any]):
    """高级对话接口 - 支持工具调用和复杂配置"""
    if not request.auth:
        return 401, {"error": "请先登录获取API Key"}
    
    user_input = data.get('user_input', '').strip()
    session_id = data.get('session_id', 'default_session').strip()
    tools_config = data.get('tools', None)
    model_config = data.get('model_config', {})
    
    if not user_input:
        return 400, {"error": "请输入消息内容"}
    
    user = request.auth
    session = get_or_create_session(session_id, user)
    
    # 构建提示词
    pure_context = session.context
    prompt = pure_context + f"用户：{user_input}\n回复："
    
    # 调用带工具的LLM
    result = call_llm_with_tools(prompt, tools_config)
    
    if result["success"]:
        # 保存成功响应到上下文
        session.context += f"用户：{user_input}\n回复：{result['response']}\n"
        session.save()
    
    return {
        "success": result["success"],
        "reply": result["response"],
        "timestamp": datetime.now().strftime("%H:%M:%S"),
        "usage": result.get("usage", {}),
        "model": result.get("model", "unknown")
    }

@router.get("/models", response={200: Dict[str, Any]})
def list_models(request):
    """获取可用模型列表"""
    config = getattr(settings, 'LLM_CONFIG', {})
    
    models_info = {
        "current_mode": config.get('mode', 'local'),
        "available_models": {
            "local": config.get('local_model', 'deepseek-r1:7b'),
            "online": config.get('online_model', 'deepseek-chat')
        },
        "service_status": {
            "ollama_available": False,
            "online_api_available": False
        }
    }
    
    # 测试Ollama服务状态
    try:
        ollama_url = config.get('local_ollama_url', 'http://localhost:11434')
        response = requests.get(f"{ollama_url}/api/tags", timeout=5)
        if response.status_code == 200:
            models_info["service_status"]["ollama_available"] = True
    except:
        pass
    
    # 测试在线API状态（简单检查配置）
    if config.get('online_api_key'):
        models_info["service_status"]["online_api_available"] = True
    
    return models_info

@router.get("/history", response={200: HistoryOut})
def history(request, session_id: str = "default_session"):
    """查看对话历史接口"""
    processed_session_id = session_id.strip() or "default_session"
    session = services.get_or_create_session(processed_session_id, request.auth)
    return {"history": session.context}

@router.delete("/history", response={200: dict})
def clear_history(request, session_id: str = "default_session"):
    """清空对话历史接口"""
    processed_session_id = session_id.strip() or "default_session"
    session = services.get_or_create_session(processed_session_id, request.auth)
    session.clear_context()
    return {"message": "历史记录已清空"}

# 将路由添加到API
api.add_router("", router)