import logging
from fastapi import APIRouter, HTTPException, Depends
from pydantic import BaseModel, Field, UUID4
from typing import List, Dict, Optional, Any
import requests
import json

from backend.common.config import config_manager
from backend.logger_setup import get_logger_with_trace_id
from backend.middleware.trace_middleware import get_trace_id
from backend.middleware.auth_middleware import admin_required
from backend.llm import llm_manager

# Create the settings router; all routes are mounted under /{api version}/settings.
router = APIRouter(
    prefix=f"/{config_manager.api.version}/settings",
    tags=["设置"],
    responses={404: {"description": "未找到"}},
)

# Module-level logger; handlers wrap it with a trace id via get_logger_with_trace_id.
logger = logging.getLogger('api.settings')

# Model configuration request/response schema
class ModelConfig(BaseModel):
    """A single LLM model configuration entry.

    Field names are camelCase to match the frontend payloads.
    """
    id: Optional[str] = None  # unique id; save_model_config falls back to modelId when absent
    name: str  # human-readable model name
    modelId: str  # backend model identifier, e.g. "qwen2.5:7b"
    temperature: float = Field(default=0.7, ge=0.0, le=2.0)  # sampling temperature
    maxTokens: int = Field(default=4096, ge=1)  # maximum tokens to generate
    topP: float = Field(default=1.0, ge=0.0, le=1.0)  # nucleus sampling cutoff
    frequencyPenalty: float = Field(default=0.0, ge=-2.0, le=2.0)
    presencePenalty: float = Field(default=0.0, ge=-2.0, le=2.0)
    isDefault: bool = False  # marks the model used as the Ollama default
    type: str = "ollama"  # backend type; only "ollama" is produced in this module

# System configuration response schema
class SystemConfigResponse(BaseModel):
    """Aggregated system configuration returned by GET /.

    All sections are optional dicts; note that logConfig is declared but
    never populated by get_system_config in this module.
    """
    apiConfig: Optional[Dict] = None  # HTTP API settings (baseUrl, timeout, ...)
    modelConfig: Optional[Dict] = None  # model name/path/quantization settings
    ragConfig: Optional[Dict] = None  # retrieval-augmented generation settings
    toolConfig: Optional[Dict] = None  # tool (web search / db / file) settings
    learningConfig: Optional[Dict] = None  # LoRA fine-tuning settings
    logConfig: Optional[Dict] = None  # currently unused — no handler fills this

# API configuration schema (request body of PUT /api)
class APIConfig(BaseModel):
    """HTTP API settings as edited by the frontend."""
    baseUrl: str  # API base URL
    timeout: int  # request timeout — presumably milliseconds (60000 default elsewhere); TODO confirm
    retryCount: int  # number of retries on failure
    corsEnabled: bool  # whether CORS is enabled
# Retrieve the aggregated system configuration
@router.get("/", response_model=SystemConfigResponse)
async def get_system_config(
    trace_id: str = Depends(get_trace_id)
):
    """Assemble and return the full system configuration.

    Combines API, model, RAG, tool and learning settings into one
    SystemConfigResponse. Values not tracked by the config manager are
    filled with hard-coded defaults. Raises HTTP 500 on any failure.
    """
    log = get_logger_with_trace_id(logger, trace_id)
    log.info("获取系统配置")

    try:
        quantized = config_manager.model.quantized

        api_section = {
            "baseUrl": config_manager.api.host,
            "timeout": 60000,  # default: 60 seconds
            "retryCount": 3,   # default: 3 retries
            "corsEnabled": True
        }

        model_section = {
            "name": config_manager.model.name,
            "path": config_manager.model.model_path or "./models",
            "type": "gptq" if quantized else "fp16",
            "quantBits": config_manager.model.quantization_bits if quantized else None,
            "useCache": True,
            "cacheSize": 1024,
            "deviceMap": config_manager.model.device_map
        }

        rag_section = {
            "enabled": config_manager.rag.enabled,
            "embeddingModel": "all-MiniLM-L6-v2",  # default embedding model
            "topK": config_manager.rag.top_k,
            "scoreThreshold": config_manager.rag.score_threshold,
            "chunkSize": config_manager.rag.chunk_size,
            "chunkOverlap": config_manager.rag.chunk_overlap,
            "maxContextSize": 4096
        }

        tool_section = {
            "webSearch": {
                "enabled": config_manager.tools.web_search_enabled,
                "provider": config_manager.tools.web_search_engine,
                "apiKey": config_manager.tools.web_search_api_key,
                "maxResults": 3
            },
            "databaseQuery": {
                "enabled": "database_query" in config_manager.tools.allowed_tools,
                "connectionString": "",
                "allowedTables": []
            },
            "fileOperation": {
                "enabled": "file_operation" in config_manager.tools.allowed_tools,
                "allowedPaths": [],
                "readOnly": True
            }
        }

        learning_section = {
            "enabled": not config_manager.startup.skip_learning,
            "trainingBatchSize": 4,
            "learningRate": 0.0001,
            "numEpochs": 3,
            "adapterName": "lora-finetune",
            "loraRank": 8,
            "loraAlpha": 32,
            "loraDropout": 0.1
        }

        return SystemConfigResponse(
            apiConfig=api_section,
            modelConfig=model_section,
            ragConfig=rag_section,
            toolConfig=tool_section,
            learningConfig=learning_section
        )
    except Exception as e:
        log.error(f"获取系统配置失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to get system config: {str(e)}")

# Retrieve the API configuration
@router.get("/api")
async def get_api_config(
    trace_id: str = Depends(get_trace_id)
):
    """Return the HTTP API configuration: base URL plus hard-coded defaults."""
    log = get_logger_with_trace_id(logger, trace_id)
    log.info("获取API配置")

    try:
        api_config = {
            "baseUrl": config_manager.api.host,
            "timeout": 60000,  # default: 60 seconds
            "retryCount": 3,   # default: 3 retries
            "corsEnabled": True
        }
        return api_config
    except Exception as e:
        log.error(f"获取API配置失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to get API config: {str(e)}")

# Update the API configuration
@router.put("/api")
async def update_api_config(
    config: APIConfig,
    trace_id: str = Depends(get_trace_id)
):
    """Acknowledge an API configuration update.

    NOTE(review): nothing is persisted here — the config manager may be
    read-only, so this endpoint only logs the payload and reports success.
    """
    log = get_logger_with_trace_id(logger, trace_id)
    log.info("更新API配置")

    try:
        # TODO: persist the new settings once the config manager supports writes.
        log.info(f"API配置已更新: {config}")
        result = {
            "success": True,
            "message": "API配置已更新",
            "trace_id": trace_id
        }
        return result
    except Exception as e:
        log.error(f"更新API配置失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to update API config: {str(e)}")

# Fetch the list of models available on the Ollama server
def get_ollama_models():
    """Query the Ollama /api/tags endpoint and return its model list.

    Returns the raw 'models' list from the Ollama response, or an empty
    list on connection failure, non-200 status, or a missing/empty list.
    Never raises; all failures are logged.
    """
    try:
        base = config_manager.ollama.base_url
        tags_url = f"{base}/api/tags"

        logger.info(f"正在请求Ollama服务获取模型列表: {tags_url}")
        response = requests.get(tags_url, timeout=10)

        if response.status_code != 200:
            logger.error(f"请求Ollama服务失败！状态码: {response.status_code}")
        else:
            models = response.json().get('models')
            if models:
                return models
            logger.warning("Ollama服务中没有找到任何模型")
    except requests.exceptions.ConnectionError:
        logger.error("无法连接到Ollama服务")
    except Exception as e:
        logger.error(f"获取Ollama模型列表时发生错误: {str(e)}")

    return []

# Retrieve the model configuration list
@router.get("/models", response_model=List[ModelConfig])
async def get_model_configs(
    trace_id: str = Depends(get_trace_id)
):
    """Return the list of available model configurations.

    Queries the Ollama service for installed models and converts each one
    into a ModelConfig with default sampling parameters. Exactly one entry
    is marked as default: the currently configured Ollama model when it is
    present in the list, otherwise the first model. Falls back to a single
    hard-coded entry when Ollama reports no models. Raises HTTP 500 on
    unexpected failure.
    """
    log = get_logger_with_trace_id(logger, trace_id)
    log.info("获取模型配置列表")

    try:
        ollama_models = get_ollama_models()

        if not ollama_models:
            # Nothing reported by Ollama: return the built-in fallback model.
            return [
                ModelConfig(
                    id="qwen2.5:7b",
                    name="Qwen2.5-7B-Instruct",
                    modelId="qwen2.5:7b",
                    temperature=0.7,
                    maxTokens=4096,
                    topP=1.0,
                    frequencyPenalty=0.0,
                    presencePenalty=0.0,
                    isDefault=True,
                    type="ollama"
                )
            ]

        # Resolve model ids first so a single default can be chosen.
        # Fix: previously the first model was ALWAYS flagged as default and the
        # configured model was ALSO flagged, so two entries could be default
        # when they differed. Now: prefer the configured model, else the first.
        model_ids = [m.get('model', m.get('name')) for m in ollama_models]
        current_model = config_manager.ollama.model
        default_id = current_model if current_model in model_ids else model_ids[0]

        model_configs = []
        for model, model_id in zip(ollama_models, model_ids):
            model_configs.append(
                ModelConfig(
                    id=model_id,
                    name=model.get('name'),
                    modelId=model_id,
                    temperature=0.7,  # default sampling temperature
                    maxTokens=4096,  # default max generated tokens
                    topP=1.0,
                    frequencyPenalty=0.0,
                    presencePenalty=0.0,
                    isDefault=(model_id == default_id),
                    type="ollama"
                )
            )

        return model_configs
    except Exception as e:
        log.error(f"获取模型配置列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to get model configs: {str(e)}")

# Add or update a model configuration
@router.post("/models")
async def save_model_config(
    config: ModelConfig,
    trace_id: str = Depends(get_trace_id)
):
    """Add or update a model configuration.

    If the payload carries no id, modelId is used as the id. When the model
    is flagged as default, the Ollama backend configuration is switched to
    it. Raises HTTP 500 on unexpected failure.
    """
    log = get_logger_with_trace_id(logger, trace_id)
    log.info(f"保存模型配置: {config.name}")

    try:
        # Assign an id for newly created models.
        if not config.id:
            config.id = config.modelId

        # Flagged as default: point the Ollama backend at this model.
        if config.isDefault:
            config_manager.ollama.model = config.modelId
            # Fix: use the trace-bound logger (was the bare module logger),
            # consistent with the rest of this handler.
            log.info(f"已将Ollama默认模型设置为: {config.modelId}")

        return {
            "success": True,
            "message": "模型配置已保存",
            "modelConfig": config,
            "trace_id": trace_id
        }
    except Exception as e:
        log.error(f"保存模型配置失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to save model config: {str(e)}")

# Delete a model configuration
@router.delete("/models/{model_id}")
async def delete_model_config(
    model_id: str,
    trace_id: str = Depends(get_trace_id)
):
    """Delete the model configuration identified by model_id.

    Refuses with HTTP 400 when model_id is the model currently configured
    as the Ollama default; raises HTTP 500 on unexpected failure.
    """
    log = get_logger_with_trace_id(logger, trace_id)
    log.info(f"删除模型配置: {model_id}")

    try:
        # Guard: the active default model must not be removed.
        if model_id == config_manager.ollama.model:
            raise HTTPException(status_code=400, detail="不能删除当前正在使用的默认模型")

        result = {
            "success": True,
            "message": "模型配置已删除",
            "trace_id": trace_id
        }
        return result
    except HTTPException:
        raise  # propagate the 400 untouched
    except Exception as e:
        log.error(f"删除模型配置失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to delete model config: {str(e)}")

# Set the default model
@router.put("/models/{model_id}/default")
async def set_default_model(
    model_id: str,
    trace_id: str = Depends(get_trace_id)
):
    """Make model_id the default Ollama model.

    Updates config_manager.ollama.model in place and echoes the new
    default id. Raises HTTP 500 on unexpected failure.
    """
    log = get_logger_with_trace_id(logger, trace_id)
    log.info(f"设置默认模型: {model_id}")

    try:
        # Switch the Ollama backend to the requested model.
        config_manager.ollama.model = model_id
        # Fix: use the trace-bound logger (was the bare module logger),
        # consistent with the rest of this handler.
        log.info(f"已将Ollama默认模型设置为: {model_id}")

        return {
            "success": True,
            "message": "默认模型已设置",
            "defaultModelId": model_id,
            "trace_id": trace_id
        }
    except Exception as e:
        log.error(f"设置默认模型失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to set default model: {str(e)}")

# Retrieve the RAG configuration
@router.get("/rag")
async def get_rag_config(
    trace_id: str = Depends(get_trace_id)
):
    """Return the RAG settings from the config manager plus fixed defaults."""
    log = get_logger_with_trace_id(logger, trace_id)
    log.info("获取RAG配置")

    try:
        rag_config = {
            "enabled": config_manager.rag.enabled,
            "embeddingModel": "all-MiniLM-L6-v2",  # default embedding model
            "topK": config_manager.rag.top_k,
            "scoreThreshold": config_manager.rag.score_threshold,
            "chunkSize": config_manager.rag.chunk_size,
            "chunkOverlap": config_manager.rag.chunk_overlap,
            "maxContextSize": 4096
        }
        return rag_config
    except Exception as e:
        log.error(f"获取RAG配置失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to get RAG config: {str(e)}")

# Update the RAG configuration
@router.put("/rag")
async def update_rag_config(
    config: Dict[str, Any],
    trace_id: str = Depends(get_trace_id)
):
    """Acknowledge a RAG configuration update.

    NOTE(review): nothing is persisted here — the config manager may be
    read-only, so this endpoint only logs the payload and reports success.
    """
    log = get_logger_with_trace_id(logger, trace_id)
    log.info("更新RAG配置")

    try:
        # TODO: persist the new settings once the config manager supports writes.
        log.info(f"RAG配置已更新: {config}")
        result = {
            "success": True,
            "message": "RAG配置已更新",
            "trace_id": trace_id
        }
        return result
    except Exception as e:
        log.error(f"更新RAG配置失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to update RAG config: {str(e)}")