"""Ollama模型管理接口实现"""

from fastapi import APIRouter, HTTPException, Query, Depends
from fastapi.responses import StreamingResponse
from typing import List, Dict, Any, Optional, Generator
import logging
import requests
import json
from pydantic import BaseModel

from backend.common.config import config_manager
from backend.logger_setup import get_logger_with_trace_id
from backend.middleware.trace_middleware import get_trace_id
from backend.llm import llm_manager
from backend.database.db import db_manager


# Request models
class MessageItem(BaseModel):
    """A single chat message in the Ollama chat format."""
    # Speaker role (e.g. "user" / "assistant" / "system") — not validated here.
    role: str
    # The message text.
    content: str


class ChatRequest(BaseModel):
    """Request body for the /chat endpoint."""
    # Conversation identifier used for logging (and session-model lookups elsewhere).
    session_id: str
    # Compatible with both single-message and multi-message formats:
    # exactly one of `message` (single user utterance) or `messages` is expected.
    message: Optional[str] = None
    messages: Optional[List[MessageItem]] = None
    # Client-requested streaming flag; the endpoint forces this to True.
    stream: Optional[bool] = None

# Module logger, namespaced under the 'api.' hierarchy for filtering.
logger = logging.getLogger('api.ollama')

# Router for all Ollama management endpoints; the prefix is versioned from
# config_manager.api.version (e.g. "/<version>/llm/ollama").
router = APIRouter(
    prefix=f"/{config_manager.api.version}/llm/ollama",
    tags=["Ollama模型管理"],
    responses={404: {"description": "Not found"}},
)

class OllamaManager:
    """Manages Ollama models: listing, switching (global or per-session), and
    querying the currently active model.

    All methods are static and stateless; configuration is read from
    ``config_manager`` and per-session model mappings live in ``db_manager``.
    """

    @staticmethod
    def get_available_models() -> List[Dict[str, Any]]:
        """Fetch the list of models installed in the Ollama service.

        Returns:
            A list of dicts with keys: name, model, modified_at, size, digest.

        Raises:
            HTTPException: 503 when Ollama is unreachable, 500 on any other failure.
        """
        try:
            # Read the Ollama endpoint settings from configuration.
            ollama_config = config_manager.llm_providers.ollama
            base_url = ollama_config.base_url

            # Ollama's /api/tags endpoint lists the locally installed models.
            url = f"{base_url}/api/tags"
            response = requests.get(url, timeout=ollama_config.timeout)

            if response.status_code != 200:
                logger.error(f"获取Ollama模型列表失败，状态码: {response.status_code}")
                raise HTTPException(
                    status_code=500,
                    detail=f"获取Ollama模型列表失败，状态码: {response.status_code}"
                )

            data = response.json()
            models = data.get('models', [])

            # Normalize each entry to a minimal, stable schema.
            formatted_models = [
                {
                    "name": model.get('name', ''),
                    "model": model.get('model', ''),
                    "modified_at": model.get('modified_at', ''),
                    "size": model.get('size', 0),
                    "digest": model.get('digest', '')
                }
                for model in models
            ]

            logger.info(f"成功获取Ollama模型列表，共 {len(formatted_models)} 个模型")
            return formatted_models
        except requests.exceptions.ConnectionError:
            logger.error(f"无法连接到Ollama服务: {base_url}")
            raise HTTPException(
                status_code=503,
                detail=f"无法连接到Ollama服务: {base_url}"
            )
        except HTTPException:
            # BUGFIX: HTTPException subclasses Exception, so the non-200 branch
            # above used to be caught and re-wrapped by the generic handler
            # below, mangling its status code and detail. Re-raise untouched.
            raise
        except Exception as e:
            logger.error(f"获取Ollama模型列表时发生异常: {str(e)}")
            raise HTTPException(
                status_code=500,
                detail=f"获取Ollama模型列表时发生异常: {str(e)}"
            )

    @staticmethod
    def switch_model(model_name: str, session_id: Optional[str] = None) -> Dict[str, Any]:
        """Switch the Ollama model in use.

        Args:
            model_name: Name of the model to switch to. Must be non-empty.
            session_id: When given, only that session's model mapping is
                changed (persisted via ``db_manager``); otherwise the global
                runtime default is replaced.

        Returns:
            A dict describing the switch; ``Dict[str, Any]`` because the
            ``success`` value is a bool (the old ``Dict[str, str]`` annotation
            was inaccurate).

        Raises:
            HTTPException: 400 for an empty name, 500 on persistence or
                unexpected failures.
        """
        try:
            # Reject empty model names up front.
            if not model_name:
                raise HTTPException(status_code=400, detail="模型名称不能为空")

            ollama_config = config_manager.llm_providers.ollama

            # Warn (but do not fail) when the model is not in Ollama's list:
            # it may be freshly pulled, or the caller may know better.
            available_models = OllamaManager.get_available_models()
            model_exists = any(model['name'] == model_name for model in available_models)

            if not model_exists:
                logger.warning(f"请求的模型 {model_name} 不在Ollama的可用模型列表中")

            # Per-session switch: persist the mapping and return early.
            if session_id:
                try:
                    db_manager.set_session_model(session_id, model_name)
                except Exception as db_error:
                    logger.error(f"保存会话模型映射失败: {db_error}")
                    raise HTTPException(status_code=500, detail="保存会话模型失败")

                logger.info(f"切换会话 {session_id} 的模型为: {model_name}")
                return {
                    "success": True,
                    "message": f"已切换会话 {session_id} 的模型为: {model_name}",
                    "model": model_name,
                    "type": "session",
                    "session_id": session_id
                }
            else:
                # Global switch. NOTE: mutating the config manager is only
                # effective for the current process; persistence would require
                # editing the configuration file.
                original_model = ollama_config.model
                ollama_config.model = model_name

                # Drop the cached Ollama provider so the next use picks up the
                # new model.
                if 'ollama' in llm_manager.providers:
                    del llm_manager.providers['ollama']

                # Best-effort preload of the new model; failure is logged but
                # does not undo the switch.
                try:
                    llm_provider = llm_manager.get_provider('ollama')
                    if llm_provider.load_model():
                        logger.info(f"成功切换全局默认Ollama模型: {original_model} -> {model_name}")
                    else:
                        logger.warning(f"切换模型成功，但加载新模型时出现问题")
                except Exception as e:
                    logger.error(f"加载新模型时发生异常: {str(e)}")

                return {
                    "success": True,
                    "message": f"已切换全局默认模型为: {model_name}",
                    "model": model_name,
                    "type": "global",
                    "previous_model": original_model
                }
        except HTTPException as e:
            raise e
        except Exception as e:
            logger.error(f"切换Ollama模型时发生异常: {str(e)}")
            raise HTTPException(
                status_code=500,
                detail=f"切换Ollama模型时发生异常: {str(e)}"
            )

    @staticmethod
    def get_current_model(session_id: Optional[str] = None) -> Dict[str, str]:
        """Return the Ollama model currently in effect.

        Args:
            session_id: When given, the session's mapped model is returned if
                one exists; otherwise the global default is returned with
                type ``"global_fallback"``.

        Returns:
            A dict with ``model``, ``type`` and (when applicable) ``session_id``.

        Raises:
            HTTPException: 500 on any unexpected failure.
        """
        try:
            if session_id:
                # Prefer the persisted per-session mapping.
                mapped_model = db_manager.get_session_model(session_id)
                if mapped_model:
                    return {
                        "model": mapped_model,
                        "type": "session",
                        "session_id": session_id
                    }
                # No mapping: fall back to the global default, flagged as such.
                ollama_config = config_manager.llm_providers.ollama
                return {
                    "model": ollama_config.model,
                    "type": "global_fallback",
                    "session_id": session_id
                }
            else:
                ollama_config = config_manager.llm_providers.ollama
                return {
                    "model": ollama_config.model,
                    "type": "global"
                }
        except Exception as e:
            logger.error(f"获取当前Ollama模型时发生异常: {str(e)}")
            raise HTTPException(
                status_code=500,
                detail=f"获取当前Ollama模型时发生异常: {str(e)}"
            )

@router.get("/models", summary="获取Ollama可用模型列表")
async def get_ollama_models():
    """Return the names of the models currently available in Ollama.

    Responds with ``{"models": [name, ...]}``; 503 when Ollama cannot be
    reached, 500 on any other failure.
    """
    try:
        cfg = config_manager.llm_providers.ollama
        request_timeout = getattr(cfg, "timeout", 10)
        tags_url = f"{cfg.base_url.rstrip('/')}/api/tags"

        logger.info(f"正在从 {tags_url} 获取模型列表")
        response = requests.get(tags_url, timeout=request_timeout)
        response.raise_for_status()

        payload = response.json() if response.content else {}

        # Keep only well-formed entries that carry a non-empty name.
        model_names = []
        for entry in payload.get("models", []):
            if isinstance(entry, dict):
                name = entry.get("name")
                if name:
                    model_names.append(name)

        logger.info(f"成功获取到 {len(model_names)} 个模型")
        return {"models": model_names}
    except requests.exceptions.RequestException as request_error:
        logger.error(f"获取Ollama模型列表失败: {request_error}")
        raise HTTPException(status_code=503, detail=f"无法获取Ollama模型列表: {request_error}")
    except Exception as e:
        logger.error(f"获取模型列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取模型列表失败: {str(e)}")

@router.post("/use-model", summary="切换Ollama模型")
async def use_ollama_model(
    model: str,
    session_id: Optional[str] = Query(None, description="可选的会话ID，指定后只切换该会话的模型")
):
    """Switch the Ollama model in use.

    - **model**: name of the model to switch to
    - **session_id**: optional; when given, only that session's model changes

    Returns a dict with:
    - success: whether the switch succeeded
    - message: human-readable result
    - model: the model now in use
    - type: "global" or "session"
    - previous_model: prior model (global switches only)
    """
    try:
        return OllamaManager.switch_model(model, session_id)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/current-model", summary="获取当前使用的Ollama模型")
async def get_current_ollama_model(
    session_id: Optional[str] = Query(None, description="可选的会话ID，指定后返回该会话的模型")
):
    """Report the Ollama model currently in effect.

    - **session_id**: optional; when given, the session's mapped model is
      returned (falling back to the global default)

    Returns a dict with:
    - success: True
    - model: the model in use
    - type: "global", "session" or "global_fallback"
    """
    try:
        model_info = OllamaManager.get_current_model(session_id)
    except HTTPException:
        raise
    return {"success": True, **model_info}


@router.post("/chat", summary="Ollama聊天接口")
async def ollama_chat(
    request: ChatRequest,
    trace_id: str = Depends(get_trace_id)
):
    """
    Direct Ollama chat endpoint; the reply is streamed back as SSE.

    - **session_id**: conversation ID (also used to look up a per-session model)
    - **messages**: message list (or a single **message** string)
    - **stream**: forced to True

    Emits SSE events of the form ``data: {json}\n\n`` where the JSON has:
    - **type**: chunk/done/error
    - **content**: text content
    - **trace_id**: request trace ID
    """
    # Logger bound to this request's trace id.
    log = get_logger_with_trace_id(logger, trace_id)
    log.info(f"接收到Ollama聊天请求: session_id={request.session_id}")

    # This endpoint only supports streaming.
    request.stream = True

    ollama_config = config_manager.llm_providers.ollama

    # CONSISTENCY FIX: honor the per-session model mapping maintained by
    # /use-model (see OllamaManager.switch_model / get_current_model); fall
    # back to the global default when no mapping exists or the lookup fails.
    try:
        session_model = db_manager.get_session_model(request.session_id)
    except Exception as lookup_error:
        log.warning(f"查询会话模型失败，使用全局默认模型: {lookup_error}")
        session_model = None
    model_name = session_model or ollama_config.model

    def generate_events():
        """Generator producing SSE-framed chunks proxied from Ollama."""
        try:
            # Build the plain-dict message list Ollama expects.
            if request.messages:
                # BUGFIX: pydantic MessageItem instances are not serializable
                # by requests' `json=` parameter (stdlib json.dumps raises
                # TypeError on BaseModel objects) — convert to plain dicts.
                messages = [
                    {"role": item.role, "content": item.content}
                    for item in request.messages
                ]
            elif request.message:
                # Single-message shorthand: wrap as one user message.
                messages = [{
                    "role": "user",
                    "content": request.message
                }]
            else:
                error_data = json.dumps({
                    "type": "error",
                    "content": "消息不能为空",
                    "trace_id": trace_id
                }, ensure_ascii=False)
                yield f"data: {error_data}\n\n"
                return

            # Request parameters forwarded to Ollama.
            ollama_params = {
                "model": model_name,
                "messages": messages,
                "stream": True,
                "options": {
                    "temperature": 0.7,
                    "top_p": 0.95,
                    "num_predict": 4096,
                }
            }

            ollama_chat_url = f"{ollama_config.base_url}/api/chat"
            log.debug(f"转发请求到Ollama: {ollama_chat_url}")

            with requests.post(ollama_chat_url, json=ollama_params, stream=True, timeout=ollama_config.timeout) as response:
                if response.status_code != 200:
                    error_content = response.text
                    log.error(f"Ollama API调用失败: {response.status_code}, {error_content}")
                    error_data = json.dumps({
                        "type": "error",
                        "content": f"Ollama服务错误: {response.status_code}",
                        "trace_id": trace_id
                    }, ensure_ascii=False)
                    yield f"data: {error_data}\n\n"
                    return

                # Ollama streams one JSON object per line; re-frame each
                # content fragment as an SSE "chunk" event.
                for line in response.iter_lines():
                    if line:
                        try:
                            ollama_response = json.loads(line.decode('utf-8'))

                            if "message" in ollama_response and "content" in ollama_response["message"]:
                                content = ollama_response["message"]["content"]
                                if content:
                                    event_data = json.dumps({
                                        "type": "chunk",
                                        "content": content,
                                        "trace_id": trace_id
                                    }, ensure_ascii=False)
                                    yield f"data: {event_data}\n\n"

                            # Ollama marks the final frame with done=true.
                            if ollama_response.get("done", False):
                                break
                        except json.JSONDecodeError as e:
                            # Skip malformed lines rather than aborting the stream.
                            log.error(f"解析Ollama响应失败: {str(e)}, 原始数据: {line}")
                            continue

            # Terminal "done" event so clients can close cleanly.
            complete_data = json.dumps({
                "type": "done",
                "content": "",
                "trace_id": trace_id
            }, ensure_ascii=False)
            yield f"data: {complete_data}\n\n"

        except Exception as e:
            # Surface any failure to the client as an SSE error event.
            log.error(f"Ollama聊天处理失败: {str(e)}")
            error_data = json.dumps({
                "type": "error",
                "content": f"内部错误: {str(e)}",
                "trace_id": trace_id
            }, ensure_ascii=False)
            yield f"data: {error_data}\n\n"

    return StreamingResponse(
        generate_events(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive"
        }
    )

@router.get("/health", summary="检查Ollama服务健康状态")
async def check_ollama_health():
    """
    Check the health of the Ollama service.

    Returns a dict with:
    - success: whether the service is healthy
    - status: "healthy" / "unhealthy" / "unreachable" / "error"
    - version: Ollama version if available (currently always "unknown")
    - base_url: the configured endpoint
    - error: failure detail (only on failure)
    """
    # BUGFIX: pre-initialize base_url so the exception handlers below cannot
    # raise NameError (masking the real error) if reading the configuration
    # itself fails before base_url is assigned.
    base_url = "unknown"
    try:
        ollama_config = config_manager.llm_providers.ollama
        base_url = ollama_config.base_url

        # A successful /api/tags call is used as the health probe.
        url = f"{base_url}/api/tags"
        response = requests.get(url, timeout=ollama_config.timeout)

        if response.status_code == 200:
            # Ollama exposes no version endpoint here; placeholder value.
            version = "unknown"

            logger.info(f"Ollama服务健康检查通过: {base_url}")
            return {
                "success": True,
                "status": "healthy",
                "version": version,
                "base_url": base_url
            }
        else:
            logger.error(f"Ollama服务健康检查失败，状态码: {response.status_code}")
            return {
                "success": False,
                "status": "unhealthy",
                "error": f"状态码: {response.status_code}",
                "base_url": base_url
            }
    except requests.exceptions.ConnectionError:
        logger.error(f"无法连接到Ollama服务: {base_url}")
        return {
            "success": False,
            "status": "unreachable",
            "error": "无法连接到Ollama服务",
            "base_url": base_url
        }
    except Exception as e:
        logger.error(f"Ollama服务健康检查时发生异常: {str(e)}")
        return {
            "success": False,
            "status": "error",
            "error": str(e),
            "base_url": base_url
        }


@router.post("/refresh-models", summary="刷新Ollama模型列表")
async def refresh_ollama_models():
    """Re-fetch the model list from the Ollama service.

    Returns a dict with:
    - success: whether the refresh succeeded
    - message: human-readable summary
    - models: the refreshed model list
    - count: number of models
    """
    try:
        cfg = config_manager.llm_providers.ollama
        base_url = cfg.base_url
        tags_url = f"{base_url}/api/tags"

        logger.info(f"正在从{tags_url}刷新模型列表")

        response = requests.get(tags_url, timeout=cfg.timeout)

        if response.status_code != 200:
            logger.error(f"刷新Ollama模型列表失败，状态码: {response.status_code}")
            raise HTTPException(
                status_code=500,
                detail=f"刷新Ollama模型列表失败，状态码: {response.status_code}"
            )

        payload = response.json()

        # Normalize each entry; "name" carries the tag (e.g. "llama3:8b"),
        # which is stripped for the short display name.
        models = []
        for entry in payload.get("models", []):
            full_id = entry.get("name", "")
            short_name = full_id.split(":")[0] if ":" in full_id else full_id
            models.append({
                "name": short_name,
                "model": full_id,
                "id": full_id,
                "modified_at": entry.get("modified_at", ""),
                "size": entry.get("size", 0),
                "digest": entry.get("digest", "")
            })

        logger.info(f"成功刷新模型列表，共获取到{len(models)}个模型")

        return {
            "success": True,
            "message": f"成功刷新模型列表，共 {len(models)} 个模型",
            "models": models,
            "count": len(models)
        }
    except requests.exceptions.ConnectionError:
        logger.error(f"无法连接到Ollama服务: {base_url}")
        raise HTTPException(
            status_code=503,
            detail=f"无法连接到Ollama服务: {base_url}"
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"刷新Ollama模型列表时发生异常: {str(e)}")
        raise HTTPException(status_code=500, detail=f"刷新Ollama模型列表失败: {str(e)}")
