from typing import Optional, Any, Dict

from fastapi import APIRouter, UploadFile, File, HTTPException, Depends, Request, Body
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, Field

from models.dependencies import get_dependencies
from utils.file_handler import save_temp_file, remove_temp_file
from utils.logger import setup_logger

# ModelHandler and ModelConfig are assumed to be defined elsewhere in the project
from data_handler.data_model_process import ModelHandler
from config import ModelConfig  # replace with the actual module path

# Module-level logger shared by every route in this file
logger = setup_logger()

# All routes below are mounted under the /ollama prefix and tagged "Ollama" in the API docs
router_ollama = APIRouter(prefix="/ollama", tags=["Ollama"])


# Model configurations
class OllamaChatConfig(BaseModel):
    """Generation settings for the Ollama chat model."""
    model_name: str = "qwen2.5:7b"  # Ollama model tag to run
    model: str = "ollama"  # forwarded as the `model` argument to ModelHandler.call_model
    temperature: float = 0.7
    max_tokens: int = 512
    stream: bool = False  # stream field added to support streaming control

class OllamaVisionConfig(BaseModel):
    """Generation settings for the Ollama vision model."""
    model_name: str = "llama3.2-vision"  # Ollama model tag to run
    model: str = "ollama"  # forwarded as the `model` argument to ModelHandler.call_model
    temperature: float = 0.7
    max_tokens: int = 1024
    stream: bool = False  # stream field added to support streaming control

# Request body models
class ChatRequest(BaseModel):
    """Request body for POST /ollama/chat: a prompt plus optional generation config."""
    prompt: str
    # default_factory builds a fresh config per request; this is the pydantic-recommended
    # pattern instead of a single model instance shared as a class-level default.
    config: Optional[OllamaChatConfig] = Field(default_factory=OllamaChatConfig)

class VisionRequest(BaseModel):
    """Request body for POST /ollama/vision: prompt plus optional images and config."""
    prompt: str
    # Image paths/identifiers handed to the vision model — presumably file paths
    # or base64 strings; confirm against ModelHandler's expectations.
    images: Optional[list[str]] = None
    # default_factory builds a fresh config per request; this is the pydantic-recommended
    # pattern instead of a single model instance shared as a class-level default.
    config: Optional[OllamaVisionConfig] = Field(default_factory=OllamaVisionConfig)


# Ollama Chat route (non-streaming)
@router_ollama.post("/chat")
async def ollama_chat_endpoint(
    request: ChatRequest,
    deps: Dict = Depends(get_dependencies)
):
    """
    Handle a non-streaming Ollama chat request.

    Args:
        request: Prompt plus optional generation config; defaults are applied
            when the client omits the config or sends an explicit null.
        deps: Shared dependencies injected by FastAPI via get_dependencies.

    Returns:
        The value returned by ModelHandler.call_model for the prompt.

    Raises:
        HTTPException: 500 carrying the underlying error message on any failure.
    """
    try:
        handler = ModelHandler()
        # `config` is Optional: a client sending "config": null would otherwise
        # crash the unconditional attribute access below with AttributeError.
        cfg = request.config or OllamaChatConfig()
        # Convert OllamaChatConfig to ModelConfig (assumed to be field-compatible)
        config = ModelConfig(
            model=cfg.model,
            model_name=cfg.model_name,
            temperature=cfg.temperature,
            max_tokens=cfg.max_tokens,
            stream=cfg.stream,
        )
        logger.debug(f"Ollama Chat请求 配置: {config}")

        result = await handler.call_model(
            model=cfg.model,
            model_type="ollama_chat",
            data=request.prompt,
            streaming=False,  # this route is always non-streaming
            config=config,
            dependencies=deps,
        )
        logger.debug(f"Ollama Chat请求 结果: {result}")
        return result
    except Exception as e:
        # logger.exception also records the traceback, unlike logger.error
        logger.exception(f"Ollama Chat处理失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"处理失败: {str(e)}")

# Ollama Vision route (non-streaming, example)
@router_ollama.post("/vision")
async def ollama_vision_endpoint(
    request: VisionRequest,
    deps: Dict = Depends(get_dependencies)
):
    """
    Handle a non-streaming Ollama vision request.

    Args:
        request: Prompt, optional image list, and optional generation config;
            defaults are applied when the client omits the config or sends null.
        deps: Shared dependencies injected by FastAPI via get_dependencies.

    Returns:
        A dict of the form {"response": <ModelHandler.call_model result>}.

    Raises:
        HTTPException: 500 carrying the underlying error message on any failure.
    """
    try:
        handler = ModelHandler()
        # `config` is Optional: a client sending "config": null would otherwise
        # crash the unconditional attribute access below with AttributeError.
        cfg = request.config or OllamaVisionConfig()
        # Convert OllamaVisionConfig to ModelConfig (assumed to be field-compatible)
        config = ModelConfig(
            model=cfg.model,
            model_name=cfg.model_name,
            temperature=cfg.temperature,
            max_tokens=cfg.max_tokens,
            stream=cfg.stream,
        )
        # Bundle the prompt with any images (ModelHandler is assumed to accept this shape)
        data = {"prompt": request.prompt, "images": request.images or []}
        result = await handler.call_model(
            model=cfg.model,
            model_type="ollama_vision",
            data=data,
            streaming=False,  # this route is always non-streaming
            config=config,
            dependencies=deps,
        )
        return {"response": result}
    except Exception as e:
        # logger.exception also records the traceback, unlike logger.error
        logger.exception(f"Ollama Vision处理失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"处理失败: {str(e)}")