import json
import time

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import requests

# Initialize the FastAPI application.
app = FastAPI()

# Base URL of the Ollama HTTP API; endpoint paths are appended to this
# (note the trailing slash, so callers concatenate "generate", "chat", ...).
OLLAMA_API_URL = "http://localhost:11434/api/"  # Ollama's default address


# Request body models (OpenAI-compatible format)
class CompletionRequest(BaseModel):
    """Request body for POST /v1/completions (OpenAI text-completion format)."""
    model: str           # name of the Ollama model to use
    prompt: str          # raw prompt text
    max_tokens: int = 100
    temperature: float = 0.7


class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI chat format)."""
    model: str
    # Presumably a list of {"role": ..., "content": ...} dicts (OpenAI chat
    # format); the element shape is not validated here — TODO confirm.
    messages: list
    max_tokens: int = 100
    temperature: float = 0.7


# Request body model (OpenAI-compatible format)
class EmbeddingRequest(BaseModel):
    """Request body for POST /v1/embeddings (OpenAI embeddings format)."""
    model: str
    input: str  # single input text (batched list input is not supported here)


# Text completion endpoint
@app.post("/v1/completions")
async def completions(request: CompletionRequest):
    """OpenAI-compatible text completion, proxied to Ollama's /api/generate.

    Returns an OpenAI `text_completion` response object; any failure is
    surfaced as HTTP 500 with the underlying error message.
    """
    try:
        # Build the Ollama request body.
        ollama_data = {
            "model": request.model,
            "prompt": request.prompt,
            "options": {
                # Ollama's option for the token limit is "num_predict";
                # "max_tokens" would be silently ignored.
                "num_predict": request.max_tokens,
                "temperature": request.temperature
            },
            # Ollama streams NDJSON by default; without this flag the body
            # is not a single JSON document and parsing would fail.
            "stream": False
        }
        # Call the Ollama API.
        response = requests.post(OLLAMA_API_URL + "generate", json=ollama_data)
        response.raise_for_status()
        text = response.json()['response']
        # Return an OpenAI-compatible response.
        return {
            "id": "ollama-completion-id",  # placeholder ID (not tracked)
            "object": "text_completion",
            "created": int(time.time()),
            "model": request.model,
            "choices": [
                {
                    "text": text,
                    "index": 0,
                    "logprobs": None,
                    "finish_reason": "length"  # simulated finish reason
                }
            ],
            "usage": {
                "prompt_tokens": 0,  # token counts are not reported
                "completion_tokens": 0,
                "total_tokens": 0
            }
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


# Chat endpoint
@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest):
    """OpenAI-compatible chat completion, proxied to Ollama's /api/chat.

    Forwards the caller's message list unchanged and wraps Ollama's reply
    in an OpenAI `chat.completion` response object. Failures are surfaced
    as HTTP 500 with the underlying error message.
    """
    try:
        # Build the Ollama request body. Bug fix: forward the caller's
        # conversation instead of a hard-coded test question.
        ollama_data = {
            "model": request.model,
            "messages": request.messages,
            "options": {
                # Ollama's option for the token limit is "num_predict".
                "num_predict": request.max_tokens,
                "temperature": request.temperature
            },
            "stream": False
        }
        # Call the Ollama API.
        response = requests.post(OLLAMA_API_URL + "chat", json=ollama_data)
        response.raise_for_status()
        # Guard with a default dict so a missing "message" key yields None
        # instead of raising AttributeError.
        content = response.json().get("message", {}).get("content")
        # Return an OpenAI-compatible response.
        return {
            "id": "ollama-chat-id",  # placeholder ID (not tracked)
            "object": "chat.completion",
            "created": int(time.time()),
            "model": request.model,
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": content
                    },
                    "index": 0,
                    "finish_reason": "stop"  # simulated finish reason
                }
            ],
            "usage": {
                "prompt_tokens": 0,  # token counts are not reported
                "completion_tokens": 0,
                "total_tokens": 0
            }
        }
    except Exception as e:
        print(e)
        raise HTTPException(status_code=500, detail=str(e))


# Fetch the list of models installed in Ollama
def get_ollama_models():
    """Fetch installed models from Ollama's /api/tags endpoint.

    Returns a list of OpenAI-style model entries, or an empty list if
    Ollama is unreachable (best-effort: the error is logged, not raised).
    """
    try:
        # OLLAMA_API_URL already ends with "/"; plain concatenation avoids
        # the "/api//tags" double slash the old f-string produced, and is
        # consistent with the other endpoint calls in this module.
        response = requests.get(OLLAMA_API_URL + "tags")
        response.raise_for_status()
        models = response.json().get("models", [])
        return [
            {"id": model["name"], "object": "model", "owned_by": "ollama"}
            for model in models
        ]
    except Exception as e:
        print(f"Failed to fetch models from Ollama: {e}")
        return []


# Model list endpoint
@app.get("/v1/models")
async def list_models():
    """Return the OpenAI-style model list sourced from the local Ollama server."""
    available = get_ollama_models()
    return {"object": "list", "data": available}


@app.post("/v1/embeddings")
async def embeddings(request: EmbeddingRequest):
    """OpenAI-compatible embeddings, proxied to Ollama's /api/embed.

    Returns one data entry per vector in Ollama's "embeddings" array.
    Failures are surfaced as HTTP 500 with the underlying error message.
    """
    try:
        # Build the Ollama request body.
        ollama_data = {
            "model": request.model,
            "input": request.input,
            "stream": False
        }
        # Call the Ollama API.
        response = requests.post(OLLAMA_API_URL + "embed", json=ollama_data)
        # Bug fix: fail fast on HTTP errors instead of trying to parse an
        # error body as a successful result.
        response.raise_for_status()
        # Ollama's /api/embed returns {"embeddings": [[...], ...]} — a list
        # of vectors, one per input. The old code wrapped the whole list in
        # a single "embedding" field; OpenAI clients expect one entry each.
        vectors = response.json().get("embeddings", [])
        # Return an OpenAI-compatible response.
        return {
            "object": "list",
            "data": [
                {
                    "object": "embedding",
                    "embedding": vector,
                    "index": i
                }
                for i, vector in enumerate(vectors)
            ],
            "model": request.model,
            "usage": {
                "prompt_tokens": 0,  # token counts are not reported
                "total_tokens": 0
            }
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


# Health check endpoint
@app.get("/health")
async def health():
    """Liveness probe: always reports the service as up."""
    return dict(status="ok")
