from fastapi import FastAPI
from pydantic import BaseModel
import ollama  # 假设 Ollama 提供了 Python 客户端

# FastAPI application exposing Ollama-backed generation/chat/embedding endpoints.
app = FastAPI()

# Name of the local Ollama model used by every endpoint below (the per-request
# `model` fields in the request bodies are currently ignored).
model_name = 'llama3.2:latest'


class QueryRequest(BaseModel):
    """Request body carrying a single free-form prompt string."""

    # Text prompt forwarded verbatim to the Ollama client.
    prompt: str

# Request body model (OpenAI-style completion request).
class CompletionRequest(BaseModel):
    """OpenAI-style text-completion request body.

    NOTE(review): this model is not referenced by any endpoint in this
    file — presumably intended for a future /v1/completions route; confirm.
    """

    prompt: str
    # OpenAI-style cosmetic default; nothing in this file reads this field.
    model: str = "text-davinci-003"
    max_tokens: int = 100
    temperature: float = 0.7

class ChatRequest(BaseModel):
    """OpenAI-style chat-completion request body."""

    # Conversation history; presumably [{"role": ..., "content": ...}] dicts — TODO confirm.
    messages: list
    # Cosmetic default: the endpoint always uses the module-level `model_name`.
    model: str = "gpt-3.5-turbo"
    max_tokens: int = 100
    temperature: float = 0.7


@app.post("/generate")
async def generate_text(request: QueryRequest):
    """Generate a completion for ``request.prompt`` with the local Ollama model.

    Returns the raw Ollama response wrapped in a ``{"response": ...}`` envelope.
    """
    # Removed leftover debug print and commented-out dead code.
    response = ollama.generate(model=model_name, prompt=request.prompt)
    return {"response": response}


@app.post("/v1/chat/completions")
async def generate_text2(request: ChatRequest):
    """OpenAI-compatible chat endpoint backed by the local Ollama model.

    Bug fix: ``ChatRequest`` has no ``prompt`` field, so the original
    ``request.prompt`` raised AttributeError on every call. Use the Ollama
    chat API with the request's message list instead, and forward the
    sampling parameters the client supplied.
    """
    response = ollama.chat(
        model=model_name,  # request.model is intentionally ignored; one local model serves all routes
        messages=request.messages,
        options={
            "temperature": request.temperature,
            # Ollama's name for the max-tokens-to-generate limit.
            "num_predict": request.max_tokens,
        },
    )
    return {"response": response}

@app.post("/v1/embeddings")
async def embeddings(request: QueryRequest):
    """Compute an embedding for ``request.prompt`` via the local Ollama model."""
    result = ollama.embeddings(model=model_name, prompt=request.prompt)
    return {"response": result}

# 运行服务：uvicorn main:app --reload
