import json

import requests

OLLAMA_API_URL = "http://localhost:11434/api/"  # Base URL of the Ollama HTTP API (default host is http://localhost:11434)


# Text generation (prompt completion)
def generate():
    """Send a one-shot prompt to /api/generate and print the model's reply."""
    payload = {
        "model": "llama3.2:latest",
        "prompt": "您好",
        "stream": False,
    }
    response = requests.post(OLLAMA_API_URL + "generate", json=payload)
    print(response.json().get("response"))


# Chat completion
def chat():
    """Send a single-turn chat request to /api/chat and print the answer."""
    payload = {
        "model": "llama3.2:latest",
        "messages": [{"role": "user", "content": "为什么草是绿的？"}],
        "stream": False,
    }
    response = requests.post(OLLAMA_API_URL + "chat", json=payload)
    # Non-streaming replies carry the text under message.content.
    print(response.json().get("message").get("content"))



def embeddings():
    """Request embeddings for a fixed input string via /api/embed and print them."""
    payload = {
        "model": "llama3.2:latest",
        "input": "为什么草是绿的?",
        "stream": False,
    }
    response = requests.post(OLLAMA_API_URL + "embed", json=payload)
    # Equivalent to json.loads(response.content); Response.json() parses the body.
    print(response.json()["embeddings"])


def get_ollama_models():
    """Fetch the locally installed Ollama models from /api/tags.

    Returns:
        A list of OpenAI-style model descriptors
        ({"id": <model name>, "object": "model", "owned_by": "ollama"}),
        or an empty list if the request fails.
    """
    try:
        # Bug fix: OLLAMA_API_URL already ends with "/", so the previous
        # f"{OLLAMA_API_URL}/tags" produced a double slash ("api//tags").
        # Concatenate like the other helpers in this file do.
        response = requests.get(OLLAMA_API_URL + "tags")
        response.raise_for_status()
        models = response.json().get("models", [])
        print(models)
        return [
            {"id": model["name"], "object": "model", "owned_by": "ollama"}
            for model in models
        ]
    except Exception as e:
        # Best-effort boundary: log and fall back to an empty list rather
        # than propagate network/parse errors to the caller.
        print(f"Failed to fetch models from Ollama: {e}")
        return []
def list_models():
    """Print the raw parsed JSON from /api/tags (the installed-model listing)."""
    response = requests.get(OLLAMA_API_URL + "tags")
    # Response.json() parses response.content the same way json.loads does.
    print(response.json())


if __name__ == '__main__':
    # Demo entry point — uncomment exactly one of the calls below to try it:
    # generate()
    # embeddings()
    # list_models()
    # get_ollama_models()
    chat()