import requests
import json

"""
windows电脑上使用ollama的步骤：
  1.下载Windows版本的Ollama，如ollama下载在本地的路径为：D:/programFile/llama-b5937-bin-win-cpu-x64。
    并在Ollama文件夹下创建models/7B子文件夹
  2.下载一个支持ollama运行的大模型（文件名后缀为.gguf，如D:/ideaSpace/MyPython/models/TinyLlama-1.1B-Chat-v1.0-GGUF/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf）
  3.然后把后缀名为.gguf格式的大模型文件放到Ollama文件夹下的models/7B文件夹下，并把名称修改为ggml-model-f16.gguf
  4.最后运行Ollama文件夹下llama-server.exe，当看到server is listening on http://127.0.0.1:8080就说明Ollama服务已经启动成功，
    可以在浏览器上用http://127.0.0.1:8080访问
在命令行中执行curl http://localhost:8080/api/tags | ConvertFrom-Json 可看到可用的模型信息，如：
models                                                                                                                                                                               
------                                                                                                                                                                               
{@{name=models/7B/ggml-model-f16.gguf; model=models/7B/ggml-model-f16.gguf; modified_at=; size=; digest=; type=model;...
"""


def test_llamafile_direct_api():
    """Smoke-test the server's OpenAI-compatible /v1/completions endpoint.

    Sends a single Chinese prompt and prints either the generated text,
    the HTTP error body, or the raised exception. Purely best-effort:
    any failure is reported on stdout, never raised.
    """
    print("=== 直接API测试 ===")

    # Request body for the legacy completions API; low temperature keeps
    # the answer deterministic-ish, max_tokens bounds generation time.
    payload = {
        "model": "models/7B/ggml-model-f16.gguf",
        "prompt": "百合花是来源自哪个国家?",
        "temperature": 0.1,
        "max_tokens": 100
    }

    try:
        # CPU inference can be slow, so allow a generous 60s timeout.
        resp = requests.post(
            "http://localhost:8080/v1/completions",
            json=payload,
            timeout=60
        )
        if resp.status_code == 200:
            body = resp.json()
            print("✅ Completions API 成功:")
            print(body['choices'][0]['text'])
        else:
            print(f"Completions API 失败: {resp.status_code}")
            print(resp.text)
    except Exception as e:
        # Broad catch is intentional: this is a diagnostic script and any
        # failure (connection, decode, missing key) should just be printed.
        print(f"Completions API 错误: {e}")


def test_chat_completions():
    """Smoke-test the server's OpenAI-compatible /v1/chat/completions endpoint.

    Mirrors test_llamafile_direct_api but uses the chat-message format and
    extracts the assistant reply from choices[0].message.content. Best-effort:
    all failures are printed, never raised.
    """
    print("\n=== 测试Chat Completions ===")

    # Single-turn chat request with one user message.
    payload = {
        "model": "models/7B/ggml-model-f16.gguf",
        "messages": [
            {"role": "user", "content": "百合花是来源自哪个国家?"}
        ],
        "temperature": 0.1,
        "max_tokens": 100
    }

    try:
        resp = requests.post(
            "http://localhost:8080/v1/chat/completions",
            json=payload,
            timeout=60
        )
        if resp.status_code == 200:
            body = resp.json()
            print("✅ Chat Completions 成功:")
            print(body['choices'][0]['message']['content'])
        else:
            print(f"Chat Completions 失败: {resp.status_code}")
            print(resp.text)
    except Exception as e:
        # Broad catch is intentional for a diagnostic script.
        print(f"Chat Completions 错误: {e}")


def test_v1_completions():
    """Discover the first model via /v1/models, then exercise both the
    completions and chat/completions endpoints with it.

    Improvements over the original:
      * the model-listing GET now has a timeout and catches connection
        errors instead of crashing the whole script;
      * a non-200 response or an empty model list is reported instead of
        silently doing nothing;
      * guard clauses flatten the previous three-level nesting.
    """
    base_url = "http://localhost:8080/v1"

    # 首先获取可用模型 (first fetch the available models).
    try:
        models_response = requests.get(base_url + "/models", timeout=30)
    except Exception as e:
        # Without this the script died here while the other test functions
        # handled their own errors.
        print(f"获取模型列表失败: {e}")
        return

    if models_response.status_code != 200:
        print(f"获取模型列表失败: {models_response.status_code}")
        return

    models_data = models_response.json()
    if not models_data.get('data'):
        print("没有可用的模型")
        return

    model_id = models_data['data'][0]['id']
    print(f"使用模型: {model_id}")

    # 测试 completions 接口 (plain-prompt completions endpoint).
    completions_payload = {
        "model": model_id,
        "prompt": "百合花是来源自哪个国家?",
        "max_tokens": 100,
        "temperature": 0.1
    }

    try:
        response = requests.post(
            base_url + "/completions",
            json=completions_payload,
            timeout=30
        )
        print(f"Completions 状态: {response.status_code}")
        if response.status_code == 200:
            result = response.json()
            print("响应:")
            # ensure_ascii=False keeps the Chinese answer readable.
            print(json.dumps(result, indent=2, ensure_ascii=False))
            if 'choices' in result and result['choices']:
                answer = result['choices'][0]['text']
                print(f"\n回答: {answer}")
        else:
            print(f"错误: {response.text}")
    except Exception as e:
        print(f"Completions 调用失败: {e}")

    # 测试 chat/completions 接口 (chat-message endpoint).
    print("\n" + "="*50)
    print("测试 Chat Completions:")
    chat_payload = {
        "model": model_id,
        "messages": [
            {"role": "user", "content": "百合花是来源自哪个国家?"}
        ],
        "max_tokens": 100,
        "temperature": 0.1
    }

    try:
        response = requests.post(
            base_url + "/chat/completions",
            json=chat_payload,
            timeout=30
        )
        print(f"Chat Completions 状态: {response.status_code}")
        if response.status_code == 200:
            result = response.json()
            print("响应:")
            print(json.dumps(result, indent=2, ensure_ascii=False))
        else:
            print(f"错误: {response.text}")
    except Exception as e:
        print(f"Chat Completions 调用失败: {e}")


def main():
    """Run all llamafile API smoke tests in sequence."""
    test_v1_completions()
    # 运行测试 (run the direct-API and chat tests).
    test_llamafile_direct_api()
    test_chat_completions()


# Guard the entry point so importing this module does not fire off
# network requests as a side effect.
if __name__ == "__main__":
    main()
