

# https://github.com/ultrasev/llmproxy?tab=readme-ov-file
# https://llmapi.ultrasev.com/v2/gemini

import os

from openai import OpenAI

# Create the client pointed at the LLM proxy endpoint.
# The API key is read from the OPENAI_API_KEY environment variable when set;
# the fallback is the original empty string, so behavior is unchanged when
# the variable is absent (never hard-code secrets in source).
client = OpenAI(
    api_key=os.getenv("OPENAI_API_KEY", ""),
    base_url="https://llmapi.fanyz.top/v2/openai",
    # base_url="https://llmapi.zibianqu.workers.dev/v2/openai",  # alternate proxy endpoint
)

# Send a two-turn prompt (system + user) to the chat-completions endpoint.
conversation = [
    {"role": "system", "content": "Assistant is a large language model trained by OpenAI."},
    {"role": "user", "content": "Who were the founders of Microsoft?"},
]
response = client.chat.completions.create(
    model="llama-13b-chat",
    messages=conversation,
)

# Dump the full response as pretty-printed JSON for inspection,
# then print just the assistant's reply text.
print(response.model_dump_json(indent=2))
print(response.choices[0].message.content)