from openai import OpenAI
from dotenv import load_dotenv
import os

# Load environment variables from a local .env file (expects an `api_key` entry).
load_dotenv()

# Client pointed at the Gitee AI OpenAI-compatible endpoint.
client = OpenAI(
    base_url="https://ai.gitee.com/v1",
    api_key=os.getenv("api_key"),  # NOTE(review): lowercase env var name — confirm the .env file uses this exact casing
)
stream_mode = True  # Whether to enable streaming output

# Legacy text-completions request (not the chat endpoint).
# NOTE(review): Qwen2.5-14B-Instruct is an instruction-tuned model — presumably
# the chat.completions endpoint is also supported; confirm which one the
# provider expects for this model.
response = client.completions.create(
    prompt="Can you please let us know more details about your request?", # Replace with your actual prompt
    model="Qwen2.5-14B-Instruct",
    stream=stream_mode,
    max_tokens=512,
    temperature=0.7,
    top_p=0.7,
)

if stream_mode:
    # Streaming: iterate server-sent chunks and print text deltas as they arrive.
    print("Begin streaming response output:")
    full_text = ""
    for chunk in response:
        # Guard: some OpenAI-compatible providers emit chunks with an empty
        # `choices` list (e.g. a trailing usage chunk), which would otherwise
        # raise IndexError on `chunk.choices[0]`.
        if chunk.choices and chunk.choices[0].text:
            text_chunk = chunk.choices[0].text
            full_text += text_chunk
            print(text_chunk, end="", flush=True)
    print("\n\nFull response content:")
    print(full_text)
else:
    # Non-streaming: the full completion text is available immediately.
    print("Full response content:")
    print(response.choices[0].text.strip())