from openai import OpenAI
import sys

# vLLM serves an OpenAI-compatible API; the key is unused, so any placeholder works.
openai_api_key = "EMPTY"

# Available backends: served model path -> API base URL.
# NOTE: the original code assigned one (base, model) pair and then immediately
# overwrote it with the other — the first pair was dead code. Keep both entries
# here and select one explicitly instead.
MODEL_CONFIGS = {
    # Qwen2.5-1.5B-Instruct
    '/root/yuehu/assets/Qwen2.5-1.5B-Instruct': "http://localhost:8000/v1",
    # Qwen2.5-7B-Instruct-AWQ
    '/root/yuehu/assets/Qwen2.5-7B-Instruct-AWQ': "http://localhost:8081/v1",
}

# Active backend — Qwen2.5-7B-Instruct-AWQ, matching the original script's
# effective (last-assigned) configuration.
model_name = '/root/yuehu/assets/Qwen2.5-7B-Instruct-AWQ'
openai_api_base = MODEL_CONFIGS[model_name]

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

# Start a streaming chat completion against the vLLM server.
system_prompt = "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."
chat_messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": "介绍一下阿里巴巴"},
]

# Sampling parameters; repetition_penalty is not part of the standard OpenAI
# API, so it is forwarded to vLLM via extra_body.
sampling_kwargs = dict(
    temperature=0.7,
    top_p=0.8,
    max_tokens=512,
    extra_body={"repetition_penalty": 1.05},
)

response = client.chat.completions.create(
    model=model_name,
    messages=chat_messages,
    stream=True,  # deliver the reply incrementally as chunks
    **sampling_kwargs,
)

# Consume the stream, printing each piece of assistant text as it arrives.
for chunk in response:
    # Some servers (e.g. vLLM/OpenAI with usage reporting enabled) send a final
    # chunk whose choices list is empty; the original `chunk.choices[0]` would
    # raise IndexError there — skip such chunks instead.
    if not chunk.choices:
        continue
    # A chunk's delta may lack 'content' (e.g. a role-only first delta) or
    # carry None on the terminating chunk; print only real text fragments.
    delta = getattr(chunk.choices[0], 'delta', None)
    content = getattr(delta, 'content', None) if delta is not None else None
    if content is not None:
        # Flush so tokens appear immediately rather than on line buffering.
        print(content, end='', flush=True)

print('\n')
