from openai import OpenAI
import time

class VLLMClient:
    """Thin wrapper around an OpenAI-compatible vLLM chat-completions endpoint."""

    def __init__(self, base_url: str = 'http://127.0.0.1:8000/v1'):
        """Create the underlying OpenAI SDK client.

        Args:
            base_url: Root URL of the vLLM OpenAI-compatible API server.
        """
        # vLLM does not validate the API key, but the OpenAI SDK requires
        # a non-empty value, so any placeholder works.
        self.client = OpenAI(
            base_url=base_url,
            api_key="anything"
        )

    def generate(self, model_name, prompt, max_tokens=1024, temperature=0.6, stream=False):
        """Send a single-turn chat request to the served model.

        Args:
            model_name: Model identifier as registered with the vLLM server.
            prompt: User message content for a one-message conversation.
            max_tokens: Upper bound on generated tokens.
            temperature: Sampling temperature.
            stream: When True, return a generator yielding text deltas;
                otherwise return the complete response string.

        Returns:
            Either a ``str`` (non-streaming) or a generator of ``str``
            chunks (streaming).
        """
        response = self.client.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
            temperature=temperature,
            stream=stream
        )
        if stream:
            return self._handle_stream_response(response)
        return self._handle_json_response(response)

    def _handle_json_response(self, response):
        """Extract the assistant text from a non-streaming completion."""
        return response.choices[0].message.content

    def _handle_stream_response(self, response):
        """Yield text deltas from a streaming completion, skipping empty chunks.

        BUG FIX: the original referenced ``chunk`` without iterating the
        response at all, raising ``NameError`` on first use. The stream must
        be consumed chunk by chunk. (Also renamed from the misspelled
        ``_hander_stream_response``; private, so no external callers break.)
        """
        for chunk in response:
            content = chunk.choices[0].delta.content
            if content is not None:
                yield content

if __name__ == '__main__':
    # Smoke-test the streaming path against a locally served model and
    # report wall-clock time for the full response.
    client = VLLMClient()
    start_time = time.time()
    full_response = []
    stream = client.generate(
        model_name="/models/Qwen3-4B",
        prompt="天空为什么是蓝色的？",
        stream=True
    )
    for piece in stream:
        # Echo each delta immediately so the output appears as it streams.
        print(piece, end="", flush=True)
        full_response.append(piece)
    print(f'\n\n 完整的响应时间{time.time()-start_time:.2f}秒')