from openai import OpenAI
import os

# Shared OpenAI client, configured entirely from the environment.
# NOTE(review): the previous fallback of base_url="" is an invalid URL and
# would override the SDK's default endpoint; `or None` makes a missing/empty
# OPENAI_BASE_URL fall back to the SDK default — confirm against deployment.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY", ""),
    base_url=os.environ.get("OPENAI_BASE_URL") or None,
)
def call_llm(sysPrompt, prompt):
    """Send one system+user chat exchange and return the full response text.

    Args:
        sysPrompt: System prompt that frames the assistant's behavior.
        prompt: The user message content.

    Returns:
        The assistant's message content (str) from the first choice.
    """
    # Model name is taken from the environment at call time; empty if unset.
    model = os.environ.get("MODEL", "")
    # Non-streaming request: the complete answer arrives in a single response.
    r = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": sysPrompt},
            {"role": "user", "content": prompt},
        ],
    )
    return r.choices[0].message.content

def call_llm_stream(sysPrompt, prompt):
    """Send one system+user chat exchange as a streaming request.

    Args:
        sysPrompt: System prompt that frames the assistant's behavior.
        prompt: The user message content.

    Returns:
        The streaming response object; iterate it to receive chunks whose
        ``choices[0].delta.content`` holds incremental text.
    """
    # Model name is taken from the environment at call time; empty if unset.
    model = os.environ.get("MODEL", "")
    # stream=True makes the API yield partial deltas instead of one response.
    r = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": sysPrompt},
            {"role": "user", "content": prompt},
        ],
        stream=True,
    )
    return r


if __name__ == "__main__":
    # Smoke test: stream a short completion and print it incrementally.
    print("## Testing call_llm_stream")  # was mislabeled "call_llm"
    prompt = "In a few words, what is the meaning of life?"
    print(f"## Prompt: {prompt}")
    response = call_llm_stream("", prompt)
    for chunk in response:
        # A chunk may carry no choices, or a delta without content (e.g. the
        # final role/stop chunk) — skip those.
        if not chunk.choices:
            continue
        chunk_content = getattr(chunk.choices[0].delta, "content", None)
        if chunk_content is not None:
            # Print without a newline to render the stream in real time.
            print(chunk_content, end="", flush=True)
    # Terminate the streamed line so the shell prompt starts cleanly.
    print()
    

