import asyncio
from chernc.llm.api_inference import AsyncAPIInference, APIInference

async def async_main() -> None:
    """Demonstrate the async client: stream a chat completion token-by-token.

    Builds a two-message conversation, points the client at a local
    OpenAI-compatible endpoint, and prints each streamed token as it
    arrives. Intended as a runnable usage example, not production code.
    """
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "list some programming languages"}
    ]
    # NOTE(review): API key is a placeholder — supply a real one via env/config.
    api = AsyncAPIInference(
        model_name="Qwen2.5-Coder-7B-Instruct",
        base_url="http://127.0.0.1:8000/v1",
        api_key="sk-xxxxxxxx",
    )

    # Tokens arrive incrementally; print with no separator so the output
    # reads as one continuous completion.
    async for token in api.chat_stream(messages=messages):
        print(token, end="", flush=True)
    # Terminate the streamed line so the shell prompt doesn't run into it.
    print()

def sync_main() -> None:
    """Demonstrate the blocking client: one full completion, then a stream.

    First fetches a complete response with ``chat`` and prints it, then
    re-sends the same conversation through ``chat_stream`` and prints the
    tokens as they arrive. Intended as a runnable usage example.
    """
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "list some programming languages"}
    ]
    # NOTE(review): API key is a placeholder — supply a real one via env/config.
    api = APIInference(model_name="deepseek-chat", base_url="https://api.deepseek.com", api_key="sk-xxxxxxxx")

    # Non-streaming call: returns the whole completion at once.
    out = api.chat(messages=messages)
    print(out)

    # Streaming call: print tokens with no separator as they arrive.
    for token in api.chat_stream(messages=messages):
        print(token, end="", flush=True)
    # Terminate the streamed line so the shell prompt doesn't run into it.
    print()


if __name__ == "__main__":
    # Run the async demo by default; switch to sync_main() to try the
    # blocking client instead.
    asyncio.run(async_main())
    # sync_main()