# Earlier attempt kept for reference. NOTE(security): a real API key was
# previously hardcoded here and committed to source control — it must be
# considered leaked and rotated. Credentials are redacted below.
# import litellm
# import os
# litellm._turn_on_debug()

# response = litellm.completion(
#     model="deepseek/DeepSeek-R1",               # add `openai/` prefix to model so litellm knows to route to OpenAI
#     api_key=os.environ["DEEPSEEK_API_KEY"],     # never hardcode credentials in source
#     api_base="https://infer-modelarts-cn-southwest-2.modelarts-infer.com/v1/infers/861b6827-e5ef-4fa6-90d2-5fd1b29882/v1",     # set API Base of your Custom OpenAI Endpoint
#     messages=[
#                 {
#                     "role": "user",
#                     "content": "Hey, how's it going?",
#                 }
#     ],
# )
# print(response)

from litellm import completion
import os

# Base URL of the custom OpenAI-compatible (ModelArts) inference endpoint.
API_BASE = "https://infer-modelarts-cn-southwest-2.modelarts-infer.com/v1/infers/861b6827-e5ef-4fa6-90d2-5fd1b29882/v1"


def main() -> None:
    """Send a single chat message to DeepSeek-R1 via litellm and print the response.

    Raises:
        RuntimeError: if the DEEPSEEK_API_KEY environment variable is not set.
    """
    # SECURITY: the key was previously hardcoded here and committed to source
    # control — treat it as leaked and rotate it. Read it from the environment
    # instead; litellm picks up DEEPSEEK_API_KEY for the `deepseek/` provider.
    if not os.environ.get("DEEPSEEK_API_KEY"):
        raise RuntimeError(
            "Set the DEEPSEEK_API_KEY environment variable before running this script."
        )

    response = completion(
        model="deepseek/DeepSeek-R1",
        base_url=API_BASE,
        messages=[
            {"role": "user", "content": "hello from litellm"}
        ],
    )
    print(response)


if __name__ == "__main__":
    main()