File size: 2,194 Bytes
395201c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
# #### What this tests ####
# #    This tests using the instructor library with the LiteLLM Router class

# import sys, os
# import traceback
# import pytest
# sys.path.insert(
#     0, os.path.abspath("../..")
# )  # Adds the parent directory to the system path
# import litellm
# import asyncio 

# litellm.set_verbose = True
# from litellm import Router
# import instructor
# from pydantic import BaseModel

# # Patching the client enables the response_model keyword
# # # on client.chat.completions.create
# # client = instructor.patch(Router(model_list=[{
# #     "model_name": "gpt-3.5-turbo", # openai model name 
# #     "litellm_params": { # params for litellm completion/embedding call 
# #         "model": "azure/chatgpt-v-2", 
# #         "api_key": os.getenv("AZURE_API_KEY"),
# #         "api_version": os.getenv("AZURE_API_VERSION"),
# #         "api_base": os.getenv("AZURE_API_BASE")
# #     }
# # }]))

# # class UserDetail(BaseModel):
# #     name: str
# #     age: int

# # user = client.chat.completions.create(
# #     model="gpt-3.5-turbo",
# #     response_model=UserDetail,
# #     messages=[
# #         {"role": "user", "content": "Extract Jason is 25 years old"},
# #     ]
# # )
# # assert isinstance(user, UserDetail)

# # assert isinstance(user, UserDetail)
# # assert user.name == "Jason"
# # assert user.age == 25

# # print(f"user: {user}")
# import instructor
# from openai import AsyncOpenAI

# aclient = instructor.apatch(Router(model_list=[{
#     "model_name": "gpt-3.5-turbo", # openai model name 
#     "litellm_params": { # params for litellm completion/embedding call 
#         "model": "azure/chatgpt-v-2", 
#         "api_key": os.getenv("AZURE_API_KEY"),
#         "api_version": os.getenv("AZURE_API_VERSION"),
#         "api_base": os.getenv("AZURE_API_BASE")
#     }
# }], default_litellm_params={"acompletion": True}))

# class UserExtract(BaseModel):
#     name: str
#     age: int
# async def main():
#     model = await aclient.chat.completions.create(
#         model="gpt-3.5-turbo",
#         response_model=UserExtract,
#         messages=[
#             {"role": "user", "content": "Extract jason is 25 years old"},
#         ],
#     )
#     print(f"model: {model}")

# asyncio.run(main())