# #### What this tests ####
# # This tests if logging to the llmonitor integration actually works
# # Adds the parent directory to the system path
# import sys
# import os
# sys.path.insert(0, os.path.abspath("../.."))
# from litellm import completion, embedding
# import litellm
# litellm.success_callback = ["llmonitor"]
# litellm.failure_callback = ["llmonitor"]
# litellm.set_verbose = True
# def test_chat_openai():
# try:
# response = completion(
# model="gpt-3.5-turbo",
# messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
# user="ishaan_from_litellm"
# )
# print(response)
# except Exception as e:
# print(e)
# def test_embedding_openai():
# try:
# response = embedding(model="text-embedding-ada-002", input=["test"])
# # Add any assertions here to check the response
# print(f"response: {str(response)[:50]}")
# except Exception as e:
# print(e)
# test_chat_openai()
# # test_embedding_openai()
# def test_llmonitor_logging_function_calling():
# function1 = [
# {
# "name": "get_current_weather",
# "description": "Get the current weather in a given location",
# "parameters": {
# "type": "object",
# "properties": {
# "location": {
# "type": "string",
# "description": "The city and state, e.g. San Francisco, CA",
# },
# "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
# },
# "required": ["location"],
# },
# }
# ]
# try:
# response = completion(model="gpt-3.5-turbo",
# messages=[{
# "role": "user",
# "content": "what's the weather in boston"
# }],
# temperature=0.1,
# functions=function1,
# )
# print(response)
# except Exception as e:
# print(e)
# # test_llmonitor_logging_function_calling()