import sys
import os
import io
import time

sys.path.insert(0, os.path.abspath('../..'))

import litellm
from litellm import completion

# Send successful completion logs to PromptLayer and print verbose debug output
litellm.success_callback = ["promptlayer"]
litellm.set_verbose = True

# def test_promptlayer_logging():
#     try:
#         # Redirect stdout
#         old_stdout = sys.stdout
#         sys.stdout = new_stdout = io.StringIO()


#         response = completion(model="claude-instant-1.2",
#                               messages=[{
#                                   "role": "user",
#                                   "content": "Hi πŸ‘‹ - i'm claude"
#                               }])

#         # Restore stdout
#         time.sleep(1)
#         sys.stdout = old_stdout
#         output = new_stdout.getvalue().strip()
#         print(output)
#         if "LiteLLM: Prompt Layer Logging: success" not in output:
#             raise Exception("Required log message not found!")

#     except Exception as e:
#         print(e)

# test_promptlayer_logging()


def test_promptlayer_logging_with_metadata():
    # Redirect stdout so the PromptLayer success log line can be checked
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    try:
        response = completion(model="gpt-3.5-turbo",
                              messages=[{
                                  "role": "user",
                                  "content": "Hi πŸ‘‹ - i'm ai21"
                              }],
                              temperature=0.2,
                              max_tokens=20,
                              metadata={"model": "ai21"})

        # Give the success callback a moment to run, then restore stdout
        time.sleep(1)
        sys.stdout = old_stdout
        output = new_stdout.getvalue().strip()
        print(output)
        if "LiteLLM: Prompt Layer Logging: success" not in output:
            raise Exception("Required log message not found!")
    except Exception as e:
        sys.stdout = old_stdout  # restore stdout even when the test fails
        print(e)

test_promptlayer_logging_with_metadata()
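

# A hedged sketch, not in the original file: both tests above repeat the same
# stdout-capture dance, so a small helper could factor it out. The helper name
# and signature are hypothetical, not part of LiteLLM.
def _capture_promptlayer_log(run_completion):
    """Run `run_completion` with stdout captured; return the captured text."""
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    try:
        run_completion()
        time.sleep(1)  # give the success callback time to write its log line
    finally:
        sys.stdout = old_stdout  # always restore stdout, even on failure
    return new_stdout.getvalue().strip()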




# def test_chat_openai():
#     try:
#         response = completion(model="replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
#                               messages=[{
#                                   "role": "user",
#                                   "content": "Hi πŸ‘‹ - i'm openai"
#                               }])

#         print(response)
#     except Exception as e:
#         print(e)

# test_chat_openai()
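

# A hedged sketch, not in the original file: the same success-log check applied
# to a streaming completion, reusing the hypothetical _capture_promptlayer_log
# helper above. `stream=True` is LiteLLM's standard streaming flag and yields
# response chunks; whether PromptLayer emits the same success line for streamed
# calls is an assumption, so the call stays commented out like the other
# disabled tests.
def test_promptlayer_logging_with_streaming():
    def run():
        response = completion(model="gpt-3.5-turbo",
                              messages=[{
                                  "role": "user",
                                  "content": "Hi πŸ‘‹ - i'm streaming"
                              }],
                              stream=True)
        for _ in response:  # drain the stream so the success callback can fire
            pass

    output = _capture_promptlayer_log(run)
    print(output)
    if "LiteLLM: Prompt Layer Logging: success" not in output:
        raise Exception("Required log message not found!")

# test_promptlayer_logging_with_streaming()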