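# Tests for litellm's Langfuse success-callback logging across sync, async,
# streaming, and function-calling completions. They assume LANGFUSE_PUBLIC_KEY,
# LANGFUSE_SECRET_KEY, and the relevant provider API keys are set in the
# environment; without them the calls below will error rather than log.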
import sys
import os
import asyncio
# import logging
# logging.basicConfig(level=logging.DEBUG)
sys.path.insert(0, os.path.abspath("../.."))  # make the local litellm package importable when run from tests/
from litellm import completion
import litellm
litellm.num_retries = 3
litellm.success_callback = ["langfuse"]
# litellm.set_verbose = True
import pytest
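
# Async path: the langfuse success callback should also fire for acompletion().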
def test_langfuse_logging_async():
    try:
        litellm.set_verbose = True

        async def _test_langfuse():
            return await litellm.acompletion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "This is a test"}],
                max_tokens=1000,
                temperature=0.7,
                timeout=5,
            )

        response = asyncio.run(_test_langfuse())
        print(f"response: {response}")
    except litellm.Timeout as e:
        pass
    except Exception as e:
        pytest.fail(f"An exception occurred - {e}")

# test_langfuse_logging_async()
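
# Sync path: basic completion with a langfuse-prefixed metadata key.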
def test_langfuse_logging():
    try:
        # litellm.set_verbose = True
        response = completion(
            model="claude-instant-1.2",
            messages=[{"role": "user", "content": "Hi 👋 - i'm claude"}],
            max_tokens=10,
            temperature=0.2,
            metadata={"langfuse/key": "foo"},
        )
        print(response)
    except litellm.Timeout as e:
        pass
    except Exception as e:
        print(e)

test_langfuse_logging()
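
# Streaming path: iterate the generator to completion so the full response is
# assembled and handed to the success callback.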
def test_langfuse_logging_stream():
    try:
        litellm.set_verbose = True
        response = completion(
            model="anyscale/meta-llama/Llama-2-7b-chat-hf",
            messages=[
                {
                    "role": "user",
                    "content": "this is a streaming test for llama2 + langfuse",
                }
            ],
            max_tokens=20,
            temperature=0.2,
            stream=True,
        )
        print(response)
        for chunk in response:
            pass
            # print(chunk)
    except litellm.Timeout as e:
        pass
    except Exception as e:
        print(e)

# test_langfuse_logging_stream()
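
# Mixed metadata: langfuse-prefixed, langsmith-prefixed, and unprefixed keys in
# one dict; any non-timeout failure fails the test via pytest.fail.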
def test_langfuse_logging_custom_generation_name():
    try:
        litellm.set_verbose = True
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hi 👋 - i'm claude"}],
            max_tokens=10,
            metadata={
                "langfuse/foo": "bar",
                "langsmith/fizz": "buzz",
                "prompt_hash": "asdf98u0j9131123",
            },
        )
        print(response)
    except litellm.Timeout as e:
        pass
    except Exception as e:
        print(e)  # print before failing: pytest.fail raises, so nothing after it runs
        pytest.fail(f"An exception occurred - {e}")

test_langfuse_logging_custom_generation_name()
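
# Function calling: pass an OpenAI-style function schema and confirm the call
# is still logged through the langfuse callback.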
def test_langfuse_logging_function_calling():
    function1 = [
        {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        }
    ]
    try:
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "what's the weather in boston"}],
            temperature=0.1,
            functions=function1,
        )
        print(response)
    except litellm.Timeout as e:
        pass
    except Exception as e:
        print(e)

# test_langfuse_logging_function_calling()