import io
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
import asyncio
import litellm
import gzip
import json
import logging
import time
from typing import Optional, List
from unittest.mock import AsyncMock, patch, Mock
import pytest
from litellm import completion
from litellm._logging import verbose_logger
from litellm.integrations.vector_stores.bedrock_vector_store import BedrockVectorStore
from litellm.llms.custom_httpx.http_handler import HTTPHandler, AsyncHTTPHandler
from litellm.integrations.custom_logger import CustomLogger
from litellm.types.utils import StandardLoggingPayload, StandardLoggingVectorStoreRequest
from litellm.types.vector_stores import VectorStoreSearchResponse


class TestCustomLogger(CustomLogger):
    """Callback that captures the standard logging payload so tests can assert on it."""

    def __init__(self):
        self.standard_logging_payload: Optional[StandardLoggingPayload] = None
        super().__init__()

    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        self.standard_logging_payload = kwargs.get("standard_logging_object")


@pytest.fixture(autouse=True)
def add_aws_region_to_env(monkeypatch):
    monkeypatch.setenv("AWS_REGION", "us-west-2")


@pytest.fixture
def setup_vector_store_registry():
    from litellm.vector_stores.vector_store_registry import (
        LiteLLM_ManagedVectorStore,
        VectorStoreRegistry,
    )

    # Init vector store registry with a single Bedrock knowledge base
    litellm.vector_store_registry = VectorStoreRegistry(
        vector_stores=[
            LiteLLM_ManagedVectorStore(
                vector_store_id="T37J8R4WTM",
                custom_llm_provider="bedrock",
            )
        ]
    )
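
# NOTE: the tests below reference the same id ("T37J8R4WTM") via `vector_store_ids`.
# When an id is found in this registry, the BedrockVectorStore hook is expected to
# retrieve context from the knowledge base and append it to the outgoing request.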


@pytest.mark.asyncio
async def test_basic_bedrock_knowledgebase_retrieval(setup_vector_store_registry):
    """The hook can retrieve directly from a Bedrock knowledge base."""
    bedrock_knowledgebase_hook = BedrockVectorStore(aws_region_name="us-west-2")
    response = await bedrock_knowledgebase_hook.make_bedrock_kb_retrieve_request(
        knowledge_base_id="T37J8R4WTM",
        query="what is litellm?",
    )
    assert response is not None


@pytest.mark.asyncio
async def test_e2e_bedrock_knowledgebase_retrieval_with_completion(setup_vector_store_registry):
    """Knowledge base context is injected into the completion request body."""
    litellm._turn_on_debug()
    client = AsyncHTTPHandler()
    print("value of litellm.vector_store_registry:", litellm.vector_store_registry)
    with patch.object(client, "post") as mock_post:
        # Mock the response for the LLM call
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.headers = {"Content-Type": "application/json"}
        mock_response.json = lambda: json.loads(mock_response.text)
        mock_post.return_value = mock_response

        try:
            response = await litellm.acompletion(
                model="anthropic/claude-3.5-sonnet",
                messages=[{"role": "user", "content": "what is litellm?"}],
                vector_store_ids=["T37J8R4WTM"],
                client=client,
            )
        except Exception as e:
            print(f"Error: {e}")

        # Verify the LLM request was made
        mock_post.assert_called_once()

        # Verify the request body
        print("call args:", mock_post.call_args)
        request_body = mock_post.call_args.kwargs["json"]
        print("Request body:", json.dumps(request_body, indent=4, default=str))

        # Assert content from the knowledge base was applied to the request
        # 1. there should be 2 content blocks: the user message, then the
        #    context retrieved from the knowledge base
        content = request_body["messages"][0]["content"]
        assert len(content) == 2
        assert content[0]["type"] == "text"
        assert content[1]["type"] == "text"

        # 2. the context message should carry the Bedrock knowledge base prefix
        #    string, confirming the retrieved context was applied to the request
        assert BedrockVectorStore.CONTENT_PREFIX_STRING in content[1]["text"]
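
# For reference, the request body asserted above has roughly this shape
# (a sketch based only on the assertions in this test; values illustrative):
# {
#     "messages": [
#         {
#             "role": "user",
#             "content": [
#                 {"type": "text", "text": "what is litellm?"},
#                 {"type": "text", "text": "<CONTENT_PREFIX_STRING + retrieved context>"},
#             ],
#         }
#     ]
# }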


@pytest.mark.asyncio
async def test_e2e_bedrock_knowledgebase_retrieval_with_llm_api_call(setup_vector_store_registry):
    """
    Test that the Bedrock Knowledge Base hook works when making a real LLM API call
    """
    # Init client
    litellm._turn_on_debug()
    async_client = AsyncHTTPHandler()
    litellm.callbacks = [BedrockVectorStore(aws_region_name="us-west-2")]
    response = await litellm.acompletion(
        model="anthropic/claude-3-5-haiku-latest",
        messages=[{"role": "user", "content": "what is litellm?"}],
        vector_store_ids=["T37J8R4WTM"],
        client=async_client,
    )
    assert response is not None
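
# NOTE: unlike the mocked tests, the test above makes real network calls
# (Bedrock KB retrieval plus an Anthropic completion), so it presumably needs
# valid AWS and Anthropic credentials in the environment to pass.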


@pytest.mark.asyncio
async def test_openai_with_knowledge_base_mock_openai(setup_vector_store_registry):
    """
    Tests that knowledge base content is correctly passed to the OpenAI API call
    """
    litellm.callbacks = [BedrockVectorStore(aws_region_name="us-west-2")]
    litellm.set_verbose = True
    from openai import AsyncOpenAI

    client = AsyncOpenAI(api_key="fake-api-key")
    with patch.object(
        client.chat.completions.with_raw_response, "create"
    ) as mock_client:
        try:
            await litellm.acompletion(
                model="gpt-4",
                messages=[{"role": "user", "content": "what is litellm?"}],
                vector_store_ids=["T37J8R4WTM"],
                client=client,
            )
        except Exception as e:
            print(f"Error: {e}")

        # Verify the API was called
        mock_client.assert_called_once()
        request_body = mock_client.call_args.kwargs

        # Verify the request contains messages with knowledge base context
        assert "messages" in request_body
        messages = request_body["messages"]

        # We expect at least 2 messages:
        # 1. User message with the question
        # 2. User message with the knowledge base context
        assert len(messages) >= 2
        print("request messages:", json.dumps(messages, indent=4, default=str))

        # assert messages[1] is the user message with the knowledge base context
        assert messages[1]["role"] == "user"
        assert BedrockVectorStore.CONTENT_PREFIX_STRING in messages[1]["content"]


@pytest.mark.asyncio
async def test_openai_with_vector_store_ids_in_tool_call_mock_openai(setup_vector_store_registry):
    """
    Tests that vector store ids can be passed as tools.
    This is the OpenAI format.
    """
    litellm.callbacks = [BedrockVectorStore(aws_region_name="us-west-2")]
    litellm.set_verbose = True
    from openai import AsyncOpenAI

    client = AsyncOpenAI(api_key="fake-api-key")
    with patch.object(
        client.chat.completions.with_raw_response, "create"
    ) as mock_client:
        try:
            await litellm.acompletion(
                model="gpt-4",
                messages=[{"role": "user", "content": "what is litellm?"}],
                tools=[{
                    "type": "file_search",
                    "vector_store_ids": ["T37J8R4WTM"],
                }],
                client=client,
            )
        except Exception as e:
            print(f"Error: {e}")

        # Verify the API was called
        mock_client.assert_called_once()
        request_body = mock_client.call_args.kwargs
        print("request body:", json.dumps(request_body, indent=4, default=str))

        # Verify the request contains messages with knowledge base context
        assert "messages" in request_body
        messages = request_body["messages"]

        # We expect at least 2 messages:
        # 1. User message with the question
        # 2. User message with the knowledge base context
        assert len(messages) >= 2
        print("request messages:", json.dumps(messages, indent=4, default=str))

        # assert messages[1] is the user message with the knowledge base context
        assert messages[1]["role"] == "user"
        assert BedrockVectorStore.CONTENT_PREFIX_STRING in messages[1]["content"]

        # assert the tool call was not sent to the upstream LLM API,
        # since it referenced a litellm-managed vector store
        assert "tools" not in request_body


@pytest.mark.asyncio
async def test_openai_with_mixed_tool_call_mock_openai(setup_vector_store_registry):
    """Ensure unrecognized vector store tools are forwarded to the provider."""
    litellm.callbacks = [BedrockVectorStore(aws_region_name="us-west-2")]
    from openai import AsyncOpenAI

    client = AsyncOpenAI(api_key="fake-api-key")
    with patch.object(
        client.chat.completions.with_raw_response, "create"
    ) as mock_client:
        try:
            await litellm.acompletion(
                model="gpt-4",
                messages=[{"role": "user", "content": "what is litellm?"}],
                tools=[
                    {"type": "file_search", "vector_store_ids": ["T37J8R4WTM"]},
                    {"type": "file_search", "vector_store_ids": ["unknownVS"]},
                ],
                client=client,
            )
        except Exception as e:
            print(f"Error: {e}")

        mock_client.assert_called_once()
        request_body = mock_client.call_args.kwargs
        assert "messages" in request_body
        messages = request_body["messages"]
        assert len(messages) >= 2
        assert messages[1]["role"] == "user"
        assert BedrockVectorStore.CONTENT_PREFIX_STRING in messages[1]["content"]
        assert "tools" in request_body
        tools = request_body["tools"]
        assert len(tools) == 1
        assert tools[0]["vector_store_ids"] == ["unknownVS"]
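
# NOTE: per the assertions above, the registered id ("T37J8R4WTM") is resolved
# locally by the hook and stripped from `tools`, while the unknown id
# ("unknownVS") passes through to the provider untouched.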


@pytest.mark.asyncio
async def test_logging_with_knowledge_base_hook(setup_vector_store_registry):
    """
    Test that the knowledge base request was logged in the standard logging payload
    """
    test_custom_logger = TestCustomLogger()
    litellm.callbacks = [BedrockVectorStore(aws_region_name="us-west-2"), test_custom_logger]
    litellm.set_verbose = True
    await litellm.acompletion(
        model="gpt-4",
        messages=[{"role": "user", "content": "what is litellm?"}],
        vector_store_ids=["T37J8R4WTM"],
    )

    # sleep for 1 second to allow the logging callback to run
    await asyncio.sleep(1)

    # assert that the knowledge base request was logged in the standard logging payload
    standard_logging_payload: Optional[StandardLoggingPayload] = test_custom_logger.standard_logging_payload
    assert standard_logging_payload is not None
    metadata = standard_logging_payload["metadata"]
    standard_logging_vector_store_request_metadata: Optional[List[StandardLoggingVectorStoreRequest]] = metadata["vector_store_request_metadata"]
    print("standard_logging_vector_store_request_metadata:", json.dumps(standard_logging_vector_store_request_metadata, indent=4, default=str))

    # 1 vector store request was made, so expect 1 vector store request metadata object
    assert len(standard_logging_vector_store_request_metadata) == 1

    # expect the vector store request metadata object to have the correct values
    vector_store_request_metadata = standard_logging_vector_store_request_metadata[0]
    assert vector_store_request_metadata.get("vector_store_id") == "T37J8R4WTM"
    assert vector_store_request_metadata.get("query") == "what is litellm?"
    assert vector_store_request_metadata.get("custom_llm_provider") == "bedrock"

    vector_store_search_response: VectorStoreSearchResponse = vector_store_request_metadata.get("vector_store_search_response")
    assert vector_store_search_response is not None
    assert vector_store_search_response.get("search_query") == "what is litellm?"
    assert len(vector_store_search_response.get("data", [])) >= 0
    for item in vector_store_search_response.get("data", []):
        assert item.get("score") is not None
        assert item.get("content") is not None
        assert len(item.get("content", [])) >= 0
        for content_item in item.get("content", []):
            text_content = content_item.get("text")
            assert text_content is not None
            assert len(text_content) > 0
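
# For reference, each vector_store_request_metadata entry asserted above looks
# roughly like this (keys taken from the assertions; values illustrative):
# {
#     "vector_store_id": "T37J8R4WTM",
#     "query": "what is litellm?",
#     "custom_llm_provider": "bedrock",
#     "vector_store_search_response": {
#         "search_query": "what is litellm?",
#         "data": [{"score": ..., "content": [{"text": ...}]}],
#     },
# }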


@pytest.mark.asyncio
async def test_logging_with_knowledge_base_hook_no_vector_store_registry(setup_vector_store_registry):
    """
    Test that the completion still succeeds when no vector store registry is initialized
    """
    test_custom_logger = TestCustomLogger()
    litellm.callbacks = [BedrockVectorStore(aws_region_name="us-west-2"), test_custom_logger]
    litellm.vector_store_registry = None
    await litellm.acompletion(
        model="gpt-4",
        messages=[{"role": "user", "content": "what is litellm?"}],
    )


@pytest.mark.asyncio
async def test_e2e_bedrock_knowledgebase_retrieval_without_vector_store_registry(setup_vector_store_registry):
    """No knowledge base content is applied when the registry is not initialized."""
    litellm._turn_on_debug()
    client = AsyncHTTPHandler()
    litellm.vector_store_registry = None
    with patch.object(client, "post") as mock_post:
        # Mock the response for the LLM call
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.headers = {"Content-Type": "application/json"}
        mock_response.json = lambda: json.loads(mock_response.text)
        mock_post.return_value = mock_response

        try:
            response = await litellm.acompletion(
                model="anthropic/claude-3.5-sonnet",
                messages=[{"role": "user", "content": "what is litellm?"}],
                vector_store_ids=["T37J8R4WTM"],
                client=client,
            )
        except Exception as e:
            print(f"Error: {e}")

        # Verify the LLM request was made
        mock_post.assert_called_once()

        # Verify the request body
        print("call args:", mock_post.call_args)
        request_body = mock_post.call_args.kwargs["json"]
        print("Request body:", json.dumps(request_body, indent=4, default=str))

        # Assert no knowledge base content was applied to the request:
        # there should be exactly 1 content block (the user message), since
        # there is no initialized vector store registry to retrieve from
        content = request_body["messages"][0]["content"]
        assert len(content) == 1
        assert content[0]["type"] == "text"


@pytest.mark.asyncio
async def test_e2e_bedrock_knowledgebase_retrieval_with_vector_store_not_in_registry(setup_vector_store_registry):
    """
    No vector store request is made for vector store ids that are not in the registry.
    In this test newUnknownVectorStoreId is not in the registry, so no vector store request is made.
    """
    litellm._turn_on_debug()
    client = AsyncHTTPHandler()
    print("Registry initialized:", litellm.vector_store_registry.vector_stores)
    with patch.object(client, "post") as mock_post:
        # Mock the response for the LLM call
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.headers = {"Content-Type": "application/json"}
        mock_response.json = lambda: json.loads(mock_response.text)
        mock_post.return_value = mock_response

        try:
            response = await litellm.acompletion(
                model="anthropic/claude-3.5-sonnet",
                messages=[{"role": "user", "content": "what is litellm?"}],
                vector_store_ids=["newUnknownVectorStoreId"],
                client=client,
            )
        except Exception as e:
            print(f"Error: {e}")

        # Verify the LLM request was made
        mock_post.assert_called_once()

        # Verify the request body
        print("call args:", mock_post.call_args)
        request_body = mock_post.call_args.kwargs["json"]
        print("Request body:", json.dumps(request_body, indent=4, default=str))

        # Assert no knowledge base content was applied to the request:
        # there should be exactly 1 content block (the user message), since
        # the requested vector store id is not in the registry
        content = request_body["messages"][0]["content"]
        assert len(content) == 1
        assert content[0]["type"] == "text"