# Test the following scenarios:
# 1. Token counting via the proxy's token_counter endpoint, across different router model configurations
import sys, os
import traceback
from dotenv import load_dotenv
from fastapi import Request
from datetime import datetime

load_dotenv()
import os, io, time

# this file is to test litellm/proxy

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import pytest, logging, asyncio
import litellm, asyncio
from litellm.proxy.proxy_server import token_counter
from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token, update_spend
from litellm._logging import verbose_proxy_logger

verbose_proxy_logger.setLevel(level=logging.DEBUG)

from litellm.proxy._types import TokenCountRequest, TokenCountResponse
from litellm import Router


@pytest.mark.asyncio
async def test_vLLM_token_counting():
    """
    Test Token counter for vLLM models
    - User passes model="special-alias"
    - token_counter should infer that special-alias -> maps to wolfram/miquliz-120b-v2.0
    -> with no custom tokenizer configured for this deployment, the default (OpenAI) tokenizer should be used
    """
    llm_router = Router(
        model_list=[
            {
                "model_name": "special-alias",
                "litellm_params": {
                    "model": "openai/wolfram/miquliz-120b-v2.0",
                    "api_base": "https://exampleopenaiendpoint-production.up.railway.app/",
                },
            }
        ]
    )
    setattr(litellm.proxy.proxy_server, "llm_router", llm_router)

    response = await token_counter(
        request=TokenCountRequest(
            model="special-alias",
            messages=[{"role": "user", "content": "hello"}],
        )
    )
    print("response: ", response)

    assert (
        response.tokenizer_type == "openai_tokenizer"
    )  # SHOULD use the default tokenizer
    assert response.model_used == "wolfram/miquliz-120b-v2.0"


@pytest.mark.asyncio
async def test_token_counting_model_not_in_model_list():
    """
    Test Token counter - when a model is not in model_list
    -> should use the default OpenAI tokenizer
    """
    llm_router = Router(
        model_list=[
            {
                "model_name": "gpt-4",
                "litellm_params": {
                    "model": "gpt-4",
                },
            }
        ]
    )
    setattr(litellm.proxy.proxy_server, "llm_router", llm_router)

    response = await token_counter(
        request=TokenCountRequest(
            model="special-alias",
            messages=[{"role": "user", "content": "hello"}],
        )
    )
    print("response: ", response)

    assert (
        response.tokenizer_type == "openai_tokenizer"
    )  # SHOULD use the OpenAI tokenizer
    assert response.model_used == "special-alias"


@pytest.mark.asyncio
async def test_gpt_token_counting():
    """
    Test Token counter
    -> should work for gpt-4
    """
    llm_router = Router(
        model_list=[
            {
                "model_name": "gpt-4",
                "litellm_params": {
                    "model": "gpt-4",
                },
            }
        ]
    )
    setattr(litellm.proxy.proxy_server, "llm_router", llm_router)

    response = await token_counter(
        request=TokenCountRequest(
            model="gpt-4",
            messages=[{"role": "user", "content": "hello"}],
        )
    )
    print("response: ", response)

    assert (
        response.tokenizer_type == "openai_tokenizer"
    )  # SHOULD use the OpenAI tokenizer
    assert response.request_model == "gpt-4"
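

# Illustrative sketch (not part of the original file): a follow-up sanity check that the
# counter also returns a positive token count. This assumes TokenCountResponse exposes a
# `total_tokens` field; adjust the field name if the proxy types differ.
@pytest.mark.asyncio
async def test_gpt_token_count_is_positive():
    llm_router = Router(
        model_list=[
            {
                "model_name": "gpt-4",
                "litellm_params": {
                    "model": "gpt-4",
                },
            }
        ]
    )
    setattr(litellm.proxy.proxy_server, "llm_router", llm_router)

    response = await token_counter(
        request=TokenCountRequest(
            model="gpt-4",
            messages=[{"role": "user", "content": "hello"}],
        )
    )
    print("response: ", response)

    # "hello" should map to at least one token under any tokenizer
    assert response.total_tokens > 0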