ka1kuk committed
Commit a67c3b3
1 Parent(s): ba52802

Upload 4 files

Files changed (4)
  1. Dockerfile +11 -0
  2. main.py +367 -0
  3. proxy_server.py +0 -0
  4. requirements.txt +28 -0
Dockerfile ADDED
@@ -0,0 +1,11 @@
+ FROM python:3.11-slim
+
+ WORKDIR $HOME/app
+
+ COPY . .
+
+ RUN pip install -r requirements.txt
+
+ VOLUME /data
+
+ CMD ["python", "main.py"]
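To try the image locally (a sketch; the `litellm-proxy` tag and host port mapping are illustrative assumptions, not part of this commit): build with `docker build -t litellm-proxy .`, then run `docker run -p 8000:8000 -v litellm-data:/data litellm-proxy`. main.py binds 0.0.0.0:8000 by default, and the named volume keeps the declared /data mount around across container restarts. Note that the original CMD used `python -m main.py`, which fails because `-m` expects a module name, not a filename; it is corrected to `python main.py` above.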
main.py ADDED
@@ -0,0 +1,367 @@
+ import click
+ import subprocess, traceback, json
+ import os, sys
+ import random
+ import importlib.metadata
+
+ def run_ollama_serve():
+     try:
+         command = ["ollama", "serve"]
+
+         with open(os.devnull, "w") as devnull:
+             process = subprocess.Popen(command, stdout=devnull, stderr=devnull)
+     except Exception as e:
+         print(
+             f"""
+             LiteLLM Warning: proxy started with `ollama` model\n`ollama serve` failed with Exception: {e}. \nEnsure you run `ollama serve`
+             """
+         )  # noqa
+
+ def is_port_in_use(port):
+     import socket
+
+     # connect_ex returns 0 when something is already listening on the port
+     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+         return s.connect_ex(("localhost", port)) == 0
+
+ def run_server(
+     host="0.0.0.0",
+     port=8000,
+     api_base=None,
+     api_version="2023-07-01-preview",
+     model=None,
+     alias=None,
+     add_key=None,
+     headers=None,
+     save=False,
+     debug=False,
+     detailed_debug=False,
+     temperature=0.0,
+     max_tokens=1000,
+     request_timeout=10,
+     drop_params=True,
+     add_function_to_prompt=True,
+     config=None,
+     max_budget=100,
+     telemetry=False,
+     test=False,
+     local=False,
+     num_workers=1,
+     test_async=False,
+     num_requests=1,
+     use_queue=False,
+     health=False,
+     version=False,
+ ):
+     global feature_telemetry
+     args = locals()
+     if local:
+         from .proxy_server import app, save_worker_config, usage_telemetry
+     else:
+         try:
+             from litellm.proxy.proxy_server import app, save_worker_config, usage_telemetry
+         except ImportError as e:
+             if "litellm[proxy]" in str(e):
+                 # user is missing a proxy dependency, ask them to pip install litellm[proxy]
+                 raise e
+             else:
+                 # this is just a local/relative import error, user git cloned litellm
+                 from .proxy_server import app, save_worker_config, usage_telemetry
+     feature_telemetry = usage_telemetry
+     if version:
+         pkg_version = importlib.metadata.version("litellm")
+         click.echo(f"\nLiteLLM: Current Version = {pkg_version}\n")
+         return
+     if model and "ollama" in model and api_base is None:
+         run_ollama_serve()
+     if test_async is True:
+         import requests, concurrent.futures, time
+
+         api_base = f"http://{host}:{port}"
+
+         def _make_openai_completion():
+             data = {
+                 "model": "gpt-3.5-turbo",
+                 "messages": [
+                     {"role": "user", "content": "Write a short poem about the moon"}
+                 ],
+             }
+
+             response = requests.post(f"{api_base}/queue/request", json=data)
+
+             response = response.json()
+
+             while True:
+                 try:
+                     url = response["url"]
+                     polling_url = f"{api_base}{url}"
+                     polling_response = requests.get(polling_url)
+                     polling_response = polling_response.json()
+                     print("\n RESPONSE FROM POLLING JOB", polling_response)
+                     status = polling_response["status"]
+                     if status == "finished":
+                         # return the result so the caller can count this call as successful
+                         return polling_response["result"]
+                     print(
+                         f"POLLING JOB {polling_url}\nSTATUS: {status}, \n Response {polling_response}"
+                     )  # noqa
+                     time.sleep(0.5)
+                 except Exception as e:
+                     print("got exception in polling", e)
+                     return None
+
+         # Number of concurrent calls (you can adjust this)
+         concurrent_calls = num_requests
+
+         # List to store the futures of concurrent calls
+         futures = []
+         start_time = time.time()
+         # Make concurrent calls
+         with concurrent.futures.ThreadPoolExecutor(
+             max_workers=concurrent_calls
+         ) as executor:
+             for _ in range(concurrent_calls):
+                 futures.append(executor.submit(_make_openai_completion))
+
+         # Wait for all futures to complete
+         concurrent.futures.wait(futures)
+
+         # Summarize the results
+         successful_calls = 0
+         failed_calls = 0
+
+         for future in futures:
+             if future.done():
+                 if future.result() is not None:
+                     successful_calls += 1
+                 else:
+                     failed_calls += 1
+         end_time = time.time()
+         print(f"Elapsed Time: {end_time - start_time}")
+         print("Load test Summary:")
+         print(f"Total Requests: {concurrent_calls}")
+         print(f"Successful Calls: {successful_calls}")
+         print(f"Failed Calls: {failed_calls}")
+         return
+     if health:
+         import requests
+
+         print("\nLiteLLM: Health Testing models in config")
+         response = requests.get(url=f"http://{host}:{port}/health")
+         print(json.dumps(response.json(), indent=4))
+         return
+     if test:
+         request_model = model or "gpt-3.5-turbo"
+         click.echo(
+             f"\nLiteLLM: Making a test ChatCompletions request to your proxy. Model={request_model}"
+         )
+         import openai
+
+         if test is True:  # flag passed without a value; default to the local proxy
+             api_base = f"http://{host}:{port}"
+         else:  # flag value is a custom api base
+             api_base = test
+         client = openai.OpenAI(api_key="My API Key", base_url=api_base)
+
+         response = client.chat.completions.create(
+             model=request_model,
+             messages=[
+                 {
+                     "role": "user",
+                     "content": "this is a test request, write a short poem",
+                 }
+             ],
+             max_tokens=256,
+         )
+         click.echo(f"\nLiteLLM: response from proxy {response}")
+
+         print(
+             f"\n LiteLLM: Making a test ChatCompletions + streaming request to proxy. Model={request_model}"
+         )
+
+         response = client.chat.completions.create(
+             model=request_model,
+             messages=[
+                 {
+                     "role": "user",
+                     "content": "this is a test request, write a short poem",
+                 }
+             ],
+             stream=True,
+         )
+         for chunk in response:
+             click.echo(f"LiteLLM: streaming response from proxy {chunk}")
+         print("\n making completion request to proxy")
+         response = client.completions.create(
+             model=request_model, prompt="this is a test request, write a short poem"
+         )
+         print(response)
+
+         return
+     else:
+         if headers:
+             headers = json.loads(headers)
+         save_worker_config(
+             model=model,
+             alias=alias,
+             api_base=api_base,
+             api_version=api_version,
+             debug=debug,
+             detailed_debug=detailed_debug,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             request_timeout=request_timeout,
+             max_budget=max_budget,
+             telemetry=telemetry,
+             drop_params=drop_params,
+             add_function_to_prompt=add_function_to_prompt,
+             headers=headers,
+             save=save,
+             config=config,
+             use_queue=use_queue,
+         )
+         try:
+             import uvicorn
+
+             if os.name == "nt":
+                 pass
+             else:
+                 import gunicorn.app.base
+         except ImportError:
+             raise ImportError(
+                 "uvicorn and gunicorn need to be installed. Run - `pip install 'litellm[proxy]'`"
+             )
+
+         if config is not None:
+             """
+             Allow user to pass in db url via config
+
+             read from there and save it to os.environ['DATABASE_URL']
+             """
+             try:
+                 import yaml
+             except ImportError:
+                 raise ImportError(
+                     "yaml needs to be imported. Run - `pip install 'litellm[proxy]'`"
+                 )
+
+             if os.path.exists(config):
+                 with open(config, "r") as config_file:
+                     config = yaml.safe_load(config_file)
+             general_settings = config.get("general_settings", {})
+             database_url = general_settings.get("database_url", None)
+             if database_url and database_url.startswith("os.environ/"):
+                 original_dir = os.getcwd()
+                 # set the working directory to where this script is
+                 sys.path.insert(
+                     0, os.path.abspath("../..")
+                 )  # Adds the parent directory to the system path - for litellm local dev
+                 import litellm
+
+                 database_url = litellm.get_secret(database_url)
+                 os.chdir(original_dir)
+             if database_url is not None and isinstance(database_url, str):
+                 os.environ["DATABASE_URL"] = database_url
+
+         if os.getenv("DATABASE_URL", None) is not None:
+             try:
+                 # probe for the prisma CLI before attempting a db push
+                 subprocess.run(["prisma"], capture_output=True)
+                 is_prisma_runnable = True
+             except FileNotFoundError:
+                 is_prisma_runnable = False
+
+             if is_prisma_runnable:
+                 # run prisma db push, before starting server
+                 # Save the current working directory
+                 original_dir = os.getcwd()
+                 # set the working directory to where this script is
+                 abspath = os.path.abspath(__file__)
+                 dname = os.path.dirname(abspath)
+                 os.chdir(dname)
+                 try:
+                     subprocess.run(
+                         ["prisma", "db", "push", "--accept-data-loss"]
+                     )  # this looks like a weird edge case when prisma just won't start on render. we need to have the --accept-data-loss
+                 finally:
+                     os.chdir(original_dir)
+             else:
+                 print(
+                     "Unable to connect to DB. DATABASE_URL found in environment, but prisma package not found."
+                 )
+         if port == 8000 and is_port_in_use(port):
+             # default port is taken; fall back to a randomly chosen unprivileged port
+             port = random.randint(1024, 49152)
+         from litellm.proxy.proxy_server import app
+
+         uvicorn.run(app, host=host, port=port)  # run uvicorn
+         # if os.name == "nt":
+         # else:
+         #     import gunicorn.app.base
+
+         #     # Gunicorn Application Class
+         #     class StandaloneApplication(gunicorn.app.base.BaseApplication):
+         #         def __init__(self, app, options=None):
+         #             self.options = options or {}  # gunicorn options
+         #             self.application = app  # FastAPI app
+         #             super().__init__()
+
+         #             _endpoint_str = (
+         #                 f"curl --location 'http://0.0.0.0:{port}/chat/completions' \\"
+         #             )
+         #             curl_command = (
+         #                 _endpoint_str
+         #                 + """
+         #             --header 'Content-Type: application/json' \\
+         #             --data ' {
+         #             "model": "gpt-3.5-turbo",
+         #             "messages": [
+         #                 {
+         #                 "role": "user",
+         #                 "content": "what llm are you"
+         #                 }
+         #             ]
+         #             }'
+         #             \n
+         #             """
+         #             )
+         #             print()  # noqa
+         #             print(  # noqa
+         #                 f'\033[1;34mLiteLLM: Test your local proxy with: "litellm --test" This runs an openai.ChatCompletion request to your proxy [In a new terminal tab]\033[0m\n'
+         #             )
+         #             print(  # noqa
+         #                 f"\033[1;34mLiteLLM: Curl Command Test for your local proxy\n {curl_command} \033[0m\n"
+         #             )
+         #             print(
+         #                 "\033[1;34mDocs: https://docs.litellm.ai/docs/simple_proxy\033[0m\n"
+         #             )  # noqa
+         #             print(  # noqa
+         #                 f"\033[1;34mSee all Router/Swagger docs on http://0.0.0.0:{port} \033[0m\n"
+         #             )  # noqa
+
+         #         def load_config(self):
+         #             # note: This Loads the gunicorn config - has nothing to do with LiteLLM Proxy config
+         #             config = {
+         #                 key: value
+         #                 for key, value in self.options.items()
+         #                 if key in self.cfg.settings and value is not None
+         #             }
+         #             for key, value in config.items():
+         #                 self.cfg.set(key.lower(), value)
+
+         #         def load(self):
+         #             # gunicorn app function
+         #             return self.application
+
+         #     gunicorn_options = {
+         #         "bind": f"{host}:{port}",
+         #         "workers": num_workers,  # default is 1
+         #         "worker_class": "uvicorn.workers.UvicornWorker",
+         #         "preload": True,  # Add the preload flag,
+         #         "accesslog": "-",  # Log to stdout
+         #         "access_log_format": '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s',
+         #     }
+         #     StandaloneApplication(
+         #         app=app, options=gunicorn_options
+         #     ).run()  # Run gunicorn
+
+
+ if __name__ == "__main__":
+     run_server()
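Once the server is up, a minimal sanity check against it might look like the following (a sketch mirroring the `--test` code path above; the localhost URL and placeholder API key are assumptions, since the proxy here does not require a real key):

    import openai

    # the proxy exposes an OpenAI-compatible API, so the stock client works against it
    client = openai.OpenAI(api_key="sk-placeholder", base_url="http://localhost:8000")
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "this is a test request, write a short poem"}],
    )
    print(response.choices[0].message.content)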
proxy_server.py ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,28 @@
+ # LITELLM PROXY DEPENDENCIES #
+ anyio==4.2.0 # openai + http req.
+ openai>=1.0.0 # openai req.
+ fastapi # server dep
+ pydantic>=2.5 # openai req.
+ backoff==2.2.1 # server dep
+ pyyaml==6.0 # server dep
+ uvicorn==0.22.0 # server dep
+ gunicorn==21.2.0 # server dep
+ boto3==1.28.58 # aws bedrock/sagemaker calls
+ redis==4.6.0 # caching
+ prisma==0.11.0 # for db
+ mangum==0.17.0 # for aws lambda functions
+ google-generativeai==0.1.0 # for vertex ai calls
+ async_generator==1.10.0 # for async ollama calls
+ traceloop-sdk==0.5.3 # for open telemetry logging
+ langfuse>=2.0.0 # for langfuse self-hosted logging
+ orjson==3.9.7 # fast /embedding responses
+ ### LITELLM PACKAGE DEPENDENCIES
+ python-dotenv>=0.2.0 # for env
+ tiktoken>=0.4.0 # for calculating usage
+ importlib-metadata>=6.8.0 # for random utils
+ tokenizers==0.14.0 # for calculating usage
+ click==8.1.7 # for proxy cli
+ jinja2==3.1.2 # for prompt templates
+ certifi>=2023.7.22 # [TODO] clean up
+ aiohttp==3.9.0 # for network calls
+ ####