Update to use LiteLLM proxy server configuration
Files changed:
- api.py  +0 -42 (deleted)
- app.py  +20 -6
- config.yaml  +13 -4
api.py (DELETED)

```diff
@@ -1,42 +0,0 @@
-from fastapi import FastAPI
-from fastapi.staticfiles import StaticFiles
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.openapi.docs import get_swagger_ui_html
-from litellm.proxy.proxy_server import app as proxy_app
-
-app = FastAPI(
-    title="LiteLLM API",
-    version="1.0.0",
-)
-
-# Add CORS middleware
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-# Mount static files
-app.mount("/static", StaticFiles(directory="static"), name="static")
-
-# Mount the LiteLLM Proxy server
-app.mount("/proxy", proxy_app)
-
-@app.get("/", include_in_schema=False)
-async def custom_swagger_ui_html():
-    return get_swagger_ui_html(
-        openapi_url="/openapi.json",
-        title="LiteLLM API",
-        swagger_js_url="/static/swagger/swagger-ui-bundle.js",
-        swagger_css_url="/static/swagger/swagger-ui.css",
-    )
-
-@app.get("/health")
-def health_check():
-    return {"status": "healthy"}
-
-if __name__ == "__main__":
-    import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=7860)
```
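Before this change, the Space served the pre-built litellm proxy app as a FastAPI sub-application under /proxy. A minimal sketch of that mounting pattern, with toy `outer`/`inner` names that are not from the commit:

```python
# Sketch of the sub-application pattern the deleted api.py used:
# one FastAPI app mounted inside another under a path prefix.
from fastapi import FastAPI

inner = FastAPI()

@inner.get("/ping")
def ping():
    # Reachable at /proxy/ping once mounted below.
    return {"pong": True}

outer = FastAPI()
outer.mount("/proxy", inner)  # all /proxy/* requests are handled by `inner`
```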
app.py (CHANGED)

```diff
@@ -1,11 +1,25 @@
 from fastapi import FastAPI
-from litellm.proxy.proxy_server import …
+from litellm.proxy.proxy_server import ProxyServer
+from litellm.proxy.config import ProxyConfig
 
-app = FastAPI(…
+app = FastAPI(
+    title="LiteLLM Proxy",
+    description="LiteLLM OpenAI-compatible proxy",
+    version="1.0",
+    docs_url="/proxy/docs",  # Swagger UI
+    redoc_url="/proxy/redoc",  # Optional: ReDoc UI
+    openapi_url="/proxy/openapi.json"
+)
 
-# …
-…
+# Load LiteLLM Proxy
+proxy_config = ProxyConfig()
+proxy_server = ProxyServer(config=proxy_config)
+proxy_server.add_routes(app)
 
 @app.get("/")
-def …
-    return {"message": "LiteLLM …
+async def root():
+    return {"message": "LiteLLM is running. Visit /proxy/docs"}
+
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=7860)
```
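Once the new app.py is running on port 7860, the proxy should accept OpenAI-style requests authorized with the master key from config.yaml. The exact route prefix depends on what ProxyServer.add_routes registers; the /chat/completions path at the app root is an assumption in this sketch:

```python
# Hedged usage sketch: send an OpenAI-style chat request to the proxy.
# The /chat/completions path is an assumption; adjust it if add_routes
# registers its endpoints under a different prefix.
import requests

resp = requests.post(
    "http://localhost:7860/chat/completions",
    headers={"Authorization": "Bearer sk-1234"},  # master_key from config.yaml
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello!"}],
    },
    timeout=30,
)
print(resp.status_code, resp.json())
```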
config.yaml (CHANGED)

```diff
@@ -3,13 +3,22 @@ model_list:
   litellm_params:
     model: gpt-3.5-turbo
 
+server_settings:
+  port: 7860
+  host: "0.0.0.0"
+  environment: "production"
+
 general_settings:
+  # Master key for admin access
   master_key: "sk-1234"
-
-  # Enable UI dashboard
+  # Enable UI features
   ui_features:
     analytics_dashboard: true
     model_config_management: true
     key_management: true
-
-
+  # Allow all origins for CORS
+  allow_origins: ["*"]
+  # Other configurations
+  save_responses: true
+  cache_responses: false
+  log_requests: true
```
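The new server_settings block keeps host and port out of the code. The commit does not show whether litellm itself consumes this block, so here is a minimal sketch (assuming PyYAML is installed) of reading it manually so the bind address comes from config.yaml rather than being hard-coded in app.py:

```python
# Minimal sketch: read server_settings from config.yaml and use the values
# to bind the server, instead of hard-coding host/port in app.py.
import yaml

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

server = cfg.get("server_settings", {})
host = server.get("host", "0.0.0.0")
port = int(server.get("port", 7860))
print(f"binding {host}:{port} in {server.get('environment', 'dev')} mode")
```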