srishtichugh committed on
Commit
7e3d2d7
·
1 Parent(s): 28c1557

added env

Browse files
Files changed (2) hide show
  1. Dockerfile +2 -0
  2. env.example +23 -0
Dockerfile CHANGED
@@ -21,4 +21,6 @@ EXPOSE 8000
21
  HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
22
  CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')" || exit 1
23
 
 
 
24
  CMD ["uvicorn", "server.app:app", "--host", "0.0.0.0", "--port", "8000"]
 
21
  HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
22
  CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')" || exit 1
23
 
24
+ # server.app:app — runs server/app.py from /app working directory
25
+ # models.py, client.py, inference.py live at /app root (on PYTHONPATH automatically)
26
  CMD ["uvicorn", "server.app:app", "--host", "0.0.0.0", "--port", "8000"]
env.example ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ============================================================
2
+ # Data Cleaning OpenEnv — Environment Variables
3
+ # Copy this file to .env and fill in your values.
4
+ # Never commit your real .env to version control.
5
+ # ============================================================
6
+
7
+ # LLM API endpoint (OpenAI-compatible).
8
+ # Default points to OpenAI; swap for any compatible provider.
9
+ API_BASE_URL=https://api.openai.com/v1
10
+
11
+ # Model identifier to use for baseline inference.
12
+ # Examples: gpt-4o-mini, gpt-4o, mistralai/Mistral-7B-Instruct-v0.2
13
+ MODEL_NAME=gpt-4o-mini
14
+
15
+ # API key for the LLM provider above.
16
+ # For OpenAI: starts with sk-...
17
+ # For HuggingFace Inference: starts with hf_...
18
+ HF_TOKEN=your-api-key-here
19
+
20
+ # Base URL of the running environment server.
21
+ # Use http://localhost:8000 for local development,
22
+ # or your HuggingFace Space URL for remote runs.
23
+ ENV_URL=http://localhost:8000