davidtran999 committed on
Commit d0d16c1 · verified · 1 Parent(s): 64e745f

Upload env.example with huggingface_hub

Files changed (1)
  1. env.example +70 -0
env.example ADDED
@@ -0,0 +1,70 @@
+ #############################################
+ ## Django / Local Development
+ #############################################
+ DJANGO_SECRET_KEY=change-me-in-development
+ DJANGO_DEBUG=true
+ DJANGO_ALLOWED_HOSTS=localhost,127.0.0.1
+
+ #############################################
+ ## Local PostgreSQL (Docker compose defaults)
+ #############################################
+ POSTGRES_HOST=localhost
+ POSTGRES_PORT=5543
+ POSTGRES_DB=hue_portal
+ POSTGRES_USER=hue
+ POSTGRES_PASSWORD=huepass
+
+ #############################################
+ ## Redis Cache (Optional - for query rewrite and prefetch caching)
+ #############################################
+ # Supports Upstash and Railway Redis free tier
+ REDIS_URL=redis://localhost:6380/0
+ # Cache TTLs (in seconds)
+ CACHE_QUERY_REWRITE_TTL=3600 # 1 hour
+ CACHE_PREFETCH_TTL=1800 # 30 minutes
+
+ #############################################
+ ## Hugging Face / Tunnel automation
+ #############################################
+ HF_SPACE_ID=davidtran999/hue-portal-backend
+ # If HF_TOKEN is not exported in the shell, the tool will try to read ~/.cache/huggingface/token
+ HF_TOKEN=
+
+ # Ngrok / Cloudflare tunnel settings
+ NGROK_BIN=ngrok
+ NGROK_REGION=ap
+ NGROK_AUTHTOKEN=
+ PG_TUNNEL_LOCAL_PORT=5543
+ PG_TUNNEL_WATCH_INTERVAL=45
+
+ # Credentials that will be pushed to HF secrets
+ PG_TUNNEL_USER=hue_remote
+ PG_TUNNEL_PASSWORD=huepass123
+ PG_TUNNEL_DB=hue_portal
+
+ #############################################
+ ## LLM / llama.cpp (Qwen2.5-1.5b or Vi-Qwen2-3B-RAG) defaults
+ #############################################
+ DEFAULT_LLM_PROVIDER=llama_cpp
+ LLM_PROVIDER=llama_cpp
+ # Model path (local file path or Hugging Face repo)
+ LLM_MODEL_PATH=/app/backend/models/qwen2.5-1.5b-instruct-q5_k_m.gguf
+ # Future: Vi-Qwen2-3B-RAG (when Phase 3 is complete)
+ # LLM_MODEL_PATH=/app/backend/models/vi-qwen2-3b-rag-q5_k_m.gguf
+ LLAMA_CPP_CONTEXT=4096
+ LLAMA_CPP_THREADS=2
+ LLAMA_CPP_BATCH=512
+ LLAMA_CPP_MAX_TOKENS=512
+ LLAMA_CPP_TEMPERATURE=0.35
+ LLAMA_CPP_TOP_P=0.85
+ LLAMA_CPP_REPEAT_PENALTY=1.1
+ LLAMA_CPP_USE_MMAP=true
+ LLAMA_CPP_USE_MLOCK=true
+ RUN_HEAVY_STARTUP_TASKS=0
+
+ #############################################
+ ## Frontend
+ #############################################
+ # Set VITE_API_BASE to point the frontend at a different API (e.g. an HF Space)
+ VITE_API_BASE=
+
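
For context, a minimal sketch of how the Django and PostgreSQL variables above might be read in a settings.py. The project's actual settings module is not part of this commit, so the layout is an assumption; the defaults simply mirror env.example.

import os

# Core Django switches, parsed from the environment.
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "change-me-in-development")
DEBUG = os.environ.get("DJANGO_DEBUG", "false").lower() == "true"
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS", "localhost,127.0.0.1").split(",")

# PostgreSQL connection built from the POSTGRES_* variables.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "HOST": os.environ.get("POSTGRES_HOST", "localhost"),
        "PORT": os.environ.get("POSTGRES_PORT", "5543"),
        "NAME": os.environ.get("POSTGRES_DB", "hue_portal"),
        "USER": os.environ.get("POSTGRES_USER", "hue"),
        "PASSWORD": os.environ.get("POSTGRES_PASSWORD", "huepass"),
    }
}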
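
The cache TTLs are plain second counts. Below is a redis-py sketch of how CACHE_QUERY_REWRITE_TTL might be applied to a query-rewrite cache; the helper names and key scheme are hypothetical, not the project's actual cache layer.

import os
import redis

r = redis.Redis.from_url(os.environ.get("REDIS_URL", "redis://localhost:6380/0"))
REWRITE_TTL = int(os.environ.get("CACHE_QUERY_REWRITE_TTL", "3600"))

def cache_rewrite(query: str, rewritten: str) -> None:
    # SETEX stores the value with an expiry, so stale rewrites age out on their own.
    r.setex(f"qr:{query}", REWRITE_TTL, rewritten)

def get_cached_rewrite(query: str) -> str | None:
    value = r.get(f"qr:{query}")
    return value.decode() if value is not None else None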
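
The HF_TOKEN comment describes a fallback to the cached Hugging Face token file. A sketch of that lookup, assuming the tool does exactly what the comment says:

import os
from pathlib import Path

def resolve_hf_token() -> str | None:
    # Prefer the exported variable; otherwise try the hub's cached token file.
    token = os.environ.get("HF_TOKEN")
    if token:
        return token
    cached = Path.home() / ".cache" / "huggingface" / "token"
    return cached.read_text().strip() if cached.exists() else None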
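
The PG_TUNNEL_* credentials are described as being pushed to HF secrets. huggingface_hub (the library named in the commit message) exposes HfApi.add_space_secret for this; the loop below is a sketch, not the project's actual automation script.

import os
from huggingface_hub import HfApi

api = HfApi(token=os.environ.get("HF_TOKEN") or None)
space = os.environ.get("HF_SPACE_ID", "davidtran999/hue-portal-backend")
for key in ("PG_TUNNEL_USER", "PG_TUNNEL_PASSWORD", "PG_TUNNEL_DB"):
    # add_space_secret creates or updates a single secret on the Space.
    api.add_space_secret(repo_id=space, key=key, value=os.environ[key])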
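
Finally, a sketch of wiring the LLAMA_CPP_* values into llama-cpp-python, which the llama_cpp provider name suggests is the binding in use. The constructor and sampling arguments are real llama_cpp.Llama parameters; the project's provider wrapper itself is an assumption.

import os
from llama_cpp import Llama

llm = Llama(
    model_path=os.environ["LLM_MODEL_PATH"],
    n_ctx=int(os.environ.get("LLAMA_CPP_CONTEXT", "4096")),
    n_threads=int(os.environ.get("LLAMA_CPP_THREADS", "2")),
    n_batch=int(os.environ.get("LLAMA_CPP_BATCH", "512")),
    use_mmap=os.environ.get("LLAMA_CPP_USE_MMAP", "true") == "true",
    use_mlock=os.environ.get("LLAMA_CPP_USE_MLOCK", "true") == "true",
)

# One completion call using the sampling settings from the env file.
out = llm(
    "Hello",
    max_tokens=int(os.environ.get("LLAMA_CPP_MAX_TOKENS", "512")),
    temperature=float(os.environ.get("LLAMA_CPP_TEMPERATURE", "0.35")),
    top_p=float(os.environ.get("LLAMA_CPP_TOP_P", "0.85")),
    repeat_penalty=float(os.environ.get("LLAMA_CPP_REPEAT_PENALTY", "1.1")),
)
print(out["choices"][0]["text"])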