Davidtran99 committed
Commit 18d99df · Parent(s): 7e53bd3

fix: skip LLM and reranker preload to reduce CPU usage

Files changed (1)
  1. entrypoint.sh +5 -27
entrypoint.sh CHANGED
@@ -54,33 +54,11 @@ try:
 except Exception as e:
     print(f'[ENTRYPOINT] ⚠️ Embedding model preload failed: {e}', flush=True)
 
-# 2. Preload LLM Model (llama.cpp)
-llm_provider = os.environ.get('DEFAULT_LLM_PROVIDER') or os.environ.get('LLM_PROVIDER', '')
-if llm_provider.lower() == 'llama_cpp':
-    try:
-        print('[ENTRYPOINT] 📦 Preloading LLM model (llama.cpp)...', flush=True)
-        from hue_portal.chatbot.llm_integration import get_llm_generator
-        llm_gen = get_llm_generator()
-        if llm_gen and hasattr(llm_gen, 'llama_cpp') and llm_gen.llama_cpp:
-            print('[ENTRYPOINT] ✅ LLM model preloaded successfully', flush=True)
-        else:
-            print('[ENTRYPOINT] ⚠️ LLM model not loaded (may load on first request)', flush=True)
-    except Exception as e:
-        print(f'[ENTRYPOINT] ⚠️ LLM model preload failed: {e} (will load on first request)', flush=True)
-else:
-    print(f'[ENTRYPOINT] ⏭️ Skipping LLM preload (provider is {llm_provider or \"not set\"}, not llama_cpp)', flush=True)
-
-# 3. Preload Reranker Model (lazy, but trigger import)
-try:
-    print('[ENTRYPOINT] 📦 Preloading reranker model...', flush=True)
-    from hue_portal.core.reranker import get_reranker
-    reranker = get_reranker()
-    if reranker:
-        print('[ENTRYPOINT] ✅ Reranker model preloaded successfully', flush=True)
-    else:
-        print('[ENTRYPOINT] ⚠️ Reranker model not loaded (may load on first request)', flush=True)
-except Exception as e:
-    print(f'[ENTRYPOINT] ⚠️ Reranker preload failed: {e} (will load on first request)', flush=True)
+# 2. SKIP LLM Preload (CPU optimization - too heavy, will load on first request)
+print('[ENTRYPOINT] ⏭️ Skipping LLM preload (CPU optimization - will load lazily on first request)', flush=True)
+
+# 3. SKIP Reranker Preload (CPU optimization - too heavy, will load on first request)
+print('[ENTRYPOINT] ⏭️ Skipping reranker preload (CPU optimization - will load lazily on first request)', flush=True)
 
 print('[ENTRYPOINT] ✅ Model preload completed', flush=True) # v2.0-preload-all
 " || log "⚠️ Model preload had errors (models will load on first request)"