jordyvl committed on
Commit
31eef4a
1 Parent(s): d4e3df1

not starting

Browse files
Files changed (2) hide show
  1. README.md +4 -1
  2. app.py +3 -2
README.md CHANGED
@@ -10,7 +10,10 @@ pinned: false
10
  preload_from_hub:
11
  - "BAAI/bge-small-en-v1.5"
12
  - "HuggingFaceH4/zephyr-7b-alpha"
13
- - "microsoft/phi-2"
14
  ---
 
15
  - "meta-llama/Meta-Llama-3-8B"
16
 
 
 
 
 
10
  preload_from_hub:
11
  - "BAAI/bge-small-en-v1.5"
12
  - "HuggingFaceH4/zephyr-7b-alpha"
 
13
  ---
14
+ - "microsoft/phi-2"
15
  - "meta-llama/Meta-Llama-3-8B"
16
 
17
+ Follow-up with langchain: https://medium.com/pythoneers/offline-rag-with-llamaindex-and-tiny-and-small-llms-ab2acac936b0
18
+ https://github.com/jeremy-k3/notebooks/blob/main/RAG_with_LlamaIndex_tiny_small_LLMS.ipynb
19
+ https://www.kaggle.com/code/iamleonie/advanced-rag-with-gemma-weaviate-and-llamaindex#Step-6:-Explore-a-Naive-RAG-Query-Engine
app.py CHANGED
@@ -20,9 +20,10 @@ CHEAPMODE = torch.cuda.is_available()
20
  # LLM = "HuggingFaceH4/zephyr-7b-alpha" if not CHEAPMODE else "microsoft/phi-2"
21
 
22
  config = {
23
- "LLM": "meta-llama/Meta-Llama-3-8B",
 
24
  # "LLM": "microsoft/phi-2",
25
- # "LLM": "HuggingFaceH4/zephyr-7b-alpha",
26
  "embeddings": "BAAI/bge-small-en-v1.5",
27
  "similarity_top_k": 2,
28
  "context_window": 2048,
 
20
  # LLM = "HuggingFaceH4/zephyr-7b-alpha" if not CHEAPMODE else "microsoft/phi-2"
21
 
22
  config = {
23
+ # "LLM": "meta-llama/Meta-Llama-3-8B",
24
+ # "LLM": "google/gemma-2b",
25
  # "LLM": "microsoft/phi-2",
26
+ "LLM": "HuggingFaceH4/zephyr-7b-alpha",
27
  "embeddings": "BAAI/bge-small-en-v1.5",
28
  "similarity_top_k": 2,
29
  "context_window": 2048,