Med Tiouti committed
Commit adfe007 · Parent: bccedad

Try with 7b

Files changed (2):
  1. README.md (+1 −1)
  2. app.py (+3 −3)
README.md CHANGED
@@ -9,7 +9,7 @@ app_file: app.py
 pinned: false
 models:
 - sentence-transformers/all-MiniLM-L6-v2
-- daryl149/llama-2-13b-chat-hf
+- daryl149/llama-2-7b-chat-hf
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -21,8 +21,8 @@ from langchain import PromptTemplate, LLMChain
 from langchain.vectorstores import FAISS
 
 
-def get_model(model_name):
-    model_repo = 'daryl149/llama-2-13b-chat-hf'
+def get_model():
+    model_repo = 'daryl149/llama-2-7b-chat-hf'
 
     tokenizer = AutoTokenizer.from_pretrained(model_repo, use_fast=True)
 
@@ -39,7 +39,7 @@ def get_model(model_name):
     return tokenizer,model,max_len
 
 
-tokenizer, model, max_len = get_model("llama2-13b")
+tokenizer, model, max_len = get_model()
 
 
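
For reference, the hunks above elide most of the body of get_model() (old lines 26–38), so the actual loading details are not visible in this commit. Below is a minimal sketch of how the 7b checkpoint could be loaded with the same return signature, assuming a standard transformers setup; the dtype, device placement, and max_len value are illustrative assumptions, not the Space's real code.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch


def get_model():
    # This commit switches the Space from the 13b to the 7b chat checkpoint.
    model_repo = 'daryl149/llama-2-7b-chat-hf'

    # Shown in the diff: fast tokenizer for the selected repo.
    tokenizer = AutoTokenizer.from_pretrained(model_repo, use_fast=True)

    # Assumed loading step (the diff elides these lines): half precision with
    # automatic device placement; the Space's actual settings may differ.
    model = AutoModelForCausalLM.from_pretrained(
        model_repo,
        torch_dtype=torch.float16,
        device_map='auto',
    )

    # Assumed context length for Llama 2; the real value comes from the
    # elided part of the function.
    max_len = 2048

    return tokenizer, model, max_len


tokenizer, model, max_len = get_model()
```

Dropping the unused model_name parameter matches the new call site in the second hunk, where get_model() is now called with no arguments.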