csuhan committed
Commit cb059ae
1 Parent(s): bafac31
Files changed (1)
  1. app.py +8 -7
app.py CHANGED
@@ -14,6 +14,8 @@ from fairscale.nn.model_parallel.initialize import initialize_model_parallel
 
 from llama import LLaMA, ModelArgs, Tokenizer, Transformer, VisionModel
 
+os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
+
 PROMPT_DICT = {
     "prompt_input": (
         "Below is an instruction that describes a task, paired with an input that provides further context. "
@@ -166,13 +168,12 @@ def download_llama_adapter(instruct_adapter_path, caption_adapter_path):
         os.system(f"wget -q -O {caption_adapter_path} https://github.com/ZrrSkywalker/LLaMA-Adapter/releases/download/v.1.0.0/llama_adapter_len10_layer30_caption_vit_l.pth")
 
 
-# ckpt_path = "/data1/llma/7B/consolidated.00.pth"
-# param_path = "/data1/llma/7B/params.json"
-# tokenizer_path = "/data1/llma/tokenizer.model"
-ckpt_path = hf_hub_download(repo_id="nyanko7/LLaMA-7B", filename="consolidated.00.pth")
-# ckpt_path = None
-param_path = hf_hub_download(repo_id="nyanko7/LLaMA-7B", filename="params.json")
-tokenizer_path = hf_hub_download(repo_id="nyanko7/LLaMA-7B", filename="tokenizer.model")
+ckpt_path = "/data1/llma/7B/consolidated.00.pth"
+param_path = "/data1/llma/7B/params.json"
+tokenizer_path = "/data1/llma/tokenizer.model"
+# ckpt_path = hf_hub_download(repo_id="nyanko7/LLaMA-7B", filename="consolidated.00.pth")
+# param_path = hf_hub_download(repo_id="nyanko7/LLaMA-7B", filename="params.json")
+# tokenizer_path = hf_hub_download(repo_id="nyanko7/LLaMA-7B", filename="tokenizer.model")
 
 instruct_adapter_path = "llama_adapter_len10_layer30_release.pth"
 caption_adapter_path = "llama_adapter_len10_layer30_caption_vit_l.pth"
 max_seq_len = 512
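The second hunk reverts the weight resolution: instead of pulling consolidated.00.pth, params.json and tokenizer.model from the nyanko7/LLaMA-7B Hub repo with hf_hub_download, the paths now point at a local /data1/llma checkpoint, and the Hub calls are kept only as comments. The sketch below shows how such paths are typically consumed when building the model; the loading code in app.py is not part of this diff, so the helper name and the exact ModelArgs handling are assumptions based on the reference LLaMA loading code.

```python
# Hypothetical sketch (not the app.py implementation): how ckpt_path, param_path
# and tokenizer_path are typically turned into a model, following the reference
# LLaMA loading code that ships with this repo's llama package.
import json
import torch
from llama import LLaMA, ModelArgs, Tokenizer, Transformer


def load_llama(ckpt_path, param_path, tokenizer_path, max_seq_len=512, max_batch_size=1):
    checkpoint = torch.load(ckpt_path, map_location="cpu")   # consolidated.00.pth
    with open(param_path, "r") as f:
        params = json.load(f)                                 # params.json
    model_args = ModelArgs(max_seq_len=max_seq_len, max_batch_size=max_batch_size, **params)
    tokenizer = Tokenizer(model_path=tokenizer_path)          # tokenizer.model
    model_args.vocab_size = tokenizer.n_words
    model = Transformer(model_args)
    model.load_state_dict(checkpoint, strict=False)
    return LLaMA(model, tokenizer)
```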