Vision-CAIR committed on
Commit
e436189
1 Parent(s): 5f49fec

Update minigpt4/models/base_model.py

Browse files
Files changed (1) hide show
  1. minigpt4/models/base_model.py +5 -5
minigpt4/models/base_model.py CHANGED
@@ -171,22 +171,22 @@ class BaseModel(nn.Module):
171
  def init_llm(cls, llama_model_path, low_resource=False, low_res_device=0, lora_r=0,
172
  lora_target_modules=["q_proj","v_proj"], **lora_kargs):
173
  logging.info('Loading LLAMA')
174
- llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False)
175
  llama_tokenizer.pad_token = "$$"
176
 
177
  if low_resource:
178
  llama_model = LlamaForCausalLM.from_pretrained(
179
- llama_model_path,
180
  torch_dtype=torch.float16,
181
  load_in_8bit=True,
182
  device_map={'': low_res_device},
183
- # use_auth_token=True,
184
  )
185
  else:
186
  llama_model = LlamaForCausalLM.from_pretrained(
187
- llama_model_path,
188
  torch_dtype=torch.float16,
189
- # use_auth_token=True,
190
  )
191
 
192
  if lora_r > 0:
 
171
  def init_llm(cls, llama_model_path, low_resource=False, low_res_device=0, lora_r=0,
172
  lora_target_modules=["q_proj","v_proj"], **lora_kargs):
173
  logging.info('Loading LLAMA')
174
+ llama_tokenizer = LlamaTokenizer.from_pretrained("Vision-CAIR/llama-2-7b-chat-pytorch", use_fast=False, use_auth_token=True)
175
  llama_tokenizer.pad_token = "$$"
176
 
177
  if low_resource:
178
  llama_model = LlamaForCausalLM.from_pretrained(
179
+ "Vision-CAIR/llama-2-7b-chat-pytorch",
180
  torch_dtype=torch.float16,
181
  load_in_8bit=True,
182
  device_map={'': low_res_device},
183
+ use_auth_token=True,
184
  )
185
  else:
186
  llama_model = LlamaForCausalLM.from_pretrained(
187
+ "Vision-CAIR/llama-2-7b-chat-pytorch",
188
  torch_dtype=torch.float16,
189
+ use_auth_token=True,
190
  )
191
 
192
  if lora_r > 0: