Vision-CAIR committed
Commit
c405430
1 Parent(s): a00877d

Update minigpt4/models/mini_gpt4.py

Files changed (1)
  1. minigpt4/models/mini_gpt4.py +3 -3
minigpt4/models/mini_gpt4.py CHANGED
@@ -87,16 +87,16 @@ class MiniGPT4(Blip2Base):
         print('Loading Q-Former Done')
 
         print('Loading LLAMA')
-        self.llama_tokenizer = LlamaTokenizer.from_pretrained('Vision-CAIR/vicuna', use_fast=False, use_auth_token='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', subfolder="vicuna")
+        self.llama_tokenizer = LlamaTokenizer.from_pretrained('Vision-CAIR/vicuna', use_fast=False, use_auth_token='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')
         self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
 
         if llama_cache_dir:
             self.llama_model = LlamaForCausalLM.from_pretrained(
-                'Vision-CAIR/vicuna', load_in_8bit=True, torch_dtype=torch.float16, device_map="auto", use_auth_token='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', subfolder="vicuna"
+                'Vision-CAIR/vicuna', load_in_8bit=True, torch_dtype=torch.float16, device_map="auto", use_auth_token='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'
             )
         else:
             self.llama_model = LlamaForCausalLM.from_pretrained(
-                'Vision-CAIR/vicuna', load_in_8bit=True, torch_dtype=torch.float16, device_map="auto", use_auth_token='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', subfolder="vicuna"
+                'Vision-CAIR/vicuna', load_in_8bit=True, torch_dtype=torch.float16, device_map="auto", use_auth_token='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'
             )
         for name, param in self.llama_model.named_parameters():
             param.requires_grad = False
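
The commit drops the subfolder="vicuna" argument from both from_pretrained calls, so the tokenizer and the weights are now resolved from the root of the Vision-CAIR/vicuna repository. Below is a minimal standalone sketch of the updated loading path, not the full MiniGPT4 class. It assumes a transformers version that ships LlamaTokenizer/LlamaForCausalLM and supports 8-bit loading via bitsandbytes; HF_TOKEN is a placeholder for your own Hugging Face access token, not the token shown in the diff.

import torch
from transformers import LlamaTokenizer, LlamaForCausalLM

# Placeholder (assumption): substitute a real Hugging Face access token that
# can read the Vision-CAIR/vicuna repository.
HF_TOKEN = "hf_..."

# Tokenizer is loaded from the repository root; no subfolder="vicuna" anymore.
llama_tokenizer = LlamaTokenizer.from_pretrained(
    'Vision-CAIR/vicuna', use_fast=False, use_auth_token=HF_TOKEN
)
llama_tokenizer.pad_token = llama_tokenizer.eos_token

# Weights are likewise loaded from the repository root; 8-bit loading plus
# device_map="auto" keeps GPU memory usage down.
llama_model = LlamaForCausalLM.from_pretrained(
    'Vision-CAIR/vicuna',
    load_in_8bit=True,
    torch_dtype=torch.float16,
    device_map="auto",
    use_auth_token=HF_TOKEN,
)

# As in the diff, the language model is then frozen.
for name, param in llama_model.named_parameters():
    param.requires_grad = False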