Vision-CAIR committed on
Commit
0dab6a6
1 Parent(s): eaf3b99

Update minigpt4/models/mini_gpt4.py

Browse files
Files changed (1) hide show
  1. minigpt4/models/mini_gpt4.py +2 -2
minigpt4/models/mini_gpt4.py CHANGED
@@ -30,7 +30,7 @@ class MiniGPT4(Blip2Base):
30
  def __init__(
31
  self,
32
  vit_model="eva_clip_g",
33
- q_former_model="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth",
34
  img_size=224,
35
  drop_path_rate=0,
36
  use_grad_checkpoint=False,
@@ -75,7 +75,7 @@ class MiniGPT4(Blip2Base):
75
  for layer in self.Qformer.bert.encoder.layer:
76
  layer.output = None
77
  layer.intermediate = None
78
- self.load_from_pretrained(url_or_filename=q_former_model)
79
 
80
  if freeze_qformer:
81
  for name, param in self.Qformer.named_parameters():
 
30
  def __init__(
31
  self,
32
  vit_model="eva_clip_g",
33
+ q_former_model="blip2_pretrained_flant5xxl.pth",
34
  img_size=224,
35
  drop_path_rate=0,
36
  use_grad_checkpoint=False,
 
75
  for layer in self.Qformer.bert.encoder.layer:
76
  layer.output = None
77
  layer.intermediate = None
78
+ self.load_from_pretrained(cache_dir=q_former_model)
79
 
80
  if freeze_qformer:
81
  for name, param in self.Qformer.named_parameters():