LZHgrla committed
Commit cab2f72 (1 parent: 2b2877f)

Update xtuner_config.py

Files changed (1)
  1. xtuner_config.py +4 -4
xtuner_config.py CHANGED
@@ -19,7 +19,7 @@ custom_hooks = [
             '',
         ],
         every_n_iters=500,
-        processor=dict(
+        image_processor=dict(
             pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
             trust_remote_code=True,
             type='transformers.CLIPImageProcessor.from_pretrained'),
@@ -58,7 +58,7 @@ llava_dataset = dict(
     image_folder='./data/llava_data/LLaVA-Pretrain/images',
     max_length=1472,
     pad_image_to_square=False,
-    processor=dict(
+    image_processor=dict(
         pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
         trust_remote_code=True,
         type='transformers.CLIPImageProcessor.from_pretrained'),
@@ -126,7 +126,7 @@ param_scheduler = [
         eta_min=0.0,
         type='mmengine.optim.CosineAnnealingLR'),
 ]
-processor = dict(
+image_processor = dict(
     pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
     trust_remote_code=True,
     type='transformers.CLIPImageProcessor.from_pretrained')
@@ -165,7 +165,7 @@ train_dataloader = dict(
         image_folder='./data/llava_data/LLaVA-Pretrain/images',
         max_length=1472,
         pad_image_to_square=False,
-        processor=dict(
+        image_processor=dict(
            pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
            trust_remote_code=True,
            type='transformers.CLIPImageProcessor.from_pretrained'),
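For reference, a minimal sketch (not part of the commit) of what each renamed image_processor entry expands to at build time. The assumption here is the usual MMEngine/XTuner convention that a dict with type='transformers.CLIPImageProcessor.from_pretrained' is built by calling that function with the remaining keys as keyword arguments:

# Minimal sketch mirroring the image_processor dict in this diff; the
# builder behaviour described above is an assumption, not code from the commit.
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained(
    'openai/clip-vit-large-patch14-336',  # pretrained_model_name_or_path
    trust_remote_code=True,
)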