LZHgrla committed
Commit 6c7dcbc (parent: 71a2875)

Update xtuner_config.py

Files changed (1): xtuner_config.py (+4 -4)
xtuner_config.py CHANGED
@@ -20,7 +20,7 @@ custom_hooks = [
             'Please describe this picture',
         ],
         every_n_iters=500,
-        processor=dict(
+        image_processor=dict(
             pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
             trust_remote_code=True,
             type='transformers.CLIPImageProcessor.from_pretrained'),
@@ -60,7 +60,7 @@ llava_dataset = dict(
     image_folder='./data/llava_data/llava_images',
     max_length=1472,
     pad_image_to_square=True,
-    processor=dict(
+    image_processor=dict(
         pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
         trust_remote_code=True,
         type='transformers.CLIPImageProcessor.from_pretrained'),
@@ -144,7 +144,7 @@ param_scheduler = [
         type='mmengine.optim.CosineAnnealingLR'),
 ]
 pretrained_pth = './work_dirs/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain/epoch_1.pth'
-processor = dict(
+image_processor = dict(
     pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
     trust_remote_code=True,
     type='transformers.CLIPImageProcessor.from_pretrained')
@@ -183,7 +183,7 @@ train_dataloader = dict(
         image_folder='./data/llava_data/llava_images',
         max_length=1472,
         pad_image_to_square=True,
-        processor=dict(
+        image_processor=dict(
             pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
             trust_remote_code=True,
             type='transformers.CLIPImageProcessor.from_pretrained'),
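All four renamed entries describe the same image processor. As a minimal sketch of what each `image_processor=dict(...)` block builds, assuming xtuner's config builder resolves the dotted `type` string to the named classmethod and forwards the remaining keys as keyword arguments:

from transformers import CLIPImageProcessor

# Assumption: the config builder maps
#   type='transformers.CLIPImageProcessor.from_pretrained'
# to the classmethod below and passes the other dict keys as kwargs.
image_processor = CLIPImageProcessor.from_pretrained(
    'openai/clip-vit-large-patch14-336',  # pretrained_model_name_or_path
    trust_remote_code=True,
)

The commit changes only the key name (`processor` -> `image_processor`); the object that gets built is unchanged.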