LZHgrla committed
Commit d86310d
Parent: 6cd96ac

first commit

Files changed (2):
  1. epoch_1.pth +3 -0
  2. xtuner_config.py +187 -0
epoch_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0a53fea99a12930a0518199ffd5f1703f24b0f086204b559f032ebbbe7561d2
+ size 83920363
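
epoch_1.pth is stored as a Git LFS pointer, so the ~84 MB payload must be fetched (e.g. with git lfs pull) before it can be loaded. The small size relative to a 7B-parameter model is consistent with exclude_frozen_parameters=True in the config below: only the trainable projector weights are saved. A minimal inspection sketch, assuming the fetched checkpoint deserializes as an ordinary PyTorch object:

import torch

# Sketch only: the exact layout depends on how xtuner/mmengine saved it.
state = torch.load('epoch_1.pth', map_location='cpu')
if isinstance(state, dict):
    # mmengine checkpoints usually nest the weights under 'state_dict'.
    inner = state.get('state_dict', state)
    for name, value in list(inner.items())[:5]:
        print(name, getattr(value, 'shape', type(value)))
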
xtuner_config.py ADDED
@@ -0,0 +1,187 @@
+ SYSTEM = ''
+ accumulative_counts = 1
+ batch_size = 32
+ betas = (
+     0.9,
+     0.999,
+ )
+ custom_hooks = [
+     dict(
+         tokenizer=dict(
+             padding_side='right',
+             pretrained_model_name_or_path='lmsys/vicuna-7b-v1.5',
+             trust_remote_code=True,
+             type='transformers.AutoTokenizer.from_pretrained'),
+         type='xtuner.engine.DatasetInfoHook'),
+     dict(
+         evaluation_images='https://llava-vl.github.io/static/images/view.jpg',
+         evaluation_inputs=[
+             '',
+         ],
+         every_n_iters=500,
+         processor=dict(
+             pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
+             trust_remote_code=True,
+             type='transformers.CLIPImageProcessor.from_pretrained'),
+         prompt_template='xtuner.utils.PROMPT_TEMPLATE.vicuna',
+         system='',
+         tokenizer=dict(
+             padding_side='right',
+             pretrained_model_name_or_path='lmsys/vicuna-7b-v1.5',
+             trust_remote_code=True,
+             type='transformers.AutoTokenizer.from_pretrained'),
+         type='xtuner.engine.EvaluateChatHook'),
+ ]
+ data_path = './data/llava_data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json'
+ dataloader_num_workers = 0
+ default_hooks = dict(
+     checkpoint=dict(interval=1, type='mmengine.hooks.CheckpointHook'),
+     logger=dict(interval=10, type='mmengine.hooks.LoggerHook'),
+     param_scheduler=dict(type='mmengine.hooks.ParamSchedulerHook'),
+     sampler_seed=dict(type='mmengine.hooks.DistSamplerSeedHook'),
+     timer=dict(type='mmengine.hooks.IterTimerHook'))
+ env_cfg = dict(
+     cudnn_benchmark=False,
+     dist_cfg=dict(backend='nccl'),
+     mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
+ evaluation_freq = 500
+ evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg'
+ evaluation_inputs = [
+     '',
+ ]
+ image_folder = './data/llava_data/LLaVA-Pretrain/images'
+ launcher = 'pytorch'
+ llava_data_root = './data/llava_data/'
+ llava_dataset = dict(
+     data_path='./data/llava_data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json',
+     dataset_map_fn='xtuner.dataset.map_fns.llava_map_fn',
+     image_folder='./data/llava_data/LLaVA-Pretrain/images',
+     max_length=1472,
+     pad_image_to_square=False,
+     processor=dict(
+         pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
+         trust_remote_code=True,
+         type='transformers.CLIPImageProcessor.from_pretrained'),
+     template_map_fn=dict(
+         template='xtuner.utils.PROMPT_TEMPLATE.vicuna',
+         type='xtuner.dataset.map_fns.template_map_fn_factory'),
+     tokenizer=dict(
+         padding_side='right',
+         pretrained_model_name_or_path='lmsys/vicuna-7b-v1.5',
+         trust_remote_code=True,
+         type='transformers.AutoTokenizer.from_pretrained'),
+     type='xtuner.dataset.LLaVADataset')
+ llm_name_or_path = 'lmsys/vicuna-7b-v1.5'
+ load_from = None
+ log_level = 'INFO'
+ lr = 0.001
+ max_epochs = 1
+ max_length = 1472
+ max_norm = 1
+ model = dict(
+     freeze_llm=True,
+     freeze_visual_encoder=True,
+     llm=dict(
+         pretrained_model_name_or_path='lmsys/vicuna-7b-v1.5',
+         quantization_config=dict(
+             bnb_4bit_compute_dtype='torch.float16',
+             bnb_4bit_quant_type='nf4',
+             bnb_4bit_use_double_quant=True,
+             llm_int8_has_fp16_weight=False,
+             llm_int8_threshold=6.0,
+             load_in_4bit=True,
+             load_in_8bit=False,
+             type='transformers.BitsAndBytesConfig'),
+         torch_dtype='torch.float16',
+         trust_remote_code=True,
+         type='transformers.AutoModelForCausalLM.from_pretrained'),
+     type='xtuner.model.LLaVAModel',
+     visual_encoder=dict(
+         pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
+         type='transformers.CLIPVisionModel.from_pretrained'))
+ optim_type = 'torch.optim.AdamW'
+ optim_wrapper = dict(
+     optimizer=dict(
+         betas=(
+             0.9,
+             0.999,
+         ),
+         lr=0.001,
+         type='torch.optim.AdamW',
+         weight_decay=0),
+     type='DeepSpeedOptimWrapper')
+ param_scheduler = [
+     dict(
+         begin=0,
+         by_epoch=True,
+         convert_to_iter_based=True,
+         end=0.03,
+         start_factor=1e-05,
+         type='mmengine.optim.LinearLR'),
+     dict(
+         T_max=1,
+         begin=0.03,
+         by_epoch=True,
+         convert_to_iter_based=True,
+         eta_min=0.0,
+         type='mmengine.optim.CosineAnnealingLR'),
+ ]
+ processor = dict(
+     pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
+     trust_remote_code=True,
+     type='transformers.CLIPImageProcessor.from_pretrained')
+ prompt_template = 'xtuner.utils.PROMPT_TEMPLATE.vicuna'
+ randomness = dict(deterministic=False, seed=None)
+ resume = False
+ runner_type = 'FlexibleRunner'
+ strategy = dict(
+     config=dict(
+         bf16=dict(enabled=True),
+         fp16=dict(enabled=False, initial_scale_power=16),
+         gradient_accumulation_steps='auto',
+         gradient_clipping='auto',
+         train_micro_batch_size_per_gpu='auto',
+         zero_allow_untested_optimizer=True,
+         zero_force_ds_cpu_optimizer=False,
+         zero_optimization=dict(overlap_comm=True, stage=2)),
+     exclude_frozen_parameters=True,
+     gradient_accumulation_steps=1,
+     gradient_clipping=1,
+     train_micro_batch_size_per_gpu=32,
+     type='xtuner.engine.DeepSpeedStrategy')
+ tokenizer = dict(
+     padding_side='right',
+     pretrained_model_name_or_path='lmsys/vicuna-7b-v1.5',
+     trust_remote_code=True,
+     type='transformers.AutoTokenizer.from_pretrained')
+ train_cfg = dict(by_epoch=True, max_epochs=1, val_interval=1)
+ train_dataloader = dict(
+     batch_size=32,
+     collate_fn=dict(type='xtuner.dataset.collate_fns.default_collate_fn'),
+     dataset=dict(
+         data_path=
+         './data/llava_data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json',
+         dataset_map_fn='xtuner.dataset.map_fns.llava_map_fn',
+         image_folder='./data/llava_data/LLaVA-Pretrain/images',
+         max_length=1472,
+         pad_image_to_square=False,
+         processor=dict(
+             pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
+             trust_remote_code=True,
+             type='transformers.CLIPImageProcessor.from_pretrained'),
+         template_map_fn=dict(
+             template='xtuner.utils.PROMPT_TEMPLATE.vicuna',
+             type='xtuner.dataset.map_fns.template_map_fn_factory'),
+         tokenizer=dict(
+             padding_side='right',
+             pretrained_model_name_or_path='lmsys/vicuna-7b-v1.5',
+             trust_remote_code=True,
+             type='transformers.AutoTokenizer.from_pretrained'),
+         type='xtuner.dataset.LLaVADataset'),
+     num_workers=0,
+     sampler=dict(shuffle=True, type='mmengine.dataset.DefaultSampler'))
+ visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336'
+ visualizer = None
+ warmup_ratio = 0.03
+ weight_decay = 0
+ work_dir = './work_dirs/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain'
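
The file above is an mmengine-style dump of xtuner's LLaVA-v1.5 pretraining recipe: Vicuna-7B is frozen and loaded in 4-bit NF4, the CLIP ViT-L/14-336 encoder is frozen, and only the projector is trained under DeepSpeed ZeRO-2. A minimal sketch of how such a dump can be re-run, assuming xtuner, mmengine, transformers, bitsandbytes, and deepspeed are installed (the `xtuner train <config>` CLI is the usual entry point, and launcher = 'pytorch' means torchrun-style environment variables are expected):

from mmengine.config import Config
from mmengine.registry import RUNNERS

cfg = Config.fromfile('xtuner_config.py')
# RUNNERS.build reads runner_type = 'FlexibleRunner' from the config and
# passes the DeepSpeedStrategy dict through to it.
runner = RUNNERS.build(cfg)
runner.train()

Rough schedule arithmetic (assuming ~558k samples per the JSON file name and 8 GPUs per the work_dir name): 558000 / (32 * 8) ≈ 2180 optimizer steps in the single epoch, so LinearLR warms the learning rate up over the first ~0.03 * 2180 ≈ 65 steps and CosineAnnealingLR then decays it to eta_min=0.0 over the remainder.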