LZHgrla committed on
Commit
d1337c2
1 Parent(s): 2d60ee1

first commit

Browse files
Files changed (2) hide show
  1. epoch_1.pth +3 -0
  2. xtuner_config.py +189 -0
epoch_1.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27a16a7822009b2dfddf2d789741c874b2299137e2385eea4934cb2b3e21e20f
3
+ size 125871521
xtuner_config.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# xtuner / mmengine flat config: LLaVA-style pretraining stage.
# LLM = lmsys/vicuna-13b-v1.5 (4-bit quantized, frozen) paired with a frozen
# CLIP ViT-L/14-336 visual encoder; per this setup only the projector is
# trainable (freeze_llm=True, freeze_visual_encoder=True below).
# Dataset: LLaVA-Pretrain blip_laion_cc_sbu_558k. Trains 1 epoch with
# DeepSpeed ZeRO-2 under the mmengine FlexibleRunner.
# NOTE(review): reconstructed from a diff-mangled dump (interleaved gutter
# line numbers and '+' prefixes removed); every name and value is preserved
# verbatim from the original config.

# ---- prompt / system message ----
SYSTEM = ''
prompt_template = 'xtuner.utils.PROMPT_TEMPLATE.vicuna'

# ---- optimization hyper-parameters ----
accumulative_counts = 1
batch_size = 32          # per-GPU micro batch size
betas = (
    0.9,
    0.999,
)
lr = 0.001
max_epochs = 1
max_length = 1472        # max tokenized sequence length
max_norm = 1             # gradient clipping norm
weight_decay = 0
warmup_ratio = 0.03

# ---- model / data identifiers ----
llm_name_or_path = 'lmsys/vicuna-13b-v1.5'
visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336'
data_root = './data/llava_data/'
data_path = './data/llava_data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json'
image_folder = './data/llava_data/LLaVA-Pretrain/images'
dataloader_num_workers = 0

# ---- shared component configs (built lazily by mmengine from 'type') ----
tokenizer = dict(
    padding_side='right',
    pretrained_model_name_or_path='lmsys/vicuna-13b-v1.5',
    trust_remote_code=True,
    type='transformers.AutoTokenizer.from_pretrained')
image_processor = dict(
    pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
    trust_remote_code=True,
    type='transformers.CLIPImageProcessor.from_pretrained')

# ---- periodic chat evaluation during training ----
evaluation_freq = 500
evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg'
evaluation_inputs = [
    '请描述一下这张照片',
    'Please describe this picture',
]

custom_hooks = [
    # Logs dataset statistics once tokenization is set up.
    dict(
        tokenizer=dict(
            padding_side='right',
            pretrained_model_name_or_path='lmsys/vicuna-13b-v1.5',
            trust_remote_code=True,
            type='transformers.AutoTokenizer.from_pretrained'),
        type='xtuner.engine.DatasetInfoHook'),
    # Generates sample chat responses every `every_n_iters` steps.
    dict(
        evaluation_images='https://llava-vl.github.io/static/images/view.jpg',
        evaluation_inputs=[
            '请描述一下这张照片',
            'Please describe this picture',
        ],
        every_n_iters=500,
        image_processor=dict(
            pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
            trust_remote_code=True,
            type='transformers.CLIPImageProcessor.from_pretrained'),
        prompt_template='xtuner.utils.PROMPT_TEMPLATE.vicuna',
        system='',
        tokenizer=dict(
            padding_side='right',
            pretrained_model_name_or_path='lmsys/vicuna-13b-v1.5',
            trust_remote_code=True,
            type='transformers.AutoTokenizer.from_pretrained'),
        type='xtuner.engine.EvaluateChatHook'),
]

# ---- dataset ----
llava_dataset = dict(
    data_path='./data/llava_data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json',
    dataset_map_fn='xtuner.dataset.map_fns.llava_map_fn',
    image_folder='./data/llava_data/LLaVA-Pretrain/images',
    image_processor=dict(
        pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
        trust_remote_code=True,
        type='transformers.CLIPImageProcessor.from_pretrained'),
    max_length=1472,
    pad_image_to_square=False,
    template_map_fn=dict(
        template='xtuner.utils.PROMPT_TEMPLATE.vicuna',
        type='xtuner.dataset.map_fns.template_map_fn_factory'),
    tokenizer=dict(
        padding_side='right',
        pretrained_model_name_or_path='lmsys/vicuna-13b-v1.5',
        trust_remote_code=True,
        type='transformers.AutoTokenizer.from_pretrained'),
    type='xtuner.dataset.LLaVADataset')

train_dataloader = dict(
    batch_size=32,
    collate_fn=dict(type='xtuner.dataset.collate_fns.default_collate_fn'),
    dataset=dict(
        data_path=
        './data/llava_data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json',
        dataset_map_fn='xtuner.dataset.map_fns.llava_map_fn',
        image_folder='./data/llava_data/LLaVA-Pretrain/images',
        image_processor=dict(
            pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
            trust_remote_code=True,
            type='transformers.CLIPImageProcessor.from_pretrained'),
        max_length=1472,
        pad_image_to_square=False,
        template_map_fn=dict(
            template='xtuner.utils.PROMPT_TEMPLATE.vicuna',
            type='xtuner.dataset.map_fns.template_map_fn_factory'),
        tokenizer=dict(
            padding_side='right',
            pretrained_model_name_or_path='lmsys/vicuna-13b-v1.5',
            trust_remote_code=True,
            type='transformers.AutoTokenizer.from_pretrained'),
        type='xtuner.dataset.LLaVADataset'),
    num_workers=0,
    sampler=dict(shuffle=True, type='mmengine.dataset.DefaultSampler'))

# ---- model: frozen 4-bit LLM + frozen CLIP encoder ----
model = dict(
    freeze_llm=True,
    freeze_visual_encoder=True,
    llm=dict(
        pretrained_model_name_or_path='lmsys/vicuna-13b-v1.5',
        quantization_config=dict(
            bnb_4bit_compute_dtype='torch.float16',
            bnb_4bit_quant_type='nf4',
            bnb_4bit_use_double_quant=True,
            llm_int8_has_fp16_weight=False,
            llm_int8_threshold=6.0,
            load_in_4bit=True,
            load_in_8bit=False,
            type='transformers.BitsAndBytesConfig'),
        torch_dtype='torch.float16',
        trust_remote_code=True,
        type='transformers.AutoModelForCausalLM.from_pretrained'),
    type='xtuner.model.LLaVAModel',
    visual_encoder=dict(
        pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
        type='transformers.CLIPVisionModel.from_pretrained'))

# ---- optimizer & LR schedule ----
optim_type = 'torch.optim.AdamW'
optim_wrapper = dict(
    optimizer=dict(
        betas=(
            0.9,
            0.999,
        ),
        lr=0.001,
        type='torch.optim.AdamW',
        weight_decay=0),
    type='DeepSpeedOptimWrapper')

param_scheduler = [
    # Linear warmup over the first `warmup_ratio` of the run.
    dict(
        begin=0,
        by_epoch=True,
        convert_to_iter_based=True,
        end=0.03,
        start_factor=1e-05,
        type='mmengine.optim.LinearLR'),
    # Cosine decay to zero for the remainder.
    dict(
        T_max=1,
        begin=0.03,
        by_epoch=True,
        convert_to_iter_based=True,
        eta_min=0.0,
        type='mmengine.optim.CosineAnnealingLR'),
]

# ---- runtime / runner ----
default_hooks = dict(
    checkpoint=dict(interval=1, type='mmengine.hooks.CheckpointHook'),
    logger=dict(interval=10, type='mmengine.hooks.LoggerHook'),
    param_scheduler=dict(type='mmengine.hooks.ParamSchedulerHook'),
    sampler_seed=dict(type='mmengine.hooks.DistSamplerSeedHook'),
    timer=dict(type='mmengine.hooks.IterTimerHook'))
env_cfg = dict(
    cudnn_benchmark=False,
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
launcher = 'pytorch'
load_from = None
log_level = 'INFO'
randomness = dict(deterministic=False, seed=None)
resume = False
runner_type = 'FlexibleRunner'

# ---- DeepSpeed ZeRO-2 strategy (bf16) ----
strategy = dict(
    config=dict(
        bf16=dict(enabled=True),
        fp16=dict(enabled=False, initial_scale_power=16),
        gradient_accumulation_steps='auto',
        gradient_clipping='auto',
        train_micro_batch_size_per_gpu='auto',
        zero_allow_untested_optimizer=True,
        zero_force_ds_cpu_optimizer=False,
        zero_optimization=dict(overlap_comm=True, stage=2)),
    exclude_frozen_parameters=True,
    gradient_accumulation_steps=1,
    gradient_clipping=1,
    train_micro_batch_size_per_gpu=32,
    type='xtuner.engine.DeepSpeedStrategy')

train_cfg = dict(by_epoch=True, max_epochs=1, val_interval=1)
visualizer = None
work_dir = './work_dirs/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain'