LZHgrla committed on
Commit a6577d1
1 Parent(s): d8a745f

Delete xtuner_config.py

Files changed (1)
  1. xtuner_config.py +0 -187
xtuner_config.py DELETED
@@ -1,187 +0,0 @@
-SYSTEM = ''
-accumulative_counts = 1
-batch_size = 32
-betas = (
-    0.9,
-    0.999,
-)
-custom_hooks = [
-    dict(
-        tokenizer=dict(
-            padding_side='right',
-            pretrained_model_name_or_path='internlm/internlm-chat-7b',
-            trust_remote_code=True,
-            type='transformers.AutoTokenizer.from_pretrained'),
-        type='xtuner.engine.DatasetInfoHook'),
-    dict(
-        evaluation_images='https://llava-vl.github.io/static/images/view.jpg',
-        evaluation_inputs=[
-            '',
-        ],
-        every_n_iters=500,
-        image_processor=dict(
-            pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
-            trust_remote_code=True,
-            type='transformers.CLIPImageProcessor.from_pretrained'),
-        prompt_template='xtuner.utils.PROMPT_TEMPLATE.internlm_chat',
-        system='',
-        tokenizer=dict(
-            padding_side='right',
-            pretrained_model_name_or_path='internlm/internlm-chat-7b',
-            trust_remote_code=True,
-            type='transformers.AutoTokenizer.from_pretrained'),
-        type='xtuner.engine.EvaluateChatHook'),
-]
-data_path = './data/llava_data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json'
-dataloader_num_workers = 0
-default_hooks = dict(
-    checkpoint=dict(interval=1, type='mmengine.hooks.CheckpointHook'),
-    logger=dict(interval=10, type='mmengine.hooks.LoggerHook'),
-    param_scheduler=dict(type='mmengine.hooks.ParamSchedulerHook'),
-    sampler_seed=dict(type='mmengine.hooks.DistSamplerSeedHook'),
-    timer=dict(type='mmengine.hooks.IterTimerHook'))
-env_cfg = dict(
-    cudnn_benchmark=False,
-    dist_cfg=dict(backend='nccl'),
-    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
-evaluation_freq = 500
-evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg'
-evaluation_inputs = [
-    '',
-]
-image_folder = './data/llava_data/LLaVA-Pretrain/images'
-launcher = 'pytorch'
-llava_data_root = './data/llava_data/'
-llava_dataset = dict(
-    data_path='./data/llava_data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json',
-    dataset_map_fn='xtuner.dataset.map_fns.llava_map_fn',
-    image_folder='./data/llava_data/LLaVA-Pretrain/images',
-    max_length=1472,
-    pad_image_to_square=False,
-    image_processor=dict(
-        pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
-        trust_remote_code=True,
-        type='transformers.CLIPImageProcessor.from_pretrained'),
-    template_map_fn=dict(
-        template='xtuner.utils.PROMPT_TEMPLATE.internlm_chat',
-        type='xtuner.dataset.map_fns.template_map_fn_factory'),
-    tokenizer=dict(
-        padding_side='right',
-        pretrained_model_name_or_path='internlm/internlm-chat-7b',
-        trust_remote_code=True,
-        type='transformers.AutoTokenizer.from_pretrained'),
-    type='xtuner.dataset.LLaVADataset')
-llm_name_or_path = 'internlm/internlm-chat-7b'
-load_from = None
-log_level = 'INFO'
-lr = 0.001
-max_epochs = 1
-max_length = 1472
-max_norm = 1
-model = dict(
-    freeze_llm=True,
-    freeze_visual_encoder=True,
-    llm=dict(
-        pretrained_model_name_or_path='internlm/internlm-chat-7b',
-        quantization_config=dict(
-            bnb_4bit_compute_dtype='torch.float16',
-            bnb_4bit_quant_type='nf4',
-            bnb_4bit_use_double_quant=True,
-            llm_int8_has_fp16_weight=False,
-            llm_int8_threshold=6.0,
-            load_in_4bit=True,
-            load_in_8bit=False,
-            type='transformers.BitsAndBytesConfig'),
-        torch_dtype='torch.float16',
-        trust_remote_code=True,
-        type='transformers.AutoModelForCausalLM.from_pretrained'),
-    type='xtuner.model.LLaVAModel',
-    visual_encoder=dict(
-        pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
-        type='transformers.CLIPVisionModel.from_pretrained'))
-optim_type = 'torch.optim.AdamW'
-optim_wrapper = dict(
-    optimizer=dict(
-        betas=(
-            0.9,
-            0.999,
-        ),
-        lr=0.001,
-        type='torch.optim.AdamW',
-        weight_decay=0),
-    type='DeepSpeedOptimWrapper')
-param_scheduler = [
-    dict(
-        begin=0,
-        by_epoch=True,
-        convert_to_iter_based=True,
-        end=0.03,
-        start_factor=1e-05,
-        type='mmengine.optim.LinearLR'),
-    dict(
-        T_max=1,
-        begin=0.03,
-        by_epoch=True,
-        convert_to_iter_based=True,
-        eta_min=0.0,
-        type='mmengine.optim.CosineAnnealingLR'),
-]
-image_processor = dict(
-    pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
-    trust_remote_code=True,
-    type='transformers.CLIPImageProcessor.from_pretrained')
-prompt_template = 'xtuner.utils.PROMPT_TEMPLATE.internlm_chat'
-randomness = dict(deterministic=False, seed=None)
-resume = False
-runner_type = 'FlexibleRunner'
-strategy = dict(
-    config=dict(
-        bf16=dict(enabled=True),
-        fp16=dict(enabled=False, initial_scale_power=16),
-        gradient_accumulation_steps='auto',
-        gradient_clipping='auto',
-        train_micro_batch_size_per_gpu='auto',
-        zero_allow_untested_optimizer=True,
-        zero_force_ds_cpu_optimizer=False,
-        zero_optimization=dict(overlap_comm=True, stage=2)),
-    exclude_frozen_parameters=True,
-    gradient_accumulation_steps=1,
-    gradient_clipping=1,
-    train_micro_batch_size_per_gpu=32,
-    type='xtuner.engine.DeepSpeedStrategy')
-tokenizer = dict(
-    padding_side='right',
-    pretrained_model_name_or_path='internlm/internlm-chat-7b',
-    trust_remote_code=True,
-    type='transformers.AutoTokenizer.from_pretrained')
-train_cfg = dict(by_epoch=True, max_epochs=1, val_interval=1)
-train_dataloader = dict(
-    batch_size=32,
-    collate_fn=dict(type='xtuner.dataset.collate_fns.default_collate_fn'),
-    dataset=dict(
-        data_path=
-        './data/llava_data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json',
-        dataset_map_fn='xtuner.dataset.map_fns.llava_map_fn',
-        image_folder='./data/llava_data/LLaVA-Pretrain/images',
-        max_length=1472,
-        pad_image_to_square=False,
-        image_processor=dict(
-            pretrained_model_name_or_path='openai/clip-vit-large-patch14-336',
-            trust_remote_code=True,
-            type='transformers.CLIPImageProcessor.from_pretrained'),
-        template_map_fn=dict(
-            template='xtuner.utils.PROMPT_TEMPLATE.internlm_chat',
-            type='xtuner.dataset.map_fns.template_map_fn_factory'),
-        tokenizer=dict(
-            padding_side='right',
-            pretrained_model_name_or_path='internlm/internlm-chat-7b',
-            trust_remote_code=True,
-            type='transformers.AutoTokenizer.from_pretrained'),
-        type='xtuner.dataset.LLaVADataset'),
-    num_workers=0,
-    sampler=dict(shuffle=True, type='mmengine.dataset.DefaultSampler'))
-visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336'
-visualizer = None
-warmup_ratio = 0.03
-weight_decay = 0
-work_dir = './work_dirs/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain'
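
For reference, the file removed here is an MMEngine-style Python config: every value is a literal, so it can be parsed and inspected without importing xtuner or transformers. A minimal sketch of how such a config is read (not part of this commit; it assumes mmengine is installed and that a local copy of xtuner_config.py still exists):

    from mmengine.config import Config

    # Parse the plain-Python config file; Config supports attribute access
    # on top-level keys and nested dicts.
    cfg = Config.fromfile('xtuner_config.py')  # hypothetical local copy
    print(cfg.batch_size)  # 32
    print(cfg.lr)          # 0.001
    print(cfg.model.type)  # 'xtuner.model.LLaVAModel'

In practice, an XTuner config like this one is launched end-to-end with XTuner's CLI (xtuner train xtuner_config.py), which builds the FlexibleRunner and the DeepSpeed ZeRO-2 strategy declared in the config.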