LZHgrla committed
Commit c2af009
1 Parent(s): 8b3a96b

upload adapter

Files changed (4)
  1. README.md +21 -0
  2. adapter_config.json +26 -0
  3. adapter_model.bin +3 -0
  4. xtuner_config.py +200 -0
README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+
+ - PEFT 0.5.0
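
For context, a minimal sketch of loading this adapter for inference with the quantization settings listed in the README above. The `adapter_path` value is a placeholder, not something defined by this commit; point it at this repository's id or a local download of these files.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Recreate the bitsandbytes config listed in the README above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type='nf4',
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16)

# Quantized base model and tokenizer.
base = AutoModelForCausalLM.from_pretrained(
    'internlm/internlm-7b',
    quantization_config=bnb_config,
    trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(
    'internlm/internlm-7b', trust_remote_code=True)

# Attach the LoRA adapter uploaded in this commit.
adapter_path = 'path/or/repo-id-of-this-adapter'  # placeholder, not part of the commit
model = PeftModel.from_pretrained(base, adapter_path)
model.eval()
```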
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "internlm/internlm-7b",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "revision": null,
+   "target_modules": [
+     "down_proj",
+     "k_proj",
+     "gate_proj",
+     "up_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
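
The same adapter settings can be written programmatically with `peft.LoraConfig`; the sketch below simply mirrors the fields of adapter_config.json above.

```python
from peft import LoraConfig

# LoRA settings mirroring adapter_config.json above.
lora_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.1,
    bias='none',
    task_type='CAUSAL_LM',
    target_modules=[
        'down_proj', 'k_proj', 'gate_proj', 'up_proj',
        'v_proj', 'q_proj', 'o_proj'
    ])
```

Passing this config to `peft.get_peft_model` together with the quantized base model reproduces the trainable layout recorded in this adapter.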
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e7f98523409964683c3ab62d8c82320218b47755e17cef4e64fecb14668c0cf
+ size 319977229
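
Because `adapter_model.bin` is stored through Git LFS, the pointer above records only the SHA-256 and byte size of the real weights file. A quick integrity check after download might look like the sketch below; the local filename is an assumption.

```python
import hashlib

# Values copied from the LFS pointer above.
expected_oid = '0e7f98523409964683c3ab62d8c82320218b47755e17cef4e64fecb14668c0cf'
expected_size = 319977229

# Hash the downloaded file in chunks to keep memory use low.
h = hashlib.sha256()
size = 0
with open('adapter_model.bin', 'rb') as f:  # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b''):
        h.update(chunk)
        size += len(chunk)

assert size == expected_size, 'size mismatch'
assert h.hexdigest() == expected_oid, 'sha256 mismatch'
print('adapter_model.bin matches the LFS pointer')
```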
xtuner_config.py ADDED
@@ -0,0 +1,200 @@
+ # Copyright (c) OpenMMLab. All rights reserved.
+ import torch
+ from bitsandbytes.optim import PagedAdamW32bit
+ from datasets import load_dataset
+ from mmengine.dataset import DefaultSampler
+ from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+                             LoggerHook, ParamSchedulerHook)
+ from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+ from peft import LoraConfig
+ from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                           BitsAndBytesConfig)
+
+ from xtuner.dataset import ConcatDataset, process_hf_dataset
+ from xtuner.dataset.collate_fns import default_collate_fn
+ from xtuner.dataset.map_fns import (alpaca_map_fn, alpaca_zh_map_fn,
+                                     template_map_fn_factory)
+ from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+ from xtuner.model import SupervisedFinetune
+ from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+ #######################################################################
+ #                          PART 1  Settings                           #
+ #######################################################################
+ # Model
+ pretrained_model_name_or_path = 'internlm/internlm-7b'
+
+ # Data
+ alpaca_zh_path = 'silk-road/alpaca-data-gpt4-chinese'
+ alpaca_en_path = 'tatsu-lab/alpaca'
+ prompt_template = PROMPT_TEMPLATE.internlm_chat
+ max_length = 2048
+ pack_to_max_length = True
+
+ # Scheduler & Optimizer
+ batch_size = 1  # per_device
+ accumulative_counts = 16
+ dataloader_num_workers = 0
+ max_epochs = 3
+ optim_type = PagedAdamW32bit
+ lr = 2e-4
+ betas = (0.9, 0.999)
+ weight_decay = 0
+ max_norm = 1  # grad clip
+
+ # Evaluate the generation performance during the training
+ evaluation_freq = 500
+ SYSTEM = SYSTEM_TEMPLATE.alpaca
+ evaluation_inputs = [
+     '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+ ]
+
+ #######################################################################
+ #                      PART 2  Model & Tokenizer                      #
+ #######################################################################
+ tokenizer = dict(
+     type=AutoTokenizer.from_pretrained,
+     pretrained_model_name_or_path=pretrained_model_name_or_path,
+     trust_remote_code=True,
+     padding_side='right')
+
+ model = dict(
+     type=SupervisedFinetune,
+     llm=dict(
+         type=AutoModelForCausalLM.from_pretrained,
+         pretrained_model_name_or_path=pretrained_model_name_or_path,
+         trust_remote_code=True,
+         torch_dtype=torch.float16,
+         quantization_config=dict(
+             type=BitsAndBytesConfig,
+             load_in_4bit=True,
+             load_in_8bit=False,
+             llm_int8_threshold=6.0,
+             llm_int8_has_fp16_weight=False,
+             bnb_4bit_compute_dtype=torch.float16,
+             bnb_4bit_use_double_quant=True,
+             bnb_4bit_quant_type='nf4')),
+     lora=dict(
+         type=LoraConfig,
+         r=64,
+         lora_alpha=16,
+         lora_dropout=0.1,
+         bias='none',
+         task_type='CAUSAL_LM'))
+
+ #######################################################################
+ #                     PART 3  Dataset & Dataloader                    #
+ #######################################################################
+ alpaca_en = dict(
+     type=process_hf_dataset,
+     dataset=dict(type=load_dataset, path=alpaca_en_path),
+     tokenizer=tokenizer,
+     max_length=max_length,
+     dataset_map_fn=alpaca_map_fn,
+     template_map_fn=dict(
+         type=template_map_fn_factory, template=prompt_template),
+     remove_unused_columns=True,
+     shuffle_before_pack=True,
+     pack_to_max_length=pack_to_max_length)
+
+ alpaca_zh = dict(
+     type=process_hf_dataset,
+     dataset=dict(type=load_dataset, path=alpaca_zh_path),
+     tokenizer=tokenizer,
+     max_length=max_length,
+     dataset_map_fn=alpaca_zh_map_fn,
+     template_map_fn=dict(
+         type=template_map_fn_factory, template=prompt_template),
+     remove_unused_columns=True,
+     shuffle_before_pack=True,
+     pack_to_max_length=pack_to_max_length)
+
+ train_dataset = dict(
+     type=ConcatDataset,
+     datasets_cfg=dict(alpaca_en=alpaca_en, alpaca_zh=alpaca_zh))
+
+ train_dataloader = dict(
+     batch_size=batch_size,
+     num_workers=dataloader_num_workers,
+     dataset=train_dataset,
+     sampler=dict(type=DefaultSampler, shuffle=True),
+     collate_fn=dict(type=default_collate_fn))
+
+ #######################################################################
+ #                    PART 4  Scheduler & Optimizer                    #
+ #######################################################################
+ # optimizer
+ optim_wrapper = dict(
+     type=AmpOptimWrapper,
+     optimizer=dict(
+         type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+     clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+     accumulative_counts=accumulative_counts,
+     loss_scale='dynamic',
+     dtype='float16')
+
+ # learning policy
+ # More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
+ param_scheduler = dict(
+     type=CosineAnnealingLR,
+     eta_min=lr * 0.1,
+     by_epoch=True,
+     T_max=max_epochs,
+     convert_to_iter_based=True)
+
+ # train, val, test setting
+ train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+ #######################################################################
+ #                           PART 5  Runtime                           #
+ #######################################################################
+ # Log the dialogue periodically during the training process, optional
+ custom_hooks = [
+     dict(type=DatasetInfoHook, tokenizer=tokenizer),
+     dict(
+         type=EvaluateChatHook,
+         tokenizer=tokenizer,
+         every_n_iters=evaluation_freq,
+         evaluation_inputs=evaluation_inputs,
+         system=SYSTEM,
+         prompt_template=prompt_template)
+ ]
+
+ # configure default hooks
+ default_hooks = dict(
+     # record the time of every iteration.
+     timer=dict(type=IterTimerHook),
+     # print log every 10 iterations.
+     logger=dict(type=LoggerHook, interval=10),
+     # enable the parameter scheduler.
+     param_scheduler=dict(type=ParamSchedulerHook),
+     # save checkpoint per epoch.
+     checkpoint=dict(type=CheckpointHook, interval=1),
+     # set sampler seed in distributed environment.
+     sampler_seed=dict(type=DistSamplerSeedHook),
+ )
+
+ # configure environment
+ env_cfg = dict(
+     # whether to enable cudnn benchmark
+     cudnn_benchmark=False,
+     # set multi process parameters
+     mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+     # set distributed parameters
+     dist_cfg=dict(backend='nccl'),
+ )
+
+ # set visualizer
+ visualizer = None
+
+ # set log level
+ log_level = 'INFO'
+
+ # load from which checkpoint
+ load_from = None
+
+ # whether to resume training from the loaded checkpoint
+ resume = False
+
+ # Defaults to use random seed and disable `deterministic`
+ randomness = dict(seed=None, deterministic=False)
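
Configs in this style are plain Python files consumed through mmengine (xtuner's CLI entry points read them the same way, e.g. `xtuner train xtuner_config.py`). A small sanity check, assuming xtuner, mmengine, and the other imported packages are installed locally, might load the file and print a few of the settings defined in PART 1 before launching a run:

```python
from mmengine.config import Config

# Read the pure-Python config the same way xtuner's entry points do.
cfg = Config.fromfile('xtuner_config.py')

# A few of the scalar settings defined above.
print(cfg.pretrained_model_name_or_path)                  # 'internlm/internlm-7b'
print(cfg.max_length, cfg.batch_size * cfg.accumulative_counts)  # 2048, effective batch of 16
print(cfg.max_epochs, cfg.lr)                             # 3, 0.0002
```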