LZHgrla committed on
Commit a63f570
1 Parent(s): 1234646

upload adapter

Files changed (4)
  1. README.md +43 -0
  2. adapter_config.json +26 -0
  3. adapter_model.bin +3 -0
  4. xtuner_config.py +195 -0
README.md ADDED
@@ -0,0 +1,43 @@
+ ---
+ library_name: peft
+ pipeline_tag: conversational
+ base_model: internlm/internlm-20b
+ ---
+
+ <div align="center">
+ <img src="https://github.com/InternLM/lmdeploy/assets/36994684/0cf8d00f-e86b-40ba-9b54-dc8f1bc6c8d8" width="600"/>
+
+
+ [![Generic badge](https://img.shields.io/badge/GitHub-%20XTuner-black.svg)](https://github.com/InternLM/xtuner)
+
+
+ </div>
+
+ ## Model
+
+ internlm-20b-qlora-msagent-react is fine-tuned from [InternLM-20B](https://huggingface.co/internlm/internlm-20b) on the [MSAgent-Bench](https://modelscope.cn/datasets/damo/MSAgent-Bench) dataset using [XTuner](https://github.com/InternLM/xtuner).
+
+
+ ## Quickstart
+
+ ### Usage with XTuner CLI
+
+ #### Installation
+
+ ```shell
+ pip install xtuner
+ ```
+
+ #### Chat
+
+ ```shell
+ xtuner chat internlm/internlm-20b --adapter xtuner/internlm-20b-qlora-msagent-react --lagent
+ ```
+
+ #### Fine-tune
+
+ Use the following command to quickly reproduce the fine-tuning results.
+
+ ```shell
+ NPROC_PER_NODE=8 xtuner train internlm_20b_qlora_msagent_react_e3_gpu8
+ ```
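
For readers who prefer plain `transformers` + `peft` over the XTuner CLI, the snippet below is a minimal sketch (not part of this commit) of attaching the adapter to the base model. The 4-bit options simply mirror the QLoRA settings in `xtuner_config.py` and can be dropped if memory permits loading the base model in fp16.

```python
# Minimal sketch (not part of this commit): attach the adapter with plain
# transformers + peft instead of the XTuner CLI.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

base = "internlm/internlm-20b"
adapter = "xtuner/internlm-20b-qlora-msagent-react"

tokenizer = AutoTokenizer.from_pretrained(base, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    base,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    quantization_config=BitsAndBytesConfig(       # optional 4-bit loading,
        load_in_4bit=True,                        # mirroring the QLoRA config
        bnb_4bit_compute_dtype=torch.float16,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4",
    ),
    device_map="auto",
)
model = PeftModel.from_pretrained(model, adapter)  # apply the LoRA weights
model.eval()
```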
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "/140/InternLM/20B/internlm-20b-base",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "down_proj",
+     "q_proj",
+     "up_proj",
+     "gate_proj",
+     "v_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
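
A note on `adapter_config.json`: `base_model_name_or_path` records the local path used on the training machine, so downstream loaders should pass the Hub base model explicitly rather than relying on that field. A minimal sketch (not part of this commit) for inspecting the shipped config with `peft`:

```python
# Minimal sketch (not part of this commit): read the shipped adapter config.
from peft import PeftConfig

cfg = PeftConfig.from_pretrained("xtuner/internlm-20b-qlora-msagent-react")
print(cfg.peft_type, cfg.r, cfg.lora_alpha)  # e.g. PeftType.LORA 64 16
print(sorted(cfg.target_modules))            # the seven attention/MLP projections
print(cfg.base_model_name_or_path)           # local path recorded at training time
```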
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad0cff2f1efe74a1bf77e6667183b40cf547b1e4ed2490ef3529ca0091324989
+ size 751345965
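
`adapter_model.bin` is stored via Git LFS; the pointer above carries the expected SHA-256 and byte size. An optional sketch (not part of this commit) for verifying a downloaded copy against that pointer:

```python
# Optional integrity check: compare a downloaded adapter_model.bin against the
# oid/size recorded in the LFS pointer above.
import hashlib
import os

EXPECTED_SHA256 = "ad0cff2f1efe74a1bf77e6667183b40cf547b1e4ed2490ef3529ca0091324989"
EXPECTED_SIZE = 751345965

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

path = "adapter_model.bin"
assert os.path.getsize(path) == EXPECTED_SIZE
assert sha256_of(path) == EXPECTED_SHA256
```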
xtuner_config.py ADDED
@@ -0,0 +1,195 @@
+ # Copyright (c) OpenMMLab. All rights reserved.
+ import torch
+ from bitsandbytes.optim import PagedAdamW32bit
+ from mmengine.dataset import DefaultSampler
+ from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+                             LoggerHook, ParamSchedulerHook)
+ from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+ from modelscope.msdatasets import MsDataset
+ from peft import LoraConfig
+ from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                           BitsAndBytesConfig)
+
+ from xtuner.dataset import process_ms_dataset
+ from xtuner.dataset.collate_fns import default_collate_fn
+ from xtuner.dataset.map_fns import msagent_react_map_fn
+ from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+ from xtuner.model import SupervisedFinetune
+
+ #######################################################################
+ #                          PART 1  Settings                           #
+ #######################################################################
+ # Model
+ pretrained_model_name_or_path = '/140/InternLM/20B/internlm-20b-base'
+
+ # Data
+ data_path = 'damo/MSAgent-Bench'
+ max_length = 2048
+ pack_to_max_length = False
+
+ # Scheduler & Optimizer
+ batch_size = 8  # per_device
+ accumulative_counts = 1
+ dataloader_num_workers = 2
+ max_epochs = 3
+ optim_type = PagedAdamW32bit
+ lr = 2e-4
+ betas = (0.9, 0.999)
+ weight_decay = 0
+ max_norm = 1  # grad clip
+
+ # Evaluate the generation performance during the training
+ evaluation_freq = 500
+ evaluation_inputs = [
+     ('<|System|>:你是一个可以调用外部工具的助手,可以使用的工具包括:\n'
+      "{\'GoogleSearch\': \'一个可以从谷歌搜索结果的API。\\n"
+      '当你需要对于一个特定问题找到简短明了的回答时,可以使用它。\\n'
+      "输入应该是一个搜索查询。\\n\\n输入参数:{'query': '搜索查询'}\',"
+      "\'PythonInterpreter\': \"用来执行Python代码。代码必须是一个函数,\\n"
+      "函数名必须得是 \'solution\',代码对应你的思考过程。代码实例格式如下:\\n"
+      '```python\\n# import 依赖包\\nimport xxx\\ndef solution():'
+      '\\n # 初始化一些变量\\n variable_names_with_real_meaning = xxx'
+      '\\n # 步骤一\\n mid_variable = func(variable_names_with_real_meaning)'
+      '\\n # 步骤 x\\n mid_variable = func(mid_variable)\\n # 最后结果'
+      '\\n final_answer = func(mid_variable)\\n return final_answer'
+      "\\n```\\n输入参数:{'command': '需要执行的代码'}\"}\n"
+      '如果使用工具请遵循以下格式回复:\n```\n'
+      'Thought:思考你当前步骤需要解决什么问题,是否需要使用工具\n'
+      "Action:工具名称,你的工具必须从 [[\'GoogleSearch\', \'PythonInterpreter\']] 选择"
+      '\nAction Input:工具输入参数\n```\n工具返回按照以下格式回复:\n'
+      '```\nResponse:调用工具后的结果\n```'
+      '\n如果你已经知道了答案,或者你不需要工具,请遵循以下格式回复\n```'
+      '\nThought:给出最终答案的思考过程\nFinal Answer:最终答案\n```\n开始!\n'
+      '<|User|>:上海明天天气怎么样?\n'
+      '<|Bot|>:')
+ ]
+
+ #######################################################################
+ #                      PART 2  Model & Tokenizer                      #
+ #######################################################################
+ tokenizer = dict(
+     type=AutoTokenizer.from_pretrained,
+     pretrained_model_name_or_path=pretrained_model_name_or_path,
+     trust_remote_code=True,
+     padding_side='right')
+
+ model = dict(
+     type=SupervisedFinetune,
+     llm=dict(
+         type=AutoModelForCausalLM.from_pretrained,
+         pretrained_model_name_or_path=pretrained_model_name_or_path,
+         trust_remote_code=True,
+         torch_dtype=torch.float16,
+         quantization_config=dict(
+             type=BitsAndBytesConfig,
+             load_in_4bit=True,
+             load_in_8bit=False,
+             llm_int8_threshold=6.0,
+             llm_int8_has_fp16_weight=False,
+             bnb_4bit_compute_dtype=torch.float16,
+             bnb_4bit_use_double_quant=True,
+             bnb_4bit_quant_type='nf4')),
+     lora=dict(
+         type=LoraConfig,
+         r=64,
+         lora_alpha=16,
+         lora_dropout=0.1,
+         bias='none',
+         task_type='CAUSAL_LM'))
+
+ #######################################################################
+ #                     PART 3  Dataset & Dataloader                    #
+ #######################################################################
+ train_dataset = dict(
+     type=process_ms_dataset,
+     dataset=dict(type=MsDataset.load, dataset_name=data_path),
+     tokenizer=tokenizer,
+     max_length=max_length,
+     dataset_map_fn=msagent_react_map_fn,
+     remove_unused_columns=True,
+     shuffle_before_pack=True,
+     pack_to_max_length=pack_to_max_length)
+
+ train_dataloader = dict(
+     batch_size=batch_size,
+     num_workers=dataloader_num_workers,
+     dataset=train_dataset,
+     sampler=dict(type=DefaultSampler, shuffle=True),
+     collate_fn=dict(type=default_collate_fn))
+
+ #######################################################################
+ #                    PART 4  Scheduler & Optimizer                    #
+ #######################################################################
+ # optimizer
+ optim_wrapper = dict(
+     type=AmpOptimWrapper,
+     optimizer=dict(
+         type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+     clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+     accumulative_counts=accumulative_counts,
+     loss_scale='dynamic',
+     dtype='float16')
+
+ # learning policy
+ # More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
+ param_scheduler = dict(
+     type=CosineAnnealingLR,
+     eta_min=lr * 0.1,
+     by_epoch=True,
+     T_max=max_epochs,
+     convert_to_iter_based=True)
+
+ # train, val, test setting
+ train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+ #######################################################################
+ #                           PART 5  Runtime                           #
+ #######################################################################
+ # Log the dialogue periodically during the training process, optional
+ custom_hooks = [
+     dict(type=DatasetInfoHook, tokenizer=tokenizer),
+     dict(
+         type=EvaluateChatHook,
+         tokenizer=tokenizer,
+         every_n_iters=evaluation_freq,
+         evaluation_inputs=evaluation_inputs)
+ ]
+
+ # configure default hooks
+ default_hooks = dict(
+     # record the time of every iteration.
+     timer=dict(type=IterTimerHook),
+     # print log every 10 iterations.
+     logger=dict(type=LoggerHook, interval=10),
+     # enable the parameter scheduler.
+     param_scheduler=dict(type=ParamSchedulerHook),
+     # save checkpoint per epoch.
+     checkpoint=dict(type=CheckpointHook, interval=1),
+     # set sampler seed in distributed environment.
+     sampler_seed=dict(type=DistSamplerSeedHook),
+ )
+
+ # configure environment
+ env_cfg = dict(
+     # whether to enable cudnn benchmark
+     cudnn_benchmark=False,
+     # set multi process parameters
+     mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+     # set distributed parameters
+     dist_cfg=dict(backend='nccl'),
+ )
+
+ # set visualizer
+ visualizer = None
+
+ # set log level
+ log_level = 'INFO'
+
+ # load from which checkpoint
+ load_from = None
+
+ # whether to resume training from the loaded checkpoint
+ resume = False
+
+ # Defaults to use random seed and disable `deterministic`
+ randomness = dict(seed=None, deterministic=False)
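
The `dict(type=...)` entries above are lazy constructors that MMEngine instantiates at runtime; `SupervisedFinetune` wraps the 4-bit-quantized base model with the LoRA adapter. As a rough, illustrative sketch (assumptions: the Hub model id stands in for the local training path, and `target_modules` is copied from the committed `adapter_config.json`), the PART 2 section corresponds approximately to the following plain `transformers` + `peft` calls:

```python
# Illustrative sketch only (not part of this commit): roughly what gets built
# from the lazy dict(type=...) entries in PART 2 of xtuner_config.py.
import torch
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

path = "internlm/internlm-20b"  # assumption: Hub id instead of the local path

tokenizer = AutoTokenizer.from_pretrained(
    path, trust_remote_code=True, padding_side="right")
llm = AutoModelForCausalLM.from_pretrained(
    path,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        load_in_8bit=False,
        llm_int8_threshold=6.0,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=torch.float16,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4",
    ),
)
llm = prepare_model_for_kbit_training(llm)  # standard QLoRA preparation step
peft_model = get_peft_model(
    llm,
    LoraConfig(
        r=64,
        lora_alpha=16,
        lora_dropout=0.1,
        bias="none",
        task_type="CAUSAL_LM",
        # taken from the committed adapter_config.json
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                        "gate_proj", "up_proj", "down_proj"],
    ),
)
peft_model.print_trainable_parameters()
```

The dataset mapping, paged AdamW optimizer, cosine schedule, and hooks in PARTs 3-5 are then driven by the MMEngine runner that the `xtuner train internlm_20b_qlora_msagent_react_e3_gpu8` command from the README builds from this file.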