gugarosa wwwaj committed on
Commit
0693e0b
1 Parent(s): 49f911d

Update sample_finetune.py (#42)


- Update sample_finetune.py (91d1d9bcfc9841d30eaa70889abfe6cf0492dc8c)


Co-authored-by: Wen Wen <wwwaj@users.noreply.huggingface.co>

Files changed (1)
  1. sample_finetune.py +116 -29
sample_finetune.py CHANGED
@@ -1,28 +1,68 @@
-import torch
+import sys
+import logging
+
+import datasets
 from datasets import load_dataset
+from peft import LoraConfig
+import torch
+import transformers
 from trl import SFTTrainer
-from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
+from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig
 
 """
 A simple example of using SFTTrainer and Accelerate to finetune Phi-3 models. For
-a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py
-
-1. Install accelerate:
+a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py.
+This example uses DeepSpeed ZeRO3 offload to reduce memory usage. The script can
+be run on V100 or later generation GPUs. Here are some suggestions for further
+reducing memory consumption:
+    - reduce the batch size
+    - decrease the LoRA dimension
+    - restrict the LoRA target modules
+Please follow these steps to run the script:
+1. Install dependencies:
     conda install -c conda-forge accelerate
-2. Setup accelerate config:
+    pip3 install -i https://pypi.org/simple/ bitsandbytes
+    pip3 install peft
+    pip3 install deepspeed
+2. Set up the accelerate and deepspeed config based on the machine used:
     accelerate config
-to simply use all the GPUs available:
-    python -c "from accelerate.utils import write_basic_config; write_basic_config(mixed_precision='bf16')"
-check accelerate config:
+Here is a sample config for deepspeed zero3:
+    compute_environment: LOCAL_MACHINE
+    debug: false
+    deepspeed_config:
+      gradient_accumulation_steps: 1
+      offload_optimizer_device: none
+      offload_param_device: none
+      zero3_init_flag: true
+      zero3_save_16bit_model: true
+      zero_stage: 3
+    distributed_type: DEEPSPEED
+    downcast_bf16: 'no'
+    enable_cpu_affinity: false
+    machine_rank: 0
+    main_training_function: main
+    mixed_precision: bf16
+    num_machines: 1
+    num_processes: 4
+    rdzv_backend: static
+    same_network: true
+    tpu_env: []
+    tpu_use_cluster: false
+    tpu_use_sudo: false
+    use_cpu: false
+3. Check the accelerate config:
     accelerate env
-3. Run the code:
+4. Run the code:
     accelerate launch sample_finetune.py
 """
 
+logger = logging.getLogger(__name__)
+
+
 ###################
 # Hyper-parameters
 ###################
-args = {
+training_config = {
     "bf16": True,
     "do_eval": False,
     "learning_rate": 5.0e-06,
@@ -35,7 +75,7 @@ args = {
     "output_dir": "./checkpoint_dir",
     "overwrite_output_dir": True,
     "per_device_eval_batch_size": 4,
-    "per_device_train_batch_size": 8,
+    "per_device_train_batch_size": 4,
     "remove_unused_columns": True,
     "save_steps": 100,
     "save_total_limit": 1,
@@ -45,8 +85,42 @@ args = {
     "gradient_accumulation_steps": 1,
     "warmup_ratio": 0.2,
 }
-
-training_args = TrainingArguments(**args)
+
+peft_config = {
+    "r": 16,
+    "lora_alpha": 32,
+    "lora_dropout": 0.05,
+    "bias": "none",
+    "task_type": "CAUSAL_LM",
+    "target_modules": "all-linear",
+    "modules_to_save": None,
+}
+train_conf = TrainingArguments(**training_config)
+peft_conf = LoraConfig(**peft_config)
+
+
+###############
+# Setup logging
+###############
+logging.basicConfig(
+    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+    datefmt="%Y-%m-%d %H:%M:%S",
+    handlers=[logging.StreamHandler(sys.stdout)],
+)
+log_level = train_conf.get_process_log_level()
+logger.setLevel(log_level)
+datasets.utils.logging.set_verbosity(log_level)
+transformers.utils.logging.set_verbosity(log_level)
+transformers.utils.logging.enable_default_handler()
+transformers.utils.logging.enable_explicit_format()
+
+# Log on each process a small summary
+logger.warning(
+    f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}"
+    + f" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}"
+)
+logger.info(f"Training/evaluation parameters {train_conf}")
+logger.info(f"PEFT parameters {peft_conf}")
 
 
 ################
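
The peft_config added above is passed to SFTTrainer later in the diff, and the trainer applies the LoRA adapter to the model internally. Purely for inspection (not part of the commit), the same settings can be applied by hand to preview what they actually train; microsoft/Phi-3-mini-4k-instruct is assumed here for checkpoint_path, which is defined outside the hunks shown:

import torch
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

# Assumed base checkpoint; the script reads it from checkpoint_path.
base = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
lora = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules="all-linear",
)
peft_model = get_peft_model(base, lora)
# Prints trainable vs. total parameter counts for r=16 adapters on all linear layers.
peft_model.print_trainable_parameters()
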
@@ -59,14 +133,16 @@ model_kwargs = dict(
     trust_remote_code=True,
     attn_implementation="flash_attention_2",  # loading the model with flash-attention support
     torch_dtype=torch.bfloat16,
-    device_map="cuda",
+    device_map=None
 )
 model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)
 tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
+tokenizer.model_max_length = 2048
 tokenizer.pad_token = tokenizer.unk_token  # use unk rather than eos token to prevent endless generation
 tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
 tokenizer.padding_side = 'right'
 
+
 ##################
 # Data Processing
 ##################
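
BitsAndBytesConfig is imported in the new script but not used in any hunk shown here. As an illustration only, not something this commit configures, 4-bit QLoRA-style loading would slot into model_kwargs roughly as below; bitsandbytes quantization has extra constraints when combined with ZeRO3 parameter sharding, so it is usually an alternative to the ZeRO3 setup rather than an addition to it:

import torch
from transformers import BitsAndBytesConfig

# Hypothetical 4-bit loading config; values are common NF4 defaults, not from the commit.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# model_kwargs would then gain one extra entry:
#     quantization_config=bnb_config,
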
@@ -83,17 +159,25 @@ def apply_chat_template(
     return example
 
 raw_dataset = load_dataset("HuggingFaceH4/ultrachat_200k")
-column_names = list(raw_dataset["train_sft"].features)
+train_dataset = raw_dataset["train_sft"]
+test_dataset = raw_dataset["test_sft"]
+column_names = list(train_dataset.features)
+
+processed_train_dataset = train_dataset.map(
+    apply_chat_template,
+    fn_kwargs={"tokenizer": tokenizer},
+    num_proc=10,
+    remove_columns=column_names,
+    desc="Applying chat template to train_sft",
+)
 
-processed_dataset = raw_dataset.map(
+processed_test_dataset = test_dataset.map(
     apply_chat_template,
     fn_kwargs={"tokenizer": tokenizer},
-    num_proc=12,
+    num_proc=10,
     remove_columns=column_names,
-    desc="Applying chat template",
+    desc="Applying chat template to test_sft",
 )
-train_dataset = processed_dataset["train_sft"]
-eval_dataset = processed_dataset["test_sft"]
 
 
 ###########
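
The body of apply_chat_template sits outside the hunks shown. For HuggingFaceH4/ultrachat_200k, which stores each conversation in a "messages" column, it presumably looks roughly like the sketch below, flattening every conversation into a single "text" field that SFTTrainer later consumes via dataset_text_field:

def apply_chat_template(example, tokenizer):
    # Render the list of chat messages into one training string using the
    # tokenizer's chat template; no generation prompt is appended for SFT data.
    example["text"] = tokenizer.apply_chat_template(
        example["messages"], tokenize=False, add_generation_prompt=False
    )
    return example
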
@@ -101,9 +185,10 @@ eval_dataset = processed_dataset["test_sft"]
 ###########
 trainer = SFTTrainer(
     model=model,
-    args=training_args,
-    train_dataset=train_dataset,
-    eval_dataset=eval_dataset,
+    args=train_conf,
+    peft_config=peft_conf,
+    train_dataset=processed_train_dataset,
+    eval_dataset=processed_test_dataset,
     max_seq_length=2048,
     dataset_text_field="text",
     tokenizer=tokenizer,
@@ -115,16 +200,18 @@ trainer.log_metrics("train", metrics)
 trainer.save_metrics("train", metrics)
 trainer.save_state()
 
+
 #############
 # Evaluation
 #############
 tokenizer.padding_side = 'left'
 metrics = trainer.evaluate()
-metrics["eval_samples"] = len(eval_dataset)
+metrics["eval_samples"] = len(processed_test_dataset)
 trainer.log_metrics("eval", metrics)
 trainer.save_metrics("eval", metrics)
 
-############
-# Save model
-############
-trainer.save_model(training_args.output_dir)
+
+# ############
+# # Save model
+# ############
+trainer.save_model(train_conf.output_dir)
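
Since training now runs with peft_config, the checkpoint written by trainer.save_model holds a LoRA adapter rather than a full set of weights. A minimal sketch of loading it back for inference, assuming ./checkpoint_dir from the hyper-parameters above and the microsoft/Phi-3-mini-4k-instruct tokenizer as the base:

import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Reads the base model name from the adapter config saved in ./checkpoint_dir.
model = AutoPeftModelForCausalLM.from_pretrained(
    "./checkpoint_dir",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
# Optionally fold the LoRA weights into the base model for plain inference.
model = model.merge_and_unload()

# Assumed tokenizer source; the adapter directory may or may not contain tokenizer files.
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
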