Pclanglais committed on
Commit
083d82d
1 Parent(s): 65f29ef

Create finetuning.py

Files changed (1)
  1. finetuning.py +231 -0
finetuning.py ADDED
@@ -0,0 +1,231 @@
import os
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print(device)


from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    HfArgumentParser,
    TrainingArguments,
    pipeline,
    logging,
    LlamaTokenizerFast
)
from peft import LoraConfig, PeftModel, get_peft_model
from trl import SFTTrainer

# The model we are going to use from the Hugging Face hub
model_name = "mistral-hermes"

torch.cuda.empty_cache()

#project_directory = "~/finetuning/sigmund-spplus"

# The name of the new model
new_model_name = "mistral-mfs-reference"

# The output directory where the model predictions and checkpoints will be written
output_dir = "./mistral-mfs-reference"

# Tensorboard logs
tb_log_dir = "./mistral-mfs-reference/logs"

# Number of steps: adjust according to the corpus size and the number of epochs to run.
max_steps = 500

# The important parameters!
per_device_train_batch_size = 4  # Number of examples sent per batch. Increase it to train faster.
learning_rate = 2e-5  # Preferably a low learning rate, since Mistral-Hermes already handles French well
max_seq_length = 4096  # The context window. It can go up to 4096 tokens (but watch the available memory!)
save_steps = 1000  # Checkpoint saving interval (allows restarting training if the fine-tuning fails)
# Learning rate schedule (constant a bit better than cosine, and has advantage for analysis)
lr_scheduler_type = "linear"


# The other parameters
local_rank = -1
per_device_eval_batch_size = 1
gradient_accumulation_steps = 4
max_grad_norm = 0.3
weight_decay = 0.001
lora_alpha = 16
lora_dropout = 0.1
lora_r = 64
# Group sequences into batches with same length (saves memory and speeds up training considerably)
group_by_length = True

# Activate 4-bit precision base model loading
use_4bit = True

# Activate nested quantization for 4-bit base models
use_nested_quant = False

# Compute dtype for 4-bit base models
bnb_4bit_compute_dtype = "float16"

# Quantization type (fp4 or nf4)
bnb_4bit_quant_type = "nf4"

# Number of training epochs
num_train_epochs = 1

# Enable fp16 training
fp16 = True

# Enable bf16 training
bf16 = False

# Use packing when creating the dataset
packing = False

# Enable gradient checkpointing
gradient_checkpointing = True

# Optimizer to use, original is paged_adamw_32bit
optim = "paged_adamw_32bit"

# Fraction of steps to do a warmup for
warmup_ratio = 0.03

# Log every X update steps
logging_steps = 1

# Load the entire model on GPU 0
device_map = {"": 0}

# Visualize training
report_to = "tensorboard"

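# With these settings, each optimizer step sees
# per_device_train_batch_size * gradient_accumulation_steps = 4 * 4 = 16 sequences
# (on a single GPU).
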
#2. LoRA configuration and tokenizer import
peft_config = LoraConfig(
    lora_alpha=lora_alpha,
    lora_dropout=lora_dropout,
    r=lora_r,
    inference_mode=False,
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"]
)

tokenizer = AutoTokenizer.from_pretrained(model_name)

# This is the fix for fp16 training
#tokenizer.padding_side = "right"
#tokenizer.pad_token = tokenizer.eos_token

#3. Dataset preparation

from datasets import load_dataset

def format_alpaca(sample):
    prompt = f"{sample['conversation']}"
    return prompt

# template dataset to add prompt to each sample
def template_dataset(sample):
    sample["text"] = f"{format_alpaca(sample)}{tokenizer.eos_token}"
    return sample

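# Each record of references_mfs_corpus.json is expected to expose a "conversation"
# field containing the full, already-formatted exchange; template_dataset() simply
# appends the tokenizer's EOS token to it and stores the result under "text".
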
# Load the dataset.
#dataset = load_dataset("databricks/databricks-dolly-15k", split="train")
data_files = {"train": "references_mfs_corpus.json"}
dataset = load_dataset("json", data_files=data_files, split="train")

# Shuffle the dataset
dataset_shuffled = dataset.shuffle(seed=42)

# Optionally keep only the first 512 rows of the shuffled dataset; leave commented to use the full corpus
#dataset = dataset_shuffled.select(range(512))

# Transform the dataset to use the guanaco format
dataset = dataset.map(template_dataset, remove_columns=list(dataset.features))

print(dataset[40])

#4. Model import

# Load tokenizer and model with QLoRA configuration
compute_dtype = getattr(torch, bnb_4bit_compute_dtype)

bnb_config = BitsAndBytesConfig(
    load_in_4bit=use_4bit,
    bnb_4bit_quant_type=bnb_4bit_quant_type,
    bnb_4bit_compute_dtype=compute_dtype,
    bnb_4bit_use_double_quant=use_nested_quant,
)

if compute_dtype == torch.float16 and use_4bit:
    major, _ = torch.cuda.get_device_capability()
    if major >= 8:
        print("=" * 80)
        print("Your GPU supports bfloat16, you can accelerate training with the argument --bf16")
        print("=" * 80)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map=device_map,
    quantization_config=bnb_config
)

model.config.use_cache = False
model.config.pretraining_tp = 1

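# Optional sanity check (illustrative, not part of the original flow): SFTTrainer
# applies peft_config itself, but the LoRA adapters can also be inspected manually
# with get_peft_model (already imported above).
#peft_model = get_peft_model(model, peft_config)
#peft_model.print_trainable_parameters()
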
#5. Fine-tuning

torch.cuda.empty_cache()

training_arguments = TrainingArguments(
    output_dir=output_dir,
    per_device_train_batch_size=per_device_train_batch_size,
    gradient_accumulation_steps=gradient_accumulation_steps,
    gradient_checkpointing=True,
    optim=optim,
    save_steps=save_steps,
    logging_steps=logging_steps,
    learning_rate=learning_rate,
    fp16=fp16,
    bf16=bf16,
    max_grad_norm=max_grad_norm,
    max_steps=max_steps,
    warmup_ratio=warmup_ratio,
    group_by_length=group_by_length,
    lr_scheduler_type=lr_scheduler_type,
    report_to="tensorboard"
)

trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    peft_config=peft_config,
    dataset_text_field="text",
    max_seq_length=max_seq_length,
    tokenizer=tokenizer,
    args=training_arguments,
    packing=packing
)

trainer.train()
#trainer.train(resume_from_checkpoint=True)
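# Note: with save_steps (1000) larger than max_steps (500), no intermediate
# checkpoint is written during this run; the resume_from_checkpoint line above
# only becomes useful if save_steps is lowered below max_steps.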

#6. Saving

model_to_save = trainer.model.module if hasattr(trainer.model, 'module') else trainer.model  # Take care of distributed/parallel training
model_to_save.save_pretrained(new_model_name)

torch.cuda.empty_cache()

from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained(new_model_name, device_map="auto", torch_dtype=torch.bfloat16)
model = model.merge_and_unload()

output_merged_dir = os.path.join(new_model_name, new_model_name)
model.save_pretrained(output_merged_dir, safe_serialization=True)

tokenizer.save_pretrained(output_merged_dir)
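
# Quick inference check (a minimal sketch, not part of the original script): the
# merged model and tokenizer can be fed to the text-generation pipeline imported
# at the top of this file. The prompt below is a hypothetical placeholder.
#generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
#print(generator("Your question here", max_new_tokens=200)[0]["generated_text"])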