alexmarques committed on
Commit
7a95cff
1 Parent(s): 295ceb1

Upload folder using huggingface_hub

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/nm/drive0/alexandre/cache/hub/models--microsoft--Phi-3-medium-128k-instruct/snapshots/cae1d42b5577398fd1be9f0746052562ae552886",
+  "_name_or_path": "/root/.cache/huggingface/hub/models--microsoft--Phi-3-medium-128k-instruct/snapshots/fa7d2aa4f5ea69b2e36b20d050cdae79c9bfbb3f",
   "architectures": [
     "Phi3ForCausalLM"
   ],
@@ -48,13 +48,7 @@
     ],
     "kv_cache_scheme": null,
     "quant_method": "compressed-tensors",
-    "quantization_status": "frozen",
-    "sparsity_config": {
-      "format": "dense",
-      "global_sparsity": 1.3620183617957324,
-      "registry_requires_subclass": false,
-      "sparsity_structure": "unstructured"
-    }
+    "quantization_status": "frozen"
   },
   "embd_pdrop": 0.0,
   "eos_token_id": 32000,
@@ -210,7 +204,7 @@
   "sliding_window": 131072,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.42.3",
+  "transformers_version": "4.44.1",
   "use_cache": true,
   "vocab_size": 32064
 }
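The updated config drops the unused sparsity_config block and keeps a frozen compressed-tensors quantization config (int8 weights and activations, per the recipe further down). As a minimal sketch, not part of this commit: a compressed-tensors checkpoint like this is typically served with vLLM; the repository id and sampling settings below are assumptions for illustration only.

```python
# Minimal sketch: serving a compressed-tensors W8A8 checkpoint with vLLM.
# The model id is an assumption; substitute the actual repo or local path.
from vllm import LLM, SamplingParams

llm = LLM(model="neuralmagic/Phi-3-medium-128k-instruct-quantized.w8a8")
params = SamplingParams(temperature=0.0, max_tokens=64)
outputs = llm.generate(["Explain weight-and-activation quantization in one sentence."], params)
print(outputs[0].outputs[0].text)
```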
generation_config.json CHANGED
@@ -7,5 +7,5 @@
     32007
   ],
   "pad_token_id": 32000,
-  "transformers_version": "4.42.3"
+  "transformers_version": "4.44.1"
 }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1494fa2d36e2cb968377e549d6952eea0ae36795e6cb8f817c45fbdeb8e0e51d
+oid sha256:1e92fd69d892de671d369e3d152c4f95473a2ce70b210bbf4f8085df52581ac7
 size 4825810264
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d0955e4d9e339322c74a5410bd34f6a44310128141fcaa68206b66487d33344c
+oid sha256:ad44d52535ed9465e6814928178cc34b2e68ddd99244c3f7a8cfb89a834b95e8
 size 4956401472
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1904abda1480e1205592b5dc1f0d0b117026ba23878b508176ece92bc6c9629f
+oid sha256:61f91273431a3e5f871441c546e8fcdd4ffa79285a962561dfbfb6b9c89658cc
 size 4511123824
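Each safetensors shard is stored through Git LFS, so only the pointer (SHA-256 and byte size) changes in the diff. A minimal sketch for checking a downloaded shard against its pointer; the local file name is an assumption:

```python
# Minimal sketch: verify a downloaded shard against the SHA-256 in its LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected for the first shard after this commit:
# 1e92fd69d892de671d369e3d152c4f95473a2ce70b210bbf4f8085df52581ac7
print(sha256_of("model-00001-of-00003.safetensors"))
```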
recipe.yaml CHANGED
@@ -1,11 +1,16 @@
1
  quant_stage:
2
  quant_modifiers:
 
 
 
 
 
 
 
3
  GPTQModifier:
4
- sequential_update: false
5
  dampening_frac: 0.01
6
  ignore: [lm_head]
7
- config_groups:
8
- group_0:
9
- targets: [Linear]
10
- weights: {num_bits: 8, type: int, symmetric: true, strategy: channel}
11
- input_activations: {num_bits: 8, type: int, symmetric: true, dynamic: true, strategy: token}
 
1
  quant_stage:
2
  quant_modifiers:
3
+ SmoothQuantModifier:
4
+ smoothing_strength: 0.8
5
+ mappings:
6
+ - - ['re:.*qkv_proj']
7
+ - re:.*input_layernorm
8
+ - - ['re:.*gate_up_proj']
9
+ - re:.*post_attention_layernorm
10
  GPTQModifier:
11
+ sequential_update: true
12
  dampening_frac: 0.01
13
  ignore: [lm_head]
14
+ scheme: W8A8
15
+ targets: Linear
16
+ observer: mse
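The new recipe prepends SmoothQuant (smoothing the qkv and gate/up projections against their preceding layernorms) and switches GPTQ to the predefined W8A8 scheme with an MSE observer and sequential updates. As a rough sketch of how such a recipe is usually applied with llm-compressor: the exact import path, model id, and calibration dataset below are assumptions and may differ by library version.

```python
# Minimal sketch: one-shot SmoothQuant + GPTQ quantization driven by recipe.yaml.
# Import path, dataset alias, and sample counts are assumptions for illustration.
from llmcompressor.transformers import oneshot

oneshot(
    model="microsoft/Phi-3-medium-128k-instruct",   # base model being quantized
    dataset="open_platypus",                        # calibration data (assumed choice)
    recipe="recipe.yaml",                           # the recipe shown in this diff
    output_dir="Phi-3-medium-128k-instruct-W8A8",
    max_seq_length=2048,
    num_calibration_samples=512,
)
```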
 
 
sample_finetune.py ADDED
@@ -0,0 +1,214 @@
+import sys
+import logging
+
+import datasets
+from datasets import load_dataset
+from peft import LoraConfig
+import torch
+import transformers
+from trl import SFTTrainer
+from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig
+
+"""
+A simple example on using SFTTrainer and Accelerate to finetune Phi-3 models. For
+a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py.
+This example has utilized DeepSpeed ZeRO3 offload to reduce the memory usage. The
+script can be run on V100 or later generation GPUs. Here are some suggestions on
+further reducing memory consumption:
+    - reduce batch size
+    - decrease lora dimension
+    - restrict lora target modules
+Please follow these steps to run the script:
+1. Install dependencies:
+    conda install -c conda-forge accelerate
+    pip3 install -i https://pypi.org/simple/ bitsandbytes
+    pip3 install peft transformers trl datasets
+    pip3 install deepspeed
+2. Setup accelerate and deepspeed config based on the machine used:
+    accelerate config
+    Here is a sample config for deepspeed zero3:
+        compute_environment: LOCAL_MACHINE
+        debug: false
+        deepspeed_config:
+            gradient_accumulation_steps: 1
+            offload_optimizer_device: none
+            offload_param_device: none
+            zero3_init_flag: true
+            zero3_save_16bit_model: true
+            zero_stage: 3
+        distributed_type: DEEPSPEED
+        downcast_bf16: 'no'
+        enable_cpu_affinity: false
+        machine_rank: 0
+        main_training_function: main
+        mixed_precision: bf16
+        num_machines: 1
+        num_processes: 4
+        rdzv_backend: static
+        same_network: true
+        tpu_env: []
+        tpu_use_cluster: false
+        tpu_use_sudo: false
+        use_cpu: false
+3. check accelerate config:
+    accelerate env
+4. Run the code:
+    accelerate launch sample_finetune.py
+"""
+
+logger = logging.getLogger(__name__)
+
+
+###################
+# Hyper-parameters
+###################
+training_config = {
+    "bf16": True,
+    "do_eval": False,
+    "learning_rate": 5.0e-06,
+    "log_level": "info",
+    "logging_steps": 20,
+    "logging_strategy": "steps",
+    "lr_scheduler_type": "cosine",
+    "num_train_epochs": 1,
+    "max_steps": -1,
+    "output_dir": "./checkpoint_dir",
+    "overwrite_output_dir": True,
+    "per_device_eval_batch_size": 4,
+    "per_device_train_batch_size": 4,
+    "remove_unused_columns": True,
+    "save_steps": 100,
+    "save_total_limit": 1,
+    "seed": 0,
+    "gradient_checkpointing": True,
+    "gradient_checkpointing_kwargs": {"use_reentrant": False},
+    "gradient_accumulation_steps": 1,
+    "warmup_ratio": 0.2,
+}
+
+peft_config = {
+    "r": 16,
+    "lora_alpha": 32,
+    "lora_dropout": 0.05,
+    "bias": "none",
+    "task_type": "CAUSAL_LM",
+    "target_modules": "all-linear",
+    "modules_to_save": None,
+}
+train_conf = TrainingArguments(**training_config)
+peft_conf = LoraConfig(**peft_config)
+
+
+###############
+# Setup logging
+###############
+logging.basicConfig(
+    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+    datefmt="%Y-%m-%d %H:%M:%S",
+    handlers=[logging.StreamHandler(sys.stdout)],
+)
+log_level = train_conf.get_process_log_level()
+logger.setLevel(log_level)
+datasets.utils.logging.set_verbosity(log_level)
+transformers.utils.logging.set_verbosity(log_level)
+transformers.utils.logging.enable_default_handler()
+transformers.utils.logging.enable_explicit_format()
+
+# Log on each process a small summary
+logger.warning(
+    f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}"
+    + f" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}"
+)
+logger.info(f"Training/evaluation parameters {train_conf}")
+logger.info(f"PEFT parameters {peft_conf}")
+
+
+################
+# Model Loading
+################
+checkpoint_path = "microsoft/Phi-3-medium-4k-instruct"
+# checkpoint_path = "microsoft/Phi-3-medium-128k-instruct"
+model_kwargs = dict(
+    use_cache=False,
+    trust_remote_code=True,
+    attn_implementation="flash_attention_2",  # loading the model with flash-attention support
+    torch_dtype=torch.bfloat16,
+    device_map=None
+)
+model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)
+tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
+tokenizer.model_max_length = 2048
+tokenizer.pad_token = tokenizer.unk_token  # use unk rather than eos token to prevent endless generation
+tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
+tokenizer.padding_side = 'right'
+
+
+##################
+# Data Processing
+##################
+def apply_chat_template(
+    example,
+    tokenizer,
+):
+    messages = example["messages"]
+    example["text"] = tokenizer.apply_chat_template(
+        messages, tokenize=False, add_generation_prompt=False)
+    return example
+
+raw_dataset = load_dataset("HuggingFaceH4/ultrachat_200k")
+train_dataset = raw_dataset["train_sft"]
+test_dataset = raw_dataset["test_sft"]
+column_names = list(train_dataset.features)
+
+processed_train_dataset = train_dataset.map(
+    apply_chat_template,
+    fn_kwargs={"tokenizer": tokenizer},
+    num_proc=10,
+    remove_columns=column_names,
+    desc="Applying chat template to train_sft",
+)
+
+processed_test_dataset = test_dataset.map(
+    apply_chat_template,
+    fn_kwargs={"tokenizer": tokenizer},
+    num_proc=10,
+    remove_columns=column_names,
+    desc="Applying chat template to test_sft",
+)
+
+
+###########
+# Training
+###########
+trainer = SFTTrainer(
+    model=model,
+    args=train_conf,
+    peft_config=peft_conf,
+    train_dataset=processed_train_dataset,
+    eval_dataset=processed_test_dataset,
+    max_seq_length=2048,
+    dataset_text_field="text",
+    tokenizer=tokenizer,
+    packing=True
+)
+train_result = trainer.train()
+metrics = train_result.metrics
+trainer.log_metrics("train", metrics)
+trainer.save_metrics("train", metrics)
+trainer.save_state()
+
+
+#############
+# Evaluation
+#############
+tokenizer.padding_side = 'left'
+metrics = trainer.evaluate()
+metrics["eval_samples"] = len(processed_test_dataset)
+trainer.log_metrics("eval", metrics)
+trainer.save_metrics("eval", metrics)
+
+
+# ############
+# # Save model
+# ############
+trainer.save_model(train_conf.output_dir)
tokenizer.json CHANGED
@@ -1,6 +1,11 @@
 {
   "version": "1.0",
-  "truncation": null,
+  "truncation": {
+    "direction": "Right",
+    "max_length": 8196,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
   "padding": null,
   "added_tokens": [
     {
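The updated tokenizer.json now carries an explicit truncation configuration (right-side truncation at 8196 tokens, longest-first strategy) instead of null. A minimal sketch for inspecting those settings with the tokenizers library; the local file path is an assumption:

```python
# Minimal sketch: read the truncation settings stored in tokenizer.json.
# Point the path at the locally downloaded tokenizer file.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
print(tok.truncation)  # prints the configured direction, max_length, strategy, and stride
```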