chchen committed
Commit 224470a
Parent: 72cb7d4

Training in progress, step 500

.ipynb_checkpoints/lora_orpo-checkpoint.yaml ADDED
@@ -0,0 +1,43 @@
+ ### model
+ model_name_or_path: lmsys/vicuna-7b-v1.5
+
+ ### method
+ stage: orpo
+ do_train: true
+ finetuning_type: lora
+ lora_target: all
+
+ ### dataset
+ dataset: dpo_mix_en
+ dataset_dir: data
+ template: vicuna
+ cutoff_len: 1024
+ # max_samples: 1000
+ overwrite_cache: true
+ preprocessing_num_workers: 16
+
+ ### output
+ output_dir: saves/Vicuna-7B-v1.5/lora/orpo
+ logging_steps: 10
+ save_steps: 500
+ plot_loss: true
+ overwrite_output_dir: true
+ save_total_limit: 3
+ load_best_model_at_end: true
+ push_to_hub: true
+ hub_model_id: chchen/Vicuna-7B-v1.5-ORPO
+
+ ### train
+ per_device_train_batch_size: 2
+ gradient_accumulation_steps: 8
+ learning_rate: 0.000005
+ num_train_epochs: 3.0
+ lr_scheduler_type: cosine
+ warmup_steps: 0.1
+ bf16: true
+
+ ### eval
+ val_size: 0.1
+ per_device_eval_batch_size: 2
+ evaluation_strategy: steps
+ eval_steps: 500
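The train block above implies an effective batch size of 2 × 8 = 16. A minimal sketch of the step arithmetic, assuming a single GPU and roughly 10,000 preference pairs in dpo_mix_en (both assumptions, not stated in the config), with the 0.1 val_size split applied:

    # Hypothetical step-count check for the config above; the dataset size
    # (~10k pairs) and single-GPU setup are assumptions.
    per_device_bs, grad_accum, epochs = 2, 8, 3
    dataset_size, val_size = 10_000, 0.1

    effective_bs = per_device_bs * grad_accum            # 16
    train_examples = int(dataset_size * (1 - val_size))  # 9000
    steps_per_epoch = train_examples // effective_bs     # 562
    print(steps_per_epoch * epochs)                      # 1686, matching trainer_log.jsonl below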
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "lmsys/vicuna-7b-v1.5",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "down_proj",
+ "k_proj",
+ "up_proj",
+ "v_proj",
+ "gate_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
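This is a standard PEFT LoRA adapter config (r=8, alpha=16, all seven projection modules targeted). A minimal sketch of attaching the adapter to the base model with transformers and peft; the repo IDs are taken from the files above:

    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import PeftModel

    # Load the base model, then attach the LoRA adapter from the Hub.
    base = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.5", torch_dtype="auto")
    model = PeftModel.from_pretrained(base, "chchen/Vicuna-7B-v1.5-ORPO")
    tokenizer = AutoTokenizer.from_pretrained("chchen/Vicuna-7B-v1.5-ORPO")
    # Optionally fold the adapter into the base weights for inference:
    # model = model.merge_and_unload()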
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2ae54bec57f318b2fd881fb0282903c596ce0b7aa45def4eb9205bdd6c3d166
+ size 80013120
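The ~80 MB size is consistent with r=8 adapters on all seven projections of a 7B LLaMA stored as 4-byte floats. A back-of-envelope check, assuming the standard LLaMA-7B shapes (hidden 4096, intermediate 11008, 32 layers; these shapes are assumptions, not read from the checkpoint):

    # Rough parameter count for the adapter above.
    hidden, inter, layers, r = 4096, 11008, 32, 8
    attn = 4 * 2 * hidden * r                                      # q,k,v,o: A (hidden x r) + B (r x hidden)
    mlp = 2 * (hidden * r + r * inter) + (inter * r + r * hidden)  # gate, up, down
    params = layers * (attn + mlp)
    print(params, params * 4)  # ~20.0M params, ~79,953,920 bytes -- close to the 80013120 above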
lora_orpo.yaml ADDED
@@ -0,0 +1,43 @@
+ ### model
+ model_name_or_path: lmsys/vicuna-7b-v1.5
+
+ ### method
+ stage: orpo
+ do_train: true
+ finetuning_type: lora
+ lora_target: all
+
+ ### dataset
+ dataset: dpo_mix_en
+ dataset_dir: data
+ template: vicuna
+ cutoff_len: 1024
+ # max_samples: 1000
+ overwrite_cache: true
+ preprocessing_num_workers: 16
+
+ ### output
+ output_dir: saves/Vicuna-7B-v1.5/lora/orpo
+ logging_steps: 10
+ save_steps: 500
+ plot_loss: true
+ overwrite_output_dir: true
+ save_total_limit: 3
+ load_best_model_at_end: true
+ push_to_hub: true
+ hub_model_id: chchen/Vicuna-7B-v1.5-ORPO
+
+ ### train
+ per_device_train_batch_size: 2
+ gradient_accumulation_steps: 8
+ learning_rate: 0.000005
+ num_train_epochs: 3.0
+ lr_scheduler_type: cosine
+ warmup_steps: 0.1
+ bf16: true
+
+ ### eval
+ val_size: 0.1
+ per_device_eval_batch_size: 2
+ evaluation_strategy: steps
+ eval_steps: 500
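This file is identical to the .ipynb_checkpoints copy above. Its keys (stage, finetuning_type, template) match LLaMA-Factory's training YAML format, so the run was presumably launched through that toolkit's CLI. A minimal sketch, assuming llamafactory-cli is installed and dpo_mix_en is registered under data/:

    import subprocess

    # Launch the ORPO LoRA run from this YAML (assumes LLaMA-Factory's CLI
    # entry point; adjust if the installed version uses a different command).
    subprocess.run(["llamafactory-cli", "train", "lora_orpo.yaml"], check=True)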
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
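Note that pad_token is mapped to <unk> (ID 0): LLaMA-family tokenizers ship no dedicated padding token, so reusing <unk> is a common workaround. A quick check, as a sketch:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("chchen/Vicuna-7B-v1.5-ORPO")
    print(tok.pad_token, tok.pad_token_id)  # expected: <unk> 0
    print(tok.bos_token, tok.eos_token)     # expected: <s> </s>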
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "chat_template": "{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\'s questions.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'USER: ' + content + ' ASSISTANT:' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 4096,
+ "pad_token": "<unk>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
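The chat_template above reproduces the Vicuna v1.5 prompt format (system text, then "USER: ... ASSISTANT:"), consistent with the template: vicuna setting in the training YAML. A minimal sketch of rendering it via transformers:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("chchen/Vicuna-7B-v1.5-ORPO")
    messages = [{"role": "user", "content": "What is ORPO?"}]
    prompt = tok.apply_chat_template(messages, tokenize=False)
    print(prompt)  # "...polite answers to the user's questions. USER: What is ORPO? ASSISTANT:"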
trainer_log.jsonl ADDED
@@ -0,0 +1,51 @@
+ {"current_steps": 10, "total_steps": 1686, "loss": 1.227, "accuracy": 0.4124999940395355, "learning_rate": 4.9995745934141085e-06, "epoch": 0.017781729273171815, "percentage": 0.59, "elapsed_time": "0:01:35", "remaining_time": "4:25:41"}
+ {"current_steps": 20, "total_steps": 1686, "loss": 1.0707, "accuracy": 0.45625001192092896, "learning_rate": 4.9982812903243405e-06, "epoch": 0.03556345854634363, "percentage": 1.19, "elapsed_time": "0:03:11", "remaining_time": "4:25:52"}
+ {"current_steps": 30, "total_steps": 1686, "loss": 1.1087, "accuracy": 0.5249999761581421, "learning_rate": 4.996120496405222e-06, "epoch": 0.05334518781951545, "percentage": 1.78, "elapsed_time": "0:04:46", "remaining_time": "4:23:30"}
+ {"current_steps": 40, "total_steps": 1686, "loss": 1.1498, "accuracy": 0.5062500238418579, "learning_rate": 4.99309296196014e-06, "epoch": 0.07112691709268726, "percentage": 2.37, "elapsed_time": "0:06:20", "remaining_time": "4:20:49"}
+ {"current_steps": 50, "total_steps": 1686, "loss": 1.1221, "accuracy": 0.4625000059604645, "learning_rate": 4.989199738255166e-06, "epoch": 0.08890864636585907, "percentage": 2.97, "elapsed_time": "0:07:57", "remaining_time": "4:20:22"}
+ {"current_steps": 60, "total_steps": 1686, "loss": 1.2305, "accuracy": 0.45625001192092896, "learning_rate": 4.984442177154031e-06, "epoch": 0.1066903756390309, "percentage": 3.56, "elapsed_time": "0:09:36", "remaining_time": "4:20:16"}
+ {"current_steps": 70, "total_steps": 1686, "loss": 1.1208, "accuracy": 0.4625000059604645, "learning_rate": 4.978821930648704e-06, "epoch": 0.12447210491220272, "percentage": 4.15, "elapsed_time": "0:11:15", "remaining_time": "4:20:03"}
+ {"current_steps": 80, "total_steps": 1686, "loss": 1.2343, "accuracy": 0.5, "learning_rate": 4.97234095028576e-06, "epoch": 0.14225383418537452, "percentage": 4.74, "elapsed_time": "0:12:55", "remaining_time": "4:19:37"}
+ {"current_steps": 90, "total_steps": 1686, "loss": 1.1471, "accuracy": 0.4749999940395355, "learning_rate": 4.965001486488743e-06, "epoch": 0.16003556345854633, "percentage": 5.34, "elapsed_time": "0:14:34", "remaining_time": "4:18:19"}
+ {"current_steps": 100, "total_steps": 1686, "loss": 1.1124, "accuracy": 0.4937500059604645, "learning_rate": 4.956806087776732e-06, "epoch": 0.17781729273171815, "percentage": 5.93, "elapsed_time": "0:16:07", "remaining_time": "4:15:41"}
+ {"current_steps": 110, "total_steps": 1686, "loss": 1.2227, "accuracy": 0.4749999940395355, "learning_rate": 4.947757599879411e-06, "epoch": 0.19559902200489, "percentage": 6.52, "elapsed_time": "0:17:40", "remaining_time": "4:13:08"}
+ {"current_steps": 120, "total_steps": 1686, "loss": 1.0907, "accuracy": 0.4937500059604645, "learning_rate": 4.937859164748931e-06, "epoch": 0.2133807512780618, "percentage": 7.12, "elapsed_time": "0:19:12", "remaining_time": "4:10:46"}
+ {"current_steps": 130, "total_steps": 1686, "loss": 1.1117, "accuracy": 0.5062500238418579, "learning_rate": 4.92711421946891e-06, "epoch": 0.23116248055123362, "percentage": 7.71, "elapsed_time": "0:20:54", "remaining_time": "4:10:17"}
+ {"current_steps": 140, "total_steps": 1686, "loss": 1.0745, "accuracy": 0.5249999761581421, "learning_rate": 4.915526495060961e-06, "epoch": 0.24894420982440543, "percentage": 8.3, "elapsed_time": "0:22:33", "remaining_time": "4:09:02"}
+ {"current_steps": 150, "total_steps": 1686, "loss": 1.0386, "accuracy": 0.5249999761581421, "learning_rate": 4.903100015189153e-06, "epoch": 0.26672593909757725, "percentage": 8.9, "elapsed_time": "0:24:11", "remaining_time": "4:07:48"}
+ {"current_steps": 160, "total_steps": 1686, "loss": 1.1206, "accuracy": 0.512499988079071, "learning_rate": 4.889839094762848e-06, "epoch": 0.28450766837074903, "percentage": 9.49, "elapsed_time": "0:25:46", "remaining_time": "4:05:54"}
+ {"current_steps": 170, "total_steps": 1686, "loss": 1.0632, "accuracy": 0.4937500059604645, "learning_rate": 4.875748338438416e-06, "epoch": 0.3022893976439209, "percentage": 10.08, "elapsed_time": "0:27:20", "remaining_time": "4:03:53"}
+ {"current_steps": 180, "total_steps": 1686, "loss": 1.0245, "accuracy": 0.5249999761581421, "learning_rate": 4.8608326390203386e-06, "epoch": 0.32007112691709266, "percentage": 10.68, "elapsed_time": "0:28:58", "remaining_time": "4:02:26"}
+ {"current_steps": 190, "total_steps": 1686, "loss": 1.0617, "accuracy": 0.4937500059604645, "learning_rate": 4.845097175762251e-06, "epoch": 0.3378528561902645, "percentage": 11.27, "elapsed_time": "0:30:36", "remaining_time": "4:01:01"}
+ {"current_steps": 200, "total_steps": 1686, "loss": 1.198, "accuracy": 0.48750001192092896, "learning_rate": 4.8285474125685286e-06, "epoch": 0.3556345854634363, "percentage": 11.86, "elapsed_time": "0:32:11", "remaining_time": "3:59:13"}
+ {"current_steps": 210, "total_steps": 1686, "loss": 1.0712, "accuracy": 0.5, "learning_rate": 4.811189096097025e-06, "epoch": 0.37341631473660813, "percentage": 12.46, "elapsed_time": "0:33:54", "remaining_time": "3:58:21"}
+ {"current_steps": 220, "total_steps": 1686, "loss": 1.0422, "accuracy": 0.4749999940395355, "learning_rate": 4.793028253763633e-06, "epoch": 0.39119804400978, "percentage": 13.05, "elapsed_time": "0:35:33", "remaining_time": "3:56:54"}
+ {"current_steps": 230, "total_steps": 1686, "loss": 1.0212, "accuracy": 0.5562499761581421, "learning_rate": 4.774071191649352e-06, "epoch": 0.40897977328295176, "percentage": 13.64, "elapsed_time": "0:37:12", "remaining_time": "3:55:32"}
+ {"current_steps": 240, "total_steps": 1686, "loss": 1.1058, "accuracy": 0.45625001192092896, "learning_rate": 4.7543244923105975e-06, "epoch": 0.4267615025561236, "percentage": 14.23, "elapsed_time": "0:38:50", "remaining_time": "3:54:02"}
+ {"current_steps": 250, "total_steps": 1686, "loss": 1.0857, "accuracy": 0.46875, "learning_rate": 4.733795012493506e-06, "epoch": 0.4445432318292954, "percentage": 14.83, "elapsed_time": "0:40:30", "remaining_time": "3:52:41"}
+ {"current_steps": 260, "total_steps": 1686, "loss": 1.0164, "accuracy": 0.48750001192092896, "learning_rate": 4.712489880753035e-06, "epoch": 0.46232496110246724, "percentage": 15.42, "elapsed_time": "0:42:12", "remaining_time": "3:51:27"}
+ {"current_steps": 270, "total_steps": 1686, "loss": 1.0133, "accuracy": 0.5375000238418579, "learning_rate": 4.690416494977673e-06, "epoch": 0.480106690375639, "percentage": 16.01, "elapsed_time": "0:43:50", "remaining_time": "3:49:54"}
+ {"current_steps": 280, "total_steps": 1686, "loss": 1.1374, "accuracy": 0.5062500238418579, "learning_rate": 4.667582519820639e-06, "epoch": 0.49788841964881086, "percentage": 16.61, "elapsed_time": "0:45:33", "remaining_time": "3:48:46"}
+ {"current_steps": 290, "total_steps": 1686, "loss": 1.1314, "accuracy": 0.5062500238418579, "learning_rate": 4.643995884038443e-06, "epoch": 0.5156701489219827, "percentage": 17.2, "elapsed_time": "0:47:12", "remaining_time": "3:47:15"}
+ {"current_steps": 300, "total_steps": 1686, "loss": 1.0053, "accuracy": 0.48124998807907104, "learning_rate": 4.6196647777377475e-06, "epoch": 0.5334518781951545, "percentage": 17.79, "elapsed_time": "0:48:52", "remaining_time": "3:45:48"}
+ {"current_steps": 310, "total_steps": 1686, "loss": 1.0919, "accuracy": 0.48124998807907104, "learning_rate": 4.59459764953147e-06, "epoch": 0.5512336074683263, "percentage": 18.39, "elapsed_time": "0:50:34", "remaining_time": "3:44:28"}
+ {"current_steps": 320, "total_steps": 1686, "loss": 1.0951, "accuracy": 0.574999988079071, "learning_rate": 4.568803203605133e-06, "epoch": 0.5690153367414981, "percentage": 18.98, "elapsed_time": "0:52:13", "remaining_time": "3:42:55"}
+ {"current_steps": 330, "total_steps": 1686, "loss": 1.0391, "accuracy": 0.5062500238418579, "learning_rate": 4.542290396694462e-06, "epoch": 0.58679706601467, "percentage": 19.57, "elapsed_time": "0:53:55", "remaining_time": "3:41:34"}
+ {"current_steps": 340, "total_steps": 1686, "loss": 1.0484, "accuracy": 0.5, "learning_rate": 4.515068434975298e-06, "epoch": 0.6045787952878418, "percentage": 20.17, "elapsed_time": "0:55:32", "remaining_time": "3:39:52"}
+ {"current_steps": 350, "total_steps": 1686, "loss": 1.1304, "accuracy": 0.42500001192092896, "learning_rate": 4.487146770866887e-06, "epoch": 0.6223605245610135, "percentage": 20.76, "elapsed_time": "0:57:11", "remaining_time": "3:38:16"}
+ {"current_steps": 360, "total_steps": 1686, "loss": 1.2174, "accuracy": 0.46875, "learning_rate": 4.458535099749666e-06, "epoch": 0.6401422538341853, "percentage": 21.35, "elapsed_time": "0:58:47", "remaining_time": "3:36:32"}
+ {"current_steps": 370, "total_steps": 1686, "loss": 1.0243, "accuracy": 0.53125, "learning_rate": 4.429243356598694e-06, "epoch": 0.6579239831073572, "percentage": 21.95, "elapsed_time": "1:00:23", "remaining_time": "3:34:49"}
+ {"current_steps": 380, "total_steps": 1686, "loss": 1.0101, "accuracy": 0.4625000059604645, "learning_rate": 4.399281712533875e-06, "epoch": 0.675705712380529, "percentage": 22.54, "elapsed_time": "1:01:57", "remaining_time": "3:32:57"}
+ {"current_steps": 390, "total_steps": 1686, "loss": 1.0071, "accuracy": 0.45625001192092896, "learning_rate": 4.368660571288192e-06, "epoch": 0.6934874416537008, "percentage": 23.13, "elapsed_time": "1:03:39", "remaining_time": "3:31:31"}
+ {"current_steps": 400, "total_steps": 1686, "loss": 1.1448, "accuracy": 0.48750001192092896, "learning_rate": 4.337390565595163e-06, "epoch": 0.7112691709268726, "percentage": 23.72, "elapsed_time": "1:05:16", "remaining_time": "3:29:52"}
+ {"current_steps": 410, "total_steps": 1686, "loss": 1.0363, "accuracy": 0.512499988079071, "learning_rate": 4.305482553496786e-06, "epoch": 0.7290509002000445, "percentage": 24.32, "elapsed_time": "1:06:51", "remaining_time": "3:28:03"}
+ {"current_steps": 420, "total_steps": 1686, "loss": 1.0826, "accuracy": 0.53125, "learning_rate": 4.272947614573244e-06, "epoch": 0.7468326294732163, "percentage": 24.91, "elapsed_time": "1:08:24", "remaining_time": "3:26:11"}
+ {"current_steps": 430, "total_steps": 1686, "loss": 1.0309, "accuracy": 0.53125, "learning_rate": 4.23979704609569e-06, "epoch": 0.7646143587463881, "percentage": 25.5, "elapsed_time": "1:10:02", "remaining_time": "3:24:35"}
+ {"current_steps": 440, "total_steps": 1686, "loss": 1.0531, "accuracy": 0.543749988079071, "learning_rate": 4.206042359103435e-06, "epoch": 0.78239608801956, "percentage": 26.1, "elapsed_time": "1:11:39", "remaining_time": "3:22:56"}
+ {"current_steps": 450, "total_steps": 1686, "loss": 1.0182, "accuracy": 0.5062500238418579, "learning_rate": 4.17169527440691e-06, "epoch": 0.8001778172927317, "percentage": 26.69, "elapsed_time": "1:13:15", "remaining_time": "3:21:12"}
+ {"current_steps": 460, "total_steps": 1686, "loss": 1.0256, "accuracy": 0.53125, "learning_rate": 4.136767718517797e-06, "epoch": 0.8179595465659035, "percentage": 27.28, "elapsed_time": "1:14:52", "remaining_time": "3:19:34"}
+ {"current_steps": 470, "total_steps": 1686, "loss": 1.0376, "accuracy": 0.42500001192092896, "learning_rate": 4.1012718195077196e-06, "epoch": 0.8357412758390753, "percentage": 27.88, "elapsed_time": "1:16:30", "remaining_time": "3:17:57"}
+ {"current_steps": 480, "total_steps": 1686, "loss": 1.0453, "accuracy": 0.4937500059604645, "learning_rate": 4.065219902796953e-06, "epoch": 0.8535230051122472, "percentage": 28.47, "elapsed_time": "1:18:05", "remaining_time": "3:16:12"}
+ {"current_steps": 490, "total_steps": 1686, "loss": 1.0194, "accuracy": 0.550000011920929, "learning_rate": 4.028624486874608e-06, "epoch": 0.871304734385419, "percentage": 29.06, "elapsed_time": "1:19:43", "remaining_time": "3:14:34"}
+ {"current_steps": 500, "total_steps": 1686, "loss": 1.0913, "accuracy": 0.4312500059604645, "learning_rate": 3.99149827895177e-06, "epoch": 0.8890864636585908, "percentage": 29.66, "elapsed_time": "1:21:23", "remaining_time": "3:13:04"}
+ {"current_steps": 500, "total_steps": 1686, "eval_loss": 1.0354068279266357, "epoch": 0.8890864636585908, "percentage": 29.66, "elapsed_time": "1:24:29", "remaining_time": "3:20:25"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4bf5d50cd34d64f54cdc21d6e2f47ee075c78b3ad0c8ba3f0874ac987481c297
+ size 5176
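training_args.bin is the Trainer's pickled TrainingArguments object, so it can be inspected with torch.load. A sketch (weights_only=False is needed on recent torch versions, since this is a pickle rather than a tensor file):

    import torch

    # Unpickle the saved TrainingArguments (trusted file from this repo).
    args = torch.load("training_args.bin", weights_only=False)
    print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)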