dmariko committed
Commit 4eea8a9
1 parent: b231ecd

SmolLM-360M-Instruct-dpo-16k

README.md ADDED
@@ -0,0 +1,75 @@
+ ---
+ license: apache-2.0
+ base_model: HuggingFaceTB/SmolLM-360M-Instruct
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ model-index:
+ - name: SmolLM-360M-Instruct-dpo-16k
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # SmolLM-360M-Instruct-dpo-16k
+
+ This model is a DPO fine-tuned version of [HuggingFaceTB/SmolLM-360M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM-360M-Instruct), trained on the dmariko/test-dpo preference dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.8873
+ - Rewards/chosen: 0.0047
+ - Rewards/rejected: 0.3539
+ - Rewards/accuracies: 0.0326
+ - Rewards/margins: -0.3493
+ - Logps/rejected: -470.7575
+ - Logps/chosen: -546.0133
+ - Logits/rejected: 0.3014
+ - Logits/chosen: 0.6045
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ The model was trained with DPO on the dmariko/test-dpo preference dataset (14,954 prompt/chosen/rejected examples) and evaluated on dmariko/test-dpo-plaba (921 examples); copies of both are stored under `logs/training_data` and `logs/evaluation_data` in this repository.
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 4
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 2
+ - num_epochs: 6
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:------:|:-----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.5225 | 0.9999 | 3368 | 0.8679 | 0.0092 | 0.3258 | 0.0337 | -0.3166 | -471.0385 | -545.9679 | 0.3212 | 0.6250 |
+ | 0.4511 | 2.0 | 6737 | 0.8863 | 0.0171 | 0.3649 | 0.0283 | -0.3477 | -470.6477 | -545.8885 | 0.2889 | 0.5939 |
+ | 0.4453 | 2.9999 | 10105 | 0.8880 | 0.0006 | 0.3516 | 0.0304 | -0.3510 | -470.7807 | -546.0537 | 0.3259 | 0.6291 |
+ | 0.4439 | 4.0 | 13474 | 0.8894 | 0.0067 | 0.3598 | 0.0228 | -0.3531 | -470.6990 | -545.9932 | 0.2699 | 0.5815 |
+ | 0.4441 | 4.9999 | 16842 | 0.8881 | 0.0058 | 0.3569 | 0.0293 | -0.3511 | -470.7278 | -546.0020 | 0.2999 | 0.6028 |
+ | 0.4442 | 5.9991 | 20208 | 0.8873 | 0.0047 | 0.3539 | 0.0326 | -0.3493 | -470.7575 | -546.0133 | 0.3014 | 0.6045 |
+
+
+ ### Framework versions
+
+ - Transformers 4.41.0
+ - PyTorch 2.2.0
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
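Below is a minimal usage sketch, not part of the generated card: it assumes the checkpoint is published under the repo id `dmariko/SmolLM-360M-Instruct-dpo-16k` (substitute a local path if not) and uses the ChatML-style chat template shipped in `tokenizer_config.json` further down.

```python
# Hedged usage sketch; the Hub repo id below is an assumption, not confirmed by the card.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "dmariko/SmolLM-360M-Instruct-dpo-16k"  # assumed id; a local clone path also works
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

# Build a ChatML prompt via the bundled chat template and generate a reply.
messages = [{"role": "user", "content": "Explain what DPO fine-tuning does in one paragraph."}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
output_ids = model.generate(input_ids, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```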
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "HuggingFaceTB/SmolLM-360M-Instruct",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 960,
+   "initializer_range": 0.02,
+   "intermediate_size": 2560,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 15,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 5,
+   "pad_token_id": 2,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.41.0",
+   "use_cache": false,
+   "vocab_size": 49152
+ }
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "max_new_tokens": 40,
+   "pad_token_id": 2,
+   "transformers_version": "4.41.0"
+ }
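Note that this `generation_config.json` sets a repository-wide default of `max_new_tokens=40`, which `model.generate()` applies whenever the caller passes nothing; per-call arguments override it. A small sketch, reusing `model_id`, `model`, and `input_ids` from the usage snippet above:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained(model_id)  # model_id defined in the earlier sketch
print(gen_cfg.max_new_tokens)                         # 40: the default stored in this repo
# Per-call arguments take precedence over the stored defaults:
long_output = model.generate(input_ids, max_new_tokens=256)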
logs/evaluation_data/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e279d6438642c72a3f2bcb8a85ee7d146c084c3aee6bed8d67382dec817e8c2
+ size 4200008
logs/evaluation_data/dataset_info.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "builder_name": "parquet",
+   "citation": "",
+   "config_name": "default",
+   "dataset_name": "test-dpo-plaba",
+   "dataset_size": 4033369,
+   "description": "",
+   "download_checksums": {
+     "hf://datasets/dmariko/test-dpo-plaba@df3f28196a1828595fc34a68a7903693de3f4dac/data/train-00000-of-00001.parquet": {
+       "num_bytes": 1846723,
+       "checksum": null
+     }
+   },
+   "download_size": 1846723,
+   "features": {
+     "prompt": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "chosen": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "rejected": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "homepage": "",
+   "license": "",
+   "size_in_bytes": 5880092,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 4033369,
+       "num_examples": 921,
+       "dataset_name": "test-dpo-plaba"
+     }
+   },
+   "version": {
+     "version_str": "0.0.0",
+     "major": 0,
+     "minor": 0,
+     "patch": 0
+   }
+ }
logs/evaluation_data/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "9fbf7da8a292f424",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "train"
+ }
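`logs/evaluation_data` (above) and `logs/training_data` (further down) are standard `datasets` `save_to_disk` dumps: an Arrow shard plus `dataset_info.json` and `state.json`. A minimal sketch for reloading them from a local clone of this repository:

```python
# Reload the logged preference data; column names follow the DPO convention
# recorded in dataset_info.json: "prompt", "chosen", "rejected" (all strings).
from datasets import load_from_disk

eval_ds = load_from_disk("logs/evaluation_data")   # 921 examples (dmariko/test-dpo-plaba)
train_ds = load_from_disk("logs/training_data")    # 14,954 examples (dmariko/test-dpo)
print(train_ds)
print(eval_ds[0]["prompt"][:200])
```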
logs/training_args.json ADDED
@@ -0,0 +1 @@
+ {"output_dir": "SmolLM-360M-Instruct-dpo-16k", "overwrite_output_dir": false, "do_train": false, "do_eval": true, "do_predict": false, "eval_strategy": "epoch", "prediction_loss_only": false, "per_device_train_batch_size": 2, "per_device_eval_batch_size": 2, "per_gpu_train_batch_size": null, "per_gpu_eval_batch_size": null, "gradient_accumulation_steps": 2, "eval_accumulation_steps": null, "eval_delay": 0, "learning_rate": 5e-06, "weight_decay": 0.0, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-08, "max_grad_norm": 1.0, "num_train_epochs": 6, "max_steps": -1, "lr_scheduler_type": "cosine", "lr_scheduler_kwargs": {}, "warmup_ratio": 0.0, "warmup_steps": 2, "log_level": "passive", "log_level_replica": "warning", "log_on_each_node": true, "logging_dir": "SmolLM-360M-Instruct-dpo-16k/runs/Sep11_18-08-40_ip-172-16-93-96.eu-west-1.compute.internal", "logging_strategy": "epoch", "logging_first_step": false, "logging_steps": 1, "logging_nan_inf_filter": true, "save_strategy": "epoch", "save_steps": 500, "save_total_limit": null, "save_safetensors": true, "save_on_each_node": false, "save_only_model": false, "restore_callback_states_from_checkpoint": false, "no_cuda": false, "use_cpu": false, "use_mps_device": false, "seed": 42, "data_seed": null, "jit_mode_eval": false, "use_ipex": false, "bf16": true, "fp16": false, "fp16_opt_level": "O1", "half_precision_backend": "auto", "bf16_full_eval": false, "fp16_full_eval": false, "tf32": null, "local_rank": 0, "ddp_backend": null, "tpu_num_cores": null, "tpu_metrics_debug": false, "debug": [], "dataloader_drop_last": false, "eval_steps": null, "dataloader_num_workers": 0, "dataloader_prefetch_factor": null, "past_index": -1, "run_name": "SmolLM-360M-Instruct-dpo-16k", "disable_tqdm": false, "remove_unused_columns": false, "label_names": null, "load_best_model_at_end": false, "metric_for_best_model": null, "greater_is_better": null, "ignore_data_skip": false, "fsdp": [], "fsdp_min_num_params": 0, "fsdp_config": {"min_num_params": 0, "xla": false, "xla_fsdp_v2": false, "xla_fsdp_grad_ckpt": false}, "fsdp_transformer_layer_cls_to_wrap": null, "accelerator_config": {"split_batches": false, "dispatch_batches": null, "even_batches": true, "use_seedable_sampler": true, "non_blocking": false, "gradient_accumulation_kwargs": null}, "deepspeed": null, "label_smoothing_factor": 0.0, "optim": "paged_adamw_32bit", "optim_args": null, "adafactor": false, "group_by_length": false, "length_column_name": "length", "report_to": ["tensorboard"], "ddp_find_unused_parameters": null, "ddp_bucket_cap_mb": null, "ddp_broadcast_buffers": null, "dataloader_pin_memory": true, "dataloader_persistent_workers": false, "skip_memory_metrics": true, "use_legacy_prediction_loop": false, "push_to_hub": false, "resume_from_checkpoint": null, "hub_model_id": null, "hub_strategy": "every_save", "hub_token": "<HUB_TOKEN>", "hub_private_repo": false, "hub_always_push": false, "gradient_checkpointing": true, "gradient_checkpointing_kwargs": {"use_reentrant": false}, "include_inputs_for_metrics": false, "eval_do_concat_batches": true, "fp16_backend": "auto", "evaluation_strategy": null, "push_to_hub_model_id": null, "push_to_hub_organization": null, "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>", "mp_parameters": "", "auto_find_batch_size": false, "full_determinism": false, "torchdynamo": null, "ray_scope": "last", "ddp_timeout": 1800, "torch_compile": false, "torch_compile_backend": null, "torch_compile_mode": null, "dispatch_batches": null, "split_batches": null, 
"include_tokens_per_second": false, "include_num_input_tokens_seen": false, "neftune_noise_alpha": null, "optim_target_modules": null, "batch_eval_metrics": false}
logs/training_data/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2292d7ceaf1c87890c4be79a2be8e6e048d3117be8ab511d99495c947fa61385
+ size 47906296
logs/training_data/dataset_info.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "builder_name": "parquet",
+   "citation": "",
+   "config_name": "default",
+   "dataset_name": "test-dpo",
+   "dataset_size": 52930157,
+   "description": "",
+   "download_checksums": {
+     "hf://datasets/dmariko/test-dpo@73d3c2ff5fe5b5dab3d2019469a00d00bdfb0742/data/train-00000-of-00001.parquet": {
+       "num_bytes": 25553882,
+       "checksum": null
+     }
+   },
+   "download_size": 25553882,
+   "features": {
+     "prompt": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "chosen": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "rejected": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "homepage": "",
+   "license": "",
+   "size_in_bytes": 78484039,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 52930157,
+       "num_examples": 14954,
+       "dataset_name": "test-dpo"
+     }
+   },
+   "version": {
+     "version_str": "0.0.0",
+     "major": 0,
+     "minor": 0,
+     "patch": 0
+   }
+ }
logs/training_data/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "85df5ed416154a2b",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "train"
+ }
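For reference, the arguments logged in `logs/training_args.json` above, together with the prompt/chosen/rejected datasets just shown, correspond to a TRL DPO run. Below is a hedged reconstruction, not the author's actual script: it assumes a TRL release contemporary with Transformers 4.41 (roughly 0.8.x, where `DPOTrainer` still accepts `beta` and `tokenizer` directly), reuses `train_ds`/`eval_ds` from the earlier snippet, and fills in a placeholder `beta` because that value is not recorded in the logs.

```python
# Hedged sketch of a DPOTrainer setup consistent with logs/training_args.json.
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

base = "HuggingFaceTB/SmolLM-360M-Instruct"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

args = TrainingArguments(
    output_dir="SmolLM-360M-Instruct-dpo-16k",
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=2,
    learning_rate=5e-6,
    lr_scheduler_type="cosine",
    warmup_steps=2,
    num_train_epochs=6,
    bf16=True,
    gradient_checkpointing=True,
    optim="paged_adamw_32bit",
    eval_strategy="epoch",
    logging_strategy="epoch",
    save_strategy="epoch",
    seed=42,
    remove_unused_columns=False,
)

trainer = DPOTrainer(
    model=model,
    ref_model=None,          # TRL builds an implicit frozen reference model when None
    args=args,
    beta=0.1,                # assumed value; the DPO beta is not recorded in the logs
    train_dataset=train_ds,  # prompt/chosen/rejected datasets from the earlier snippet
    eval_dataset=eval_ds,
    tokenizer=tokenizer,
)
trainer.train()
```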
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23b41f9492973ff642a1599f3566393c47f227fc9c7c3cc79c90bf78b8997065
+ size 723674912
runs/Sep11_18-08-40_ip-172-16-93-96.eu-west-1.compute.internal/events.out.tfevents.1726078190.ip-172-16-93-96.eu-west-1.compute.internal.23166.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c17050487340751900e2b14589a7167045e458745e6eed7a57ad3710c7a14c7
+ size 13969
special_tokens_map.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": {
+     "content": "<|im_start|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|im_end|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,154 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<repo_name>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "<reponame>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "<file_sep>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": "<filename>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "7": {
+       "content": "<gh_stars>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "8": {
+       "content": "<issue_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "9": {
+       "content": "<issue_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "10": {
+       "content": "<issue_closed>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "11": {
+       "content": "<jupyter_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "12": {
+       "content": "<jupyter_text>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13": {
+       "content": "<jupyter_code>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "14": {
+       "content": "<jupyter_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "15": {
+       "content": "<jupyter_script>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "16": {
+       "content": "<empty_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": "<|im_start|>",
+   "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "model_max_length": 2048,
+   "pad_token": "<|im_end|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>",
+   "vocab_size": 49152
+ }
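The `chat_template` above is ChatML-style. A small sketch of what it renders to, reusing the tokenizer loaded in the usage snippet near the top (the message text is illustrative only):

```python
# Render the template to text without tokenizing, to see the exact prompt format
# the instruction-tuned base model expects.
messages = [{"role": "user", "content": "Hello!"}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```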
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e24f4543a30df0b19abb3a048eb5094ab6aae933a6073d6ec2773a336e3c9d28
+ size 5176
vocab.json ADDED
The diff for this file is too large to render. See raw diff