pkarypis committed on
Commit
db9e4d7
1 Parent(s): f1aabf4

Model save

README.md ADDED
@@ -0,0 +1,66 @@
+ ---
+ license: other
+ base_model: facebook/opt-125m
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ datasets:
+ - generator
+ model-index:
+ - name: opt-125m-sft
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # opt-125m-sft
+
+ This model is a fine-tuned version of [facebook/opt-125m](https://huggingface.co/facebook/opt-125m) on the generator dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.9307
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 32
+ - total_train_batch_size: 512
+ - total_eval_batch_size: 512
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 1.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 2.5574        | 1.0   | 253  | 1.9307          |
+
+
+ ### Framework versions
+
+ - Transformers 4.36.2
+ - Pytorch 2.0.1+cu117
+ - Datasets 2.14.5
+ - Tokenizers 0.15.0
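The card above lists hyperparameters but no script. Below is a hedged sketch of a TRL SFT setup consistent with those values; it is a reconstruction, not the author's actual code. The training data is recorded only under the placeholder name "generator", so a dummy dataset stands in, and the 32-GPU launch (e.g. via `torchrun` or `accelerate`) is omitted.

```python
# Hedged reconstruction of the training setup from the card's
# hyperparameters -- NOT the author's script. The real dataset is
# unknown; a toy one stands in. Adam betas/epsilon match the listed
# values because they are the TrainingArguments defaults.
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import SFTTrainer

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")

train_ds = Dataset.from_dict({"text": ["Hello world."] * 64})  # stand-in data

args = TrainingArguments(
    output_dir="opt-125m-sft",
    learning_rate=2e-5,
    per_device_train_batch_size=16,   # x 32 devices = total batch 512
    per_device_eval_batch_size=16,
    num_train_epochs=1.0,
    lr_scheduler_type="cosine",
    seed=42,
    fp16=True,                        # "Native AMP" mixed precision (needs GPU)
)

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    args=args,
    train_dataset=train_ds,
    dataset_text_field="text",
    max_seq_length=2048,              # matches model_max_length below
    packing=True,                     # assumption, consistent with the
)                                     # "generator" dataset naming
trainer.train()
```

Note that the listed total_train_batch_size of 512 is simply the per-device batch of 16 times 32 devices; no gradient accumulation is implied.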
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "epoch": 1.0,
+   "eval_loss": 1.9306640625,
+   "eval_runtime": 3.5411,
+   "eval_samples": 23110,
+   "eval_samples_per_second": 4032.407,
+   "eval_steps_per_second": 7.907,
+   "train_loss": 2.057099233031744,
+   "train_runtime": 133.17,
+   "train_samples": 207865,
+   "train_samples_per_second": 969.4,
+   "train_steps_per_second": 1.9
+ }
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "facebook/opt-125m",
+   "_remove_final_layer_norm": false,
+   "activation_dropout": 0.0,
+   "activation_function": "relu",
+   "architectures": [
+     "OPTForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "do_layer_norm_before": true,
+   "dropout": 0.1,
+   "enable_bias": true,
+   "eos_token_id": 2,
+   "ffn_dim": 3072,
+   "hidden_size": 768,
+   "init_std": 0.02,
+   "layer_norm_elementwise_affine": true,
+   "layerdrop": 0.0,
+   "max_position_embeddings": 2048,
+   "model_type": "opt",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "prefix": "</s>",
+   "torch_dtype": "float16",
+   "transformers_version": "4.36.2",
+   "use_cache": false,
+   "vocab_size": 50272,
+   "word_embed_proj_dim": 768
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.0,
+   "eval_loss": 1.9306640625,
+   "eval_runtime": 3.5411,
+   "eval_samples": 23110,
+   "eval_samples_per_second": 4032.407,
+   "eval_steps_per_second": 7.907
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 2,
+   "eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.36.2"
+ }
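This file only pins special-token ids and provenance; `generate()` picks these defaults up automatically. A quick inspection sketch, assuming the checkpoint is published as `pkarypis/opt-125m-sft` (the Hub repo id is not stated in this commit):

```python
# Inspect the generation defaults shipped with the checkpoint.
# The repo id is an assumption inferred from committer and model name.
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("pkarypis/opt-125m-sft")
print(gen.bos_token_id, gen.eos_token_id, gen.pad_token_id)  # 2 2 1
gen.max_new_tokens = 64  # per-call overrides are still possible
```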
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcf062b9da04804fe93c1d7be2bce6fcfc44260752a928b13655fe82de6930fc
+ size 250500968
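This is a Git LFS pointer, not the fp16 weights themselves (~250 MB); `from_pretrained` resolves it transparently when loading from the Hub. A minimal inference sketch, again assuming the repo id `pkarypis/opt-125m-sft`:

```python
# Minimal inference sketch. The repo id is an assumption; substitute
# the actual Hub path of this checkpoint.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "pkarypis/opt-125m-sft"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)

inputs = tokenizer("The OPT family of models", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```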
runs/Jan03_20-40-29_aga39/events.out.tfevents.1704336046.aga39.159199.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:632163b641bd0a48c7f0a0081eb4609b7e9ca9ec8c329e574a3dcf3e2ff05b14
+ size 5258
runs/Jan03_20-40-29_aga39/events.out.tfevents.1704336183.aga39.159199.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a67da6a0462a3d4fb46124150999a5e55d9db5de58d5e4a3bc1c90485a1ff9a2
+ size 359
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "add_bos_token": true,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "</s>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "</s>",
+   "errors": "replace",
+   "model_max_length": 2048,
+   "pad_token": "<pad>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "</s>"
+ }
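The `chat_template` above is the Zephyr-style template (`<|system|>`/`<|user|>`/`<|assistant|>` turns, each terminated by `eos_token`). A sketch of rendering it with the standard `apply_chat_template` API, repo id again assumed:

```python
# Render the chat template stored in tokenizer_config.json.
# The repo id is an assumption; substitute the actual Hub path.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("pkarypis/opt-125m-sft")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize OPT-125m in one sentence."},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# Expected shape (whitespace approximate):
# <|system|>
# You are a helpful assistant.</s>
# <|user|>
# Summarize OPT-125m in one sentence.</s>
# <|assistant|>
```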
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.0,
+   "train_loss": 2.057099233031744,
+   "train_runtime": 133.17,
+   "train_samples": 207865,
+   "train_samples_per_second": 969.4,
+   "train_steps_per_second": 1.9
+ }
trainer_state.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 253,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0,
+       "learning_rate": 2e-05,
+       "loss": 2.5574,
+       "step": 1
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 1.9306640625,
+       "eval_runtime": 4.0635,
+       "eval_samples_per_second": 3513.941,
+       "eval_steps_per_second": 6.891,
+       "step": 253
+     },
+     {
+       "epoch": 1.0,
+       "step": 253,
+       "total_flos": 180169583099904.0,
+       "train_loss": 2.057099233031744,
+       "train_runtime": 133.17,
+       "train_samples_per_second": 969.4,
+       "train_steps_per_second": 1.9
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 253,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 500,
+   "total_flos": 180169583099904.0,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6e10f054ef00b6c605d1341ff3f7de217685c07dabcaf4e10e90c397eb42835
+ size 5307
vocab.json ADDED
The diff for this file is too large to render. See raw diff