mipatov committed on
Commit
d1d9db8
1 Parent(s): a29b6e2
README.md ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ tags:
4
+ - generated_from_trainer
5
+ metrics:
6
+ - accuracy
7
+ model-index:
8
+ - name: opt-model
9
+ results: []
10
+ ---
11
+
12
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
+ should probably proofread and complete it, then remove this comment. -->
14
+
15
+ # opt-model
16
+
17
+ This model is a fine-tuned version of [facebook/opt-13b](https://huggingface.co/facebook/opt-13b) on an unknown dataset.
18
+ It achieves the following results on the evaluation set:
19
+ - Loss: 2.3965
20
+ - Accuracy: 0.5020
21
+
22
+ ## Model description
23
+
24
+ More information needed
25
+
26
+ ## Intended uses & limitations
27
+
28
+ More information needed
29
+
30
+ ## Training and evaluation data
31
+
32
+ More information needed
33
+
34
+ ## Training procedure
35
+
36
+ ### Training hyperparameters
37
+
38
+ The following hyperparameters were used during training:
39
+ - learning_rate: 1e-05
40
+ - train_batch_size: 24
41
+ - eval_batch_size: 24
42
+ - seed: 42
43
+ - distributed_type: multi-GPU
44
+ - num_devices: 3
45
+ - total_train_batch_size: 72
46
+ - total_eval_batch_size: 72
47
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
48
+ - lr_scheduler_type: linear
49
+ - num_epochs: 5.0
50
+
51
+ ### Training results
52
+
53
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
54
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
55
+ | 3.6363 | 1.0 | 3 | 3.2090 | 0.4082 |
56
+ | 2.8168 | 2.0 | 6 | 2.4805 | 0.4874 |
57
+ | 2.3529 | 3.0 | 9 | 2.4219 | 0.4915 |
58
+ | 2.1842 | 4.0 | 12 | 2.4023 | 0.4991 |
59
+ | 2.0765 | 5.0 | 15 | 2.3965 | 0.5020 |
60
+
61
+
62
+ ### Framework versions
63
+
64
+ - Transformers 4.20.1
65
+ - Pytorch 1.12.0+cu102
66
+ - Datasets 2.3.2
67
+ - Tokenizers 0.12.1
all_results.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "before_init_mem_cpu": 19409301504,
3
+ "before_init_mem_gpu": 514785792,
4
+ "epoch": 5.0,
5
+ "eval_accuracy": 0.5020384866275277,
6
+ "eval_loss": 2.396484375,
7
+ "eval_mem_cpu_alloc_delta": 1241088,
8
+ "eval_mem_cpu_peaked_delta": 0,
9
+ "eval_mem_gpu_alloc_delta": 0,
10
+ "eval_mem_gpu_peaked_delta": 13894116352,
11
+ "eval_runtime": 7.6383,
12
+ "eval_samples": 24,
13
+ "eval_samples_per_second": 3.142,
14
+ "eval_steps_per_second": 0.131,
15
+ "init_mem_cpu_alloc_delta": 20480,
16
+ "init_mem_cpu_peaked_delta": 0,
17
+ "init_mem_gpu_alloc_delta": 0,
18
+ "init_mem_gpu_peaked_delta": 0,
19
+ "perplexity": 10.984491067549964,
20
+ "train_loss": 2.6133464177449546,
21
+ "train_mem_cpu_alloc_delta": 87850864640,
22
+ "train_mem_cpu_peaked_delta": 4164583424,
23
+ "train_mem_gpu_alloc_delta": -456804352,
24
+ "train_mem_gpu_peaked_delta": 13894116352,
25
+ "train_runtime": 736.506,
26
+ "train_samples": 165,
27
+ "train_samples_per_second": 1.12,
28
+ "train_steps_per_second": 0.02
29
+ }
checkpoint-2/config.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "facebook/opt-13b",
3
+ "_remove_final_layer_norm": false,
4
+ "activation_dropout": 0.0,
5
+ "activation_function": "relu",
6
+ "architectures": [
7
+ "OPTForCausalLM"
8
+ ],
9
+ "attention_dropout": 0.0,
10
+ "bos_token_id": 2,
11
+ "do_layer_norm_before": true,
12
+ "dropout": 0.1,
13
+ "eos_token_id": 2,
14
+ "ffn_dim": 20480,
15
+ "hidden_size": 5120,
16
+ "init_std": 0.02,
17
+ "layerdrop": 0.0,
18
+ "max_position_embeddings": 2048,
19
+ "model_type": "opt",
20
+ "num_attention_heads": 40,
21
+ "num_hidden_layers": 40,
22
+ "output_projection": true,
23
+ "pad_token_id": 1,
24
+ "prefix": "</s>",
25
+ "torch_dtype": "float16",
26
+ "transformers_version": "4.20.1",
27
+ "use_cache": true,
28
+ "vocab_size": 50265,
29
+ "word_embed_proj_dim": 5120
30
+ }
checkpoint-2/global_step2/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f57b140e0944cc8b63af3723b415db6069ef6be3d49df869329c6981b28f2dff
3
+ size 250361
checkpoint-2/global_step2/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03aee2c6dbe6a211808185f77ae3bdae79004aabc33bdcc7fbabab08ba53dac8
3
+ size 19496779776
checkpoint-2/global_step2/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57b5e09f6f315cbda85befc546cba246c7d39dca55510eae78c0479aeb514d2d
3
+ size 250361
checkpoint-2/global_step2/zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0928b3add3e1426b289c7ea63bb131e7ea4616db7a4490d10239913eab160cc4
3
+ size 18584686592
checkpoint-2/global_step2/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:585a2b848e9d1373ef33676865ea87839fcddc069e49d350308683dae28a634c
3
+ size 250361
checkpoint-2/global_step2/zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20c5edb967b0d4f9eb1be96837b40f187d364dfc201154afc5f9da155e8c6712
3
+ size 19108249600
checkpoint-2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:761bc42254196f751d37cf2c3dd1128f531d990acd84e2a6dbb270c1085d7ead
3
+ size 25707087165
checkpoint-2/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "</s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<pad>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "</s>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
checkpoint-2/tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_prefix_space": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "</s>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "eos_token": {
13
+ "__type": "AddedToken",
14
+ "content": "</s>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ },
20
+ "errors": "replace",
21
+ "name_or_path": "facebook/opt-13b",
22
+ "pad_token": {
23
+ "__type": "AddedToken",
24
+ "content": "<pad>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "special_tokens_map_file": null,
31
+ "tokenizer_class": "GPT2Tokenizer",
32
+ "unk_token": {
33
+ "__type": "AddedToken",
34
+ "content": "</s>",
35
+ "lstrip": false,
36
+ "normalized": true,
37
+ "rstrip": false,
38
+ "single_word": false
39
+ }
40
+ }
checkpoint-2/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84c70db082549e01237f6482f16e9f3aa714da3c1bc6c9967f9370f573a4dc62
3
+ size 4335
checkpoint-2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "facebook/opt-13b",
3
+ "_remove_final_layer_norm": false,
4
+ "activation_dropout": 0.0,
5
+ "activation_function": "relu",
6
+ "architectures": [
7
+ "OPTForCausalLM"
8
+ ],
9
+ "attention_dropout": 0.0,
10
+ "bos_token_id": 2,
11
+ "do_layer_norm_before": true,
12
+ "dropout": 0.1,
13
+ "eos_token_id": 2,
14
+ "ffn_dim": 20480,
15
+ "hidden_size": 5120,
16
+ "init_std": 0.02,
17
+ "layerdrop": 0.0,
18
+ "max_position_embeddings": 2048,
19
+ "model_type": "opt",
20
+ "num_attention_heads": 40,
21
+ "num_hidden_layers": 40,
22
+ "output_projection": true,
23
+ "pad_token_id": 1,
24
+ "prefix": "</s>",
25
+ "torch_dtype": "float16",
26
+ "transformers_version": "4.20.1",
27
+ "use_cache": true,
28
+ "vocab_size": 50265,
29
+ "word_embed_proj_dim": 5120
30
+ }
eval_results.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 5.0,
3
+ "eval_accuracy": 0.5020384866275277,
4
+ "eval_loss": 2.396484375,
5
+ "eval_mem_cpu_alloc_delta": 1241088,
6
+ "eval_mem_cpu_peaked_delta": 0,
7
+ "eval_mem_gpu_alloc_delta": 0,
8
+ "eval_mem_gpu_peaked_delta": 13894116352,
9
+ "eval_runtime": 7.6383,
10
+ "eval_samples": 24,
11
+ "eval_samples_per_second": 3.142,
12
+ "eval_steps_per_second": 0.131,
13
+ "perplexity": 10.984491067549964
14
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bb0640265cd6ca8c05efaaee690c15cefa365e2b9372f4cec62588e86bb4d1e
3
+ size 25707087165
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "</s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<pad>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "</s>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_prefix_space": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "</s>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "eos_token": {
13
+ "__type": "AddedToken",
14
+ "content": "</s>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ },
20
+ "errors": "replace",
21
+ "name_or_path": "facebook/opt-13b",
22
+ "pad_token": {
23
+ "__type": "AddedToken",
24
+ "content": "<pad>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "special_tokens_map_file": null,
31
+ "tokenizer_class": "GPT2Tokenizer",
32
+ "unk_token": {
33
+ "__type": "AddedToken",
34
+ "content": "</s>",
35
+ "lstrip": false,
36
+ "normalized": true,
37
+ "rstrip": false,
38
+ "single_word": false
39
+ }
40
+ }
train_results.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "before_init_mem_cpu": 19409301504,
3
+ "before_init_mem_gpu": 514785792,
4
+ "epoch": 5.0,
5
+ "init_mem_cpu_alloc_delta": 20480,
6
+ "init_mem_cpu_peaked_delta": 0,
7
+ "init_mem_gpu_alloc_delta": 0,
8
+ "init_mem_gpu_peaked_delta": 0,
9
+ "train_loss": 2.6133464177449546,
10
+ "train_mem_cpu_alloc_delta": 87850864640,
11
+ "train_mem_cpu_peaked_delta": 4164583424,
12
+ "train_mem_gpu_alloc_delta": -456804352,
13
+ "train_mem_gpu_peaked_delta": 13894116352,
14
+ "train_runtime": 736.506,
15
+ "train_samples": 165,
16
+ "train_samples_per_second": 1.12,
17
+ "train_steps_per_second": 0.02
18
+ }
trainer_state.json ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 5.0,
5
+ "global_step": 15,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "learning_rate": 9.333333333333334e-06,
13
+ "loss": 3.6363,
14
+ "step": 3
15
+ },
16
+ {
17
+ "epoch": 1.0,
18
+ "eval_accuracy": 0.40818656229615136,
19
+ "eval_loss": 3.208984375,
20
+ "eval_runtime": 9.2632,
21
+ "eval_samples_per_second": 2.591,
22
+ "eval_steps_per_second": 0.108,
23
+ "step": 3
24
+ },
25
+ {
26
+ "epoch": 2.0,
27
+ "learning_rate": 7.333333333333333e-06,
28
+ "loss": 2.8168,
29
+ "step": 6
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "eval_accuracy": 0.4873613829093281,
34
+ "eval_loss": 2.48046875,
35
+ "eval_runtime": 7.2903,
36
+ "eval_samples_per_second": 3.292,
37
+ "eval_steps_per_second": 0.137,
38
+ "step": 6
39
+ },
40
+ {
41
+ "epoch": 3.0,
42
+ "learning_rate": 5.333333333333334e-06,
43
+ "loss": 2.3529,
44
+ "step": 9
45
+ },
46
+ {
47
+ "epoch": 3.0,
48
+ "eval_accuracy": 0.49151989562948467,
49
+ "eval_loss": 2.421875,
50
+ "eval_runtime": 7.8557,
51
+ "eval_samples_per_second": 3.055,
52
+ "eval_steps_per_second": 0.127,
53
+ "step": 9
54
+ },
55
+ {
56
+ "epoch": 4.0,
57
+ "learning_rate": 3.3333333333333333e-06,
58
+ "loss": 2.1842,
59
+ "step": 12
60
+ },
61
+ {
62
+ "epoch": 4.0,
63
+ "eval_accuracy": 0.4991030658838878,
64
+ "eval_loss": 2.40234375,
65
+ "eval_runtime": 7.3477,
66
+ "eval_samples_per_second": 3.266,
67
+ "eval_steps_per_second": 0.136,
68
+ "step": 12
69
+ },
70
+ {
71
+ "epoch": 5.0,
72
+ "learning_rate": 1.3333333333333334e-06,
73
+ "loss": 2.0765,
74
+ "step": 15
75
+ },
76
+ {
77
+ "epoch": 5.0,
78
+ "eval_accuracy": 0.5020384866275277,
79
+ "eval_loss": 2.396484375,
80
+ "eval_runtime": 7.3064,
81
+ "eval_samples_per_second": 3.285,
82
+ "eval_steps_per_second": 0.137,
83
+ "step": 15
84
+ },
85
+ {
86
+ "before_init_mem_cpu": 19409301504,
87
+ "before_init_mem_gpu": 514785792,
88
+ "epoch": 5.0,
89
+ "init_mem_cpu_alloc_delta": 20480,
90
+ "init_mem_cpu_peaked_delta": 0,
91
+ "init_mem_gpu_alloc_delta": 0,
92
+ "init_mem_gpu_peaked_delta": 0,
93
+ "step": 15,
94
+ "total_flos": 6182393610240.0,
95
+ "train_loss": 2.6133464177449546,
96
+ "train_mem_cpu_alloc_delta": 87850864640,
97
+ "train_mem_cpu_peaked_delta": 4164583424,
98
+ "train_mem_gpu_alloc_delta": -456804352,
99
+ "train_mem_gpu_peaked_delta": 13894116352,
100
+ "train_runtime": 736.506,
101
+ "train_samples_per_second": 1.12,
102
+ "train_steps_per_second": 0.02
103
+ }
104
+ ],
105
+ "max_steps": 15,
106
+ "num_train_epochs": 5,
107
+ "total_flos": 6182393610240.0,
108
+ "trial_name": null,
109
+ "trial_params": null
110
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76845b569c4dd3b62f36428c000bf88337c8a3be024d00a72965e01f994db56d
3
+ size 4335
vocab.json ADDED
The diff for this file is too large to render. See raw diff