Ar4l committed on
Commit 83b0efe
1 parent: ebc2644

Upload folder using huggingface_hub

all_results.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "epoch": 5.0,
+   "eval_accuracy": 0.6153846383094788,
+   "eval_f1": 0.47368421052631576,
+   "eval_loss": 0.6848027110099792,
+   "eval_mcc": 0.17256712249065836,
+   "eval_runtime": 0.1138,
+   "eval_samples": 52,
+   "eval_samples_per_second": 456.859,
+   "eval_steps_per_second": 61.5,
+   "total_flos": 163423735633920.0,
+   "train_loss": 0.7042514474051339,
+   "train_runtime": 33.2199,
+   "train_samples": 554,
+   "train_samples_per_second": 333.536,
+   "train_steps_per_second": 42.143
+ }
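The throughput fields above are derivable from the sample count and runtime. A minimal sanity check in Python (assumes all_results.json has been downloaded to the working directory):

```python
import json

with open("all_results.json") as f:
    results = json.load(f)

# samples / runtime should reproduce the reported samples-per-second,
# up to rounding of the reported runtime (0.1138 s is itself rounded).
derived = results["eval_samples"] / results["eval_runtime"]  # 52 / 0.1138 ≈ 456.9
assert abs(derived - results["eval_samples_per_second"]) < 1.0
print(f"derived {derived:.1f} vs reported {results['eval_samples_per_second']}")
```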
checkpoint-140/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "/home/ubuntu/utah/babylm-24/data/training/models/10M_babylm_ascii/SPM-Unigram_6144/DebertaV2-Base-10M_babylm-A",
+   "architectures": [
+     "DebertaV2ForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "label2id": {
+     "0": 0,
+     "1": 1
+   },
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 8,
+   "pad_token_id": 3,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": null,
+   "position_biased_input": true,
+   "relative_attention": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "type_vocab_size": 0,
+   "vocab_size": 6144
+ }
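This config describes an 8-layer, 768-hidden DeBERTaV2 sequence classifier with a 6144-entry vocabulary. A minimal sketch of loading it with transformers (the ./checkpoint-140 path is assumed, standing in for wherever this folder is downloaded):

```python
from transformers import DebertaV2Config, DebertaV2ForSequenceClassification

# Read the committed config.json from the checkpoint directory.
config = DebertaV2Config.from_pretrained("./checkpoint-140")
print(config.num_hidden_layers, config.hidden_size, config.vocab_size)  # 8 768 6144

# from_pretrained on the directory also loads model.safetensors;
# instantiating from the config alone would give random weights.
model = DebertaV2ForSequenceClassification.from_pretrained("./checkpoint-140")
```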
checkpoint-140/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4be62a77ab9dedd098777ad54755f3a8d9eb3e26d6b7474ff9b99866cc9718c
+ size 174103504
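The weights are stored as a Git LFS pointer: spec version, sha256 oid, and byte size. A minimal sketch for verifying a downloaded file against such a pointer (file path assumed):

```python
import hashlib

def verify_lfs(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Hash the file in 1 MiB chunks and compare digest and size to the pointer."""
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_sha256 and size == expected_size

print(verify_lfs(
    "checkpoint-140/model.safetensors",
    "c4be62a77ab9dedd098777ad54755f3a8d9eb3e26d6b7474ff9b99866cc9718c",
    174103504,
))
```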
checkpoint-140/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:807a6867e65c07bd7f0b0feed36bff29ed7574dd35edcb7ae3696b5e9f02f01f
+ size 348288250
checkpoint-140/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ef6a994db5bb1ef20bc55033427d0b425bf9998852b81744fdc8bb4262e3d6d
+ size 14244
checkpoint-140/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50792edf44512d0536e32cea049bc3f5b58cbcd3e88ef4b503aa86670047127c
+ size 1064
checkpoint-140/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-140/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-140/tokenizer_config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "[PAR]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": "[TAB]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "tokenizer_class": "PreTrainedTokenizerFast"
+ }
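Since tokenizer_class is PreTrainedTokenizerFast, the checkpoint directory loads directly with AutoTokenizer. A minimal sketch (local path assumed):

```python
from transformers import AutoTokenizer

# tokenizer.json plus tokenizer_config.json in the checkpoint dir are enough.
tokenizer = AutoTokenizer.from_pretrained("./checkpoint-140")
enc = tokenizer("The trophy didn't fit in the suitcase.", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"][0]))
```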
checkpoint-140/trainer_state.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "best_metric": 0.47368421052631576,
+   "best_model_checkpoint": "/home/ubuntu/utah/babylm-24/src/evaluation/results/finetune/DebertaV2-Base-10M_babylm-A/wsc/checkpoint-140",
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 140,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.6153846383094788,
+       "eval_f1": 0.0,
+       "eval_loss": 0.6650720834732056,
+       "eval_mcc": 0.0,
+       "eval_runtime": 0.1205,
+       "eval_samples_per_second": 431.51,
+       "eval_steps_per_second": 58.088,
+       "step": 70
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.6153846383094788,
+       "eval_f1": 0.47368421052631576,
+       "eval_loss": 0.6848027110099792,
+       "eval_mcc": 0.17256712249065836,
+       "eval_runtime": 0.096,
+       "eval_samples_per_second": 541.411,
+       "eval_steps_per_second": 72.882,
+       "step": 140
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 1400,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 3,
+         "early_stopping_threshold": 0.001
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 0
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 65369494253568.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
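This state file records the best eval_f1 seen so far (0.4737 at step 140) and the checkpoint that produced it. A minimal sketch for inspecting those fields (local path assumed):

```python
import json

with open("checkpoint-140/trainer_state.json") as f:
    state = json.load(f)

# best_metric tracks eval_f1 here; best_model_checkpoint points at step 140.
print(state["best_metric"], state["best_model_checkpoint"])
for entry in state["log_history"]:
    print(entry["epoch"], entry.get("eval_f1"))
```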
checkpoint-140/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d0fbc45d3adb0d70b538cfee67e5365bd5c29a4be56d5a6c0bc6f25d0772611
+ size 5368
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "/home/ubuntu/utah/babylm-24/data/training/models/10M_babylm_ascii/SPM-Unigram_6144/DebertaV2-Base-10M_babylm-A",
+   "architectures": [
+     "DebertaV2ForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "label2id": {
+     "0": 0,
+     "1": 1
+   },
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 8,
+   "pad_token_id": 3,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": null,
+   "position_biased_input": true,
+   "relative_attention": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "type_vocab_size": 0,
+   "vocab_size": 6144
+ }
eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "epoch": 5.0,
+   "eval_accuracy": 0.6153846383094788,
+   "eval_f1": 0.47368421052631576,
+   "eval_loss": 0.6848027110099792,
+   "eval_mcc": 0.17256712249065836,
+   "eval_runtime": 0.1138,
+   "eval_samples": 52,
+   "eval_samples_per_second": 456.859,
+   "eval_steps_per_second": 61.5
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4be62a77ab9dedd098777ad54755f3a8d9eb3e26d6b7474ff9b99866cc9718c
+ size 174103504
predictions.txt ADDED
@@ -0,0 +1,53 @@
+ index prediction
+ 0 1
+ 1 0
+ 2 0
+ 3 0
+ 4 1
+ 5 0
+ 6 0
+ 7 1
+ 8 0
+ 9 0
+ 10 1
+ 11 1
+ 12 0
+ 13 1
+ 14 0
+ 15 1
+ 16 1
+ 17 0
+ 18 0
+ 19 0
+ 20 0
+ 21 0
+ 22 1
+ 23 0
+ 24 1
+ 25 0
+ 26 1
+ 27 0
+ 28 0
+ 29 0
+ 30 1
+ 31 1
+ 32 0
+ 33 0
+ 34 1
+ 35 1
+ 36 0
+ 37 0
+ 38 1
+ 39 1
+ 40 0
+ 41 0
+ 42 0
+ 43 0
+ 44 0
+ 45 0
+ 46 0
+ 47 1
+ 48 0
+ 49 0
+ 50 0
+ 51 0
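predictions.txt is a whitespace-separated file: a header row, then 52 index/prediction pairs. A minimal parse (local path assumed):

```python
# Parse the space-separated predictions file into {index: label}.
with open("predictions.txt") as f:
    header = f.readline().split()  # ["index", "prediction"]
    preds = {int(i): int(p) for i, p in (line.split() for line in f)}

print(len(preds), sum(preds.values()))  # 52 rows; count of positive labels
```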
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "[PAR]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": "[TAB]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "tokenizer_class": "PreTrainedTokenizerFast"
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 5.0,
+   "total_flos": 163423735633920.0,
+   "train_loss": 0.7042514474051339,
+   "train_runtime": 33.2199,
+   "train_samples": 554,
+   "train_samples_per_second": 333.536,
+   "train_steps_per_second": 42.143
+ }
trainer_state.json ADDED
@@ -0,0 +1,106 @@
+ {
+   "best_metric": 0.47368421052631576,
+   "best_model_checkpoint": "/home/ubuntu/utah/babylm-24/src/evaluation/results/finetune/DebertaV2-Base-10M_babylm-A/wsc/checkpoint-140",
+   "epoch": 5.0,
+   "eval_steps": 500,
+   "global_step": 350,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.6153846383094788,
+       "eval_f1": 0.0,
+       "eval_loss": 0.6650720834732056,
+       "eval_mcc": 0.0,
+       "eval_runtime": 0.1205,
+       "eval_samples_per_second": 431.51,
+       "eval_steps_per_second": 58.088,
+       "step": 70
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.6153846383094788,
+       "eval_f1": 0.47368421052631576,
+       "eval_loss": 0.6848027110099792,
+       "eval_mcc": 0.17256712249065836,
+       "eval_runtime": 0.096,
+       "eval_samples_per_second": 541.411,
+       "eval_steps_per_second": 72.882,
+       "step": 140
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.5961538553237915,
+       "eval_f1": 0.16,
+       "eval_loss": 0.6839962005615234,
+       "eval_mcc": 0.010314212462587935,
+       "eval_runtime": 0.0965,
+       "eval_samples_per_second": 539.131,
+       "eval_steps_per_second": 72.575,
+       "step": 210
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.6153846383094788,
+       "eval_f1": 0.0,
+       "eval_loss": 0.6632354259490967,
+       "eval_mcc": 0.0,
+       "eval_runtime": 0.0974,
+       "eval_samples_per_second": 533.999,
+       "eval_steps_per_second": 71.884,
+       "step": 280
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.6153846383094788,
+       "eval_f1": 0.0,
+       "eval_loss": 0.6646058559417725,
+       "eval_mcc": 0.0,
+       "eval_runtime": 0.0979,
+       "eval_samples_per_second": 530.989,
+       "eval_steps_per_second": 71.479,
+       "step": 350
+     },
+     {
+       "epoch": 5.0,
+       "step": 350,
+       "total_flos": 163423735633920.0,
+       "train_loss": 0.7042514474051339,
+       "train_runtime": 33.2199,
+       "train_samples_per_second": 333.536,
+       "train_steps_per_second": 42.143
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 1400,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 3,
+         "early_stopping_threshold": 0.001
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 0
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 163423735633920.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
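This final state explains the early exit: training was configured for 20 epochs (max_steps 1400) but stopped at epoch 5 because eval_f1 never beat the epoch-2 best by more than the 0.001 threshold for 3 consecutive evaluations, so EarlyStoppingCallback set should_training_stop. A simplified sketch of that patience rule (not the Trainer's exact implementation):

```python
# Simplified early-stopping rule: stop after `patience` evals without
# improving the best metric by more than `threshold` (greater-is-better).
def should_stop(history, patience=3, threshold=0.001):
    best, counter = float("-inf"), 0
    for metric in history:
        if metric > best + threshold:
            best, counter = metric, 0
        else:
            counter += 1
        if counter >= patience:
            return True
    return False

# eval_f1 per epoch from this run's log_history:
print(should_stop([0.0, 0.47368421052631576, 0.16, 0.0, 0.0]))  # True
```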
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d0fbc45d3adb0d70b538cfee67e5365bd5c29a4be56d5a6c0bc6f25d0772611
+ size 5368