Chua, Vui Seng committed on
Commit 7e13174
1 Parent(s): e714bb8

Initial model commit

.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *trainer_state.json filter=lfs diff=lfs merge=lfs -text
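The added pattern routes trainer_state.json through Git LFS alongside the existing archive and TensorBoard patterns. A minimal sketch of how such a pattern selects files, approximating gitattributes globbing with Python's fnmatch (the file list is just the JSON files added in this commit):

```python
from fnmatch import fnmatch

# Tail of the LFS pattern list as shown in the diff above; the full
# .gitattributes also covers other binary formats not shown here.
lfs_patterns = ["*.zip", "*.zstandard", "*tfevents*", "*trainer_state.json"]

# JSON files added in this commit.
files = ["all_results.json", "eval_results.json", "tokenizer_config.json",
         "train_results.json", "trainer_state.json"]

for name in files:
    via_lfs = any(fnmatch(name, pat) for pat in lfs_patterns)
    print(f"{name}: {'LFS pointer' if via_lfs else 'regular blob'}")
# Only trainer_state.json matches the newly added pattern.
```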
all_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "epoch": 10.0,
+   "eval_exact_match": 80.51087984862819,
+   "eval_f1": 87.99746814985123,
+   "eval_samples": 10784,
+   "train_loss": 15.741395791878693,
+   "train_runtime": 55394.6042,
+   "train_samples": 88524,
+   "train_samples_per_second": 15.981,
+   "train_steps_per_second": 0.999
+ }
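The throughput fields are consistent with the other entries; a quick check in Python (the effective batch size of ~16 is an inference from the samples/steps ratio, not a recorded value):

```python
# Recorded values from all_results.json.
train_samples = 88524
epochs = 10.0
train_runtime = 55394.6042  # seconds

print(train_samples * epochs / train_runtime)  # ~15.98 -> train_samples_per_second
print(15.981 / 0.999)                          # ~16.0  -> implied effective batch size
```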
compressed_graph.dot ADDED
The diff for this file is too large to render.
config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "bert-base-uncased",
+   "architectures": [
+     "NNCFNetwork"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.9.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
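The architectures field records NNCFNetwork, meaning the checkpoint was exported from an NNCF-wrapped model rather than a plain BertForQuestionAnswering. A minimal loading sketch with transformers, assuming the checkpoint path is a hypothetical local checkout of this repo; key-mismatch warnings are possible since the auto class resolves model_type "bert", not the NNCF wrapper:

```python
from transformers import AutoConfig, AutoModelForQuestionAnswering

ckpt = "./bert-base-uncased-squadv1-movement-sparsity"  # hypothetical local path

config = AutoConfig.from_pretrained(ckpt)  # parses the config.json above
# model_type "bert" resolves to BertForQuestionAnswering; warnings about
# unexpected NNCF-specific keys in the state dict are possible, and the
# learned sparsity only holds if the masks were folded into the weights.
model = AutoModelForQuestionAnswering.from_pretrained(ckpt, config=config)
```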
eval_results.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "epoch": 10.0,
+   "eval_exact_match": 80.51087984862819,
+   "eval_f1": 87.99746814985123,
+   "eval_samples": 10784
+ }
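These are SQuAD-style exact-match/F1 scores. For reference, a minimal sketch of computing the same metrics with the Hugging Face evaluate package (the prediction/reference records below are placeholders, not from this run):

```python
import evaluate  # pip install evaluate

squad = evaluate.load("squad")
predictions = [{"id": "q0", "prediction_text": "movement pruning"}]
references = [{"id": "q0",
               "answers": {"text": ["movement pruning"], "answer_start": [0]}}]
print(squad.compute(predictions=predictions, references=references))
# {'exact_match': 100.0, 'f1': 100.0}; this model reports 80.51 / 88.00 above.
```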
linear_layer_sparse_stats_total_30M_65.1_relative_sparsity.csv ADDED
@@ -0,0 +1,73 @@
+ ,linear_id,shape,param_count,nnz_count
+ 0,bert.encoder.layer.0.attention.self.query,"[768, 768]",589824,36266
+ 1,bert.encoder.layer.0.attention.self.key,"[768, 768]",589824,28736
+ 2,bert.encoder.layer.0.attention.self.value,"[768, 768]",589824,62217
+ 3,bert.encoder.layer.0.attention.output.dense,"[768, 768]",589824,80877
+ 4,bert.encoder.layer.0.intermediate.dense,"[3072, 768]",2359296,1391950
+ 5,bert.encoder.layer.0.output.dense,"[768, 3072]",2359296,1304418
+ 6,bert.encoder.layer.1.attention.self.query,"[768, 768]",589824,88856
+ 7,bert.encoder.layer.1.attention.self.key,"[768, 768]",589824,80852
+ 8,bert.encoder.layer.1.attention.self.value,"[768, 768]",589824,89520
+ 9,bert.encoder.layer.1.attention.output.dense,"[768, 768]",589824,99229
+ 10,bert.encoder.layer.1.intermediate.dense,"[3072, 768]",2359296,1448241
+ 11,bert.encoder.layer.1.output.dense,"[768, 3072]",2359296,1298451
+ 12,bert.encoder.layer.2.attention.self.query,"[768, 768]",589824,111820
+ 13,bert.encoder.layer.2.attention.self.key,"[768, 768]",589824,105781
+ 14,bert.encoder.layer.2.attention.self.value,"[768, 768]",589824,107982
+ 15,bert.encoder.layer.2.attention.output.dense,"[768, 768]",589824,107182
+ 16,bert.encoder.layer.2.intermediate.dense,"[3072, 768]",2359296,1478488
+ 17,bert.encoder.layer.2.output.dense,"[768, 3072]",2359296,1324632
+ 18,bert.encoder.layer.3.attention.self.query,"[768, 768]",589824,119244
+ 19,bert.encoder.layer.3.attention.self.key,"[768, 768]",589824,121415
+ 20,bert.encoder.layer.3.attention.self.value,"[768, 768]",589824,144196
+ 21,bert.encoder.layer.3.attention.output.dense,"[768, 768]",589824,141057
+ 22,bert.encoder.layer.3.intermediate.dense,"[3072, 768]",2359296,1477159
+ 23,bert.encoder.layer.3.output.dense,"[768, 3072]",2359296,1308149
+ 24,bert.encoder.layer.4.attention.self.query,"[768, 768]",589824,116629
+ 25,bert.encoder.layer.4.attention.self.key,"[768, 768]",589824,115443
+ 26,bert.encoder.layer.4.attention.self.value,"[768, 768]",589824,153857
+ 27,bert.encoder.layer.4.attention.output.dense,"[768, 768]",589824,153726
+ 28,bert.encoder.layer.4.intermediate.dense,"[3072, 768]",2359296,1467353
+ 29,bert.encoder.layer.4.output.dense,"[768, 3072]",2359296,1275091
+ 30,bert.encoder.layer.5.attention.self.query,"[768, 768]",589824,99164
+ 31,bert.encoder.layer.5.attention.self.key,"[768, 768]",589824,106720
+ 32,bert.encoder.layer.5.attention.self.value,"[768, 768]",589824,151873
+ 33,bert.encoder.layer.5.attention.output.dense,"[768, 768]",589824,146952
+ 34,bert.encoder.layer.5.intermediate.dense,"[3072, 768]",2359296,1483651
+ 35,bert.encoder.layer.5.output.dense,"[768, 3072]",2359296,1268211
+ 36,bert.encoder.layer.6.attention.self.query,"[768, 768]",589824,99027
+ 37,bert.encoder.layer.6.attention.self.key,"[768, 768]",589824,108498
+ 38,bert.encoder.layer.6.attention.self.value,"[768, 768]",589824,151369
+ 39,bert.encoder.layer.6.attention.output.dense,"[768, 768]",589824,138662
+ 40,bert.encoder.layer.6.intermediate.dense,"[3072, 768]",2359296,1413626
+ 41,bert.encoder.layer.6.output.dense,"[768, 3072]",2359296,1178877
+ 42,bert.encoder.layer.7.attention.self.query,"[768, 768]",589824,72908
+ 43,bert.encoder.layer.7.attention.self.key,"[768, 768]",589824,87122
+ 44,bert.encoder.layer.7.attention.self.value,"[768, 768]",589824,134079
+ 45,bert.encoder.layer.7.attention.output.dense,"[768, 768]",589824,117207
+ 46,bert.encoder.layer.7.intermediate.dense,"[3072, 768]",2359296,1240812
+ 47,bert.encoder.layer.7.output.dense,"[768, 3072]",2359296,1039716
+ 48,bert.encoder.layer.8.attention.self.query,"[768, 768]",589824,87546
+ 49,bert.encoder.layer.8.attention.self.key,"[768, 768]",589824,95542
+ 50,bert.encoder.layer.8.attention.self.value,"[768, 768]",589824,147149
+ 51,bert.encoder.layer.8.attention.output.dense,"[768, 768]",589824,121221
+ 52,bert.encoder.layer.8.intermediate.dense,"[3072, 768]",2359296,991806
+ 53,bert.encoder.layer.8.output.dense,"[768, 3072]",2359296,805916
+ 54,bert.encoder.layer.9.attention.self.query,"[768, 768]",589824,94420
+ 55,bert.encoder.layer.9.attention.self.key,"[768, 768]",589824,95694
+ 56,bert.encoder.layer.9.attention.self.value,"[768, 768]",589824,58481
+ 57,bert.encoder.layer.9.attention.output.dense,"[768, 768]",589824,46748
+ 58,bert.encoder.layer.9.intermediate.dense,"[3072, 768]",2359296,550773
+ 59,bert.encoder.layer.9.output.dense,"[768, 3072]",2359296,425540
+ 60,bert.encoder.layer.10.attention.self.query,"[768, 768]",589824,62325
+ 61,bert.encoder.layer.10.attention.self.key,"[768, 768]",589824,64069
+ 62,bert.encoder.layer.10.attention.self.value,"[768, 768]",589824,31075
+ 63,bert.encoder.layer.10.attention.output.dense,"[768, 768]",589824,22579
+ 64,bert.encoder.layer.10.intermediate.dense,"[3072, 768]",2359296,361008
+ 65,bert.encoder.layer.10.output.dense,"[768, 3072]",2359296,240192
+ 66,bert.encoder.layer.11.attention.self.query,"[768, 768]",589824,20394
+ 67,bert.encoder.layer.11.attention.self.key,"[768, 768]",589824,25082
+ 68,bert.encoder.layer.11.attention.self.value,"[768, 768]",589824,14505
+ 69,bert.encoder.layer.11.attention.output.dense,"[768, 768]",589824,6766
+ 70,bert.encoder.layer.11.intermediate.dense,"[3072, 768]",2359296,296386
+ 71,bert.encoder.layer.11.output.dense,"[768, 3072]",2359296,119879
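The filename encodes the totals: roughly 30M nonzero weights remain across the 72 pruned linear layers, i.e. about 65.1% relative sparsity. A minimal sketch reproducing both figures from the CSV with pandas:

```python
import pandas as pd

df = pd.read_csv(
    "linear_layer_sparse_stats_total_30M_65.1_relative_sparsity.csv",
    index_col=0)  # first column is an unnamed row index

total_params = df["param_count"].sum()  # 72 linears of [768,768] or [3072,768]
total_nnz = df["nnz_count"].sum()       # ~30M nonzero weights remain
rel_sparsity = 1 - total_nnz / total_params

print(f"params={total_params:,} nnz={total_nnz:,} sparsity={rel_sparsity:.1%}")
# Expected from the filename: nnz ~= 30M and sparsity ~= 65.1%.
```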
nncf_bert_config_squad_mvnt_pruning-distill-run9.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "input_info": [
+     {
+       "sample_size": [1, 384],
+       "type": "long"
+     },
+     {
+       "sample_size": [1, 384],
+       "type": "long"
+     },
+     {
+       "sample_size": [1, 384],
+       "type": "long"
+     }
+   ],
+   "compression": {
+     "algorithm": "movement_sparsity",
+     "params": {
+       "schedule": "poly_threshold",
+       "power": 3,
+       "init_importance_threshold": 0.0,
+       "final_importance_threshold": 0.1,
+       "warmup_start_epoch": 1,
+       "warmup_end_epoch": 2.0,
+       "steps_per_epoch": 5533,
+       "regu_final_lambda": 400,
+       "update_per_optimizer_step": true
+     },
+     "ignored_scopes": ["{re}.*NNCFEmbedding", "{re}.*qa_outputs*"]
+   }
+ }
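For context, steps_per_epoch of 5533 matches the 88,524 training samples at an effective batch size of 16. A minimal sketch of applying this config, assuming an NNCF build that registers the movement_sparsity algorithm (it was experimental in this period) under the standard create_compressed_model entry point:

```python
from transformers import AutoModelForQuestionAnswering
from nncf import NNCFConfig
from nncf.torch import create_compressed_model

model = AutoModelForQuestionAnswering.from_pretrained("bert-base-uncased")
nncf_config = NNCFConfig.from_json(
    "nncf_bert_config_squad_mvnt_pruning-distill-run9.json")

# Wraps the model as an NNCFNetwork (cf. "architectures" in config.json); the
# returned controller exposes the scheduled movement-regularization loss term
# that the "params" block above configures.
compression_ctrl, model = create_compressed_model(model, nncf_config)
```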
original_graph.dot ADDED
The diff for this file is too large to render.
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:706d7824866e777c580143b51c6010aca9a77f5724dec1f976f15510173db4e8
+ size 435643185
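This is a Git LFS pointer, not the model weights themselves. A minimal sketch of parsing such a pointer and verifying a downloaded blob against it (file paths are hypothetical):

```python
import hashlib

def read_lfs_pointer(path):
    # Pointer files are "key value" lines: version, oid sha256:<hex>, size <bytes>.
    fields = dict(line.strip().split(" ", 1) for line in open(path))
    algo, digest = fields["oid"].split(":")
    return algo, digest, int(fields["size"])

algo, digest, size = read_lfs_pointer("pytorch_model.bin")  # the 3-line file above
blob = open("pytorch_model.bin.resolved", "rb").read()      # hypothetical download
assert len(blob) == size                                    # 435,643,185 here
assert hashlib.new(algo, blob).hexdigest() == digest
```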
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tf_eval_results.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "exact_match": 43.02743614001892,
+   "f1": 51.430003076867955
+ }
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d825cfde516847cfff070965312b341e122f19faf89888533d52ab9dd8a7115
+ size 435842064
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-base-uncased", "tokenizer_class": "BertTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 10.0,
+   "train_loss": 15.741395791878693,
+   "train_runtime": 55394.6042,
+   "train_samples": 88524,
+   "train_samples_per_second": 15.981,
+   "train_steps_per_second": 0.999
+ }
trainer_state.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:951fc3fabed8ef6b47a2cddd39635ddcf7cf2450527af96980acd6394266fb56
+ size 31521946
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4bd0b5759a7fbd91522a4658a06a58a8d60d3fdf8ff65b1198b488ac4bf9d161
+ size 3055
vocab.txt ADDED
The diff for this file is too large to render.