ofirzaf committed
Commit 37cc1a8
Parent: f669621

Initial commit

README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ language: en
+ ---
+ # 80% 1x4 Block Sparse BERT-Large (uncased) Fine-Tuned on SQuADv1.1
+ This model is the result of fine-tuning a Prune OFA 80% 1x4 block sparse pre-trained BERT-Large, combined with knowledge distillation.
+ It yields the following results on the SQuADv1.1 development set:<br>
+ `{"exact_match": 84.673, "f1": 91.174}`
+
+ For further details, see our paper, [Prune Once for All: Sparse Pre-Trained Language Models](https://arxiv.org/abs/2111.05754), and our open-source implementation, available [here](https://github.com/IntelLabs/Model-Compression-Research-Package/tree/main/research/prune-once-for-all).
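Since this is a standard `BertForQuestionAnswering` checkpoint, it can be served through the Transformers `pipeline` API. A minimal sketch follows; the model ID in it is an assumption (substitute this repository's actual Hub ID or a local path to a checkout), and the question/context pair is purely illustrative.

```python
# Minimal QA inference sketch with the Transformers pipeline API.
# NOTE: the model ID below is an assumption; substitute this repository's
# actual Hub ID or a local path to the checked-out files.
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="Intel/bert-large-uncased-squadv1.1-sparse-80-1x4-block-pruneofa",  # assumed ID
)

result = qa(
    question="What is Prune OFA combined with during fine-tuning?",
    context=(
        "This model is the result of fine-tuning a Prune OFA 80% 1x4 block "
        "sparse pre-trained BERT-Large combined with knowledge distillation."
    ),
)
print(result)  # {'score': ..., 'start': ..., 'end': ..., 'answer': ...}
```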
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "/store/nosnap/results/mlm/wikipedia_bookcorpus/bert-large/block1x4-mag-80/",
+   "architectures": [
+     "BertForQuestionAnswering"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "keys_to_ignore_at_inference": [
+     "prediction_logits"
+   ],
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.6.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
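The configuration describes the standard BERT-Large geometry (24 layers, 1024 hidden, 16 heads). As a sanity check it can be loaded directly; a minimal sketch, assuming this `config.json` sits in the current working directory:

```python
# Minimal sketch: load the config above and confirm the BERT-Large geometry.
# Assumes config.json is in the current working directory.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")  # reads ./config.json
assert config.model_type == "bert"
assert config.hidden_size == 1024 and config.num_hidden_layers == 24
print(config.architectures)  # ['BertForQuestionAnswering']
```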
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5bcf983b9c11e7825de2bf6d7d63abf5adfeef476bfb2baf29b57e076262db77
+ size 1336543724
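The blob above is a Git LFS pointer, not the weights themselves; the real file is fetched separately (e.g. with `git lfs pull`) and can then be checked against the pointer's `oid` and `size`. A minimal sketch, assuming `pytorch_model.bin` has already been downloaded to the current directory:

```python
# Minimal sketch: verify a downloaded LFS object against its pointer metadata.
# Assumes pytorch_model.bin has already been fetched (e.g. via `git lfs pull`).
import hashlib
import os

EXPECTED_OID = "5bcf983b9c11e7825de2bf6d7d63abf5adfeef476bfb2baf29b57e076262db77"
EXPECTED_SIZE = 1336543724

path = "pytorch_model.bin"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```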
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-large-uncased"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff