calpt committed on
Commit
cd84d15
1 Parent(s): 673acde

Initial version.

README.md ADDED
---
tags:
- bert
- adapterhub:deprel/ud_ewt
- adapter-transformers
datasets:
- universal_dependencies
language:
- en
---

# Adapter `AdapterHub/bert-base-uncased-pf-ud_deprel` for bert-base-uncased

An [adapter](https://adapterhub.ml) for the `bert-base-uncased` model that was trained on the [deprel/ud_ewt](https://adapterhub.ml/explore/deprel/ud_ewt/) dataset and includes a prediction head for tagging.

This adapter was created for usage with the **[adapter-transformers](https://github.com/Adapter-Hub/adapter-transformers)** library.

## Usage

First, install `adapter-transformers`:

```
pip install -U adapter-transformers
```

_Note: adapter-transformers is a fork of transformers that acts as a drop-in replacement with adapter support. [More](https://docs.adapterhub.ml/installation.html)_

Now, the adapter can be loaded and activated like this:

```python
from transformers import AutoModelWithHeads

model = AutoModelWithHeads.from_pretrained("bert-base-uncased")
adapter_name = model.load_adapter("AdapterHub/bert-base-uncased-pf-ud_deprel", source="hf")
model.active_adapters = adapter_name
```
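
As a quick sanity check of the loaded adapter, a forward pass returns one logit vector per wordpiece over the 50 dependency relation labels. The sketch below assumes this; the example sentence is illustrative:

```python
import torch
from transformers import AutoModelWithHeads, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelWithHeads.from_pretrained("bert-base-uncased")
model.active_adapters = model.load_adapter("AdapterHub/bert-base-uncased-pf-ud_deprel", source="hf")

# Tag an illustrative sentence: one logit vector per wordpiece over the 50 deprel labels.
inputs = tokenizer("The quick brown fox jumps over the lazy dog", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# One predicted label id per wordpiece.
pred_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist())
print(list(zip(tokens, pred_ids)))
```

Mapping the predicted ids back to label names uses the `label2id` table shipped in `head_config.json`; see the sketch after that file below.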

## Architecture & Training

The training code for this adapter is available at https://github.com/adapter-hub/efficient-task-transfer.
In particular, training configurations for all tasks can be found [here](https://github.com/adapter-hub/efficient-task-transfer/tree/master/run_configs).

## Evaluation results

Refer to [the paper](https://arxiv.org/pdf/2104.08247) for more information on results.

## Citation

If you use this adapter, please cite our paper ["What to Pre-Train on? Efficient Intermediate Task Selection"](https://arxiv.org/pdf/2104.08247):

```bibtex
@inproceedings{poth-etal-2021-what-to-pre-train-on,
    title = "What to Pre-Train on? Efficient Intermediate Task Selection",
    author = "Clifton Poth and Jonas Pfeiffer and Andreas Rücklé and Iryna Gurevych",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/2104.08247",
    pages = "to appear",
}
```
adapter_config.json ADDED
{
  "config": {
    "adapter_residual_before_ln": false,
    "cross_adapter": false,
    "inv_adapter": null,
    "inv_adapter_reduction_factor": null,
    "leave_out": [],
    "ln_after": false,
    "ln_before": false,
    "mh_adapter": false,
    "non_linearity": "relu",
    "original_ln_after": true,
    "original_ln_before": true,
    "output_adapter": true,
    "reduction_factor": 16,
    "residual_before_ln": true
  },
  "hidden_size": 768,
  "model_class": "BertModelWithHeads",
  "model_name": "bert-base-uncased",
  "model_type": "bert",
  "name": "ud_deprel_en_ewt"
}
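
These settings correspond to a Pfeiffer-style bottleneck adapter: a single adapter module after each block's output feed-forward (`output_adapter: true`, `mh_adapter: false`), a ReLU non-linearity, and `reduction_factor: 16`, i.e. a bottleneck of 768 / 16 = 48 hidden units. As a sketch under those assumptions (the adapter name is illustrative), a freshly initialized adapter with an equivalent configuration could be added via adapter-transformers like this:

```python
from transformers import AutoModelWithHeads, PfeifferConfig  # PfeifferConfig is provided by adapter-transformers

model = AutoModelWithHeads.from_pretrained("bert-base-uncased")

# Bottleneck after the output FFN only, ReLU non-linearity,
# reduction factor 16 -> 768 / 16 = 48 bottleneck units.
config = PfeifferConfig(reduction_factor=16, non_linearity="relu")
model.add_adapter("ud_deprel_from_scratch", config=config)  # untrained; name is illustrative
```

For actual predictions, load the published checkpoint as shown in the README instead of adding a fresh adapter.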
head_config.json ADDED
{
  "config": {
    "activation_function": "tanh",
    "head_type": "tagging",
    "label2id": {
      "_": 0,
      "acl": 1,
      "acl:relcl": 2,
      "advcl": 3,
      "advmod": 4,
      "amod": 5,
      "appos": 6,
      "aux": 7,
      "aux:pass": 8,
      "case": 9,
      "cc": 10,
      "cc:preconj": 11,
      "ccomp": 12,
      "compound": 13,
      "compound:prt": 14,
      "conj": 15,
      "cop": 16,
      "csubj": 17,
      "csubj:pass": 18,
      "dep": 19,
      "det": 20,
      "det:predet": 21,
      "discourse": 22,
      "dislocated": 23,
      "expl": 24,
      "fixed": 25,
      "flat": 26,
      "flat:foreign": 27,
      "goeswith": 28,
      "iobj": 29,
      "list": 30,
      "mark": 31,
      "nmod": 32,
      "nmod:npmod": 33,
      "nmod:poss": 34,
      "nmod:tmod": 35,
      "nsubj": 36,
      "nsubj:pass": 37,
      "nummod": 38,
      "obj": 39,
      "obl": 40,
      "obl:npmod": 41,
      "obl:tmod": 42,
      "orphan": 43,
      "parataxis": 44,
      "punct": 45,
      "reparandum": 46,
      "root": 47,
      "vocative": 48,
      "xcomp": 49
    },
    "layers": 1,
    "num_labels": 50
  },
  "hidden_size": 768,
  "model_class": "BertModelWithHeads",
  "model_name": "bert-base-uncased",
  "model_type": "bert",
  "name": "ud_deprel_en_ewt"
}
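
The head is a one-layer tagging head over the 50 UD dependency relation labels listed above. To decode model outputs to relation names, the `label2id` table can be inverted; a minimal sketch, assuming `head_config.json` sits in the current directory:

```python
import json

with open("head_config.json") as f:
    head_cfg = json.load(f)["config"]

# Invert label2id to decode predicted ids back to UD relation names.
id2label = {idx: label for label, idx in head_cfg["label2id"].items()}
assert id2label[36] == "nsubj" and id2label[47] == "root"
```
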
pytorch_adapter.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:6b8fa22c2ee5a8328c82624abe53b02dd74fb4bbd40624383cb60514a78402dc
size 3595119
pytorch_model_head.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:c135b16b9a21f0972641370fc541236bdac8651279fd6c126c963d7f2e6a4e6f
size 154871