Geigle committed on
Commit 10e7192
1 Parent(s): 76df31f
README.md ADDED

---
tags:
- adapter-transformers
- xlm-roberta
- adapterhub:quality_estimation/wmt21
---

# Adapter `Gregor/xlm-roberta-large-wmt21-qe` for xlm-roberta-large

An [adapter](https://adapterhub.ml) for the xlm-roberta-large model that was trained on the [quality_estimation/wmt21](https://adapterhub.ml/explore/quality_estimation/wmt21/) dataset and includes a prediction head for classification.

This adapter was created for use with the **[adapter-transformers](https://github.com/Adapter-Hub/adapter-transformers)** library.

## Usage

First, install `adapter-transformers`:

```
pip install -U adapter-transformers
```

_Note: adapter-transformers is a fork of transformers that acts as a drop-in replacement with adapter support. [More](https://docs.adapterhub.ml/installation.html)_

Now the adapter can be loaded and activated like this:

```python
from transformers import AutoModelWithHeads

model = AutoModelWithHeads.from_pretrained("xlm-roberta-large")
adapter_name = model.load_adapter("Gregor/xlm-roberta-large-wmt21-qe")
model.active_adapters = adapter_name
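
Once the adapter and head are active, the model produces one quality score per input. The following is a minimal sketch of how a score might be obtained; the German–English sentence pair and the pair-encoding step are illustrative assumptions, not taken from the adapter's documentation:

```python
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")

# Hypothetical source/translation pair; WMT21 QE scores a source
# sentence together with its machine translation.
inputs = tokenizer("Das ist ein Test.", "This is a test.", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# The head has a single output label, so the logits tensor holds
# one regression-style quality score.
score = outputs.logits.squeeze().item()
print(f"predicted quality score: {score:.4f}")
```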

## Architecture & Training

<!-- Add some description here -->

## Evaluation results

<!-- Add some description here -->

## Citation

<!-- Add some description here -->

adapter_config.json ADDED

{
  "config": {
    "adapter_residual_before_ln": false,
    "cross_adapter": false,
    "inv_adapter": null,
    "inv_adapter_reduction_factor": null,
    "leave_out": [],
    "ln_after": false,
    "ln_before": false,
    "mh_adapter": false,
    "non_linearity": "gelu",
    "original_ln_after": true,
    "original_ln_before": true,
    "output_adapter": true,
    "reduction_factor": 8,
    "residual_before_ln": true
  },
  "hidden_size": 1024,
  "model_class": "XLMRobertaModelWithHeads",
  "model_name": "xlm-roberta-large",
  "model_type": "xlm-roberta",
  "name": "qe_wmt21"
}
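
This configuration appears to match a Pfeiffer-style bottleneck adapter: a single adapter module after the output feed-forward block (`output_adapter: true`, `mh_adapter: false`) with a GELU non-linearity. With `reduction_factor` 8 and a hidden size of 1024, the bottleneck dimension is 1024 / 8 = 128. As a hedged sketch, an equivalent configuration could be rebuilt programmatically with the adapter-transformers API (the variable name is illustrative; all values come from adapter_config.json above):

```python
from transformers import AdapterConfig

# Reconstruct the stored adapter configuration by hand.
qe_adapter_config = AdapterConfig(
    mh_adapter=False,     # no adapter after the attention block
    output_adapter=True,  # adapter after the output feed-forward block
    reduction_factor=8,   # bottleneck: 1024 hidden units / 8 = 128
    non_linearity="gelu",
)
```
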
head_config.json ADDED

{
  "config": {
    "activation_function": "tanh",
    "bias": true,
    "head_type": "classification",
    "label2id": {
      "LABEL_0": 0
    },
    "layers": 2,
    "num_labels": 1,
    "use_pooler": false
  },
  "hidden_size": 1024,
  "model_class": "XLMRobertaModelWithHeads",
  "model_name": "xlm-roberta-large",
  "model_type": "xlm-roberta",
  "name": "qe_wmt21"
}
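
The head is a two-layer classification head with a tanh activation and a single output label, i.e. it emits one continuous score per input rather than class probabilities, which fits a quality-estimation regression target. Loading the adapter from the hub restores this head automatically; as a sketch only, an equivalent head could be recreated on a fresh model with the adapter-transformers API (the head name mirrors the config and is otherwise an assumption):

```python
# Recreate an equivalent prediction head from the values
# in head_config.json above.
model.add_classification_head(
    "qe_wmt21",
    num_labels=1,                # one output -> regression-style score
    layers=2,                    # two projection layers
    activation_function="tanh",
    use_pooler=False,            # score from the first token, not the pooler
)
```
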
pytorch_adapter.bin ADDED

version https://git-lfs.github.com/spec/v1
oid sha256:073a3cda07a4fe8ae0b8e38d2a118a92e7f9b63ece0c0b48c87d1e66c2b36a37
size 25311151

pytorch_model_head.bin ADDED

version https://git-lfs.github.com/spec/v1
oid sha256:4a63c5bbd7154b1b5dfbeacffdf7679303bf6c91ef9278ce95f55dea1ff63097
size 4204175