lenglaender committed
Commit 27bca22
1 Parent(s): ea77925

Upload model
README.md ADDED
@@ -0,0 +1,40 @@
---
tags:
- t5
- adapter-transformers
datasets:
- amazon_polarity
---

# Adapter `lenglaender/xlm-roberta-base-lora-lm-amazon-polarity` for google-t5/t5-base

An [adapter](https://adapterhub.ml) for the `google-t5/t5-base` model, trained on the [amazon_polarity](https://huggingface.co/datasets/amazon_polarity/) dataset, with a sequence-to-sequence language modeling prediction head.

This adapter was created for use with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library.

## Usage

First, install `adapters`:

```
pip install -U adapters
```

Now, the adapter can be loaded and activated like this:

```python
from adapters import AutoAdapterModel

# Load the base model, then fetch the adapter from the Hub and activate it
model = AutoAdapterModel.from_pretrained("google-t5/t5-base")
adapter_name = model.load_adapter("lenglaender/xlm-roberta-base-lora-lm-amazon-polarity", source="hf", set_active=True)
```
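
Once the adapter is active, the model generates text like any other T5 model. Below is a minimal inference sketch; the exact input format used during training is not documented in this card, so the review text is only a placeholder:

```python
from transformers import AutoTokenizer
from adapters import AutoAdapterModel

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
model = AutoAdapterModel.from_pretrained("google-t5/t5-base")
model.load_adapter("lenglaender/xlm-roberta-base-lora-lm-amazon-polarity", source="hf", set_active=True)

# Placeholder input: the prompt template used during training is not documented here
inputs = tokenizer("This product exceeded all my expectations!", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=5)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```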

## Architecture & Training

The adapter uses LoRA with rank r = 8 and scaling factor alpha = 8, applied to the query and value projections of the self-attention layers (see `adapter_config.json` below); a configuration sketch follows.
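
For reference, an equivalent adapter could be set up from scratch with the Adapters library's `LoRAConfig`. The values below mirror `adapter_config.json` and `head_config.json`; the remaining training hyperparameters (optimizer, learning rate, epochs) are not recorded in this card:

```python
from adapters import AutoAdapterModel, LoRAConfig

model = AutoAdapterModel.from_pretrained("google-t5/t5-base")

# Mirrors the settings stored in adapter_config.json
config = LoRAConfig(
    r=8,                       # rank of the low-rank update matrices
    alpha=8,                   # scaling factor applied to the update
    dropout=0.0,
    attn_matrices=["q", "v"],  # LoRA on the query and value projections
    selfattn_lora=True,
    intermediate_lora=False,
    output_lora=False,
)
model.add_adapter("amazon_lm", config=config)
model.add_seq2seq_lm_head("amazon_lm")  # seq2seq LM head, as in head_config.json
model.train_adapter("amazon_lm")        # freeze base weights; train only the adapter
```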

## Evaluation results



## Citation
adapter_config.json ADDED
@@ -0,0 +1,25 @@
{
  "config": {
    "alpha": 8,
    "architecture": "lora",
    "attn_matrices": [
      "q",
      "v"
    ],
    "composition_mode": "add",
    "dropout": 0.0,
    "init_weights": "lora",
    "intermediate_lora": false,
    "leave_out": [],
    "output_lora": false,
    "r": 8,
    "selfattn_lora": true,
    "use_gating": false
  },
  "hidden_size": 768,
  "model_class": "T5AdapterModel",
  "model_name": "google-t5/t5-base",
  "model_type": "t5",
  "name": "amazon_lm",
  "version": "0.2.0"
}
head_config.json ADDED
@@ -0,0 +1,18 @@
{
  "config": {
    "activation_function": null,
    "bias": false,
    "head_type": "seq2seq_lm",
    "label2id": null,
    "layer_norm": false,
    "layers": 1,
    "shift_labels": false,
    "vocab_size": 32128
  },
  "hidden_size": 768,
  "model_class": "T5AdapterModel",
  "model_name": "google-t5/t5-base",
  "model_type": "t5",
  "name": "amazon_lm",
  "version": "0.2.0"
}
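
The head above is a single linear projection (`layers: 1`, no bias, no layer norm) from the hidden size of 768 to T5's vocabulary of 32128 tokens. As a quick sanity check, and assuming the head keeps its saved name `amazon_lm` when loaded via the Usage snippet, its stored configuration can be inspected like this:

```python
# Assumes `model` from the Usage section, with the adapter already loaded
head = model.heads["amazon_lm"]
print(head.config["head_type"])   # "seq2seq_lm"
print(head.config["vocab_size"])  # 32128
```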
pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:39f017e242713e83ed3e70d2256103e1103b565ca91807ffb7f202bccec7d79f
size 3590962
pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d1512ab439ca45de32ca6b614812a8085145b7cb2d03eac7fcfcd2248d8ef5ad
size 98698515