calpt committed on
Commit
3ae1b3b
1 Parent(s): b97eabb

Add adapter gpt2_lm_poem_pfeiffer version 1

Files changed (3)
  1. README.md +53 -0
  2. adapter_config.json +41 -0
  3. pytorch_adapter.bin +3 -0
README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ tags:
+ - adapter-transformers
+ - gpt2
+ - adapterhub:lm/poem
+ license: "apache-2.0"
+ ---
+
+ # Adapter `gpt2_lm_poem_pfeiffer` for gpt2
+
+ An adapter for the `gpt2` model, trained on the [lm/poem](https://adapterhub.ml/explore/lm/poem/) dataset.
+
+ **This adapter was created for usage with the [Adapters](https://github.com/Adapter-Hub/adapters) library.**
+
+ ## Usage
+
+ First, install `adapters`:
+
+ ```
+ pip install -U adapters
+ ```
+
+ Now, the adapter can be loaded and activated like this:
+
+ ```python
+ from adapters import AutoAdapterModel
+
+ model = AutoAdapterModel.from_pretrained("gpt2")
+ adapter_name = model.load_adapter("AdapterHub/gpt2_lm_poem_pfeiffer")
+ model.set_active_adapters(adapter_name)
+ ```
+
+ ## Architecture & Training
+
+ - Adapter architecture: pfeiffer
+ - Prediction head: None
+ - Dataset: [lm/poem](https://adapterhub.ml/explore/lm/poem/)
+
+ ## Author Information
+
+ - Author name(s): Hannah Sterz
+ - Author email: hannah.sterz@stud.tu-darmstadt.de
+ - Author links: [Twitter](https://twitter.com/@h_sterz)
+
+
+
+ ## Citation
+
+ ```bibtex
+
+ ```
+
+ *This adapter has been auto-imported from https://github.com/Adapter-Hub/Hub/blob/master/adapters/ukp/gpt2_lm_poem_pfeiffer.yaml*.
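The README's usage snippet stops after activating the adapter. Since the adapter ships without a prediction head ("Prediction head: None"), actually generating poem-style text requires a causal LM head on top. The following is a rough sketch of how that could look, not part of the committed files; the head added via `add_causal_lm_head`, the head name `"poem"`, the standard `gpt2` tokenizer, and the sampling settings are all assumptions.

```python
# Hypothetical continuation of the README's usage example (not part of this commit).
# Assumes the `adapters` and `transformers` libraries, the standard gpt2 tokenizer,
# and a freshly initialized (untrained) causal LM head, since the adapter has no head.
from adapters import AutoAdapterModel
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoAdapterModel.from_pretrained("gpt2")

adapter_name = model.load_adapter("AdapterHub/gpt2_lm_poem_pfeiffer")
model.add_causal_lm_head("poem")           # untrained head; the adapter provides none
model.set_active_adapters(adapter_name)

inputs = tokenizer("The moon above the silent lake", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=40, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Because the head is randomly initialized, the output quality depends on how the adapter was trained; the snippet only illustrates the mechanics of wiring the adapter into generation.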
adapter_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "config": {
+     "adapter_residual_before_ln": false,
+     "cross_adapter": false,
+     "dropout": 0.0,
+     "factorized_phm_W": true,
+     "factorized_phm_rule": false,
+     "hypercomplex_nonlinearity": "glorot-uniform",
+     "init_weights": "bert",
+     "inv_adapter": null,
+     "inv_adapter_reduction_factor": null,
+     "is_parallel": false,
+     "learn_phm": true,
+     "leave_out": [],
+     "ln_after": false,
+     "ln_before": false,
+     "mh_adapter": false,
+     "non_linearity": "relu",
+     "original_ln_after": true,
+     "original_ln_before": true,
+     "output_adapter": true,
+     "phm_bias": true,
+     "phm_c_init": "normal",
+     "phm_dim": 4,
+     "phm_init_range": 0.0001,
+     "phm_layer": false,
+     "phm_rank": 1,
+     "reduction_factor": 16,
+     "residual_before_ln": true,
+     "scaling": 1.0,
+     "shared_W_phm": false,
+     "shared_phm_rule": true,
+     "use_gating": false
+   },
+   "hidden_size": 768,
+   "model_class": "GPT2AdapterModel",
+   "model_name": "gpt2",
+   "model_type": "gpt2",
+   "name": "poem",
+   "version": "0.2.0"
+ }
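For orientation, the config largely determines the checkpoint size: with `hidden_size: 768` and `reduction_factor: 16` the bottleneck width is 48, and only one adapter is inserted per transformer block (`mh_adapter: false`, `output_adapter: true`). A back-of-the-envelope count then lands close to the ~3.6 MB `pytorch_adapter.bin` below. The sketch assumes gpt2-small's 12 layers and fp32 storage, neither of which is stated in the config itself.

```python
# Rough parameter estimate from adapter_config.json (assumes 12 gpt2 layers, fp32 weights).
hidden_size = 768
reduction_factor = 16
num_layers = 12  # gpt2-small; not recorded in the config

bottleneck = hidden_size // reduction_factor        # 48
down_proj = hidden_size * bottleneck + bottleneck   # down-projection weight + bias
up_proj = bottleneck * hidden_size + hidden_size    # up-projection weight + bias
params_per_layer = down_proj + up_proj              # one output adapter per block
total_params = params_per_layer * num_layers

print(total_params)      # 894528 parameters
print(total_params * 4)  # ~3.58 MB in fp32, close to the 3,594,918-byte checkpoint
```

The small remaining gap between the estimate and the file size is consistent with serialization overhead (state-dict keys and pickle framing), but that is an interpretation, not something the commit states.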
pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8dc85f92b287582245165b4a869387849ff11b5d6ca614b0d360395b2e2c645
+ size 3594918