asahi417 committed
Commit
b56eea3
1 Parent(s): c38592e

model update

README.md ADDED
@@ -0,0 +1,28 @@
+
+---
+widget:
+- text: "mammal is to whale what"
+  example_title: "Analogy Example 1 (semantic relation)"
+- text: "wedding is to marriage what "
+  example_title: "Analogy Example 2 (semantic relation, metaphor)"
+- text: "London is to U.K. what"
+  example_title: "Analogy Example 3 (entity)"
+- text: "actual is to actually what"
+  example_title: "Analogy Example 4 (morphological)"
+---
+# relbert/opt-350m-analogy
+
+This is [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) fine-tuned on [relbert/semeval2012_relational_similarity](https://huggingface.co/datasets/relbert/semeval2012_relational_similarity)
+for analogy generation: given a query word pair (e.g. `mammal is to whale`), the model generates a second word pair (e.g. `bird is to crow`)
+so that the query and the generated pair form an analogy statement.
+
+### Usage
+
+```python
+from transformers import pipeline
+
+pipe = pipeline('text2text-generation', model="relbert/opt-350m-analogy")
+output = pipe("mammal is to whale what")
+print(output)
+>>> [{'generated_text': 'bird is to crow'}]
+```
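Note that the card's example requests the `text2text-generation` task, which `transformers` maps to encoder-decoder (`AutoModelForSeq2SeqLM`) models, while the corrected config below registers this checkpoint as `OPTForCausalLM`; a causal `text-generation` call is likely the safer route. A minimal sketch, not verified against this checkpoint (the expected completion is taken from the README):

```python
from transformers import pipeline

# Causal-LM variant of the README example. `text-generation` echoes the
# prompt in its output, so slice it off to keep only the completion.
pipe = pipeline("text-generation", model="relbert/opt-350m-analogy")
prompt = "mammal is to whale what"
out = pipe(prompt, max_new_tokens=10)[0]["generated_text"]
print(out[len(prompt):].strip())  # e.g. "bird is to crow", per the README
```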
config.json CHANGED
@@ -1,23 +1,17 @@
 {
-  "_name_or_path": "analogy_models/opt-350m-analogy-epoch15/model",
+  "_name_or_path": "facebook/opt-350m",
   "_remove_final_layer_norm": false,
   "activation_dropout": 0.0,
   "activation_function": "relu",
   "architectures": [
-    "T5ForConditionalGeneration"
+    "OPTForCausalLM"
   ],
   "attention_dropout": 0.0,
   "bos_token_id": 2,
-  "d_ff": 2048,
-  "d_kv": 64,
-  "d_model": 1024,
-  "dense_act_fn": "relu",
   "do_layer_norm_before": false,
   "dropout": 0.1,
-  "dropout_rate": 0.1,
   "enable_bias": true,
   "eos_token_id": 2,
-  "feed_forward_proj": "relu",
   "ffn_dim": 4096,
   "finetuing_config": {
     "batch_size": 32,
@@ -29,22 +23,16 @@
     "random_seed": 42,
     "template": "<subj-a> is to <obj-a> what <subj-b> is to <obj-b>"
   },
+  "hidden_size": 1024,
   "init_std": 0.02,
-  "initializer_factor": 1.0,
-  "is_encoder_decoder": true,
-  "is_gated_act": false,
   "layer_norm_elementwise_affine": true,
-  "layer_norm_epsilon": 1e-06,
   "layerdrop": 0.0,
   "max_position_embeddings": 2048,
-  "model_type": "t5",
-  "num_decoder_layers": 6,
-  "num_heads": 16,
-  "num_layers": 24,
+  "model_type": "opt",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
   "pad_token_id": 1,
   "prefix": "</s>",
-  "relative_attention_max_distance": 128,
-  "relative_attention_num_buckets": 32,
   "torch_dtype": "float32",
   "transformers_version": "4.26.1",
   "use_cache": true,
merges.txt CHANGED
@@ -1,4 +1,4 @@
-#version: 0.2 - Trained by `huggingface/tokenizers`
+#version: 0.2
 Ġ t
 Ġ a
 h e
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a623d8eb8a964ad39669782686f2a0d3cdbd6a9319e06c5a06d1beb0fb0436fa
3
- size 1313584089
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39a2f6771652f725f309aa30ad99b55f5e7bd3a35eebffd09f41ef23ffef3894
3
+ size 1324919965
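This entry is a Git LFS pointer: the repo tracks only the sha256 digest and byte size, and the actual weights live in LFS storage. Verifying a local download against the pointer is straightforward; a minimal sketch, assuming `pytorch_model.bin` has already been fetched into the working directory:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so a ~1.3 GB checkpoint never sits fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            digest.update(block)
    return digest.hexdigest()

# Values taken from the updated LFS pointer above.
expected = "39a2f6771652f725f309aa30ad99b55f5e7bd3a35eebffd09f41ef23ffef3894"
assert sha256_of("pytorch_model.bin") == expected
```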
tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
   },
   "errors": "replace",
   "model_max_length": 1000000000000000019884624838656,
-  "name_or_path": "analogy_models/opt-350m-analogy-epoch15/model",
+  "name_or_path": "facebook/opt-350m",
   "pad_token": {
     "__type": "AddedToken",
     "content": "<pad>",
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52903e47e30d3d7c38842084e5dd4b0760f0cc6a91e3ccaecb28ac2139dd6fa2
+size 3515
validation_accuracy.json ADDED
@@ -0,0 +1 @@
+{"accuracy": 0.9873417721518988, "dataset": "relbert/semeval2012_relational_similarity", "split": "validation"}
vocab.json CHANGED
The diff for this file is too large to render.