asahi417 committed on
Commit
9bab0ff
1 Parent(s): bb9deac

model update

Browse files
README.md ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---
3
+ widget:
4
+ - text: "mammal is to whale what"
5
+ example_title: "Analogy Example 1 (semantic relation)"
6
+ - text: "wedding is to marriage what "
7
+ example_title: "Analogy Example 2 (semantic relation, metaphor)"
8
+ - text: "London is to U.K. what"
9
+ example_title: "Analogy Example 3 (entity)"
10
+ - text: "actual is to actually what"
11
+ example_title: "Analogy Example 4 (morphological)"
12
+ ---
13
+ # relbert/opt-125m-analogy
14
+
15
+ This is [facebook/opt-125m](https://huggingface.co/facebook/opt-125m) fine-tuned on [relbert/semeval2012_relational_similarity](https://huggingface.co/datasets/relbert/semeval2012_relational_similarity)
16
+ for analogy generation, which is to generate a word pair (eg. `bird is to crow`) given a query (eg. `mammal is to whale`)
17
+ so that the query and the generated word pair form an analogy statement.
18
+
19
+ ### Usage
20
+
21
+ ```python
22
+ from transformers import pipeline
23
+
24
+ pipe = pipeline('text2text-generation', model="relbert/opt-125m-analogy")
25
+ output = pipe("mammal is to whale what")
26
+ print(output)
27
+ >>> [{'generated_text': 'bird is to crow'}]
28
+ ```
config.json CHANGED
@@ -1,23 +1,17 @@
1
  {
2
- "_name_or_path": "analogy_models/opt-125m-analogy-epoch10/model",
3
  "_remove_final_layer_norm": false,
4
  "activation_dropout": 0.0,
5
  "activation_function": "relu",
6
  "architectures": [
7
- "T5ForConditionalGeneration"
8
  ],
9
  "attention_dropout": 0.0,
10
  "bos_token_id": 2,
11
- "d_ff": 2048,
12
- "d_kv": 64,
13
- "d_model": 768,
14
- "dense_act_fn": "relu",
15
  "do_layer_norm_before": true,
16
  "dropout": 0.1,
17
- "dropout_rate": 0.1,
18
  "enable_bias": true,
19
  "eos_token_id": 2,
20
- "feed_forward_proj": "relu",
21
  "ffn_dim": 3072,
22
  "finetuing_config": {
23
  "batch_size": 32,
@@ -29,22 +23,16 @@
29
  "random_seed": 42,
30
  "template": "<subj-a> is to <obj-a> what <subj-b> is to <obj-b>"
31
  },
 
32
  "init_std": 0.02,
33
- "initializer_factor": 1.0,
34
- "is_encoder_decoder": true,
35
- "is_gated_act": false,
36
  "layer_norm_elementwise_affine": true,
37
- "layer_norm_epsilon": 1e-06,
38
  "layerdrop": 0.0,
39
  "max_position_embeddings": 2048,
40
- "model_type": "t5",
41
- "num_decoder_layers": 6,
42
- "num_heads": 12,
43
- "num_layers": 12,
44
  "pad_token_id": 1,
45
  "prefix": "</s>",
46
- "relative_attention_max_distance": 128,
47
- "relative_attention_num_buckets": 32,
48
  "torch_dtype": "float32",
49
  "transformers_version": "4.26.1",
50
  "use_cache": true,
 
1
  {
2
+ "_name_or_path": "facebook/opt-125m",
3
  "_remove_final_layer_norm": false,
4
  "activation_dropout": 0.0,
5
  "activation_function": "relu",
6
  "architectures": [
7
+ "OPTForCausalLM"
8
  ],
9
  "attention_dropout": 0.0,
10
  "bos_token_id": 2,
 
 
 
 
11
  "do_layer_norm_before": true,
12
  "dropout": 0.1,
 
13
  "enable_bias": true,
14
  "eos_token_id": 2,
 
15
  "ffn_dim": 3072,
16
  "finetuing_config": {
17
  "batch_size": 32,
 
23
  "random_seed": 42,
24
  "template": "<subj-a> is to <obj-a> what <subj-b> is to <obj-b>"
25
  },
26
+ "hidden_size": 768,
27
  "init_std": 0.02,
 
 
 
28
  "layer_norm_elementwise_affine": true,
 
29
  "layerdrop": 0.0,
30
  "max_position_embeddings": 2048,
31
+ "model_type": "opt",
32
+ "num_attention_heads": 12,
33
+ "num_hidden_layers": 12,
 
34
  "pad_token_id": 1,
35
  "prefix": "</s>",
 
 
36
  "torch_dtype": "float32",
37
  "transformers_version": "4.26.1",
38
  "use_cache": true,
merges.txt CHANGED
@@ -1,4 +1,4 @@
1
- #version: 0.2 - Trained by `huggingface/tokenizers`
2
  Ġ t
3
  Ġ a
4
  h e
 
1
+ #version: 0.2
2
  Ġ t
3
  Ġ a
4
  h e
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:fa9225b427146a091257a647cf398db7c712f252a942515af23723ef0b3fa639
3
- size 607617881
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b8e2bb320709a156e785d5c1b93e359a6a1b309c2cb6aa9eb2e0ee654899617
3
+ size 501024733
tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
19
  },
20
  "errors": "replace",
21
  "model_max_length": 1000000000000000019884624838656,
22
- "name_or_path": "analogy_models/opt-125m-analogy-epoch10/model",
23
  "pad_token": {
24
  "__type": "AddedToken",
25
  "content": "<pad>",
 
19
  },
20
  "errors": "replace",
21
  "model_max_length": 1000000000000000019884624838656,
22
+ "name_or_path": "facebook/opt-125m",
23
  "pad_token": {
24
  "__type": "AddedToken",
25
  "content": "<pad>",
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07b9d163f9cd39545f020abdd398173acfcfc56dfe4a490404f260246ee31a7d
3
+ size 3515
validation_accuracy.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"accuracy": 0.5063291139240507, "dataset": "relbert/semeval2012_relational_similarity", "split": "validation"}
vocab.json CHANGED
The diff for this file is too large to render. See raw diff