asahi417 committed on
Commit
3c18977
1 Parent(s): ca6b517
Files changed (3)
  1. analogy.json +1 -0
  2. classification.json +1 -0
  3. config.json +1 -1
analogy.json ADDED
@@ -0,0 +1 @@
+ {"distance_function": "cosine_similarity", "sat/test": 0.6023738872403561, "sat/valid": 0.5135135135135135, "u2/test": 0.618421052631579, "u2/valid": 0.5416666666666666, "u4/test": 0.6365740740740741, "u4/valid": 0.6666666666666666, "google/test": 0.868, "google/valid": 0.92, "bats/test": 0.7498610339077265, "bats/valid": 0.7738693467336684, "sat_full": 0.5935828877005348}
classification.json ADDED
@@ -0,0 +1 @@
+ {"lexical_relation_classification/BLESS": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.9190899502787404, "test/f1_macro": 0.9137760457433256, "test/f1_micro": 0.9190899502787404, "test/p_macro": 0.9154825883678552, "test/p_micro": 0.9190899502787404, "test/r_macro": 0.9133469621975284, "test/r_micro": 0.9190899502787404}, "lexical_relation_classification/CogALexV": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.854225352112676, "test/f1_macro": 0.6960792498811619, "test/f1_micro": 0.854225352112676, "test/p_macro": 0.718989866350613, "test/p_micro": 0.854225352112676, "test/r_macro": 0.6764892157204768, "test/r_micro": 0.854225352112676}, "lexical_relation_classification/EVALution": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.6738894907908992, "test/f1_macro": 0.6683142084374337, "test/f1_micro": 0.6738894907908992, "test/p_macro": 0.6690155060108055, "test/p_micro": 0.6738894907908992, "test/r_macro": 0.668463840355305, "test/r_micro": 0.6738894907908992}, "lexical_relation_classification/K&H+N": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.9637615636085414, "test/f1_macro": 0.890107974704234, "test/f1_micro": 0.9637615636085414, "test/p_macro": 0.9078140536006921, "test/p_micro": 0.9637615636085414, "test/r_macro": 0.8742163941583343, "test/r_micro": 0.9637615636085414}, "lexical_relation_classification/ROOT09": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, 
"n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.9053588216859918, "test/f1_macro": 0.9023263285944801, "test/f1_micro": 0.9053588216859918, "test/p_macro": 0.9041432350808178, "test/p_micro": 0.9053588216859918, "test/r_macro": 0.9012883662953741, "test/r_micro": 0.9053588216859918}}
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "roberta-large",
+ "_name_or_path": "relbert-roberta-large-semeval2012-v2-average-no-mask-prompt-e-nce",
  "architectures": [
  "RobertaModel"
  ],
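The updated _name_or_path points at the fine-tuned RelBERT checkpoint rather than the base roberta-large weights. A hedged loading sketch with Hugging Face transformers is shown below; the Hub repository id is an assumption inferred from the new name and may differ from the checkpoint's actual location.

```python
from transformers import AutoModel, AutoTokenizer

# Assumed Hub repository id, inferred from "_name_or_path" in config.json.
model_id = "relbert/relbert-roberta-large-semeval2012-v2-average-no-mask-prompt-e-nce"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(model_id)  # RobertaModel, per the "architectures" field
```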