asahi417 committed on
Commit c294f73
1 Parent(s): dee754d
Files changed (3)
  1. analogy.json +1 -0
  2. classification.json +1 -0
  3. config.json +1 -1
analogy.json ADDED
@@ -0,0 +1 @@
+ {"distance_function": "cosine_similarity", "sat/test": 0.34718100890207715, "sat/valid": 0.43243243243243246, "u2/test": 0.39473684210526316, "u2/valid": 0.2916666666666667, "u4/test": 0.3541666666666667, "u4/valid": 0.2708333333333333, "google/test": 0.618, "google/valid": 0.7, "bats/test": 0.48526959421901056, "bats/valid": 0.4321608040201005, "sat_full": 0.35561497326203206}
classification.json ADDED
@@ -0,0 +1 @@
+ {"lexical_relation_classification/BLESS": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.8442067199035709, "test/f1_macro": 0.823901479879959, "test/f1_micro": 0.8442067199035708, "test/p_macro": 0.8677682427313037, "test/p_micro": 0.8442067199035709, "test/r_macro": 0.7919264219515755, "test/r_micro": 0.8442067199035709}, "lexical_relation_classification/CogALexV": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.8110328638497653, "test/f1_macro": 0.5472550813103398, "test/f1_micro": 0.8110328638497653, "test/p_macro": 0.5954094678399242, "test/p_micro": 0.8110328638497653, "test/r_macro": 0.5289811892180869, "test/r_micro": 0.8110328638497653}, "lexical_relation_classification/EVALution": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.5769230769230769, "test/f1_macro": 0.5466975926628965, "test/f1_micro": 0.5769230769230769, "test/p_macro": 0.5753894405143832, "test/p_micro": 0.5769230769230769, "test/r_macro": 0.5364787696516441, "test/r_micro": 0.5769230769230769}, "lexical_relation_classification/K&H+N": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.9118035751547611, "test/f1_macro": 0.7693980437177949, "test/f1_micro": 0.9118035751547611, "test/p_macro": 0.8742631752394593, "test/p_micro": 0.9118035751547611, "test/r_macro": 0.7402208063327357, "test/r_micro": 0.9118035751547611}, "lexical_relation_classification/ROOT09": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, 
"n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.8564713256032591, "test/f1_macro": 0.851273747817193, "test/f1_micro": 0.8564713256032591, "test/p_macro": 0.8474645741806848, "test/p_micro": 0.8564713256032591, "test/r_macro": 0.8567721994290993, "test/r_micro": 0.8564713256032591}}
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "roberta-base",
+ "_name_or_path": "relbert-roberta-base-semeval2012-v6-mask-prompt-c-triplet-1",
  "architectures": [
  "RobertaModel"
  ],
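The config.json change only updates the recorded checkpoint name; the architecture remains RobertaModel. A minimal sketch of loading the checkpoint with the transformers library, assuming the Hub repository id below (inferred from `_name_or_path`, not stated in this commit):

```python
from transformers import AutoModel, AutoTokenizer

# Assumed Hub id; substitute the actual repository this commit belongs to.
model_id = "relbert/relbert-roberta-base-semeval2012-v6-mask-prompt-c-triplet-1"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(model_id)  # instantiates the RobertaModel named in config.json
```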