asahi417 committed
Commit 56caa5f · Parent(s): fecfd66
Files changed (3):
  1. analogy.json +1 -0
  2. classification.json +1 -0
  3. config.json +1 -1
analogy.json ADDED
@@ -0,0 +1 @@
+ {"distance_function": "cosine_similarity", "sat/test": 0.6112759643916914, "sat/valid": 0.5945945945945946, "u2/test": 0.5964912280701754, "u2/valid": 0.5416666666666666, "u4/test": 0.6087962962962963, "u4/valid": 0.5833333333333334, "google/test": 0.878, "google/valid": 0.96, "bats/test": 0.7576431350750417, "bats/valid": 0.7939698492462312, "sat_full": 0.6096256684491979}
classification.json ADDED
@@ -0,0 +1 @@
+ {"lexical_relation_classification/BLESS": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.9264728039777008, "test/f1_macro": 0.9231888761944194, "test/f1_micro": 0.9264728039777008, "test/p_macro": 0.9294637529765207, "test/p_micro": 0.9264728039777008, "test/r_macro": 0.9174590522007549, "test/r_micro": 0.9264728039777008}, "lexical_relation_classification/CogALexV": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.8720657276995305, "test/f1_macro": 0.7203249423895846, "test/f1_micro": 0.8720657276995305, "test/p_macro": 0.7652016579658512, "test/p_micro": 0.8720657276995305, "test/r_macro": 0.6854608897905268, "test/r_micro": 0.8720657276995305}, "lexical_relation_classification/EVALution": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.7074756229685807, "test/f1_macro": 0.7003587066174993, "test/f1_micro": 0.7074756229685807, "test/p_macro": 0.706677485330254, "test/p_micro": 0.7074756229685807, "test/r_macro": 0.6971065577211089, "test/r_micro": 0.7074756229685807}, "lexical_relation_classification/K&H+N": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.9625095638867636, "test/f1_macro": 0.8943198093953978, "test/f1_micro": 0.9625095638867636, "test/p_macro": 0.9056524167488219, "test/p_micro": 0.9625095638867636, "test/r_macro": 0.883948356494265, "test/r_micro": 0.9625095638867636}, "lexical_relation_classification/ROOT09": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, 
"n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.9022250078345346, "test/f1_macro": 0.9008228707899653, "test/f1_micro": 0.9022250078345346, "test/p_macro": 0.8980836170967329, "test/p_micro": 0.9022250078345346, "test/r_macro": 0.9037514246282177, "test/r_micro": 0.9022250078345346}}
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-"_name_or_path": "roberta-large",
+"_name_or_path": "relbert-roberta-large-semeval2012-v2-mask-prompt-e-nce",
 "architectures": [
 "RobertaModel"
 ],
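The config.json change only updates _name_or_path from the base roberta-large checkpoint to the fine-tuned RelBERT name; the architecture remains RobertaModel. A sketch of loading such a checkpoint with transformers, using a hypothetical hub id assembled from the new name (the actual repository id is not shown in this diff):

```python
from transformers import AutoModel, AutoTokenizer

# Hypothetical hub id derived from the new _name_or_path value.
model_id = "relbert/relbert-roberta-large-semeval2012-v2-mask-prompt-e-nce"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(model_id)  # instantiates RobertaModel per config.json
```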