HHansi committed
Commit d66f492
1 Parent(s): 5e001a2

Upload folder using huggingface_hub

added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<e>": 250002, "</e>": 250003}
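
These two entries extend the base XLM-RoBERTa vocabulary (250002 tokens) with the begin/end markers that model_args.json below registers as begin_tag and end_tag. A minimal sketch of how such markers are typically added and applied with the standard transformers tokenizer API; the local path "." and the example sentence are illustrative assumptions, not part of this repo:

from transformers import XLMRobertaTokenizer

# Hypothetical local checkout of this repository snapshot.
tokenizer = XLMRobertaTokenizer.from_pretrained(".")

# added_tokens.json already maps "<e>" -> 250002 and "</e>" -> 250003,
# so add_tokens() is a no-op here; it is what a fresh tokenizer would need.
tokenizer.add_tokens(["<e>", "</e>"])

# Illustrative Word-in-Context style input: the target word is wrapped
# in the begin/end markers before tokenization.
sentence = "Il a consulté un <e> avocat </e> pour son divorce."
ids = tokenizer(sentence)["input_ids"]
assert 250002 in ids and 250003 in ids  # markers map to the two new ids
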
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "xlm-roberta-large",
+   "architectures": [
+     "XLMRobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250004
+ }
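
The config describes an xlm-roberta-large encoder with a sequence-classification head and a vocabulary already enlarged to 250004 to cover the two markers above. A hedged sketch of loading the checkpoint for inference with the generic transformers API; the local path, the sentence pair and the label reading are assumptions, and per model_args.json the training used strategy "B" with merge_type "concat", so the exact input layout may differ from this plain pair encoding:

import torch
from transformers import XLMRobertaForSequenceClassification, XLMRobertaTokenizer

model_dir = "."  # hypothetical local path to this snapshot
tokenizer = XLMRobertaTokenizer.from_pretrained(model_dir)
model = XLMRobertaForSequenceClassification.from_pretrained(model_dir)
model.eval()

# Two contexts with the target word marked by <e> ... </e>.
text_a = "Elle a engagé un <e> avocat </e> pour le procès."
text_b = "Il a coupé un <e> avocat </e> bien mûr pour la salade."
inputs = tokenizer(text_a, text_b, truncation=True, max_length=120, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()  # labels_list in model_args.json is [0, 1]
print(pred)
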
eval_results.txt ADDED
@@ -0,0 +1,20 @@
+ accuracy = 0.72
+ cls_report =               precision    recall  f1-score   support
+
+          0.0     0.7561    0.6327    0.6889        49
+          1.0     0.6949    0.8039    0.7455        51
+
+     accuracy                         0.7200       100
+    macro avg     0.7255    0.7183    0.7172       100
+ weighted avg     0.7249    0.7200    0.7177       100
+
+ eval_loss = 0.5693764526110429
+ fn = 10
+ fp = 18
+ macro_f1 = 0.7171717171717171
+ mcc = 0.4437350029691738
+ tn = 31
+ tp = 41
+ weighted_f1 = 0.7177373737373738
+ weighted_p = 0.725506407606449
+ weighted_r = 0.7182873149259704
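
The headline numbers are consistent with the confusion counts reported above (tp = 41, tn = 31, fp = 18, fn = 10 over 100 pairs); a quick plain-Python check of that arithmetic:

from math import sqrt

tp, tn, fp, fn = 41, 31, 18, 10
n = tp + tn + fp + fn                    # 100 evaluation pairs

accuracy = (tp + tn) / n                 # 0.72
p1, r1 = tp / (tp + fp), tp / (tp + fn)  # class 1: 0.6949, 0.8039
p0, r0 = tn / (tn + fn), tn / (tn + fp)  # class 0: 0.7561, 0.6327
f1_1 = 2 * p1 * r1 / (p1 + r1)           # 0.7455
f1_0 = 2 * p0 * r0 / (p0 + r0)           # 0.6889
macro_f1 = (f1_0 + f1_1) / 2             # 0.7172, matching macro_f1 above

mcc = (tp * tn - fp * fn) / sqrt(
    (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
)                                        # 0.4437, matching mcc above
print(accuracy, macro_f1, mcc)
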
model_args.json ADDED
@@ -0,0 +1 @@
+ {"adam_epsilon": 1e-08, "begin_tag": "<e>", "best_model_dir": "best_model/fr_fr", "cache_dir": "temp/cache_dir/", "config": {}, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 70, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 10, "encoding": null, "end_tag": "</e>", "eval_batch_size": 8, "evaluate_during_training": true, "evaluate_during_training_silent": false, "evaluate_during_training_steps": 20, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 1e-05, "local_rank": -1, "logging_steps": 20, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 120, "model_name": "xlm-roberta-large", "model_type": "xlmroberta", "multiprocessing_chunksize": 500, "n_gpu": 1, "no_cache": false, "no_save": false, "num_train_epochs": 5, "output_dir": "temp/outputs/", "overwrite_output_dir": true, "process_count": 70, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": false, "save_model_every_epoch": false, "save_optimizer_and_scheduler": true, "save_steps": 20, "save_recent_only": true, "silent": false, "tensorboard_dir": null, "thread_count": null, "train_batch_size": 8, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": true, "use_multiprocessing": false, "wandb_kwargs": {"group": "fr_fr_xlm-roberta-large_B_concat", "job_type": "2"}, "wandb_project": "TransWiC-groups", "warmup_ratio": 0.1, "warmup_steps": 57, "weight_decay": 0, "skip_special_tokens": true, "model_class": "ClassificationModel", "labels_list": [0, 1], "labels_map": {}, "lazy_delimiter": "\t", "lazy_labels_column": 1, "lazy_loading": false, "lazy_loading_start_line": 1, "lazy_text_a_column": null, "lazy_text_b_column": null, "lazy_text_column": 0, "onnx": false, "regression": false, "sliding_window": false, "stride": 0.8, "tie_value": 1, "tagging": true, "strategy": "B", "special_tags": ["<e>"], "merge_n": 2, "merge_type": "concat"}
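
These arguments follow the simpletransformers ClassificationModel format, extended with TransWiC-specific keys (tagging, strategy, special_tags, merge_n, merge_type) that the stock library does not interpret. A hedged sketch of reloading the checkpoint with plain simpletransformers, assuming its standard constructor and a hypothetical local path "."; reproducing the TransWiC-specific behaviour would require that project's own code:

from simpletransformers.classification import ClassificationModel

# Subset of model_args.json that plain simpletransformers understands.
args = {
    "max_seq_length": 120,
    "eval_batch_size": 8,
    "manual_seed": 777,
    "labels_list": [0, 1],
}

model = ClassificationModel(
    "xlmroberta",    # model_type from model_args.json
    ".",             # model_name: local checkpoint directory (assumed)
    args=args,
    use_cuda=False,  # assumption: CPU inference for this sketch
)

# Illustrative sentence-pair prediction with the target word marked.
preds, raw_outputs = model.predict(
    [["Un <e> avocat </e> plaide au tribunal.", "Un <e> avocat </e> se mange en salade."]]
)
print(preds)
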
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f73e6edda26304f99cf5e805a801e93ea9ca838f781c59da0c3b4e01eaf080e9
+ size 4504577789
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38030fbe6ee5d91237e2d84f3e627adf2d494a030acd7a41ec9cda6df34968ba
+ size 2256539453
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:358c5a102b266abe1e26d94430d927dc1a151702bec6ddede1eb11706168acbe
+ size 627
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
test_eval.txt ADDED
@@ -0,0 +1,43 @@
+ Default classification report:
+               precision    recall  f1-score   support
+
+            F     0.7412    0.7620    0.7515       500
+            T     0.7551    0.7340    0.7444       500
+
+     accuracy                         0.7480      1000
+    macro avg     0.7482    0.7480    0.7480      1000
+ weighted avg     0.7482    0.7480    0.7480      1000
+
+
+ ADJ
+ Accuracy = 0.7010869565217391
+ Weighted Recall = 0.7010869565217391
+ Weighted Precision = 0.7000680991144484
+ Weighted F1 = 0.7000555105470039
+ Macro Recall = 0.6955147321961112
+ Macro Precision = 0.6982341557813256
+ Macro F1 = 0.6963423050379571
+ ADV
+ Accuracy = 0.8333333333333334
+ Weighted Recall = 0.8333333333333334
+ Weighted Precision = 0.8295454545454546
+ Weighted F1 = 0.8303693570451436
+ Macro Recall = 0.7857142857142857
+ Macro Precision = 0.8068181818181819
+ Macro F1 = 0.7948016415868673
+ NOUN
+ Accuracy = 0.7412451361867705
+ Weighted Recall = 0.7412451361867705
+ Weighted Precision = 0.7415338422415846
+ Weighted F1 = 0.7412519920472018
+ Macro Recall = 0.7413944542880075
+ Macro Precision = 0.7413688673531194
+ Macro F1 = 0.7412441567781374
+ VERB
+ Accuracy = 0.7830882352941176
+ Weighted Recall = 0.7830882352941176
+ Weighted Precision = 0.7827599789915967
+ Weighted F1 = 0.7810735749408122
+ Macro Recall = 0.7719602977667493
+ Macro Precision = 0.7822802197802198
+ Macro F1 = 0.7751845677542272
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "do_lower_case": false, "model_max_length": 512, "special_tokens_map_file": null, "tokenizer_file": "/home/hh2/.cache/huggingface/transformers/7766c86e10505ed9b39af34e456480399bf06e35b36b8f2b917460a2dbe94e59.a984cf52fc87644bd4a2165f1e07e0ac880272c1e82d648b4674907056912bd7", "name_or_path": "xlm-roberta-large", "tokenizer_class": "XLMRobertaTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f807611a8503a7d7259d7023e57258f3aab85a0a28b2b456975cd4625a8a0f99
+ size 2811