TransQuest committed
Commit a7f24e5
1 Parent(s): 6f272e3

from Google Colab

config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "xlm-roberta-large",
+   "architectures": [
+     "XLMRobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "type_vocab_size": 1,
+   "vocab_size": 250002
+ }
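This config pins the model shape (24 layers, hidden size 1024, i.e. xlm-roberta-large) and a single-entry id2label, so the checkpoint carries a one-output regression head. A minimal sketch of reading it with Hugging Face Transformers; "path/to/this-repo" is a placeholder for a local clone of this repository, not a confirmed Hub id.

```python
# A sketch, not the author's script: inspect config.json and instantiate the
# model class it names. "path/to/this-repo" is a placeholder path.
from transformers import XLMRobertaConfig, XLMRobertaForSequenceClassification

config = XLMRobertaConfig.from_pretrained("path/to/this-repo")
print(config.num_hidden_layers, config.hidden_size)  # 24, 1024 -> xlm-roberta-large
print(config.id2label)  # a single label, i.e. a one-output regression head

# "architectures" above names the class that pytorch_model.bin plugs into:
model = XLMRobertaForSequenceClassification.from_pretrained("path/to/this-repo")
```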
eval_results.txt ADDED
@@ -0,0 +1,4 @@
+ eval_loss = 0.018867283607092264
+ mae = 0.09502819008060864
+ pearson_corr = 0.7341828207497875
+ spearman_corr = 0.659088726870674
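These four values are standard regression diagnostics for quality estimation. A sketch of how such metrics are computed, with illustrative predictions and gold scores rather than the actual evaluation data behind the numbers above (eval_loss is assumed here to be mean squared error, the usual regression loss):

```python
# Illustrative arrays only; not the evaluation set that produced the results above.
import numpy as np
from scipy.stats import pearsonr, spearmanr

preds = np.array([0.82, 0.55, 0.91, 0.40])  # model scores (hypothetical)
gold = np.array([0.80, 0.60, 0.88, 0.35])   # human quality scores (hypothetical)

eval_loss = np.mean((preds - gold) ** 2)    # assumed MSE regression loss
mae = np.mean(np.abs(preds - gold))
pearson_corr, _ = pearsonr(preds, gold)
spearman_corr, _ = spearmanr(preds, gold)
print(eval_loss, mae, pearson_corr, spearman_corr)
```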
model_args.json ADDED
@@ -0,0 +1 @@
+ {"adam_epsilon": 1e-08, "best_model_dir": "temp/outputs/best_model", "cache_dir": "temp/cache_dir/", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 10, "encoding": null, "adafactor_eps": [1e-30, 0.001], "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_beta1": null, "adafactor_scale_parameter": true, "adafactor_relative_step": true, "adafactor_warmup_init": true, "eval_batch_size": 8, "evaluate_during_training": true, "evaluate_during_training_silent": false, "evaluate_during_training_steps": 300, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 2e-05, "local_rank": -1, "logging_steps": 300, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 128, "model_name": "xlm-roberta-large", "model_type": "xlmroberta", "multiprocessing_chunksize": 500, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 3, "optimizer": "AdamW", "output_dir": "temp/outputs/", "overwrite_output_dir": true, "process_count": 1, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": false, "save_optimizer_and_scheduler": true, "save_recent_only": true, "save_steps": 300, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "train_batch_size": 8, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": true, "use_multiprocessing": false, "wandb_kwargs": {}, "wandb_project": null, "warmup_ratio": 0.1, "warmup_steps": 237, "weight_decay": 0, "model_class": "MonoTransQuestModel", "labels_list": [0], "labels_map": {}, "lazy_delimiter": "\t", "lazy_labels_column": 1, "lazy_loading": false, "lazy_loading_start_line": 1, "lazy_text_a_column": null, "lazy_text_b_column": null, "lazy_text_column": 0, "onnx": false, "regression": true, "sliding_window": false, "special_tokens_list": [], "stride": 0.8, "tie_value": 1}
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd9ba11983029f8367436ee081cc87381713f614122a8783c9f17fff91f057bb
+ size 4479371847
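The large binaries in this commit are stored as Git LFS pointers like the one above: the real file lives in LFS storage and is identified by its sha256 digest (oid) and byte size. A sketch of verifying a downloaded file against its pointer:

```python
# Verify a fetched LFS-tracked file against the pointer's oid and size.
import hashlib
import os

def verify_lfs_file(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check that a file matches its LFS pointer (sha256 digest and byte size)."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

print(verify_lfs_file(
    "optimizer.pt",
    "cd9ba11983029f8367436ee081cc87381713f614122a8783c9f17fff91f057bb",
    4479371847,
))
```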
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a8e4fbff06611e0bec86524e9d1756d12905cabb0d82f7ba285a2a36e3b142a
+ size 2243934656
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a97c75625c3bd353160c853c9abce2c9fed7e9e5472e368f8c20e8a7ec21345
+ size 623
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "do_lower_case": false, "model_max_length": 512, "name_or_path": "xlm-roberta-large"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40e28956342b779e50900c22c85fdf0a22e61bc4026e2d1a0cdc4e717882657a
+ size 3119