tharindu committed on
Commit 32af448
1 Parent(s): c47abf6

Upload folder using huggingface_hub

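The commit message indicates the files were pushed with huggingface_hub's folder-upload API. A minimal sketch of how such an upload is typically done, assuming `HfApi.upload_folder`; the repo id and local path below are placeholders, not values recorded in this commit:

```python
# Sketch of the kind of call that produces a commit like this one; repo_id and
# folder_path are hypothetical placeholders, not values taken from the commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="outputs/SinBERT-large",      # local training output directory (assumed)
    repo_id="your-username/your-model-repo",  # hypothetical target repository
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```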
best_model/config.json ADDED
@@ -0,0 +1,39 @@
{
  "_name_or_path": "NLPC-UOM/SinBERT-large",
  "architectures": [
    "RobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.35.2",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 52000
}
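This config describes a 12-layer RoBERTa sequence classifier with four generic labels (LABEL_0 to LABEL_3) and a 52,000-token vocabulary, initialized from NLPC-UOM/SinBERT-large. A minimal sketch of loading the uploaded checkpoint with transformers; the repo id is a placeholder for this repository's actual id:

```python
# Minimal sketch of loading the uploaded checkpoint with transformers; the
# repo id below is a placeholder, not the actual repository id.
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "your-username/your-model-repo"  # hypothetical
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)  # 4 labels, per config.json

inputs = tokenizer("Sample news text", return_tensors="pt", truncation=True, max_length=256)
pred_id = model(**inputs).logits.argmax(dim=-1).item()
print(model.config.id2label[pred_id])  # one of LABEL_0 .. LABEL_3
```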
best_model/eval_results.txt ADDED
@@ -0,0 +1,4 @@
eval_loss = 0.21681966317683152
macro_f1 = 0.9363705923825345
mcc = 0.9163726036647536
weighted_f1 = 0.9362311531774655
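These are the best checkpoint's evaluation scores. A sketch of how the three classification metrics could be recomputed from label predictions, assuming scikit-learn; the label arrays are hypothetical placeholders:

```python
# Sketch of recomputing the reported metrics with scikit-learn; y_true and
# y_pred are hypothetical stand-ins for the evaluation set's gold and
# predicted category indices (0..3).
from sklearn.metrics import f1_score, matthews_corrcoef

y_true = [0, 1, 2, 3, 1, 0]  # placeholder gold labels
y_pred = [0, 1, 2, 3, 2, 0]  # placeholder predictions

macro_f1 = f1_score(y_true, y_pred, average="macro")
weighted_f1 = f1_score(y_true, y_pred, average="weighted")
mcc = matthews_corrcoef(y_true, y_pred)
print(f"macro_f1 = {macro_f1}", f"weighted_f1 = {weighted_f1}", f"mcc = {mcc}", sep="\n")
```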
best_model/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
best_model/model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:006a890ae5c1478448e9546d9319b78271061fa5b69307115ed689142ef36f9e
size 506311456
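model.safetensors is stored via Git LFS, so the repository records only this pointer (the file's sha256 oid and its size in bytes). A small sketch for checking a locally downloaded copy against the pointer; the local path is an assumption:

```python
# Sketch of verifying a downloaded model.safetensors against this LFS pointer
# (oid is the file's sha256, size is its length in bytes); the local path is
# an assumption.
import hashlib
import os

path = "best_model/model.safetensors"  # assumed local copy
expected_oid = "006a890ae5c1478448e9546d9319b78271061fa5b69307115ed689142ef36f9e"
expected_size = 506311456

assert os.path.getsize(path) == expected_size, "size mismatch"
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
```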
best_model/model_args.json ADDED
@@ -0,0 +1 @@
+ {"adam_epsilon": 1e-08, "best_model_dir": "outputs/SinBERT-large/best_model", "cache_dir": "cache_dir/SinBERT-large", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": null, "adafactor_eps": [1e-30, 0.001], "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_beta1": null, "adafactor_scale_parameter": true, "adafactor_relative_step": true, "adafactor_warmup_init": true, "eval_batch_size": 16, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 600, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": true, "gradient_accumulation_steps": 1, "learning_rate": 2e-05, "local_rank": -1, "logging_steps": 600, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 256, "model_name": "NLPC-UOM/SinBERT-large", "model_type": "roberta", "multiprocessing_chunksize": 500, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 5, "optimizer": "AdamW", "output_dir": "outputs/SinBERT-large", "overwrite_output_dir": true, "process_count": 78, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_recent_only": true, "save_steps": 600, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "train_batch_size": 16, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_multiprocessing": false, "wandb_kwargs": {}, "wandb_project": "NSINa_category_prediction", "warmup_ratio": 0.06, "warmup_steps": 524, "weight_decay": 0.0, "model_class": "LCPModel", "labels_list": [0, 1, 2, 3], "labels_map": {}, "lazy_delimiter": "\t", "lazy_labels_column": 1, "lazy_loading": false, "lazy_loading_start_line": 1, "lazy_text_a_column": null, "lazy_text_b_column": null, "lazy_text_column": 0, "onnx": false, "regression": false, "sliding_window": false, "special_tokens_list": [], "stride": 0.8, "tie_value": 1}
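The argument dump above follows the simpletransformers-style schema (model_type/model_name plus training and evaluation settings); the original run used a custom "LCPModel" wrapper, which is not reproduced here. A rough sketch of re-instantiating a comparable classifier from this file, assuming simpletransformers' ClassificationModel as a stand-in:

```python
# Rough sketch, assuming simpletransformers' ClassificationModel as a stand-in
# for the custom LCPModel class used in the original run; only a few of the
# saved arguments are forwarded here for illustration.
import json

from simpletransformers.classification import ClassificationModel

with open("model_args.json") as f:
    saved_args = json.load(f)

model = ClassificationModel(
    model_type=saved_args["model_type"],   # "roberta"
    model_name=saved_args["model_name"],   # "NLPC-UOM/SinBERT-large"
    num_labels=len(saved_args["labels_list"]),
    args={
        "max_seq_length": saved_args["max_seq_length"],
        "train_batch_size": saved_args["train_batch_size"],
        "learning_rate": saved_args["learning_rate"],
        "num_train_epochs": saved_args["num_train_epochs"],
        "manual_seed": saved_args["manual_seed"],
    },
)
```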
best_model/optimizer.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:83bc8d1668a74ffd784e59cf6d1ec0ddae8dc4852290f7c35b6a2aa75703f76a
size 1007967802
best_model/scheduler.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:db102accab4047f842462446421435028dd1c3f65cfb35a26f0ae579a93e5ad1
size 1064
best_model/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
best_model/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "do_lower_case": false,
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "RobertaTokenizer",
  "unk_token": "<unk>"
}
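The tokenizer is a RobertaTokenizer with the standard <s>/</s>/<pad>/<unk>/<mask> special tokens; model_max_length is left at the unset sentinel value, so the 256-token limit comes from model_args.json rather than from the tokenizer itself. A minimal sketch of loading it and encoding a sample string, with a placeholder repo id:

```python
# Minimal sketch of loading the tokenizer described above; the repo id is a
# placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-username/your-model-repo")  # hypothetical
enc = tokenizer("Sample sentence", truncation=True, max_length=256)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))  # ['<s>', ..., '</s>']
```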
best_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9690c764ae76781eef6e9164a6bc27b6745385c84e2585caff73316a612cec7f
size 3576
best_model/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-8725-epoch-5/config.json ADDED
@@ -0,0 +1,39 @@
{
  "_name_or_path": "NLPC-UOM/SinBERT-large",
  "architectures": [
    "RobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.35.2",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 52000
}
checkpoint-8725-epoch-5/eval_results.txt ADDED
@@ -0,0 +1,4 @@
eval_loss = 0.35019068319525104
macro_f1 = 0.9371727749427544
mcc = 0.9165943698038781
weighted_f1 = 0.9372502407778476
checkpoint-8725-epoch-5/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-8725-epoch-5/model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f7d18fed3ee40c937a987c028c5f5687c9662abbc1fd00bc867a1fdd5c5887f8
size 506311456
checkpoint-8725-epoch-5/model_args.json ADDED
@@ -0,0 +1 @@
+ {"adam_epsilon": 1e-08, "best_model_dir": "outputs/SinBERT-large/best_model", "cache_dir": "cache_dir/SinBERT-large", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": null, "adafactor_eps": [1e-30, 0.001], "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_beta1": null, "adafactor_scale_parameter": true, "adafactor_relative_step": true, "adafactor_warmup_init": true, "eval_batch_size": 16, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 600, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": true, "gradient_accumulation_steps": 1, "learning_rate": 2e-05, "local_rank": -1, "logging_steps": 600, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 256, "model_name": "NLPC-UOM/SinBERT-large", "model_type": "roberta", "multiprocessing_chunksize": 500, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 5, "optimizer": "AdamW", "output_dir": "outputs/SinBERT-large", "overwrite_output_dir": true, "process_count": 78, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_recent_only": true, "save_steps": 600, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "train_batch_size": 16, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_multiprocessing": false, "wandb_kwargs": {}, "wandb_project": "NSINa_category_prediction", "warmup_ratio": 0.06, "warmup_steps": 524, "weight_decay": 0.0, "model_class": "LCPModel", "labels_list": [0, 1, 2, 3], "labels_map": {}, "lazy_delimiter": "\t", "lazy_labels_column": 1, "lazy_loading": false, "lazy_loading_start_line": 1, "lazy_text_a_column": null, "lazy_text_b_column": null, "lazy_text_column": 0, "onnx": false, "regression": false, "sliding_window": false, "special_tokens_list": [], "stride": 0.8, "tie_value": 1}
checkpoint-8725-epoch-5/optimizer.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:32fad313413349bff3e903649e144e6af52ad6c3b215b38f9aaba44a5361a3b3
size 1007967802
checkpoint-8725-epoch-5/scheduler.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:faf3b20aae09053b469f456c0a1ae63775158cccfb279532733245f8dd12b35b
size 1064
checkpoint-8725-epoch-5/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
checkpoint-8725-epoch-5/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "do_lower_case": false,
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "RobertaTokenizer",
  "unk_token": "<unk>"
}
checkpoint-8725-epoch-5/training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9690c764ae76781eef6e9164a6bc27b6745385c84e2585caff73316a612cec7f
size 3576
checkpoint-8725-epoch-5/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,39 @@
{
  "_name_or_path": "NLPC-UOM/SinBERT-large",
  "architectures": [
    "RobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.35.2",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 52000
}
eval_results.txt ADDED
@@ -0,0 +1,4 @@
eval_loss = 0.35019068319525104
macro_f1 = 0.9371727749427544
mcc = 0.9165943698038781
weighted_f1 = 0.9372502407778476
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f7d18fed3ee40c937a987c028c5f5687c9662abbc1fd00bc867a1fdd5c5887f8
size 506311456
model_args.json ADDED
@@ -0,0 +1 @@
+ {"adam_epsilon": 1e-08, "best_model_dir": "outputs/SinBERT-large/best_model", "cache_dir": "cache_dir/SinBERT-large", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": null, "adafactor_eps": [1e-30, 0.001], "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_beta1": null, "adafactor_scale_parameter": true, "adafactor_relative_step": true, "adafactor_warmup_init": true, "eval_batch_size": 16, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 600, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": true, "gradient_accumulation_steps": 1, "learning_rate": 2e-05, "local_rank": -1, "logging_steps": 600, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 256, "model_name": "NLPC-UOM/SinBERT-large", "model_type": "roberta", "multiprocessing_chunksize": 500, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 5, "optimizer": "AdamW", "output_dir": "outputs/SinBERT-large", "overwrite_output_dir": true, "process_count": 78, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_recent_only": true, "save_steps": 600, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "train_batch_size": 16, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_multiprocessing": false, "wandb_kwargs": {}, "wandb_project": "NSINa_category_prediction", "warmup_ratio": 0.06, "warmup_steps": 524, "weight_decay": 0.0, "model_class": "LCPModel", "labels_list": [0, 1, 2, 3], "labels_map": {}, "lazy_delimiter": "\t", "lazy_labels_column": 1, "lazy_loading": false, "lazy_loading_start_line": 1, "lazy_text_a_column": null, "lazy_text_b_column": null, "lazy_text_column": 0, "onnx": false, "regression": false, "sliding_window": false, "special_tokens_list": [], "stride": 0.8, "tie_value": 1}
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "do_lower_case": false,
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "RobertaTokenizer",
  "unk_token": "<unk>"
}
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9690c764ae76781eef6e9164a6bc27b6745385c84e2585caff73316a612cec7f
size 3576
training_progress_scores.csv ADDED
@@ -0,0 +1,20 @@
global_step,mcc,train_loss,eval_loss,macro_f1,weighted_f1
600,0.8586218410398948,0.03937673568725586,0.3243057405211262,0.8934597509510229,0.8940074854716451
1200,0.8880456556141589,0.10992908477783203,0.27570400417772767,0.9125397562882632,0.912045163133305
1745,0.9095818626100802,0.272040456533432,0.22852692486161424,0.9312991147257818,0.9314908632911428
1800,0.9081609311855388,0.017532706260681152,0.2360150751840208,0.9297402750539504,0.9297381320599136
2400,0.9089356189252266,0.6510375738143921,0.22538994926690442,0.9309233513369503,0.9309543352611926
3000,0.9141198181989681,0.07086089253425598,0.2388584490471817,0.9352456781054004,0.9351272096671566
3490,0.9163726036647536,0.6905561685562134,0.21681966317683152,0.9363705923825345,0.9362311531774655
3600,0.9183992486619676,0.3702475428581238,0.23464486745579802,0.9379726486311781,0.9378618223403414
4200,0.9110995364352387,0.017334148287773132,0.25100942412103266,0.9332041844869458,0.9329692578421809
4800,0.9175064582393759,0.38122090697288513,0.25642930632985206,0.9374653755765934,0.9372747689584016
5235,0.9138124152210487,0.09636306762695312,0.2760989813130474,0.9343578537920475,0.934226648180557
5400,0.9139278606640884,0.00464998185634613,0.2708434431493215,0.9347945405513579,0.9347766400718157
6000,0.9083328107781999,0.001109011471271515,0.3353042191112913,0.9315740926574485,0.9314478267677656
6600,0.9166418517805649,0.006841644644737244,0.31161130876424387,0.9372170042851029,0.9372191434166729
6980,0.9156723190858234,0.05192357674241066,0.3323965809075762,0.9366327535999623,0.9366528154514633
7200,0.9158383882950369,0.001010708510875702,0.31759343657296957,0.9364003268730573,0.936571535947253
7800,0.9179323772952851,0.00028868764638900757,0.3352872972514963,0.9381821420825974,0.9382092949156446
8400,0.9183692988263616,0.0002934113144874573,0.34579374569305577,0.9383956072964578,0.9384924540991418
8725,0.9165943698038781,0.0001783912885002792,0.35019068319525104,0.9371727749427544,0.9372502407778476
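This log records an evaluation every 600 steps plus at each epoch boundary. The lowest eval_loss occurs at step 3490 (0.2168), which matches best_model/eval_results.txt, while the final step 8725 matches the top-level eval_results.txt. A sketch of locating that row with pandas, assuming the CSV has been downloaded locally:

```python
# Sketch of inspecting the training log with pandas; assumes the CSV is
# available locally as training_progress_scores.csv.
import pandas as pd

df = pd.read_csv("training_progress_scores.csv")
best = df.loc[df["eval_loss"].idxmin()]
print(int(best["global_step"]), best["eval_loss"], best["macro_f1"])  # 3490 0.2168... 0.9363...
```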
vocab.json ADDED
The diff for this file is too large to render. See raw diff