researchaccount committed on
Commit
6e25252
Parent: b5467ce

Added files

Files changed (35)
  1. train_0/best_model/config.json +41 -0
  2. train_0/best_model/pytorch_model.bin +3 -0
  3. train_0/best_model/special_tokens_map.json +1 -0
  4. train_0/best_model/tokenizer.json +0 -0
  5. train_0/best_model/tokenizer_config.json +1 -0
  6. train_0/best_model/training_args.bin +3 -0
  7. train_0/best_model/vocab.txt +0 -0
  8. train_1/best_model/config.json +41 -0
  9. train_1/best_model/pytorch_model.bin +3 -0
  10. train_1/best_model/special_tokens_map.json +1 -0
  11. train_1/best_model/tokenizer.json +0 -0
  12. train_1/best_model/tokenizer_config.json +1 -0
  13. train_1/best_model/training_args.bin +3 -0
  14. train_1/best_model/vocab.txt +0 -0
  15. train_2/best_model/config.json +41 -0
  16. train_2/best_model/pytorch_model.bin +3 -0
  17. train_2/best_model/special_tokens_map.json +1 -0
  18. train_2/best_model/tokenizer.json +0 -0
  19. train_2/best_model/tokenizer_config.json +1 -0
  20. train_2/best_model/training_args.bin +3 -0
  21. train_2/best_model/vocab.txt +0 -0
  22. train_3/best_model/config.json +41 -0
  23. train_3/best_model/pytorch_model.bin +3 -0
  24. train_3/best_model/special_tokens_map.json +1 -0
  25. train_3/best_model/tokenizer.json +0 -0
  26. train_3/best_model/tokenizer_config.json +1 -0
  27. train_3/best_model/training_args.bin +3 -0
  28. train_3/best_model/vocab.txt +0 -0
  29. train_4/best_model/config.json +41 -0
  30. train_4/best_model/pytorch_model.bin +3 -0
  31. train_4/best_model/special_tokens_map.json +1 -0
  32. train_4/best_model/tokenizer.json +0 -0
  33. train_4/best_model/tokenizer_config.json +1 -0
  34. train_4/best_model/training_args.bin +3 -0
  35. train_4/best_model/vocab.txt +0 -0
train_0/best_model/config.json ADDED
@@ -0,0 +1,41 @@
+{
+  "_name_or_path": "UBC-NLP/MARBERT",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "directionality": "bidi",
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "Neutral",
+    "1": "Negative",
+    "2": "Positive"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "Negative": 1,
+    "Neutral": 0,
+    "Positive": 2
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "transformers_version": "4.6.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 100000
+}
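Each train_N/best_model directory is a self-contained Hugging Face checkpoint (config, weights, tokenizer), so any of the five folds can be loaded directly with transformers. A minimal inference sketch in Python, assuming this repository has been cloned locally with Git LFS so that pytorch_model.bin holds the real weights rather than the pointer stub shown in the next diff:

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Hypothetical local path inside a clone of this repo; train_1 .. train_4 work identically.
model_dir = "train_0/best_model"

tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
model.eval()

# MARBERT is an Arabic BERT; the example string here is arbitrary.
inputs = tokenizer("مثال نصي", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# id2label in config.json maps class ids to Neutral / Negative / Positive.
print(model.config.id2label[logits.argmax(dim=-1).item()])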
train_0/best_model/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04364bfdeba321bd74858d3a9ec84911c2bce6800f01419c1ee5d1b027beec94
+size 651463945
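The three lines above are a Git LFS pointer, not the model weights: a clone made without LFS yields only this stub, and "git lfs pull" replaces it with the ~651 MB binary identified by the sha256 oid. Alternatively, huggingface_hub can resolve the file directly; a sketch with a placeholder repo_id, since the repository id is not shown on this page:

from huggingface_hub import hf_hub_download

# repo_id below is hypothetical -- substitute this repository's actual <user>/<name>.
weights_path = hf_hub_download(
    repo_id="researchaccount/REPO_NAME",
    filename="train_0/best_model/pytorch_model.bin",
)
print(weights_path)  # local cache path to the resolved 651,463,945-byte file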
train_0/best_model/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
train_0/best_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
train_0/best_model/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "special_tokens_map_file": null, "name_or_path": "UBC-NLP/MARBERT"}
train_0/best_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8abd18944d9c84d0d28d061809a0154f0d8b4fd5a91b83a8e133ac04acda5e0c
+size 2415
train_0/best_model/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
train_1/best_model/config.json ADDED
@@ -0,0 +1,41 @@
+{
+  "_name_or_path": "UBC-NLP/MARBERT",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "directionality": "bidi",
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "Neutral",
+    "1": "Negative",
+    "2": "Positive"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "Negative": 1,
+    "Neutral": 0,
+    "Positive": 2
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "transformers_version": "4.6.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 100000
+}
train_1/best_model/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3e90ce3485b053508cf5388b5e8b27661ce3ab1cdf2f3bd51c1c068f496f398
+size 651463945
train_1/best_model/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
train_1/best_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
train_1/best_model/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "special_tokens_map_file": null, "name_or_path": "UBC-NLP/MARBERT"}
train_1/best_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:390328999eb414efc84a84324ef6070f70be3cc6ccf21a0e6ede125a4e4eaee0
+size 2415
train_1/best_model/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
train_2/best_model/config.json ADDED
@@ -0,0 +1,41 @@
+{
+  "_name_or_path": "UBC-NLP/MARBERT",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "directionality": "bidi",
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "Neutral",
+    "1": "Negative",
+    "2": "Positive"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "Negative": 1,
+    "Neutral": 0,
+    "Positive": 2
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "transformers_version": "4.6.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 100000
+}
train_2/best_model/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fbc57fd302061137e2fe6da85967fc6ad9dfd7b2eb67db6ed9ad43732ce3c6b
+size 651463945
train_2/best_model/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
train_2/best_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
train_2/best_model/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "special_tokens_map_file": null, "name_or_path": "UBC-NLP/MARBERT"}
train_2/best_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a9af537f5a2190ded1b4ebdb4f5165308a013a9cd6e982bfe51cd5e7ce40a68
+size 2415
train_2/best_model/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
train_3/best_model/config.json ADDED
@@ -0,0 +1,41 @@
+{
+  "_name_or_path": "UBC-NLP/MARBERT",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "directionality": "bidi",
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "Neutral",
+    "1": "Negative",
+    "2": "Positive"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "Negative": 1,
+    "Neutral": 0,
+    "Positive": 2
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "transformers_version": "4.6.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 100000
+}
train_3/best_model/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c1bb39c0c01a21b851346f5a40bd1265e7dd074cdc913e24032a1700dbe170d
+size 651463945
train_3/best_model/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
train_3/best_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
train_3/best_model/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "special_tokens_map_file": null, "name_or_path": "UBC-NLP/MARBERT"}
train_3/best_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bab4119e7a33be6d141c5ce1d000f6fb05eb5ec7c9330641f2a9db55f06c69ca
+size 2415
train_3/best_model/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
train_4/best_model/config.json ADDED
@@ -0,0 +1,41 @@
+{
+  "_name_or_path": "UBC-NLP/MARBERT",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "directionality": "bidi",
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "Neutral",
+    "1": "Negative",
+    "2": "Positive"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "Negative": 1,
+    "Neutral": 0,
+    "Positive": 2
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "transformers_version": "4.6.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 100000
+}
train_4/best_model/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c18b788ddf1afcae72c6fd792312d513c6f0009f39fdc9b144caf2c054bda7b
+size 651463945
train_4/best_model/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
train_4/best_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
train_4/best_model/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "special_tokens_map_file": null, "name_or_path": "UBC-NLP/MARBERT"}
train_4/best_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:283c8d091d93550e55dc3bcdc576d251d242ab57de19e6cb4691e5f6ff4c3246
+size 2415
train_4/best_model/vocab.txt ADDED
The diff for this file is too large to render. See raw diff