3huvan committed
Commit 8f6f06a · verified · 1 Parent(s): 7147937

Add fine-tuned model for rhetorical role prediction

Files changed (41)
  1. epoch_1_checkpoint/config.json +55 -0
  2. epoch_1_checkpoint/label_mapping.json +15 -0
  3. epoch_1_checkpoint/metrics.json +7 -0
  4. epoch_1_checkpoint/model.safetensors +3 -0
  5. epoch_1_checkpoint/special_tokens_map.json +7 -0
  6. epoch_1_checkpoint/tokenizer.json +0 -0
  7. epoch_1_checkpoint/tokenizer_config.json +58 -0
  8. epoch_1_checkpoint/vocab.txt +0 -0
  9. epoch_2_checkpoint/config.json +55 -0
  10. epoch_2_checkpoint/label_mapping.json +15 -0
  11. epoch_2_checkpoint/metrics.json +7 -0
  12. epoch_2_checkpoint/model.safetensors +3 -0
  13. epoch_2_checkpoint/special_tokens_map.json +7 -0
  14. epoch_2_checkpoint/tokenizer.json +0 -0
  15. epoch_2_checkpoint/tokenizer_config.json +58 -0
  16. epoch_2_checkpoint/vocab.txt +0 -0
  17. epoch_3_checkpoint/config.json +55 -0
  18. epoch_3_checkpoint/label_mapping.json +15 -0
  19. epoch_3_checkpoint/metrics.json +7 -0
  20. epoch_3_checkpoint/model.safetensors +3 -0
  21. epoch_3_checkpoint/special_tokens_map.json +7 -0
  22. epoch_3_checkpoint/tokenizer.json +0 -0
  23. epoch_3_checkpoint/tokenizer_config.json +58 -0
  24. epoch_3_checkpoint/vocab.txt +0 -0
  25. final_checkpoint/config.json +55 -0
  26. final_checkpoint/label_mapping.json +15 -0
  27. final_checkpoint/metrics.json +89 -0
  28. final_checkpoint/model.safetensors +3 -0
  29. final_checkpoint/special_tokens_map.json +7 -0
  30. final_checkpoint/tokenizer.json +0 -0
  31. final_checkpoint/tokenizer_config.json +58 -0
  32. final_checkpoint/vocab.txt +0 -0
  33. initial_checkpoint/config.json +54 -0
  34. initial_checkpoint/label_mapping.json +15 -0
  35. initial_checkpoint/metrics.json +1 -0
  36. initial_checkpoint/model.safetensors +3 -0
  37. initial_checkpoint/special_tokens_map.json +7 -0
  38. initial_checkpoint/tokenizer.json +0 -0
  39. initial_checkpoint/tokenizer_config.json +58 -0
  40. initial_checkpoint/vocab.txt +0 -0
  41. label_mapping.json +15 -0
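
Each checkpoint directory added here is a self-contained transformers model folder (config, weights, tokenizer files). A minimal loading sketch — the local directory name is taken from this commit; a Hub repo id would work the same way once pushed, but the exact id is not shown on this page:

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

checkpoint = "final_checkpoint"  # or epoch_1_checkpoint, epoch_2_checkpoint, ...

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)  # 13-way classifier
```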
epoch_1_checkpoint/config.json ADDED
@@ -0,0 +1,55 @@
+{
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3",
+    "4": "LABEL_4",
+    "5": "LABEL_5",
+    "6": "LABEL_6",
+    "7": "LABEL_7",
+    "8": "LABEL_8",
+    "9": "LABEL_9",
+    "10": "LABEL_10",
+    "11": "LABEL_11",
+    "12": "LABEL_12"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_10": 10,
+    "LABEL_11": 11,
+    "LABEL_12": 12,
+    "LABEL_2": 2,
+    "LABEL_3": 3,
+    "LABEL_4": 4,
+    "LABEL_5": 5,
+    "LABEL_6": 6,
+    "LABEL_7": 7,
+    "LABEL_8": 8,
+    "LABEL_9": 9
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 31090
+}
epoch_1_checkpoint/label_mapping.json ADDED
@@ -0,0 +1,15 @@
+{
+  "ANALYSIS": 0,
+  "ARG_PETITIONER": 1,
+  "ARG_RESPONDENT": 2,
+  "FAC": 3,
+  "ISSUE": 4,
+  "NONE": 5,
+  "PREAMBLE": 6,
+  "PRE_NOT_RELIED": 7,
+  "PRE_RELIED": 8,
+  "RATIO": 9,
+  "RLC": 10,
+  "RPC": 11,
+  "STA": 12
+}
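
Note that config.json ships only generic LABEL_0…LABEL_12 names, so this label_mapping.json is the authoritative name-to-id mapping. A small sketch of inverting it to decode predictions (file path as committed; otherwise standard-library json):

```python
import json

with open("epoch_1_checkpoint/label_mapping.json") as f:
    label2id = json.load(f)  # {"ANALYSIS": 0, ..., "STA": 12}

# Invert to decode model outputs (class index -> rhetorical role name)
id2label = {v: k for k, v in label2id.items()}
print(id2label[9])  # -> "RATIO"
```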
epoch_1_checkpoint/metrics.json ADDED
@@ -0,0 +1,7 @@
+{
+  "epoch": 1,
+  "train_loss": 1.2134970770511473,
+  "train_f1": 0.5850159013978483,
+  "train_acc": 0.6113641068101843,
+  "epoch_time": 1287.0544500350952
+}
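
Each epoch checkpoint carries a small metrics.json like the one above; a throwaway sketch for tabulating training progress across epochs (directory names as committed):

```python
import json
from pathlib import Path

# Print one line per epoch checkpoint: loss, macro F1, accuracy
for ckpt in sorted(Path(".").glob("epoch_*_checkpoint")):
    m = json.loads((ckpt / "metrics.json").read_text())
    print(f"epoch {m['epoch']}: loss={m['train_loss']:.4f} "
          f"f1={m['train_f1']:.4f} acc={m['train_acc']:.4f}")
```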
epoch_1_checkpoint/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d26daf9e3671da9093947c854929451d2362a41213f0d8397ab655a6c598f0e
+size 439737380
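
The model.safetensors entries in this commit are Git LFS pointer files (~440 MB objects), not the weights themselves. When fetching programmatically, huggingface_hub resolves the LFS object transparently; a sketch, with the repo id left as a placeholder since it is not shown on this page:

```python
from huggingface_hub import hf_hub_download

# "<user>/<repo>" is a placeholder -- substitute the actual Hub repo id.
path = hf_hub_download(
    repo_id="<user>/<repo>",
    filename="epoch_1_checkpoint/model.safetensors",
)
print(path)  # local cache path to the resolved 439,737,380-byte weight file
```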
epoch_1_checkpoint/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
epoch_1_checkpoint/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
epoch_1_checkpoint/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "104": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
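
One caveat worth flagging: "model_max_length" here is the transformers "unset" sentinel (a huge integer), so the tokenizer will not truncate on its own, while the model's position embeddings cap out at 512 (see config.json). A sketch of passing the limit explicitly, assuming the committed checkpoint directory:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("epoch_1_checkpoint")

# model_max_length is the "no limit recorded" sentinel, so cap to the
# model's 512 position embeddings explicitly to avoid index errors:
enc = tok("Some court sentence ...", truncation=True, max_length=512,
          return_tensors="pt")
```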
epoch_1_checkpoint/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
epoch_2_checkpoint/config.json ADDED
@@ -0,0 +1,55 @@
+{
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3",
+    "4": "LABEL_4",
+    "5": "LABEL_5",
+    "6": "LABEL_6",
+    "7": "LABEL_7",
+    "8": "LABEL_8",
+    "9": "LABEL_9",
+    "10": "LABEL_10",
+    "11": "LABEL_11",
+    "12": "LABEL_12"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_10": 10,
+    "LABEL_11": 11,
+    "LABEL_12": 12,
+    "LABEL_2": 2,
+    "LABEL_3": 3,
+    "LABEL_4": 4,
+    "LABEL_5": 5,
+    "LABEL_6": 6,
+    "LABEL_7": 7,
+    "LABEL_8": 8,
+    "LABEL_9": 9
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 31090
+}
epoch_2_checkpoint/label_mapping.json ADDED
@@ -0,0 +1,15 @@
+{
+  "ANALYSIS": 0,
+  "ARG_PETITIONER": 1,
+  "ARG_RESPONDENT": 2,
+  "FAC": 3,
+  "ISSUE": 4,
+  "NONE": 5,
+  "PREAMBLE": 6,
+  "PRE_NOT_RELIED": 7,
+  "PRE_RELIED": 8,
+  "RATIO": 9,
+  "RLC": 10,
+  "RPC": 11,
+  "STA": 12
+}
epoch_2_checkpoint/metrics.json ADDED
@@ -0,0 +1,7 @@
+{
+  "epoch": 2,
+  "train_loss": 0.9067330668838675,
+  "train_f1": 0.6805352949525697,
+  "train_acc": 0.6975781411716001,
+  "epoch_time": 1287.245393037796
+}
epoch_2_checkpoint/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f808d381da5c8b91a29b36cfc2797e30f86a186c5fc99df07fdb64c34c52902
+size 439737380
epoch_2_checkpoint/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
epoch_2_checkpoint/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
epoch_2_checkpoint/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "104": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
epoch_2_checkpoint/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
epoch_3_checkpoint/config.json ADDED
@@ -0,0 +1,55 @@
+{
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3",
+    "4": "LABEL_4",
+    "5": "LABEL_5",
+    "6": "LABEL_6",
+    "7": "LABEL_7",
+    "8": "LABEL_8",
+    "9": "LABEL_9",
+    "10": "LABEL_10",
+    "11": "LABEL_11",
+    "12": "LABEL_12"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_10": 10,
+    "LABEL_11": 11,
+    "LABEL_12": 12,
+    "LABEL_2": 2,
+    "LABEL_3": 3,
+    "LABEL_4": 4,
+    "LABEL_5": 5,
+    "LABEL_6": 6,
+    "LABEL_7": 7,
+    "LABEL_8": 8,
+    "LABEL_9": 9
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 31090
+}
epoch_3_checkpoint/label_mapping.json ADDED
@@ -0,0 +1,15 @@
+{
+  "ANALYSIS": 0,
+  "ARG_PETITIONER": 1,
+  "ARG_RESPONDENT": 2,
+  "FAC": 3,
+  "ISSUE": 4,
+  "NONE": 5,
+  "PREAMBLE": 6,
+  "PRE_NOT_RELIED": 7,
+  "PRE_RELIED": 8,
+  "RATIO": 9,
+  "RLC": 10,
+  "RPC": 11,
+  "STA": 12
+}
epoch_3_checkpoint/metrics.json ADDED
@@ -0,0 +1,7 @@
+{
+  "epoch": 3,
+  "train_loss": 0.6808817090361767,
+  "train_f1": 0.7583717663390656,
+  "train_acc": 0.7675774511833299,
+  "epoch_time": 1287.229285478592
+}
epoch_3_checkpoint/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21aeeb6e862bae2aad96138026892716ca643740c2a41b8ba04caace16a607f1
+size 439737380
epoch_3_checkpoint/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
epoch_3_checkpoint/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
epoch_3_checkpoint/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "104": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
epoch_3_checkpoint/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
final_checkpoint/config.json ADDED
@@ -0,0 +1,55 @@
+{
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3",
+    "4": "LABEL_4",
+    "5": "LABEL_5",
+    "6": "LABEL_6",
+    "7": "LABEL_7",
+    "8": "LABEL_8",
+    "9": "LABEL_9",
+    "10": "LABEL_10",
+    "11": "LABEL_11",
+    "12": "LABEL_12"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_10": 10,
+    "LABEL_11": 11,
+    "LABEL_12": 12,
+    "LABEL_2": 2,
+    "LABEL_3": 3,
+    "LABEL_4": 4,
+    "LABEL_5": 5,
+    "LABEL_6": 6,
+    "LABEL_7": 7,
+    "LABEL_8": 8,
+    "LABEL_9": 9
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 31090
+}
final_checkpoint/label_mapping.json ADDED
@@ -0,0 +1,15 @@
+{
+  "ANALYSIS": 0,
+  "ARG_PETITIONER": 1,
+  "ARG_RESPONDENT": 2,
+  "FAC": 3,
+  "ISSUE": 4,
+  "NONE": 5,
+  "PREAMBLE": 6,
+  "PRE_NOT_RELIED": 7,
+  "PRE_RELIED": 8,
+  "RATIO": 9,
+  "RLC": 10,
+  "RPC": 11,
+  "STA": 12
+}
final_checkpoint/metrics.json ADDED
@@ -0,0 +1,89 @@
+{
+  "model_name": "SciBERT",
+  "model_checkpoint": "allenai/scibert_scivocab_uncased",
+  "task": "Rhetorical Role Prediction",
+  "train_metrics": {
+    "macro_f1": 0.7369821755810839,
+    "weighted_f1": 0.8643526380515923,
+    "accuracy": 0.8747326295452977,
+    "per_label_f1": {
+      "ANALYSIS": 0.8920888731834445,
+      "ARG_PETITIONER": 0.7804546996946047,
+      "ARG_RESPONDENT": 0.5121951219512195,
+      "FAC": 0.9187826400144067,
+      "ISSUE": 0.9286675639300135,
+      "NONE": 0.928243195475433,
+      "PREAMBLE": 0.9233246624550973,
+      "PRE_NOT_RELIED": 0.037267080745341616,
+      "PRE_RELIED": 0.8005381769256643,
+      "RATIO": 0.23667100130039012,
+      "RLC": 0.800771208226221,
+      "RPC": 0.9221824686940966,
+      "STA": 0.899581589958159
+    },
+    "latency_ms_per_doc": 1.1843175841341813,
+    "latency_ms_per_sentence": 1.1843175841341813,
+    "eval_time_seconds": 415.6238057613373,
+    "num_samples": 28986
+  },
+  "dev_metrics": {
+    "macro_f1": 0.49354567091395296,
+    "weighted_f1": 0.6356316048961237,
+    "accuracy": 0.649183744355679,
+    "per_label_f1": {
+      "ANALYSIS": 0.6848249027237354,
+      "ARG_PETITIONER": 0.27586206896551724,
+      "ARG_RESPONDENT": 0.16326530612244897,
+      "FAC": 0.6260089686098654,
+      "ISSUE": 0.7272727272727273,
+      "NONE": 0.85,
+      "PREAMBLE": 0.7220376522702104,
+      "PRE_NOT_RELIED": 0.0,
+      "PRE_RELIED": 0.4549019607843137,
+      "RATIO": 0.10666666666666667,
+      "RLC": 0.34065934065934067,
+      "RPC": 0.7979274611398963,
+      "STA": 0.6666666666666666
+    },
+    "latency_ms_per_doc": 1.2095364566973903,
+    "latency_ms_per_sentence": 1.2095364566973903,
+    "eval_time_seconds": 41.31308722496033,
+    "num_samples": 2879
+  },
+  "overfitting_gap": 0.22872103315546866,
+  "model_size_mb": 419.35161209106445,
+  "training_memory_footprint_gb": 3.5974082946777344,
+  "label_mapping": {
+    "ANALYSIS": 0,
+    "ARG_PETITIONER": 1,
+    "ARG_RESPONDENT": 2,
+    "FAC": 3,
+    "ISSUE": 4,
+    "NONE": 5,
+    "PREAMBLE": 6,
+    "PRE_NOT_RELIED": 7,
+    "PRE_RELIED": 8,
+    "RATIO": 9,
+    "RLC": 10,
+    "RPC": 11,
+    "STA": 12
+  },
+  "id_mapping": {
+    "0": "ANALYSIS",
+    "1": "ARG_PETITIONER",
+    "2": "ARG_RESPONDENT",
+    "3": "FAC",
+    "4": "ISSUE",
+    "5": "NONE",
+    "6": "PREAMBLE",
+    "7": "PRE_NOT_RELIED",
+    "8": "PRE_RELIED",
+    "9": "RATIO",
+    "10": "RLC",
+    "11": "RPC",
+    "12": "STA"
+  },
+  "training_time_seconds": 3880.8387916088104,
+  "evaluation_time_seconds": 456.9819755554199,
+  "timestamp": "2025-06-17T19:20:10.428589"
+}
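
The "overfitting_gap" in this file is consistent with the difference between train and dev weighted F1; a quick sanity check with the values copied from above:

```python
train_weighted_f1 = 0.8643526380515923
dev_weighted_f1 = 0.6356316048961237

# ~0.228721..., matching the reported "overfitting_gap"
print(train_weighted_f1 - dev_weighted_f1)
```

The dev-set drop is concentrated in the rarer roles (PRE_NOT_RELIED at 0.0, RATIO at ~0.107), which is what pulls macro F1 down to ~0.494 while weighted F1 stays at ~0.636.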
final_checkpoint/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21aeeb6e862bae2aad96138026892716ca643740c2a41b8ba04caace16a607f1
+size 439737380
final_checkpoint/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
final_checkpoint/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
final_checkpoint/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "104": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
final_checkpoint/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
initial_checkpoint/config.json ADDED
@@ -0,0 +1,54 @@
+{
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3",
+    "4": "LABEL_4",
+    "5": "LABEL_5",
+    "6": "LABEL_6",
+    "7": "LABEL_7",
+    "8": "LABEL_8",
+    "9": "LABEL_9",
+    "10": "LABEL_10",
+    "11": "LABEL_11",
+    "12": "LABEL_12"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_10": 10,
+    "LABEL_11": 11,
+    "LABEL_12": 12,
+    "LABEL_2": 2,
+    "LABEL_3": 3,
+    "LABEL_4": 4,
+    "LABEL_5": 5,
+    "LABEL_6": 6,
+    "LABEL_7": 7,
+    "LABEL_8": 8,
+    "LABEL_9": 9
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 31090
+}
initial_checkpoint/label_mapping.json ADDED
@@ -0,0 +1,15 @@
+{
+  "ANALYSIS": 0,
+  "ARG_PETITIONER": 1,
+  "ARG_RESPONDENT": 2,
+  "FAC": 3,
+  "ISSUE": 4,
+  "NONE": 5,
+  "PREAMBLE": 6,
+  "PRE_NOT_RELIED": 7,
+  "PRE_RELIED": 8,
+  "RATIO": 9,
+  "RLC": 10,
+  "RPC": 11,
+  "STA": 12
+}
initial_checkpoint/metrics.json ADDED
@@ -0,0 +1 @@
+{}
initial_checkpoint/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1422bae0d0227195eb194337325a0d678c4d2b426682781c9754ab186c880338
+size 439737380
initial_checkpoint/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
initial_checkpoint/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
initial_checkpoint/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "104": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
initial_checkpoint/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
label_mapping.json ADDED
@@ -0,0 +1,15 @@
+{
+  "ANALYSIS": 0,
+  "ARG_PETITIONER": 1,
+  "ARG_RESPONDENT": 2,
+  "FAC": 3,
+  "ISSUE": 4,
+  "NONE": 5,
+  "PREAMBLE": 6,
+  "PRE_NOT_RELIED": 7,
+  "PRE_RELIED": 8,
+  "RATIO": 9,
+  "RLC": 10,
+  "RPC": 11,
+  "STA": 12
+}
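
Putting the pieces together, a hedged end-to-end sketch: load the final checkpoint, classify one sentence, and decode via this top-level label_mapping.json (paths as committed; the base model is allenai/scibert_scivocab_uncased per final_checkpoint/metrics.json, and the example sentence is purely illustrative):

```python
import json

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tok = AutoTokenizer.from_pretrained("final_checkpoint")
model = AutoModelForSequenceClassification.from_pretrained("final_checkpoint")
model.eval()

with open("label_mapping.json") as f:
    id2label = {v: k for k, v in json.load(f).items()}

sentence = "The appellant contended that the notice was invalid."
enc = tok(sentence, truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    pred = model(**enc).logits.argmax(dim=-1).item()

print(id2label[pred])  # one of the 13 rhetorical roles, e.g. ARG_PETITIONER
```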