dean22029 committed
Commit 0a9e3cb
1 Parent(s): 9bbdf4d

Upload model

Files changed (5):
  1. all_results.json +1 -1
  2. config.json +15 -15
  3. model.safetensors +1 -1
  4. tokenizer.json +1 -1
  5. tokenizer_config.json +4 -0
all_results.json CHANGED
@@ -1 +1 @@
-{"eval_accuracy": 0.7609399916072178}
+{"eval_accuracy": 0.7833151489718841}
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "hfl/chinese-roberta-wwm-ext-large",
3
  "architectures": [
4
  "BertForSequenceClassification"
5
  ],
@@ -12,20 +12,20 @@
12
  "hidden_dropout_prob": 0.1,
13
  "hidden_size": 1024,
14
  "id2label": {
15
- "0": "LABEL_0",
16
- "1": "LABEL_1",
17
- "2": "LABEL_2",
18
- "3": "LABEL_3",
19
- "4": "LABEL_4",
20
- "5": "LABEL_5",
21
- "6": "LABEL_6",
22
- "7": "LABEL_7",
23
- "8": "LABEL_8",
24
- "9": "LABEL_9",
25
- "10": "LABEL_10",
26
- "11": "LABEL_11",
27
- "12": "LABEL_12",
28
- "13": "LABEL_13"
29
  },
30
  "initializer_range": 0.02,
31
  "intermediate_size": 4096,
 
1
  {
2
+ "_name_or_path": "dean22029/PeoplesDailyClassifier",
3
  "architectures": [
4
  "BertForSequenceClassification"
5
  ],
 
12
  "hidden_dropout_prob": 0.1,
13
  "hidden_size": 1024,
14
  "id2label": {
15
+ "0": "0",
16
+ "1": "1",
17
+ "2": "2",
18
+ "3": "3",
19
+ "4": "4",
20
+ "5": "5",
21
+ "6": "6",
22
+ "7": "7",
23
+ "8": "8",
24
+ "9": "9",
25
+ "10": "10",
26
+ "11": "11",
27
+ "12": "12",
28
+ "13": "13"
29
  },
30
  "initializer_range": 0.02,
31
  "intermediate_size": 4096,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b56f7cbbd65f48fc6b41c0c4efcb3afd4a3c4241d5b855cdeaa60aa7bed3e85
+oid sha256:530edbc16833292dbe1a4acfa4ed3d83a391d71786804e3bad205735a4f105c9
 size 1302194104
tokenizer.json CHANGED
@@ -2,7 +2,7 @@
2
  "version": "1.0",
3
  "truncation": {
4
  "direction": "Right",
5
- "max_length": 128,
6
  "strategy": "LongestFirst",
7
  "stride": 0
8
  },
 
2
  "version": "1.0",
3
  "truncation": {
4
  "direction": "Right",
5
+ "max_length": 256,
6
  "strategy": "LongestFirst",
7
  "stride": 0
8
  },
tokenizer_config.json CHANGED
@@ -45,11 +45,15 @@
45
  "cls_token": "[CLS]",
46
  "do_lower_case": true,
47
  "mask_token": "[MASK]",
 
48
  "model_max_length": 1000000000000000019884624838656,
49
  "pad_token": "[PAD]",
50
  "sep_token": "[SEP]",
 
51
  "strip_accents": null,
52
  "tokenize_chinese_chars": true,
53
  "tokenizer_class": "BertTokenizer",
 
 
54
  "unk_token": "[UNK]"
55
  }
 
45
  "cls_token": "[CLS]",
46
  "do_lower_case": true,
47
  "mask_token": "[MASK]",
48
+ "max_length": 128,
49
  "model_max_length": 1000000000000000019884624838656,
50
  "pad_token": "[PAD]",
51
  "sep_token": "[SEP]",
52
+ "stride": 0,
53
  "strip_accents": null,
54
  "tokenize_chinese_chars": true,
55
  "tokenizer_class": "BertTokenizer",
56
+ "truncation_side": "right",
57
+ "truncation_strategy": "longest_first",
58
  "unk_token": "[UNK]"
59
  }
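
Likewise, a small hedged sketch of the tokenizer-side change: tokenizer.json now records a truncation max_length of 256 (up from 128), and tokenizer_config.json spells out right-side, longest-first truncation. The snippet passes the limit explicitly at encoding time rather than relying on the saved defaults; the long input is a placeholder.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("dean22029/PeoplesDailyClassifier")

# Placeholder long input, repeated so it exceeds the truncation limit.
enc = tokenizer(
    "示例文本" * 200,
    truncation=True,    # right-side, longest-first truncation as in the configs
    max_length=256,     # matches the updated value in tokenizer.json
    return_tensors="pt",
)
print(enc["input_ids"].shape)  # sequence dimension is capped at 256
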