yubinH committed
Commit d19e47d (1 parent: 4710848)

End of training

all_results.json CHANGED
@@ -1 +1 @@
- {"eval_accuracy": 0.9544699235626454}
+ {"eval_accuracy": 0.9584579594549685}
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "bert-base-chinese",
+ "_name_or_path": "hfl/chinese-lert-base",
  "architectures": [
    "BertForMultipleChoice"
  ],
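The `_name_or_path` change shows the checkpoint is now fine-tuned from `hfl/chinese-lert-base` (a LERT encoder that reuses the BERT architecture) instead of `bert-base-chinese`. A minimal sketch of loading the fine-tuned multiple-choice head with transformers; the local path `./` stands in for a clone of this repo and is an assumption, not part of the commit:

# Sketch: load the fine-tuned checkpoint (path is a placeholder, not from the commit).
from transformers import AutoTokenizer, AutoModelForMultipleChoice

repo = "./"  # assumption: local clone of this model repository
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForMultipleChoice.from_pretrained(repo)  # BertForMultipleChoice per config.json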
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b8290d7bb2b51315292d7770a6ab71d707db04fb0b06671824223310d95ad548
+ oid sha256:0c7b01c94b48c5612ec1e4d0666c567a10c5cdd6b63afdd357c34b9d434eb8e1
  size 409142510
tokenizer.json CHANGED
@@ -59,7 +59,7 @@
  "clean_text": true,
  "handle_chinese_chars": true,
  "strip_accents": null,
- "lowercase": false
+ "lowercase": true
  },
  "pre_tokenizer": {
    "type": "BertPreTokenizer"
tokenizer_config.json CHANGED
@@ -44,9 +44,9 @@
  "additional_special_tokens": [],
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
- "do_lower_case": false,
+ "do_lower_case": true,
  "mask_token": "[MASK]",
- "model_max_length": 512,
+ "model_max_length": 1000000000000000019884624838656,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,