Den4ikAI committed on
Commit 6478c6d
1 Parent(s): 9c5731e

Upload 7 files

config.json CHANGED
@@ -26,7 +26,7 @@
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "torch_dtype": "float32",
- "transformers_version": "4.25.0",
+ "transformers_version": "4.11.3",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 119547
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f1884f8158a263e16a4deb9c6923f0d9960e8a36842eb4418f53b9a7781a5158
- size 711492725
+ oid sha256:2dc3d8babc824fd3d4a269ba87136ae49a15e43e1835d176e4d8932e9aab29d1
+ size 711507288
special_tokens_map.json CHANGED
@@ -1,7 +1 @@
- {
- "cls_token": "[CLS]",
- "mask_token": "[MASK]",
- "pad_token": "[PAD]",
- "sep_token": "[SEP]",
- "unk_token": "[UNK]"
- }
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,16 +1 @@
1
- {
2
- "cls_token": "[CLS]",
3
- "do_basic_tokenize": true,
4
- "do_lower_case": false,
5
- "mask_token": "[MASK]",
6
- "model_max_length": 1000000000000000019884624838656,
7
- "name_or_path": "model",
8
- "never_split": null,
9
- "pad_token": "[PAD]",
10
- "sep_token": "[SEP]",
11
- "special_tokens_map_file": "model/special_tokens_map.json",
12
- "strip_accents": null,
13
- "tokenize_chinese_chars": true,
14
- "tokenizer_class": "BertTokenizer",
15
- "unk_token": "[UNK]"
16
- }
 
1
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "model_max_length": 1000000000000000019884624838656, "name_or_path": "model", "never_split": null, "special_tokens_map_file": "model/special_tokens_map.json", "tokenizer_class": "BertTokenizer"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8ebe48ba780982a1473816f4be75d03af2811752653b87c0a640fd0fb71bb68a
- size 3323
+ oid sha256:ed40146613bf3b25ded55630bb9c78a5ec08cfd3c0268ce56b75d2d2600be6a6
+ size 2799