Vrushali committed
Commit 554a235
1 Parent(s): b26dc32

End of training

pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2e4c70fffd80278ceeb6587ed19ed4deb56c0f151ef5510835fb86c1c2739ebf
+ oid sha256:4e70ee2ac51085424903384fc7b8872a2a56ac4a469149eb063d0d09eff27af1
  size 433433785
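
Only the Git LFS pointer changes in this diff: the oid is the SHA-256 of the new 433 MB weights file, which lives in LFS storage rather than in git. A minimal sketch of verifying a local copy against the pointer above, assuming a local checkout of this repo:

import hashlib

def sha256_of(path):
    # Hash the file in 1 MiB chunks so large weight files fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Digest recorded by the new pointer line above.
expected = "4e70ee2ac51085424903384fc7b8872a2a56ac4a469149eb063d0d09eff27af1"
assert sha256_of("pytorch_model.bin") == expected
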
runs/Jun24_18-37-59_9fd756db7ec4/events.out.tfevents.1687631883.9fd756db7ec4.6788.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:528d9aea7c0ce81013441f502b497afcccb0e3bd830784bc02cdd3d31feb8e35
+ size 5137
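
The new event file is tiny (about 5 KB) and holds the scalars logged to TensorBoard during this run. A minimal sketch of reading it back, assuming the tensorboard package is installed (path copied from the diff header above):

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator(
    "runs/Jun24_18-37-59_9fd756db7ec4/events.out.tfevents.1687631883.9fd756db7ec4.6788.2"
)
acc.Reload()  # parse the event file from disk
for tag in acc.Tags()["scalars"]:
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)
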
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[MASK]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
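
Note that pad_token is set to "[MASK]" here, while tokenizer_config.json below records "[PAD]", so it is worth checking which one the loaded tokenizer actually reports. A minimal sketch, assuming a hypothetical repo id (the real one is not shown on this page):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Vrushali/<repo-name>")  # placeholder repo id
print(tok.special_tokens_map)           # cls/sep/mask/pad/unk as configured above
print(tok.pad_token, tok.pad_token_id)  # confirms which pad token won
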
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
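
The huge model_max_length is not a real limit: 1000000000000000019884624838656 is just int(1e30), the sentinel transformers writes when no explicit maximum was saved, and BERT checkpoints are usually capped at 512 tokens. A minimal sketch of restoring a sane limit at load time, again with a placeholder repo id:

from transformers import AutoTokenizer

assert 1000000000000000019884624838656 == int(1e30)  # the sentinel value above
tok = AutoTokenizer.from_pretrained("Vrushali/<repo-name>", model_max_length=512)
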
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c2ab81d2b389613e116412df7e0e73b6747a81ca04e2fd0110acf001eb4ad189
+ oid sha256:b237ae0f6f77331e84db23ce73a3e4a0b30158450ccdcabe495a1a8966e7c6b9
  size 3899
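
training_args.bin is the pickled TrainingArguments object the Trainer saves alongside the weights, so it can be inspected to recover the hyperparameters of this run. A minimal sketch, assuming transformers is importable (the pickle references its classes); weights_only=False is needed on recent torch versions because this is a plain pickle, not tensors:

import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
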
vocab.txt ADDED
The diff for this file is too large to render. See raw diff