juanreno2012 committed
Commit feaac7a
1 Parent(s): 99e6ce5

End of training

README.md CHANGED
@@ -47,4 +47,4 @@ The following hyperparameters were used during training:
 - Transformers 4.34.0
 - Pytorch 2.0.1+cu118
 - Datasets 2.14.5
-- Tokenizers 0.14.0
+- Tokenizers 0.14.1
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3ec19413129a81c54a869d1c34681df12cb14b6c5705aa0d525b9baf79a2bd7f
+oid sha256:878c385d24a1be17174212be0c4034afaab1cce39ef489ffc4bb98b684160836
 size 437997617
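Both binary files in this commit (pytorch_model.bin and training_args.bin) are stored through Git LFS, so the diff shows only the pointer file: the spec version, the SHA-256 of the actual object, and its size in bytes. The sketch below is a non-authoritative example, assuming the checkpoint has already been downloaded next to the script, of checking a local file against the new pointer:

import hashlib

def lfs_sha256(path, chunk_size=1 << 20):
    # Stream in 1 MiB chunks so the ~438 MB checkpoint never has to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected value copied from the updated pointer above.
expected = "878c385d24a1be17174212be0c4034afaab1cce39ef489ffc4bb98b684160836"
assert lfs_sha256("pytorch_model.bin") == expected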
special_tokens_map.json CHANGED
@@ -1,11 +1,4 @@
 {
-  "additional_special_tokens": [
-    "[PAD]",
-    "[UNK]",
-    "[CLS]",
-    "[SEP]",
-    "[MASK]"
-  ],
   "cls_token": "[CLS]",
   "mask_token": "[MASK]",
   "pad_token": "[PAD]",
tokenizer.json CHANGED
@@ -1,14 +1,7 @@
 {
   "version": "1.0",
   "truncation": null,
-  "padding": {
-    "strategy": "BatchLongest",
-    "direction": "Right",
-    "pad_to_multiple_of": null,
-    "pad_id": 0,
-    "pad_type_id": 0,
-    "pad_token": "[PAD]"
-  },
+  "padding": null,
   "added_tokens": [
     {
       "id": 0,
tokenizer_config.json CHANGED
@@ -41,29 +41,16 @@
       "special": true
     }
   },
-  "additional_special_tokens": [
-    "[PAD]",
-    "[UNK]",
-    "[CLS]",
-    "[SEP]",
-    "[MASK]"
-  ],
+  "additional_special_tokens": [],
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_lower_case": true,
   "mask_token": "[MASK]",
-  "max_length": 512,
   "model_max_length": 512,
-  "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
-  "pad_token_type_id": 0,
-  "padding_side": "right",
   "sep_token": "[SEP]",
-  "stride": 0,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
   "unk_token": "[UNK]"
 }
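The keys dropped here (max_length, stride, padding_side, pad_to_multiple_of, pad_token_type_id, truncation_side, truncation_strategy) are encode-time arguments or defaults rather than essential tokenizer state, which is presumably why the newer save format stops serializing them; only model_max_length remains as the hard limit. Under that reading, the same settings can be supplied per call instead, as in this sketch (hypothetical ./checkpoint path again):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint")

# The former config values, passed explicitly where they matter:
enc = tok(
    "some input text",
    truncation="longest_first",  # was truncation_strategy
    max_length=512,              # also capped by model_max_length
    stride=0,
    padding="max_length",
    return_tensors="pt",
)
print(enc["input_ids"].shape)  # torch.Size([1, 512])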
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8e955b8157fb8fb9e5f27704d15e0648eca4dc4ff8d1c9bac9b629385af66b9e
+oid sha256:49f532a6011f5da796b3002cb53c290b57dc0de862f3ef9333a33f9ba113f2f1
 size 4091