l3cube-pune committed on
Commit
18af84b
1 Parent(s): 1d9b7b9

Update model files

Files changed (4)
  1. pytorch_model.bin +1 -1
  2. tokenizer.json +0 -0
  3. tokenizer_config.json +1 -1
  4. vocab.txt +0 -0
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f0ecaaa8572710e00116b2f89f9be5cfbfa86aea4546191d4a133573d499876b
+oid sha256:1707cc4191a23bc1057582f98d59248543173516afe86307453b00d71f48d926
 size 438140139
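The pytorch_model.bin entry is a Git LFS pointer, so the diff only swaps the sha256 oid of the stored weights blob; the reported size (438140139 bytes) is unchanged. Below is a minimal sketch for checking that a locally downloaded pytorch_model.bin matches the new pointer, assuming the file has already been pulled via Git LFS into the working directory:

import hashlib

# Expected oid, taken from the updated LFS pointer in this commit.
EXPECTED_SHA256 = "1707cc4191a23bc1057582f98d59248543173516afe86307453b00d71f48d926"

def sha256_of(path, chunk_size=1 << 20):
    # Hash the file in 1 MiB chunks so the ~438 MB checkpoint never sits fully in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of("pytorch_model.bin")
print("match" if digest == EXPECTED_SHA256 else "mismatch: " + digest)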
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "l3cube-pune/hing-bert", "tokenizer_class": "BertTokenizer"}
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "max_len": 512, "special_tokens_map_file": "/opt/ravindra.n/misc/models/BERT/special_tokens_map.json", "name_or_path": "l3cube-pune/hing-bert", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff