setu4993 committed
Commit: a0a8b3c
Parent: abd4e32

Re-export from latest version of `transformers`

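A re-export like this is, in practice, a load-and-save round trip through the newer library. A minimal sketch, assuming a local working copy of the checkpoint (paths and repo id are illustrative, not the author's actual setup):

from transformers import BertModel, BertTokenizerFast, TFBertModel

src = "setu4993/smaller-LaBSE"   # or a local directory holding the old export
dst = "./smaller-LaBSE-reexport"

# Re-saving with a newer transformers rewrites config.json
# ("transformers_version"), the tokenizer files, and the weights.
tokenizer = BertTokenizerFast.from_pretrained(src)
model = BertModel.from_pretrained(src)
tokenizer.save_pretrained(dst)
model.save_pretrained(dst)

# TensorFlow weights (tf_model.h5), converted from the PyTorch checkpoint.
tf_model = TFBertModel.from_pretrained(dst, from_pt=True)
tf_model.save_pretrained(dst)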
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/Users/setu/Downloads/smaller-labse-tf/converted-hf",
+  "_name_or_path": "/Users/setu/Models/huggingface/setu4993/smaller-LaBSE",
   "architectures": [
     "BertModel"
   ],
@@ -19,7 +19,7 @@
   "pad_token_id": 0,
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
-  "transformers_version": "4.12.5",
+  "transformers_version": "4.25.1",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 173347
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8914f61e60ee24a18b5d2b4094efede534a9bdb7a1adae2c88e8a9787e8bd20d
-size 876737057
+oid sha256:86df18d5ac34c03f0da7b6e205fc6f0a8948553ec6e8f06ef6f9382bc5183ecc
+size 876755757
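The weights themselves live in Git LFS, so the diff only shows the pointer file: a new sha256 oid and a slightly larger size. A downloaded pytorch_model.bin can be checked against the pointer like this (the local path is an assumption):

import hashlib
import os

path = "pytorch_model.bin"  # local download of the LFS object
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print(sha.hexdigest())        # should equal the pointer's oid sha256
print(os.path.getsize(path))  # should equal the pointer's size (876755757)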
special_tokens_map.json CHANGED
@@ -1 +1,7 @@
-{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
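The special-tokens content is unchanged (the standard five BERT tokens); the newer transformers version simply writes the file pretty-printed with sorted keys. Loading the tokenizer yields the same mapping either way; a small check, assuming the repo id above:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("setu4993/smaller-LaBSE")
print(tokenizer.special_tokens_map)
# {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
#  'cls_token': '[CLS]', 'mask_token': '[MASK]'}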
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dc6d0eb6a5f1e3797b4ca2a2e2f292004bf373ac1ca7ce7f48aa11dcce15cd86
-size 876954624
+oid sha256:60659643bb43515153f71438e37b06dfecf740e45865db6455cbe0e7023ad0a5
+size 876954600
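tf_model.h5 was re-saved as well, so again only the LFS pointer moves. A sketch of exercising the TensorFlow weights end to end, assuming TensorFlow is installed and using the same repo id:

from transformers import BertTokenizerFast, TFBertModel

tokenizer = BertTokenizerFast.from_pretrained("setu4993/smaller-LaBSE")
model = TFBertModel.from_pretrained("setu4993/smaller-LaBSE")

inputs = tokenizer(["Hello, world"], return_tensors="tf", padding=True)
outputs = model(**inputs)
print(outputs.pooler_output.shape)  # (1, hidden_size) pooled output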
tokenizer.json CHANGED
The diff for this file is too large to render.
tokenizer_config.json CHANGED
@@ -1 +1,14 @@
-{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
+{
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
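Besides the pretty-printed layout, the re-exported tokenizer_config.json now records model_max_length: 512, so truncation no longer needs the limit passed explicitly. A small check, assuming the same repo id:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("setu4993/smaller-LaBSE")
print(tokenizer.model_max_length)  # 512 after this commit

encoded = tokenizer("long input " * 1000, truncation=True)
print(len(encoded["input_ids"]))   # capped at 512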