Xenova (HF staff) committed
Commit 08a308f
Parent: 95d8d8c

Upload folder using huggingface_hub

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "./models/pytorch/all-MiniLM-L6-v2",
+  "_name_or_path": "sentence-transformers/all-MiniLM-L6-v2",
   "architectures": [
     "BertModel"
   ],
@@ -18,7 +18,7 @@
   "num_hidden_layers": 6,
   "pad_token_id": 0,
   "position_embedding_type": "absolute",
-  "transformers_version": "4.26.1",
+  "transformers_version": "4.29.2",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 30522
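The only substantive changes are `_name_or_path` now pointing at the upstream Hub repo and a bumped `transformers_version`. A quick sanity check (a sketch; the repo id is taken from the new `_name_or_path` value above):

    from transformers import AutoConfig

    # Fetch the upstream config referenced by the new "_name_or_path" and
    # confirm the fields this hunk leaves untouched.
    config = AutoConfig.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
    print(config.num_hidden_layers)  # 6
    print(config.vocab_size)         # 30522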
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:759c3cd2b7fe7e93933ad23c4c9181b7396442a2ed746ec7c1d46192c469c46e
+size 90387606
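The repo stores only this Git LFS pointer; the actual ~90 MB export lives in LFS storage. A rough sketch of running it locally with onnxruntime after `git lfs pull`; the input names (input_ids, attention_mask, token_type_ids) are the usual ones for a BERT export and are assumed here, not stated in the commit:

    import numpy as np
    import onnxruntime as ort

    # Load the newly added export and run a tiny single-sentence batch.
    session = ort.InferenceSession("onnx/model.onnx")
    feeds = {
        "input_ids": np.array([[101, 7592, 102]], dtype=np.int64),  # [CLS] hello [SEP]
        "attention_mask": np.array([[1, 1, 1]], dtype=np.int64),
        "token_type_ids": np.array([[0, 0, 0]], dtype=np.int64),
    }
    outputs = session.run(None, feeds)
    print(outputs[0].shape)  # (1, 3, 384) given MiniLM-L6's 384-dim hidden size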
onnx/model_quantized.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f9fe0b21bb68db1b9ac66363294bd820dda3a7c45e7276bb9fe1692cdb17e9b8
-size 23583315
+oid sha256:2f9a2cd8a5955f62908d5087be47516e9d91849f50579c3e47c73fd2c563b224
+size 22868510
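Only the LFS pointer changes: the quantized export was regenerated with a new hash and a slightly smaller size. Comparing the byte counts from the pointers in this commit gives a rough sense of the quantization win:

    # Byte counts copied from the LFS pointers above.
    full_precision = 90_387_606   # onnx/model.onnx
    quantized      = 22_868_510   # onnx/model_quantized.onnx (new)
    previous       = 23_583_315   # onnx/model_quantized.onnx (old)
    print(f"{full_precision / quantized:.2f}x smaller")  # ~3.95x, consistent with int8 weights
    print(f"{previous - quantized:,} bytes saved")       # 714,805 bytes vs the old export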
tokenizer.json CHANGED
@@ -19,48 +19,48 @@
   "added_tokens": [
     {
       "id": 0,
-      "special": true,
       "content": "[PAD]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false
+      "normalized": false,
+      "special": true
     },
     {
       "id": 100,
-      "special": true,
       "content": "[UNK]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false
+      "normalized": false,
+      "special": true
    },
     {
       "id": 101,
-      "special": true,
       "content": "[CLS]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false
+      "normalized": false,
+      "special": true
     },
     {
       "id": 102,
-      "special": true,
       "content": "[SEP]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false
+      "normalized": false,
+      "special": true
     },
     {
       "id": 103,
-      "special": true,
       "content": "[MASK]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false
+      "normalized": false,
+      "special": true
     }
   ],
   "normalizer": {
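The change appears to be purely serialization order: `special` now follows `normalized` (with the comma moved accordingly), so the parsed content is identical. A quick round-trip check with the tokenizers library, assuming it is run from the repo root:

    from tokenizers import Tokenizer

    # Parse the rewritten file and confirm the five special tokens survived intact.
    tok = Tokenizer.from_file("tokenizer.json")
    for token in ("[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"):
        print(token, tok.token_to_id(token))  # 0, 100, 101, 102, 103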
tokenizer_config.json CHANGED
@@ -1,14 +1,13 @@
 {
+  "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
   "do_lower_case": true,
   "mask_token": "[MASK]",
   "model_max_length": 512,
-  "name_or_path": "./models/pytorch/all-MiniLM-L6-v2",
   "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "special_tokens_map_file": null,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
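Two local-only keys (`name_or_path`, `special_tokens_map_file`) are dropped and `clean_up_tokenization_spaces` is added, consistent with the transformers_version bump in config.json. A minimal sketch loading the tokenizer from the upstream repo id seen in the config.json diff:

    from transformers import AutoTokenizer

    # Load the tokenizer and confirm the settings from the updated config.
    tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
    print(tokenizer.model_max_length)                # 512
    print(tokenizer.cls_token, tokenizer.sep_token)  # [CLS] [SEP]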