Xenova (HF staff) committed
Commit 18f886d
1 Parent(s): 55e4c37

Upload folder using huggingface_hub

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "distilbert-base-uncased-distilled-squad",
+  "_name_or_path": "distilbert-base-cased-distilled-squad",
   "activation": "gelu",
   "architectures": [
     "DistilBertForQuestionAnswering"
@@ -13,11 +13,12 @@
   "model_type": "distilbert",
   "n_heads": 12,
   "n_layers": 6,
+  "output_past": true,
   "pad_token_id": 0,
   "qa_dropout": 0.1,
   "seq_classif_dropout": 0.2,
-  "sinusoidal_pos_embds": false,
+  "sinusoidal_pos_embds": true,
   "tie_weights_": true,
-  "transformers_version": "4.26.1",
-  "vocab_size": 30522
+  "transformers_version": "4.29.2",
+  "vocab_size": 28996
 }
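
Taken together, these config changes swap the underlying checkpoint from the uncased to the cased DistilBERT SQuAD distillation (hence the smaller cased vocab_size of 28996). As a quick sanity check, the updated files should load as a question-answering pipeline in Transformers.js; a minimal sketch, assuming this repo is published as Xenova/distilbert-base-cased-distilled-squad:

```js
import { pipeline } from '@xenova/transformers';

// Fetches the tokenizer files and ONNX weights from the Hub
// (the quantized export is used by default).
const answerer = await pipeline(
  'question-answering',
  'Xenova/distilbert-base-cased-distilled-squad',
);

const question = 'Who was Jim Henson?';
const context = 'Jim Henson was a nice puppet.';
const output = await answerer(question, context);
console.log(output); // e.g. { answer: 'a nice puppet', score: 0.9... }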
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02086cd2f9c4d720b1ace2861ff1b6785bf4d2b58aa7c6896a5a08c37c527d62
+size 260905268
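
The three lines above are a Git LFS pointer, not the model itself: the oid is the SHA-256 of the actual ~261 MB ONNX file, which LFS stores out of band. To check that a local clone resolved the pointer into the real file, hashing it should reproduce the oid; a minimal Node sketch (path assumed relative to the repo root):

```js
import { createHash } from 'node:crypto';
import { createReadStream } from 'node:fs';

// Stream the file through SHA-256; the hex digest should equal the
// pointer's oid (02086cd2...) if the LFS object was fetched correctly.
const hash = createHash('sha256');
for await (const chunk of createReadStream('onnx/model.onnx')) {
  hash.update(chunk);
}
console.log(hash.digest('hex'));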
onnx/model_quantized.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:22f2b002449269feea05c5675b9c387b423a2154dedb2c9d127e6ffdeef7004a
-size 67004982
+oid sha256:e6d7780509ef483184c2a6bcc80e447f8914866f1f4cdaf2c5f565cf9a830af1
+size 65604078
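
The quantized export shrank slightly alongside the checkpoint swap (~67.0 MB to ~65.6 MB, consistent with the smaller cased vocabulary). Transformers.js v2 loads model_quantized.onnx by default; opting into the full-precision model.onnx added above is a pipeline option. A sketch under the same assumed repo id:

```js
import { pipeline } from '@xenova/transformers';

// quantized: false selects onnx/model.onnx instead of the default
// onnx/model_quantized.onnx (larger download, full-precision weights).
const answerer = await pipeline(
  'question-answering',
  'Xenova/distilbert-base-cased-distilled-squad',
  { quantized: false },
);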
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,12 +1,11 @@
 {
+  "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
-  "do_lower_case": true,
+  "do_lower_case": false,
   "mask_token": "[MASK]",
   "model_max_length": 512,
-  "name_or_path": "distilbert-base-uncased-distilled-squad",
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "special_tokens_map_file": null,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "DistilBertTokenizer",
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff