Mojtaba aka Omid Rohanian committed on
Commit
77ba458
1 Parent(s): e72b7b0

DistilClinicalBERT model and tokeniser

Browse files
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "clinical-models/models/DistilClinicalBERT/final/model",
3
  "adapters": {
4
  "adapters": {},
5
  "config_map": {},
@@ -24,7 +24,7 @@
24
  "pad_token_id": 0,
25
  "position_embedding_type": "absolute",
26
  "torch_dtype": "float32",
27
- "transformers_version": "4.17.0",
28
  "type_vocab_size": 2,
29
  "use_cache": true,
30
  "vocab_size": 28996
 
1
  {
2
+ "_name_or_path": "/users/engs2263/clinical-models/models/DistilClinicalBERT/final/model",
3
  "adapters": {
4
  "adapters": {},
5
  "config_map": {},
 
24
  "pad_token_id": 0,
25
  "position_embedding_type": "absolute",
26
  "torch_dtype": "float32",
27
+ "transformers_version": "4.27.0.dev0",
28
  "type_vocab_size": 2,
29
  "use_cache": true,
30
  "vocab_size": 28996
generation_config.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "pad_token_id": 0,
4
+ "transformers_version": "4.27.0.dev0"
5
+ }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7b69e3f394ce0a53467a38f55a82a58721f418a4a22d002a0e1caf512cb3af8e
3
- size 263302891
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e1fa3286278cc79613b8c75480e44bc2a3fd0f883a743decc7bb2e12acbde44
3
+ size 263292011
special_tokens_map.json CHANGED
@@ -1 +1,7 @@
1
- {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
tokenizer_config.json CHANGED
@@ -1 +1,14 @@
1
- {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-base-cased", "tokenizer_class": "BertTokenizer"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "do_lower_case": false,
4
+ "mask_token": "[MASK]",
5
+ "model_max_length": 512,
6
+ "name_or_path": "bert-base-cased",
7
+ "pad_token": "[PAD]",
8
+ "sep_token": "[SEP]",
9
+ "special_tokens_map_file": null,
10
+ "strip_accents": null,
11
+ "tokenize_chinese_chars": true,
12
+ "tokenizer_class": "BertTokenizer",
13
+ "unk_token": "[UNK]"
14
+ }