Ozan Oktay committed
Commit 0fdcbd3
Parent: 8d4d877

add tokenizer

configuration_cxrbert.py CHANGED
@@ -5,7 +5,7 @@
 
 from typing import Any
 
-from transformers import BertConfig
+from transformers import BertConfig, BertTokenizer
 
 
 class CXRBertConfig(BertConfig):
@@ -19,3 +19,9 @@ class CXRBertConfig(BertConfig):
     def __init__(self, projection_size: int = 128, **kwargs: Any) -> None:
         super().__init__(**kwargs)
         self.projection_size = projection_size
+
+
+class CXRBertTokenizer(BertTokenizer):
+    def __init__(self, **kwargs: Any) -> None:
+        super().__init__(**kwargs)
+
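For orientation, a minimal sketch of how the two classes in this file could be exercised after this commit. The vocab path and the example sentence are placeholders, not part of the commit:

```python
from configuration_cxrbert import CXRBertConfig, CXRBertTokenizer

# CXRBertConfig is a stock BertConfig plus one extra field.
config = CXRBertConfig(projection_size=128)
print(config.projection_size)  # 128

# CXRBertTokenizer adds no behaviour over BertTokenizer yet; it exists so the
# repo can advertise its own tokenizer class. "vocab.txt" is assumed to be the
# file added in this commit, sitting in the working directory.
tokenizer = CXRBertTokenizer(vocab_file="vocab.txt")
print(tokenizer.tokenize("no acute cardiopulmonary process"))
```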
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "name_or_path": "/tmp/hf_demo_may_11th", "special_tokens_map_file": "/mnt/batch/tasks/shared/LS_root/jobs/innereye4ws/azureml/jcxr_1645574625_747e8d7b/wd/azureml/JCXR_1645574625_747e8d7b/pretrained_models/pretrained_bert_models/pubmed_mimic_bert_base/special_tokens_map.json", "tokenizer_file": null, "tokenizer_class": "CXRBertTokenizer", "auto_map": {"AutoTokenizer": ["configuration_cxrbert.CXRBertTokenizer", null]}}
vocab.txt ADDED
The diff for this file is too large to render.