muntasir2000 committed
Commit
10f8197
1 Parent(s): ad15617

Update tokenization_bn.py

Files changed (1)
  1. tokenization_bn.py +2 -1
tokenization_bn.py CHANGED
@@ -48,10 +48,11 @@ class BNTokenizer(PreTrainedTokenizer):
 
     def __init__(self, vocab_file, bos_token=None, eos_token='</s>', unk_token='<unk>', pad_token='<|reserved001|>', sep_token=None, sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None:
         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
         self.vocab_file = vocab_file
         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
         self.sp_model.Load(vocab_file)
+        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
+
 
     @property
     def vocab_size(self):
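
The reordering likely matters because PreTrainedTokenizer.__init__ in recent transformers releases registers the special tokens during construction, which can trigger vocabulary lookups on the subclass; calling it only after self.sp_model.Load(vocab_file) ensures the SentencePiece model is already available at that point. A minimal usage sketch, assuming the class is importable from tokenization_bn and that bn_tokenizer.model is a local SentencePiece file (both names are illustrative, not taken from this repository):

# Usage sketch (hedged): module path and vocab file name are assumptions.
from tokenization_bn import BNTokenizer

tok = BNTokenizer(vocab_file="bn_tokenizer.model")  # hypothetical SentencePiece model file
print(tok.vocab_size)              # backed by the sp_model loaded in __init__
print(tok.tokenize("বাংলা ভাষা"))    # sample Bangla text; assumes _tokenize is implemented elsewhere in the file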