teknium committed
Commit 4648d06 (parent e6fbc0b)

Fix for tokenization

configuration_mixformer_sequential.py CHANGED
@@ -41,7 +41,7 @@ class MixFormerSequentialConfig(PretrainedConfig):
         pad_vocab_size_multiple: Optional[int] = 64,
         **kwargs
     ) -> None:
-        self.vocab_size = int(math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
+        self.vocab_size = vocab_size
         self.n_positions = n_positions
         self.n_embd = n_embd
         self.n_layer = n_layer
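
For context, a minimal sketch of what the removed line computed: it rounded vocab_size up to the next multiple of pad_vocab_size_multiple, so config.vocab_size could end up larger than the tokenizer's actual vocabulary. The standalone function and the example value 50295 below are illustrative assumptions, not taken from the commit.

import math

# Sketch of the padding logic removed by this commit (assumed standalone
# reproduction; the original lived inside MixFormerSequentialConfig.__init__).
def pad_vocab_size(vocab_size: int, pad_vocab_size_multiple: int = 64) -> int:
    # Round up to the nearest multiple of pad_vocab_size_multiple.
    return int(math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)

# Illustrative value (not from the commit): a 50295-token vocabulary
# would have been reported as 50304, mismatching the tokenizer.
assert pad_vocab_size(50295) == 50304
# After the fix, config.vocab_size is stored exactly as passed in.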