Fix for tokenization
configuration_mixformer_sequential.py
@@ -41,7 +41,7 @@ class MixFormerSequentialConfig(PretrainedConfig):
         pad_vocab_size_multiple: Optional[int] = 64,
         **kwargs
     ) -> None:
-        self.vocab_size =
+        self.vocab_size = vocab_size
     self.n_positions = n_positions
     self.n_embd = n_embd
     self.n_layer = n_layer
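For context, here is a minimal sketch of how the patched __init__ plausibly reads after this change. Only the lines visible in the hunk above come from the commit; the parameter defaults, the model_type, and the super().__init__ call are illustrative assumptions, not part of the diff.

from typing import Optional

from transformers import PretrainedConfig


class MixFormerSequentialConfig(PretrainedConfig):
    """Minimal sketch of the patched configuration class.

    Only the lines shown in the hunk are taken from the commit; the
    defaults and model_type below are assumptions for illustration.
    """

    model_type = "mixformer-sequential"  # assumption, not shown in the diff

    def __init__(
        self,
        vocab_size: int = 50304,  # assumed default, not visible in the hunk
        n_positions: int = 2048,  # assumed default
        n_embd: int = 1024,       # assumed default
        n_layer: int = 20,        # assumed default
        pad_vocab_size_multiple: Optional[int] = 64,
        **kwargs,
    ) -> None:
        # The fix: store vocab_size verbatim, replacing the incomplete
        # assignment on the removed line.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        super().__init__(**kwargs)

Assuming this structure, MixFormerSequentialConfig(vocab_size=50295).vocab_size now round-trips the constructor argument unchanged, which is presumably what the "Fix for tokenization" commit title refers to.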