{ "tokenizer_class": "GPT2Tokenizer", "vocab_size": 50257, // Match this with Mamba's vocab size if needed "padding_side": "right", "special_tokens_map_file": null, "model_max_length": 1024 // Define based on the sequence length your model supports }