from transformers import AutoTokenizer

# Upstream Hub ID the local snapshot below was downloaded from.
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"

# Local copy of `checkpoint`; loading from disk avoids a Hub download on every run.
# NOTE(review): keep this in sync with `checkpoint` if the model is ever updated.
local_model_path = "D:/ai/huggingface-models/" + checkpoint

tokenizer = AutoTokenizer.from_pretrained(local_model_path)

sequences = ["i've been waiting for a huggingface course for too long", "so have i!"]

# padding="max_length" pads every sequence up to max_length, but WITHOUT
# truncation=True a sequence longer than max_length is left uncut — so the
# first sequence here still exceeds 10 tokens.
model_inputs = tokenizer(sequences, padding="max_length", max_length=10)
print(model_inputs)

# padding=True pads only to the longest sequence in the batch; truncation=True
# additionally caps every sequence at max_length tokens.
model_inputs = tokenizer(sequences, padding=True, max_length=10, truncation=True)
print(model_inputs)