from transformers import AutoTokenizer

# Load the tokenizer paired with the SST-2 fine-tuned DistilBERT checkpoint
# (downloads from the Hugging Face Hub on first run, then uses the local cache).
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")

raw_input = [
    "I love kobe bryant.",
    "Me too.",
]

# Batch-encode both sentences: pad the shorter one up to the batch max
# length and return PyTorch tensors (input_ids + attention_mask).
inputs = tokenizer(raw_input, padding=True, return_tensors="pt")
print("After tokenizer: \n", inputs)  # fixed typo: "tokennizer" -> "tokenizer"

# Round-trip each encoded sentence back to text. Special tokens
# ([CLS], [SEP], [PAD]) are intentionally kept so the padding is visible.
for i, ids in enumerate(inputs["input_ids"], start=1):
    decoded = tokenizer.decode(ids)
    print(f"str{i}\n", decoded)

print("All done!")