import nltk
from nltk.tokenize import word_tokenize

# Tokenize a sample sentence into word/punctuation tokens and print them.
text = "Hello, this is a sample text."
try:
    tokens = word_tokenize(text)
except LookupError:
    # word_tokenize needs the "punkt" tokenizer models; on a fresh NLTK
    # install they are missing — fetch them once, then retry.
    nltk.download("punkt")
    tokens = word_tokenize(text)
print(tokens)