"""Demonstrate NLTK sentence tokenization on a sample English text."""
import nltk

# NOTE(review): word_tokenize is imported but never used below —
# either remove it or add a word-tokenization demo.
from nltk.tokenize import word_tokenize, sent_tokenize


def main() -> None:
    """Fetch the required NLTK data, then split a sample text into sentences."""
    # quiet=True suppresses the download progress output; when the packages
    # are already cached locally this is effectively a fast existence check.
    nltk.download('popular', quiet=True)    # bundle of commonly used data packages
    nltk.download('punkt_tab', quiet=True)  # tokenizer tables required by newer NLTK releases

    text = "Hello Mr. Smith, how are you doing today? The weather is great, and Python is awesome."

    # Sentence tokenization: the punkt model treats "Mr." as an abbreviation,
    # not a sentence boundary, so the text splits into exactly two sentences.
    sentences = sent_tokenize(text)
    print("句子分词:", sentences)


if __name__ == "__main__":
    main()