# encoding=utf-8
import nltk

# Sample sentence to analyze.
text = "The quick brown fox jumps over the lazy dog."

# Split the sentence into word tokens.
words = nltk.word_tokenize(text)

# Assign a part-of-speech tag to each token.
tagged_words = nltk.pos_tag(words)

# Print each token alongside its POS tag.
for token, pos in tagged_words:
    print(f"{token} -> {pos}")
