﻿import nltk
import string
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer, WordNetLemmatizer

# 示例文本
text = "Natural language processing (NLP) is a field of computer science, artificial intelligence, and linguistics. It's concerned with the interactions between computers and human languages. NLTK is a leading platform for building Python programs to work with human language data."

print("原始文本:")
print(text)

# 1. 句子分词
sentences = sent_tokenize(text)
print("\n1. 句子分词:")
for i, sent in enumerate(sentences):
    print(f"句子 {i+1}: {sent}")

# 2. 单词分词
words = word_tokenize(text)
print("\n2. 单词分词:")
print(words)

# 3. 转换为小写
words_lower = [word.lower() for word in words]
print("\n3. 转换为小写:")
print(words_lower)

# 4. 移除标点符号
words_no_punct = [word for word in words_lower if word not in string.punctuation]
print("\n4. 移除标点符号:")
print(words_no_punct)

# 5. 移除停用词
stop_words = set(stopwords.words('english'))
words_no_stop = [word for word in words_no_punct if word not in stop_words]
print("\n5. 移除停用词:")
print(words_no_stop)

# 6. 词干提取
stemmer = PorterStemmer()
words_stemmed = [stemmer.stem(word) for word in words_no_stop]
print("\n6. 词干提取:")
print(words_stemmed)

# 7. 词形还原
lemmatizer = WordNetLemmatizer()
words_lemmatized = [lemmatizer.lemmatize(word) for word in words_no_stop]
print("\n7. 词形还原:")
print(words_lemmatized)

# 8. 完整的文本预处理流程
def preprocess_text(text, language='english'):
    """Run the full text-preprocessing pipeline on *text*.

    Steps: word-tokenize, lowercase, drop punctuation tokens, drop
    stopwords for *language*, then lemmatize each remaining token.

    Args:
        text: Raw input string.
        language: Which stopword list from ``nltk.corpus.stopwords`` to
            use. Defaults to ``'english'`` (the original hard-coded value,
            so existing callers are unaffected).

    Returns:
        A list of cleaned, lemmatized token strings.
    """
    # Tokenize and lowercase in a single pass.
    words = [word.lower() for word in word_tokenize(text)]
    # Drop punctuation. NOTE(review): `word not in string.punctuation` is a
    # substring test, so only single-character tokens (e.g. "(", ",") are
    # removed; multi-character tokens like "'s" pass through — kept as-is
    # to preserve the original behavior.
    words = [word for word in words if word not in string.punctuation]
    # Build the stopword set once per call for O(1) membership tests.
    stop_words = set(stopwords.words(language))
    words = [word for word in words if word not in stop_words]
    # Lemmatize the surviving tokens.
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(word) for word in words]

# 8. Run the complete pipeline end to end and show the result.
result = preprocess_text(text)
print("\n8. 完整的预处理结果:")
print(result)