#数据预处理
import nltk
import string
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer

# Download the NLTK data this script actually needs:
#   - 'punkt'      -> tokenizer models used by word_tokenize()
#   - 'stopwords'  -> corpus used by stopwords.words('english')
# (The original passed a local CSV path, which is not a valid NLTK
# resource name and would fail; tokenization would then raise LookupError.)
nltk.download('punkt')
nltk.download('stopwords')

# 预处理函数
def preprocess_text(text):
    """Normalize raw text into a list of stemmed content words.

    Pipeline: lowercase -> strip punctuation -> tokenize -> drop English
    stopwords -> Porter-stem each remaining token.

    Args:
        text: The input string to preprocess.

    Returns:
        A list of lowercased, punctuation-free, stopword-filtered,
        Porter-stemmed tokens.
    """
    # Lowercase, then delete every ASCII punctuation character in one
    # C-level pass (equivalent to filtering chars against string.punctuation).
    cleaned = text.lower().translate(str.maketrans('', '', string.punctuation))

    # Split the cleaned text into word tokens.
    tokens = word_tokenize(cleaned)

    # Drop common English stopwords (set gives O(1) membership tests).
    english_stops = set(stopwords.words('english'))
    tokens = [tok for tok in tokens if tok not in english_stops]

    # Reduce each surviving token to its Porter stem.
    porter = PorterStemmer()
    return [porter.stem(tok) for tok in tokens]


# Demo: run the preprocessing pipeline on a sample sentence.
# Guarded with __main__ so importing this module for preprocess_text()
# does not trigger the demo output as a side effect.
if __name__ == "__main__":
    # Sample text
    text = "NLTK is a leading platform for building Python programs to work with human language data."

    # Run the preprocessing pipeline on it.
    preprocessed_text = preprocess_text(text)

    print("原始文本:", text)
    print("预处理后的文本:", preprocessed_text)
