import os
import re
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score
from sklearn.decomposition import TruncatedSVD
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import numpy as np
import sys
sys.path.append('my_module_path')
# Load data
def load_20newsgroups_data(data_dir):
    """Load the 20 Newsgroups corpus from a directory tree.

    Expects one subdirectory per category under *data_dir*; every regular
    file inside a category directory is read as one raw document.

    Args:
        data_dir: Root directory containing one subdirectory per class.

    Returns:
        Tuple ``(data, labels, label_names)``: ``data`` is a list of document
        strings, ``labels`` the parallel list of integer class indices, and
        ``label_names`` the sorted class directory names.
    """
    data = []
    labels = []
    # Sort so the label_index -> name assignment is deterministic across runs.
    label_names = sorted(os.listdir(data_dir))
    for label_index, label_name in enumerate(label_names):
        label_dir = os.path.join(data_dir, label_name)
        if not os.path.isdir(label_dir):
            continue  # skip stray files at the top level
        # Sort file names for a reproducible document order (os.listdir order
        # is platform-dependent, which would make downstream KMeans results vary).
        for file_name in sorted(os.listdir(label_dir)):
            file_path = os.path.join(label_dir, file_name)
            if not os.path.isfile(file_path):
                continue  # skip nested directories instead of failing on open()
            try:
                # latin1 maps every byte to a codepoint, so malformed bytes in
                # the corpus can never raise UnicodeDecodeError mid-load.
                with open(file_path, 'r', encoding='latin1') as file:
                    data.append(file.read())
                    labels.append(label_index)
            except OSError as e:
                print(f"Error reading file {file_path}: {e}")
    return data, labels, label_names

# Text preprocessing
def preprocess_text(text, stop_words=None, stemmer=None):
    """Normalize a raw document for vectorization.

    Lowercases, strips punctuation, collapses whitespace runs to single
    spaces, then optionally removes stop words and applies a stemmer.

    Args:
        text: Raw document string.
        stop_words: Optional collection of words to drop (membership-tested).
        stemmer: Optional object with a ``stem(word)`` method (e.g. PorterStemmer).

    Returns:
        The cleaned text as a single space-separated string.
    """
    # Lowercase, drop punctuation, squeeze whitespace — in one pipeline.
    cleaned = re.sub(r'\s+', ' ', re.sub(r'[^\w\s]', '', text.lower()))
    # Drop stop words, if a list was supplied.
    if stop_words:
        cleaned = " ".join(w for w in cleaned.split() if w not in stop_words)
    # Reduce each word to its stem, if a stemmer was supplied.
    if stemmer:
        cleaned = " ".join(stemmer.stem(w) for w in cleaned.split())
    return cleaned

# K-Means training and prediction
def train_kmeans(X_train, n_clusters, n_init=20):
    """Fit a K-Means model on the training features.

    Args:
        X_train: Feature matrix (n_samples, n_features).
        n_clusters: Number of clusters to form.
        n_init: Number of k-means++ restarts; the best run is kept.

    Returns:
        The fitted ``KMeans`` estimator.
    """
    clusterer = KMeans(
        n_clusters=n_clusters,
        init='k-means++',
        n_init=n_init,
        random_state=42,  # fixed seed for reproducible clustering
    )
    # KMeans.fit returns the estimator itself.
    return clusterer.fit(X_train)

# Remap cluster IDs so they align more closely with the actual labels
def map_clusters_to_labels(y_true, y_pred, n_clusters):
    """Relabel cluster IDs with the majority true class of each cluster.

    For every cluster appearing in *y_pred*, finds the most common true label
    among its members and rewrites the predictions with those labels, so the
    clustering can be scored against ground truth.

    Args:
        y_true: Sequence of true integer labels, parallel to *y_pred*.
        y_pred: Sequence of cluster IDs assigned by the clustering model.
        n_clusters: Total number of clusters (kept for interface
            compatibility; the mapping is built from the clusters actually
            present in *y_pred*).

    Returns:
        List of majority-vote labels, one per entry of *y_pred*.
    """
    # Single pass to group true labels by cluster — the original rescanned
    # the whole prediction list once per cluster (O(n_clusters * n)).
    cluster_members = defaultdict(list)
    for true_label, cluster in zip(y_true, y_pred):
        cluster_members[cluster].append(true_label)
    # Majority label per cluster; Counter breaks count ties by insertion
    # order, which matches the original index-order scan.
    label_map = {
        cluster: Counter(members).most_common(1)[0][0]
        for cluster, members in cluster_members.items()
    }
    return [label_map[cluster] for cluster in y_pred]

# Main entry point
def main():
    """Run the full pipeline: load, preprocess, vectorize, cluster, evaluate."""
    # Load the training set (replace with the actual path to the train split)
    train_dir = r'D:\Users\wjh\OneDrive\桌面\py-project\20news-bydate-test\20news-bydate-train'
    train_data, train_labels, label_names = load_20newsgroups_data(train_dir)

    # Load the test set (replace with the actual path to the test split)
    test_dir = r'D:\Users\wjh\OneDrive\桌面\py-project\20news-bydate-test\20news-bydate-test'
    test_data, test_labels, _ = load_20newsgroups_data(test_dir)

    # Stop word list and stemmer used during preprocessing
    stop_words = set(stopwords.words('english'))
    stemmer = PorterStemmer()

    # Preprocess every document in both splits
    train_data_preprocessed = [preprocess_text(doc, stop_words=stop_words, stemmer=stemmer) for doc in train_data]
    test_data_preprocessed = [preprocess_text(doc, stop_words=stop_words, stemmer=stemmer) for doc in test_data]

    # TF-IDF features: fit the vocabulary on train only, reuse it for test
    vectorizer = TfidfVectorizer(max_features=10000, ngram_range=(1, 2))
    X_train = vectorizer.fit_transform(train_data_preprocessed)
    X_test = vectorizer.transform(test_data_preprocessed)

    # Dimensionality reduction (LSA) before clustering
    svd = TruncatedSVD(n_components=300, random_state=42)
    X_train_reduced = svd.fit_transform(X_train)
    X_test_reduced = svd.transform(X_test)

    # Cluster with as many clusters as there are classes
    n_clusters = len(label_names)
    kmeans = train_kmeans(X_train_reduced, n_clusters, n_init=50)

    # Cluster assignments for both splits
    train_predictions = kmeans.predict(X_train_reduced)
    test_predictions = kmeans.predict(X_test_reduced)

    # BUG FIX: build the cluster -> class mapping from TRAINING labels only.
    # Previously the test mapping was derived from the test set's own ground
    # truth, leaking test labels into the evaluation and inflating accuracy.
    label_map = {}
    for cluster in range(n_clusters):
        members = [train_labels[i] for i, c in enumerate(train_predictions) if c == cluster]
        if members:
            label_map[cluster] = Counter(members).most_common(1)[0][0]
    train_mapped_labels = [label_map.get(c, 0) for c in train_predictions]
    # Fall back to class 0 for any cluster that was empty on the train split.
    test_mapped_labels = [label_map.get(c, 0) for c in test_predictions]

    # Per-sample classification results
    print("分类结果：")
    for i, (prediction, actual) in enumerate(zip(test_mapped_labels, test_labels)):
        print(f"样本 {i + 1}: 实际类别 = {label_names[actual]}, 预测类别 = {label_names[prediction]}")

    # Overall classification accuracy
    accuracy = accuracy_score(test_labels, test_mapped_labels)
    print(f"\n分类准确率：{accuracy:.2%}")


if __name__ == "__main__":
    import nltk
    nltk.download('stopwords')  # download the stop word list used by preprocessing
    main()
