import numpy as np
import os
import re
from collections import defaultdict


# 数据预处理：加载20Newsgroups数据集
# Data preprocessing: load the 20 Newsgroups dataset.
def load_20newsgroups(data_path, categories=None, remove_stopwords=True):
    """Load and tokenize the 20 Newsgroups corpus from a directory tree.

    Each category is a sub-directory of ``data_path`` holding one plain-text
    post per file.

    Parameters
    ----------
    data_path : str
        Root directory of the corpus.
    categories : list[str] | None
        Category folder names to load; ``None`` auto-discovers every
        sub-directory of ``data_path``.
    remove_stopwords : bool
        When True, drop a small built-in English stopword list and any
        token of 2 characters or fewer.

    Returns
    -------
    tuple
        ``(documents, labels, label_map)`` — ``documents`` is a list of token
        lists, ``labels`` the parallel list of integer class ids, and
        ``label_map`` maps category name -> integer id.
    """
    documents = []
    labels = []
    label_map = {}
    # Minimal English stopword list, built ONCE here instead of once per
    # document (it used to be rebuilt inside the per-file loop).
    stopwords = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at',
                 'to', 'for', 'of', 'with', 'is', 'are', 'was', 'were',
                 'it', 'this', 'that', 'these', 'those', 'i', 'you', 'he'}
    if categories is None:
        # Auto-discover category folders when none are specified.
        categories = [d for d in os.listdir(data_path)
                      if os.path.isdir(os.path.join(data_path, d))]
    for idx, cat in enumerate(categories):
        label_map[cat] = idx
        cat_path = os.path.join(data_path, cat)
        # Walk every file in the category folder.
        for file in os.listdir(cat_path):
            file_path = os.path.join(cat_path, file)
            if not os.path.isfile(file_path):
                continue  # skip stray sub-directories (open() would raise)
            # latin-1 maps every byte, so decoding can never fail on raw mail.
            with open(file_path, 'r', encoding='latin-1') as f:
                content = f.read()
            # Text cleanup: strip mail headers/replies and non-letter chars.
            content = re.sub(r'From:.*?\n', '', content)  # sender line
            content = re.sub(r'Re:.*?\n', '', content)  # reply/subject line
            content = re.sub(r'\n+', ' ', content)  # newlines -> spaces
            content = re.sub(r'[^a-zA-Z\s]', '', content).lower()  # letters only, lowercase
            words = content.split()
            # Drop stopwords and very short tokens.
            if remove_stopwords:
                words = [w for w in words if w not in stopwords and len(w) > 2]
            documents.append(words)
            labels.append(idx)
    return documents, labels, label_map


# 朴素贝叶斯分类器
# Multinomial naive Bayes text classifier with Laplace smoothing.
class NaiveBayesClassifier:
    """Multinomial naive Bayes over bag-of-words documents.

    Documents are lists of token strings; labels are hashable class ids.
    All probabilities are kept in log space to avoid numeric underflow.
    """

    def __init__(self, alpha=1.0):
        self.alpha = alpha  # Laplace smoothing coefficient
        self.class_prior = {}  # class -> log P(class)
        self.word_count = {}  # class -> {word: count}
        self.vocab = set()  # vocabulary seen during training
        self.class_total_words = {}  # class -> total token count
        self.num_classes = 0

    # Train the model
    def fit(self, X, y):
        """Estimate log class priors and per-class word counts.

        Parameters
        ----------
        X : list[list[str]]  tokenized documents
        y : list             parallel class labels
        """
        # Reset learned state so repeated fit() calls do not keep stale
        # priors/vocabulary from a previous training run.
        self.class_prior = {}
        self.vocab = set()

        # Class priors P(Y), stored as logs.
        class_counts = defaultdict(int)
        for label in y:
            class_counts[label] += 1
        self.num_classes = len(class_counts)
        total_samples = len(y)
        for cls, count in class_counts.items():
            self.class_prior[cls] = np.log(count / total_samples)

        # Per-class word occurrence counts and class token totals.
        self.word_count = defaultdict(lambda: defaultdict(int))  # class -> word -> count
        self.class_total_words = defaultdict(int)
        for doc, label in zip(X, y):
            for word in doc:
                self.word_count[label][word] += 1
                self.vocab.add(word)  # grow the vocabulary
                self.class_total_words[label] += 1

    # Predict a single sample
    def predict(self, x):
        """Return the most probable class for token list ``x``.

        Returns ``None`` if the model has not been fitted.
        """
        max_log_prob = -np.inf
        best_cls = None
        vocab_size = len(self.vocab)
        # Argmax over log posterior for each trained class.
        for cls in self.class_prior:
            log_prob = self.class_prior[cls]  # start from the log prior
            total_words = self.class_total_words[cls]
            # The smoothing denominator is constant per class, so its log is
            # hoisted out of the per-word loop: log(a/b) = log(a) - log(b).
            log_denom = np.log(total_words + self.alpha * vocab_size)
            for word in x:
                # Laplace smoothing keeps unseen words from zeroing the score.
                word_cnt = self.word_count[cls].get(word, 0)
                log_prob += np.log(word_cnt + self.alpha) - log_denom
            if log_prob > max_log_prob:
                max_log_prob = log_prob
                best_cls = cls
        return best_cls

    # Model accuracy
    def score(self, X_test, y_test):
        """Return the fraction of test samples predicted correctly."""
        if len(y_test) == 0:
            return 0.0  # empty test set: avoid ZeroDivisionError
        correct = sum(1 for x, y in zip(X_test, y_test) if self.predict(x) == y)
        return correct / len(y_test)


# 数据集划分（留出法）
# Dataset split (hold-out method)
def train_test_split(X, y, test_size=0.2, random_state=42):
    """Shuffle-split ``X``/``y`` into train and test partitions.

    Uses a local ``np.random.RandomState`` instead of reseeding the global
    NumPy RNG, so calling this function no longer has a hidden side effect
    on other random code. The permutation is identical to the previous
    ``np.random.seed`` + ``np.random.shuffle`` behaviour for the same seed.

    Parameters
    ----------
    X, y : sequences of equal length
    test_size : float      fraction of samples placed in the test split
    random_state : int     seed for reproducible shuffling

    Returns
    -------
    (X_train, X_test, y_train, y_test)
    """
    rng = np.random.RandomState(random_state)  # local RNG, no global reseed
    indices = np.arange(len(X))
    rng.shuffle(indices)  # random permutation of sample indices
    n_test = int(len(X) * test_size)  # test-set size (floor)
    # First n_test shuffled indices form the test set, the rest the train set.
    X_train = [X[i] for i in indices[n_test:]]
    y_train = [y[i] for i in indices[n_test:]]
    X_test = [X[i] for i in indices[:n_test]]
    y_test = [y[i] for i in indices[:n_test]]
    return X_train, X_test, y_train, y_test


# 主函数
if __name__ == "__main__":
    # 数据集路径（已修改为你的路径：E:\DATA\20_newsgroups）
    data_path = r"E:\DATA\20_newsgroups"  # 确保该路径下有sci.space等子文件夹
    # 加载指定类别的数据
    categories = ['sci.space', 'comp.graphics', 'rec.sport.baseball']
    X, y, label_map = load_20newsgroups(data_path, categories=categories)

    # 划分训练集和测试集（8:2）
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # 训练朴素贝叶斯模型
    nb = NaiveBayesClassifier(alpha=1.0)  # 拉普拉斯平滑系数为1
    nb.fit(X_train, y_train)

    # 评估模型
    accuracy = nb.score(X_test, y_test)
    print(f"类别映射: {label_map}")
    print(f"朴素贝叶斯分类准确率：{accuracy:.4f}")