import numpy as np
import re
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

class NaiveBayes:
    """Multinomial naive Bayes text classifier with additive (Laplace) smoothing.

    All probabilities are kept in log space so that multiplying many small
    per-word factors becomes a numerically stable sum (avoids underflow).
    """

    def __init__(self, alpha=1.0):
        """
        Args:
            alpha: additive smoothing constant applied to priors and
                word likelihoods; prevents zero probability for words
                unseen in a class. Default 1.0 (classic Laplace smoothing).
        """
        # class label -> log P(Y=c)
        self.prior_probs = {}
        # class label -> np.ndarray of log P(word|Y=c), indexed by vocab position
        self.cond_probs = {}
        # unique class labels observed in fit()
        self.classes = None
        self.alpha = alpha

    def preprocess_text(self, text):
        """Lowercase, drop every non-letter/non-space character, split on whitespace."""
        text = re.sub(r'[^a-zA-Z\s]', '', text).lower()
        return text.split()

    def build_vocab(self, texts):
        """Return a list of the unique words appearing across all texts."""
        vocab = set()
        for text in texts:
            vocab.update(self.preprocess_text(text))
        return list(vocab)

    def fit(self, X_train, y_train, vocab):
        """Estimate log priors P(Y=c) and smoothed log likelihoods P(word|Y=c).

        Args:
            X_train: iterable of raw text documents.
            y_train: array-like of class labels aligned with X_train.
            vocab: list of words; tokens outside it are ignored for the
                per-word counts (but still contribute to the per-class total,
                matching the original behavior).
        """
        y_train = np.asarray(y_train)
        self.classes = np.unique(y_train)
        n_total = len(X_train)           # total number of training documents
        vocab_size = len(vocab)
        word_to_idx = {word: i for i, word in enumerate(vocab)}

        # Tokenize each document exactly once. (Previously every document was
        # re-tokenized inside the per-class loop, repeating the regex work
        # len(self.classes) times.)
        tokenized = [self.preprocess_text(text) for text in X_train]

        for c in self.classes:
            # Smoothed log prior P(Y=c).
            n_c = np.sum(y_train == c)
            self.prior_probs[c] = np.log(
                (n_c + self.alpha) / (n_total + self.alpha * len(self.classes))
            )

            # Bag-of-words counts of each vocab word within class c.
            word_counts = np.zeros(vocab_size)
            total_words_c = 0            # all tokens in class c, incl. out-of-vocab
            for words, label in zip(tokenized, y_train):
                if label != c:
                    continue
                total_words_c += len(words)
                for word in words:
                    idx = word_to_idx.get(word)
                    if idx is not None:
                        word_counts[idx] += 1

            # Smoothed log likelihoods P(word|Y=c), vectorized over the vocab.
            self.cond_probs[c] = np.log(
                (word_counts + self.alpha) / (total_words_c + self.alpha * vocab_size)
            )

    def predict(self, X_test, vocab):
        """Return the maximum-a-posteriori class label for each test document."""
        predictions = []
        word_to_idx = {word: i for i, word in enumerate(vocab)}
        for text in X_test:
            # Map tokens to vocab indices once per document (the original
            # repeated the lookup once per class); unknown words are skipped.
            idxs = [
                word_to_idx[w]
                for w in self.preprocess_text(text)
                if w in word_to_idx
            ]
            # log posterior (up to a constant) = log prior + sum of word log likelihoods.
            post_probs = {
                c: self.prior_probs[c] + self.cond_probs[c][idxs].sum()
                for c in self.classes
            }
            predictions.append(max(post_probs, key=post_probs.get))
        return predictions

# Demo: classify 20-Newsgroups posts (4 categories to keep the task small).
if __name__ == "__main__":
    # Load the dataset; strip headers/footers/quotes so the model learns
    # from body text rather than metadata.
    newsgroups = fetch_20newsgroups(
        subset='all',
        categories=['sci.space', 'comp.graphics', 'rec.sport.baseball', 'talk.politics.misc'],
        remove=('headers', 'footers', 'quotes'),
    )
    X = newsgroups.data          # raw text documents
    y = newsgroups.target        # integer class labels
    target_names = newsgroups.target_names

    # Split FIRST, then build the vocabulary from the training texts only.
    # (Building the vocab on all of X leaked test-set words into the model
    # and inflated the measured accuracy.)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    nb_model = NaiveBayes()
    vocab = nb_model.build_vocab(X_train)

    # Train and predict.
    nb_model.fit(X_train, y_train, vocab)
    y_pred = nb_model.predict(X_test, vocab)

    # Report held-out accuracy.
    accuracy = accuracy_score(y_test, y_pred)
    print(f"朴素贝叶斯分类准确率：{accuracy:.4f}")
    print(f"类别映射：{dict(enumerate(target_names))}")