import numpy as np
from collections import Counter
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split


class NaiveBayes:
    """Categorical naive Bayes classifier with Laplace (additive) smoothing.

    Features are treated as discrete values; conditional probabilities
    P(x_j = v | C) are estimated from per-class value frequencies.
    """

    def __init__(self, alpha=1.0):
        """
        Initialize the classifier.

        Args:
            alpha (float): Laplace smoothing parameter, default 1.0.
        """
        self.alpha = alpha
        self.class_priors = {}          # class label -> P(C)
        self.feature_likelihoods = {}   # class -> {feature_idx -> {value -> P(value|C)}}
        self.classes = []               # array of class labels after fit()

    def fit(self, X, y):
        """
        Train the naive Bayes classifier.

        Args:
            X (numpy.ndarray): training data, 2-D array, one sample per row;
                entries are discrete feature values.
            y (numpy.ndarray): 1-D array of class labels.
        """
        self.classes = np.unique(y)
        num_samples, num_features = X.shape

        # Prior probabilities P(C) from class frequencies.
        self.class_priors = {
            c: np.mean(y == c) for c in self.classes
        }

        # Number of distinct values per feature, computed over the WHOLE
        # training set so every class shares the same smoothing denominator.
        # (The original counted values per class, which makes the same alpha
        # smooth different classes by different amounts and biases the
        # posterior comparison.)  "+ 1" reserves mass for unseen values.
        feature_cardinality = [
            len(np.unique(X[:, j])) + 1 for j in range(num_features)
        ]

        # Conditional probabilities P(x_j = v | C) with Laplace smoothing
        # to avoid zero probabilities for rare values.
        self.feature_likelihoods = {}
        for c in self.classes:
            X_c = X[y == c]  # samples belonging to class c

            likelihoods = {}
            for feature_idx in range(num_features):
                feature_counts = Counter(X_c[:, feature_idx])
                total_count = X_c.shape[0] + self.alpha * feature_cardinality[feature_idx]

                # Smoothed probability for every value observed in class c.
                likelihoods[feature_idx] = {
                    feat_val: (count + self.alpha) / total_count
                    for feat_val, count in feature_counts.items()
                }

                # Fallback probability (under key None) for feature values
                # never observed in class c.
                likelihoods[feature_idx][None] = self.alpha / total_count

            self.feature_likelihoods[c] = likelihoods

    def predict(self, X):
        """
        Predict class labels for new samples with the trained model.

        Args:
            X (numpy.ndarray): 2-D array of samples.

        Returns:
            numpy.ndarray: predicted class label per row of X.
        """
        predictions = []
        for sample in X:
            posteriors = {}
            for c in self.classes:
                # log P(C|X) ∝ log P(C) + Σ_j log P(x_j|C); log-space
                # arithmetic prevents numerical underflow.
                posterior = np.log(self.class_priors[c])
                for feature_idx, feature_value in enumerate(sample):
                    table = self.feature_likelihoods[c][feature_idx]
                    # Unseen value in this class -> smoothed fallback (None key).
                    likelihood = table.get(feature_value, table[None])
                    posterior += np.log(likelihood)
                posteriors[c] = posterior
            # Choose the class with the highest log-posterior.
            predictions.append(max(posteriors, key=posteriors.get))
        return np.array(predictions)


# Example: data loading and preprocessing
def load_data():
    """Load and vectorize a two-class subset of the 20 Newsgroups corpus.

    Only two categories are kept to simplify the classification problem.

    Returns:
        tuple: (X_train, X_test, y_train, y_test) as produced by
        ``train_test_split`` with a fixed random seed.
    """
    # Fetch the two chosen newsgroup categories.
    corpus = fetch_20newsgroups(subset='all', categories=['sci.space', 'rec.autos'])

    # Binary bag-of-words features capped at the 1000 most frequent terms.
    vectorizer = CountVectorizer(binary=True, stop_words='english', max_features=1000)
    features = vectorizer.fit_transform(corpus.data).toarray()
    labels = corpus.target

    # Hold out 20% of the samples for testing; seed fixed for reproducibility.
    return train_test_split(features, labels, test_size=0.2, random_state=42)


# Run the experiment
if __name__ == "__main__":
    # Fetch the preprocessed train/test split.
    train_X, test_X, train_y, test_y = load_data()

    # Build and fit the naive Bayes classifier.
    clf = NaiveBayes(alpha=1.0)
    clf.fit(train_X, train_y)

    # Score the held-out samples.
    predicted = clf.predict(test_X)

    # Report test-set accuracy.
    accuracy = np.mean(predicted == test_y)
    print(f"测试集准确率: {accuracy:.4f}")