import os
import numpy as np
from collections import Counter

# 朴素贝叶斯算法实现
class NaiveBayes:
    """Categorical naive Bayes classifier for discrete feature values.

    fit() estimates class priors and per-feature value probabilities from
    the training data; predict() returns, for each sample, the class that
    maximizes log P(C) + sum_i log P(x_i | C).
    """

    def __init__(self):
        # P(C) for each class label, filled in by fit().
        self.class_priors = {}
        # Nested mapping: class -> feature index -> {value: P(value | class)}.
        self.feature_likelihoods = {}
        # Distinct class labels seen during fit() (sorted, via np.unique).
        self.classes = []

    def fit(self, X, y):
        """Estimate priors and per-feature conditional probabilities.

        X: 2-D array, one row per training sample (discrete feature values).
        y: 1-D array of class labels aligned with the rows of X.
        """
        X = np.asarray(X)
        y = np.asarray(y)
        self.classes = np.unique(y)
        num_features = X.shape[1]

        # Prior P(C): relative frequency of each class in the training set.
        self.class_priors = {
            c: np.mean(y == c) for c in self.classes
        }

        # Conditional P(x_i = v | C): value frequency within class c.
        # BUG FIX: the original stored raw Counter counts and fed them to
        # np.log() in predict(); log(count) = log(P * N_c), which adds
        # num_features * log(class size) to every posterior and biases
        # predictions toward larger classes. Normalize counts to
        # probabilities so the log-posterior is correct.
        self.feature_likelihoods = {}
        for c in self.classes:
            X_c = X[y == c]  # samples belonging to class c
            n_c = X_c.shape[0]
            self.feature_likelihoods[c] = {
                feature_idx: {
                    value: count / n_c
                    for value, count in Counter(X_c[:, feature_idx]).items()
                }
                for feature_idx in range(num_features)
            }

    def predict(self, X):
        """Return the most probable class label for each row of X.

        X: 2-D array of samples with the same feature layout as in fit().
        Returns a 1-D numpy array of predicted class labels.
        """
        predictions = []
        for sample in np.asarray(X):
            posteriors = {}
            for c in self.classes:
                # Unnormalized log-posterior: log P(C) + sum_i log P(x_i|C).
                posterior = np.log(self.class_priors[c])
                for feature_idx, feature_value in enumerate(sample):
                    # Floor unseen (class, feature, value) combinations at a
                    # tiny pseudo-probability so log() stays finite.
                    likelihood = self.feature_likelihoods[c][feature_idx].get(
                        feature_value, 1e-6
                    )
                    posterior += np.log(likelihood)
                posteriors[c] = posterior
            # Pick the class with the highest log-posterior.
            predictions.append(max(posteriors, key=posteriors.get))
        return np.array(predictions)

# 数据预处理和加载示例
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split

def load_data():
    """Fetch a two-class 20 Newsgroups subset and return a train/test split.

    Documents from 'sci.space' and 'rec.autos' are vectorized into binary
    bag-of-words features (top 1000 non-stopword terms). Returns the tuple
    (X_train, X_test, y_train, y_test) from a fixed-seed 80/20 split.
    """
    corpus = fetch_20newsgroups(subset='all', categories=['sci.space', 'rec.autos'])
    vectorizer = CountVectorizer(binary=True, stop_words='english', max_features=1000)
    features = vectorizer.fit_transform(corpus.data).toarray()
    labels = corpus.target
    return train_test_split(features, labels, test_size=0.2, random_state=42)

# 实验运行
if __name__ == "__main__":
    # Train the classifier on the newsgroup split and report test accuracy.
    X_train, X_test, y_train, y_test = load_data()

    clf = NaiveBayes()
    clf.fit(X_train, y_train)
    predicted = clf.predict(X_test)

    # Fraction of held-out samples whose predicted label matches the truth.
    accuracy = np.mean(predicted == y_test)
    print(f"测试集准确率: {accuracy:.4f}")
