import os
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt


# Configure matplotlib for Chinese text: use the SimHei font for labels,
# and keep the minus sign renderable (CJK fonts otherwise break it).
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


# 朴素贝叶斯算法实现
# Naive Bayes implementation for discrete (categorical) features.
class NaiveBayes:
    """Categorical Naive Bayes classifier.

    Fits per-class priors P(C) and per-feature value counts, then
    classifies each sample as argmax_c [log P(C) + sum_i log P(x_i|C)].
    """

    def __init__(self):
        self.class_priors = {}         # P(C) for each class label
        self.feature_likelihoods = {}  # {class: {feature_idx: Counter of values}}
        self.class_counts = {}         # training samples per class (for normalization)
        self.classes = []

    def fit(self, X, y):
        """Estimate priors and per-class feature-value counts.

        Args:
            X: 2-D array (num_samples, num_features) of discrete feature values.
            y: 1-D array of class labels aligned with X's rows.
        """
        self.classes = np.unique(y)
        num_samples, num_features = X.shape

        # Prior probability P(C): fraction of training samples in each class.
        self.class_priors = {
            c: np.mean(y == c) for c in self.classes
        }

        # Raw value counts per class and feature, used for P(X|C) at predict time.
        self.feature_likelihoods = {}
        self.class_counts = {}
        for c in self.classes:
            X_c = X[y == c]  # samples belonging to class c
            self.class_counts[c] = X_c.shape[0]
            self.feature_likelihoods[c] = {
                feature_idx: Counter(X_c[:, feature_idx])
                for feature_idx in range(num_features)
            }

    def predict(self, X):
        """Return the most probable class label for each row of X."""
        predictions = []
        for sample in X:
            posteriors = {}
            for c in self.classes:
                # Posterior in log space: log P(C|X) ∝ log P(C) + Σ log P(x_i|C)
                posterior = np.log(self.class_priors[c])  # log P(C)
                n_c = self.class_counts[c]
                for feature_idx, feature_value in enumerate(sample):
                    count = self.feature_likelihoods[c][feature_idx].get(feature_value, 0)
                    # BUG FIX: the original fed the raw count straight into log()
                    # as if it were a probability, which biases scores toward
                    # larger classes. Normalize by the class size; the epsilon
                    # keeps unseen feature values from producing log(0).
                    likelihood = (count + 1e-6) / (n_c + 1e-6)
                    posterior += np.log(likelihood)  # log P(x_i|C)
                posteriors[c] = posterior
            # Pick the class with the highest log-posterior.
            predictions.append(max(posteriors, key=posteriors.get))
        return np.array(predictions)


# 数据预处理和加载示例
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split


def load_data():
    """Load a two-category 20 Newsgroups subset as binary bag-of-words.

    Returns:
        Tuple (X_train, X_test, y_train, y_test) from an 80/20 split
        with a fixed random seed for reproducibility.
    """
    corpus = fetch_20newsgroups(subset='all', categories=['sci.space', 'rec.autos'])
    vocab = CountVectorizer(binary=True, stop_words='english', max_features=1000)
    features = vocab.fit_transform(corpus.data).toarray()
    labels = corpus.target
    return train_test_split(features, labels, test_size=0.2, random_state=42)


# 实验运行
if __name__ == "__main__":
    # Train the from-scratch classifier and evaluate on the held-out split.
    X_train, X_test, y_train, y_test = load_data()
    model = NaiveBayes()
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)

    # Overall test-set accuracy.
    accuracy = np.mean(y_pred == y_test)
    print(f"测试集准确率: {accuracy:.4f}")

    # Per-class accuracy (as a percentage), computed with a boolean mask
    # instead of the original manual counting loop; a class absent from
    # the test split is reported as 0 rather than dividing by zero.
    class_accuracies = {}
    for c in model.classes:
        mask = y_test == c
        class_accuracy = np.mean(y_pred[mask] == c) if np.any(mask) else 0.0
        class_accuracies[c] = class_accuracy * 100
        print(f"类别 {c} 的预测准确率: {class_accuracy * 100:.2f}%")

    # Bar chart of per-class prediction accuracy.
    plt.bar(class_accuracies.keys(), class_accuracies.values())
    plt.xlabel('类别')
    plt.ylabel('预测准确率（%）')
    plt.title('朴素贝叶斯分类器各类别预测准确率')
    plt.show()