import numpy as np
from collections import Counter
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Configure matplotlib so Chinese labels and minus signs render correctly.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],
    'axes.unicode_minus': False,
})

# Re-implemented naive Bayes classifier class
class CustomNaiveBayesClassifier:
    """Categorical naive Bayes classifier with Laplace (add-one) smoothing.

    Each feature column is treated as a categorical variable: likelihoods
    are estimated from the frequency of each distinct value observed per
    class. NOTE(review): in this file it is applied to continuous TF-IDF
    values, where most floats are unique, so at classification time most
    lookups fall back to the smoothing floor — confirm this is intended.
    """

    def __init__(self):
        # P(C): class label -> prior probability
        self._priors = {}
        # P(feature=value | C): class -> {column index -> {value -> prob}}
        self._likelihoods = {}
        # Distinct class labels seen during training (set by train())
        self._categories = []

    def train(self, features, labels):
        """Estimate class priors and per-column value likelihoods.

        Args:
            features: 2-D array, shape (n_samples, n_features).
            labels: 1-D array of class labels, length n_samples.
        """
        self._categories = np.unique(labels)

        # Prior P(C): fraction of training samples belonging to each class.
        for category in self._categories:
            self._priors[category] = (labels == category).mean()

        # Conditional P(feature=value | C) with Laplace smoothing.
        # NOTE: the smoothing denominator uses the number of *observed*
        # distinct values in the column, not a global vocabulary size.
        for category in self._categories:
            category_features = features[labels == category]
            feature_probabilities = {}
            for idx in range(category_features.shape[1]):
                count = Counter(category_features[:, idx])
                total_count = sum(count.values())
                feature_probabilities[idx] = {
                    feat: (cnt + 1) / (total_count + len(count))
                    for feat, cnt in count.items()
                }
            self._likelihoods[category] = feature_probabilities

    def classify(self, features):
        """Return the most probable class for each row of `features`.

        Accumulates log-probabilities to avoid floating-point underflow.
        Feature values never seen for a class during training fall back to
        a small floor probability (1e-9) instead of zeroing the posterior.
        """
        predictions = []
        for feature in features:
            best_category = None
            highest_posterior = -np.inf
            for category in self._categories:
                posterior = np.log(self._priors[category])
                # Hoist the per-class table lookup out of the inner loop.
                class_likelihoods = self._likelihoods[category]
                for idx, value in enumerate(feature):
                    # Stored likelihoods are strictly positive (Laplace +1),
                    # so only the .get default supplies the 1e-9 floor.
                    likelihood = class_likelihoods.get(idx, {}).get(value, 1e-9)
                    posterior += np.log(likelihood)
                if posterior > highest_posterior:
                    highest_posterior = posterior
                    best_category = category
            predictions.append(best_category)
        return np.array(predictions)

# Load the dataset and convert it to a TF-IDF feature representation
def prepare_data():
    """Fetch two 20-newsgroups categories, vectorize them, and split.

    Returns:
        Tuple (X_train, X_test, y_train, y_test) from a 75/25 split
        with a fixed random seed for reproducibility.
    """
    corpus = fetch_20newsgroups(subset='all', categories=['sci.space', 'rec.autos'])
    tfidf = TfidfVectorizer(max_features=1000, stop_words='english')
    feature_matrix = tfidf.fit_transform(corpus.data).toarray()
    targets = corpus.target
    return train_test_split(feature_matrix, targets, test_size=0.25, random_state=42)

# Script entry point: train, evaluate, and visualize per-class accuracy.
if __name__ == "__main__":
    X_train, X_test, y_train, y_test = prepare_data()
    classifier = CustomNaiveBayesClassifier()
    classifier.train(X_train, y_train)
    predictions = classifier.classify(X_test)

    # Overall accuracy on the held-out test set.
    overall_accuracy = (predictions == y_test).mean()
    print(f"测试集上的总体准确率: {overall_accuracy * 100:.2f}%")

    # Per-class hit counts and support.
    correct_predictions_per_class = {cat: 0 for cat in classifier._categories}
    class_counts = {cat: 0 for cat in classifier._categories}

    for prediction, actual in zip(predictions, y_test):
        class_counts[actual] += 1
        if prediction == actual:
            correct_predictions_per_class[actual] += 1

    # Human-readable names; order must match classifier._categories.
    # NOTE(review): assumes label order maps to these names — sklearn orders
    # targets alphabetically by category name, so verify the mapping.
    category_names = ['科技空间', '汽车讨论']

    per_class_accuracy = []
    for category, name in zip(classifier._categories, category_names):
        # Guard against a class with zero test samples (avoids ZeroDivisionError).
        ratio = (
            correct_predictions_per_class[category] / class_counts[category]
            if class_counts[category] else 0
        )
        per_class_accuracy.append(ratio)
        print(f"类别 '{name}' 的预测准确率: {ratio * 100:.2f}%")

    # BUG FIX: the original pie chart normalized the per-class accuracies to
    # sum to 100%, so the displayed autopct percentages were NOT the
    # accuracies (two classes both at 90% would each show 50%), and it also
    # divided without the zero-count guard. A bar chart shows the actual
    # per-class accuracy values directly.
    fig, ax = plt.subplots()
    ax.bar(
        [f'类别 {name}' for name in category_names],
        [ratio * 100 for ratio in per_class_accuracy],
    )
    ax.set_ylabel('准确率 (%)')
    ax.set_ylim(0, 100)
    plt.title('各分类预测准确率')
    plt.show()