import numpy as np
from collections import defaultdict
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
import matplotlib.pyplot as plt

class NaiveBayes:
    """Gaussian Naive Bayes classifier.

    Each feature is modeled per class as an independent Gaussian; prediction
    picks the class with the largest log-posterior.  Per-class statistics are
    kept both as NumPy arrays (used internally for vectorized scoring) and in
    the original ``feature_prob`` nested-dict layout for backward
    compatibility with existing callers.
    """

    def __init__(self):
        self.class_prob = {}    # class label -> prior P(c)
        self.feature_prob = {}  # class label -> {feature index -> {'mean', 'var'}}

    def fit(self, X, y):
        """Estimate class priors and per-feature Gaussian parameters.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training feature matrix.
        y : array-like of shape (n_samples,)
            Class labels aligned with the rows of ``X``.
        """
        X = np.asarray(X)
        y = np.asarray(y)
        n_samples, n_features = X.shape
        self.classes = np.unique(y)
        # Vectorized per-class statistics, replacing the original
        # O(n_classes * n_features) pure-Python feature loop.
        self._means = {}
        self._vars = {}
        for c in self.classes:
            X_c = X[y == c]
            self.class_prob[c] = X_c.shape[0] / n_samples
            mean_c = X_c.mean(axis=0)
            var_c = X_c.var(axis=0)
            self._means[c] = mean_c
            self._vars[c] = var_c
            # Preserve the original public nested-dict representation.
            self.feature_prob[c] = {
                f: {'mean': mean_c[f], 'var': var_c[f]}
                for f in range(n_features)
            }

    def predict(self, X):
        """Return a list of predicted class labels, one per row of ``X``.

        The score is computed directly in log space
        (log prior + sum over features of the log Gaussian density).  This is
        mathematically identical to the original
        ``np.log(self.gaussian_pdf(...))`` accumulation, but avoids the
        underflow-to-zero of the pdf in high dimensions, which made ``np.log``
        return ``-inf`` and emit divide-by-zero warnings.
        """
        X = np.asarray(X)
        eps = 1e-4  # same variance-smoothing constant as gaussian_pdf
        log_prior = {c: np.log(self.class_prob[c]) for c in self.classes}
        predictions = []
        for x in X:
            posteriors = {}
            for c in self.classes:
                var = self._vars[c]
                diff = x - self._means[c]
                # log N(x; mean, var), summed over features, fully vectorized:
                # log(coeff) + exponent-argument of the original gaussian_pdf.
                log_likelihood = np.sum(
                    -0.5 * np.log(2 * np.pi * var + eps)
                    - diff ** 2 / (2 * var + eps)
                )
                posteriors[c] = log_prior[c] + log_likelihood
            predictions.append(max(posteriors, key=posteriors.get))
        return predictions

    def gaussian_pdf(self, x, mean, var):
        """Smoothed Gaussian density; ``eps`` guards against zero variance."""
        eps = 1e-4
        coeff = 1.0 / np.sqrt(2 * np.pi * var + eps)
        exponent = np.exp(-(x - mean)**2 / (2 * var + eps))
        return coeff * exponent

def run_naive_bayes():
    """Run the Gaussian Naive Bayes experiment on 20 Newsgroups.

    Fetches three newsgroup categories (network download on first use, then
    cached by scikit-learn), extracts TF-IDF features, trains the hand-written
    ``NaiveBayes`` classifier, prints accuracy and a per-class report, and
    saves a bar-chart comparison of true vs. predicted label distributions to
    'naive_bayes_results.png'.

    Returns
    -------
    float
        Test-set accuracy.
    """
    print("=" * 50)
    print("朴素贝叶斯算法实验")
    print("=" * 50)
    
    # Load the dataset; headers/footers/quotes are stripped so the model
    # cannot cheat on newsgroup-specific metadata.
    print("正在加载20 Newsgroups数据集...")
    categories = ['sci.space', 'comp.graphics', 'rec.sport.baseball']
    newsgroups = fetch_20newsgroups(subset='all', categories=categories, 
                                   remove=('headers', 'footers', 'quotes'), shuffle=True, random_state=42)
    
    print(f"数据集大小: {len(newsgroups.data)}")
    print(f"类别: {newsgroups.target_names}")
    print(f"标签分布: {np.bincount(newsgroups.target)}")
    
    # TF-IDF feature extraction; densified because the Gaussian model needs
    # per-feature means/variances over a dense matrix.
    print("正在提取TF-IDF特征...")
    vectorizer = TfidfVectorizer(max_features=1000, stop_words='english')
    X = vectorizer.fit_transform(newsgroups.data).toarray()
    y = newsgroups.target
    
    # Train/test split; stratify=y keeps the class proportions identical in
    # both halves so the per-class metrics are not skewed by an uneven
    # random split.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42, stratify=y)
    
    print(f"训练集大小: {X_train.shape}")
    print(f"测试集大小: {X_test.shape}")
    
    # Train the model.
    print("正在训练朴素贝叶斯模型...")
    nb = NaiveBayes()
    nb.fit(X_train, y_train)
    
    # Predict on the held-out split.
    print("正在进行预测...")
    y_pred = nb.predict(X_test)
    
    # Evaluate.
    accuracy = accuracy_score(y_test, y_pred)
    print(f"\n准确率: {accuracy:.4f}")
    print("\n分类报告:")
    print(classification_report(y_test, y_pred, target_names=newsgroups.target_names))
    
    # Visualize the results.
    # NOTE(review): the Chinese titles/labels below need a CJK-capable font
    # registered with matplotlib (e.g. via plt.rcParams['font.sans-serif']),
    # otherwise they render as empty boxes — confirm on the target system.
    plt.figure(figsize=(10, 6))
    
    # Distribution of the true labels.
    plt.subplot(1, 2, 1)
    unique, counts = np.unique(y_test, return_counts=True)
    plt.bar(unique, counts, alpha=0.7, color='blue')
    plt.title('真实标签分布')
    plt.xlabel('类别')
    plt.ylabel('数量')
    
    # Distribution of the predicted labels.
    plt.subplot(1, 2, 2)
    unique_pred, counts_pred = np.unique(y_pred, return_counts=True)
    plt.bar(unique_pred, counts_pred, alpha=0.7, color='red')
    plt.title('预测标签分布')
    plt.xlabel('类别')
    plt.ylabel('数量')
    
    plt.tight_layout()
    plt.savefig('naive_bayes_results.png', dpi=300, bbox_inches='tight')
    plt.show()
    
    return accuracy

# Entry point: run the full experiment only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    run_naive_bayes()