import numpy as np
import os
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer

class NaiveBayesClassifier:
    """Naive Bayes over continuous features discretized into three bins.

    For each class and each feature, the 33rd/67th percentiles of the
    feature's *non-zero* training values define "low"/"mid"/"high" bins;
    Laplace-smoothed bin frequencies act as conditional probabilities.
    Priors are Laplace-smoothed class frequencies.
    """

    def __init__(self):
        self.prior = {}        # class label -> smoothed prior P(class)
        self.conditional = {}  # class label -> {feature idx -> {'bins', 'prob'}}
        self.classes = None      # unique labels, set by fit()
        self.feature_num = None  # number of feature columns, set by fit()

    def fit(self, X, y):
        """Estimate class priors and per-feature bin probabilities.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Dense feature matrix (e.g. TF-IDF values).
        y : ndarray of shape (n_samples,)
            Class labels.
        """
        self.classes = np.unique(y)
        self.feature_num = X.shape[1]
        total_samples = len(y)

        # Laplace-smoothed priors: (count + 1) / (N + n_classes).
        for cls in self.classes:
            cls_count = np.sum(y == cls)
            self.prior[cls] = (cls_count + 1) / (total_samples + len(self.classes))

        for cls in self.classes:
            cls_X = X[y == cls]
            self.conditional[cls] = {}
            for i in range(self.feature_num):
                feature_vals = cls_X[:, i]
                nonzero = feature_vals[feature_vals > 0]
                # Bin edges come from the non-zero values; fall back to fixed
                # edges when this feature never fires for this class.
                if nonzero.size:
                    bins = np.percentile(nonzero, [33, 67])
                else:
                    bins = [0.3, 0.6]
                # NOTE(review): zeros ARE counted here (they land in bin 0),
                # but predict() skips zero-valued features entirely. This
                # asymmetry makes bin 0 act as an "absence" signal — it looks
                # intentional, so it is preserved; confirm with the author.
                self.conditional[cls][i] = {
                    'bins': bins,
                    'prob': self._calc_feature_prob(feature_vals, bins)
                }

    def _calc_feature_prob(self, vals, bins):
        """Return Laplace-smoothed probabilities of the 3 bins for `vals`.

        `digitize(..., right=True)` maps each value to 0, 1 or 2 against the
        two edges in `bins`; `minlength=3` keeps all bins even when empty.
        """
        labels = np.digitize(vals, bins, right=True)
        label_counts = np.bincount(labels, minlength=3)
        prob = (label_counts + 1) / (len(vals) + 3)
        return prob

    def predict(self, X):
        """Return the most probable class label for each row of X.

        Zero-valued features are treated as uninformative and skipped,
        so only the non-zero entries of each sample contribute to the
        log-posterior.
        """
        predictions = []
        for x in X:
            # TF-IDF rows are sparse: compute the non-zero index set once
            # per sample instead of testing every column for every class.
            active = np.nonzero(x)[0]
            posteriors = {}
            for cls in self.classes:
                cond = self.conditional[cls]  # hoist the per-class lookup
                log_posterior = np.log(self.prior[cls])
                for i in active:
                    entry = cond[i]
                    label = np.digitize(x[i], entry['bins'], right=True)
                    # With two edges, digitize already yields 0..2; the clamp
                    # is kept as a cheap safety net.
                    label = min(max(label, 0), 2)
                    log_posterior += np.log(entry['prob'][label])
                posteriors[cls] = log_posterior
            # Ties resolve to the first class in self.classes (sorted order).
            predictions.append(max(posteriors, key=posteriors.get))
        return np.array(predictions)

def load_20newsgroups_data():
    """Fetch a four-category subset of 20 Newsgroups and TF-IDF vectorize it.

    Headers, footers and quotes are stripped from the raw posts, and the
    vocabulary is capped at the 1000 highest-scoring terms (English stop
    words removed). Returns dense train/test matrices, their label arrays,
    and the list of category names.
    """
    categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']
    fetch_kwargs = dict(
        categories=categories,
        remove=('headers', 'footers', 'quotes'),
        shuffle=True,
        random_state=42,
    )
    train_data = fetch_20newsgroups(subset='train', **fetch_kwargs)
    test_data = fetch_20newsgroups(subset='test', **fetch_kwargs)

    vectorizer = TfidfVectorizer(stop_words='english', max_features=1000)
    X_train = vectorizer.fit_transform(train_data.data).toarray()
    X_test = vectorizer.transform(test_data.data).toarray()

    return X_train, X_test, train_data.target, test_data.target, train_data.target_names

if __name__ == "__main__":
    # Train the discretized Naive Bayes on 20 Newsgroups and report accuracy.
    X_train, X_test, y_train, y_test, target_names = load_20newsgroups_data()

    classifier = NaiveBayesClassifier()
    classifier.fit(X_train, y_train)
    predictions = classifier.predict(X_test)

    accuracy = np.mean(predictions == y_test)
    print(f"朴素贝叶斯分类准确率：{accuracy:.4f}")