import os
import math
from collections import defaultdict, Counter


class SimpleNaiveBayes:
    """Multinomial Naive Bayes text classifier with Laplace (add-one) smoothing.

    Documents are represented as lists of tokens. Call train() once, then
    predict() per document.
    """

    def __init__(self):
        # Prior P(C) per class label, filled in by train().
        self.class_probabilities = {}
        # Smoothed likelihood P(word | C): label -> {word: probability}.
        self.feature_probabilities = defaultdict(dict)
        # Total token count per class and vocabulary size, kept so that
        # predict() can smooth words never seen during training consistently.
        self.total_features_per_class = defaultdict(int)
        self.vocab_size = 0

    def train(self, documents, labels):
        """Fit priors and smoothed per-class word likelihoods.

        documents: sequence of token lists.
        labels: parallel sequence of class labels.
        Raises ValueError if documents is empty.
        """
        total_documents = len(documents)
        if total_documents == 0:
            raise ValueError("train() requires at least one document")

        # Prior probabilities P(C).
        label_counts = Counter(labels)
        for label, count in label_counts.items():
            self.class_probabilities[label] = count / total_documents

        # Per-class word occurrence counts and per-class token totals.
        feature_counts_per_class = defaultdict(Counter)
        for doc, label in zip(documents, labels):
            feature_counts_per_class[label].update(doc)
            self.total_features_per_class[label] += len(doc)

        # Conditional probabilities P(word | C) with Laplace smoothing.
        vocab = {word for doc in documents for word in doc}
        self.vocab_size = len(vocab)
        for label in self.class_probabilities:
            denominator = self.total_features_per_class[label] + self.vocab_size
            class_counts = feature_counts_per_class[label]
            for word in vocab:
                self.feature_probabilities[label][word] = (
                    (class_counts[word] + 1) / denominator
                )

    def predict(self, document):
        """Return the most probable class label for a tokenized document.

        Returns None if the model has not been trained.
        """
        best_label = None
        best_log_prob = -math.inf

        for label, prior in self.class_probabilities.items():
            log_prob = math.log(prior)
            word_probs = self.feature_probabilities[label]
            # Probability for a word absent from the training vocabulary:
            # treat it as a zero-count word under Laplace smoothing (vocab
            # extended by one). The previous code summed the stored
            # *probabilities* (~1.0), giving every unseen word a probability
            # near 0.5, and recomputed that O(vocab) sum per unseen word.
            unseen_prob = 1 / (
                self.total_features_per_class[label] + self.vocab_size + 1
            )
            for word in document:
                log_prob += math.log(word_probs.get(word, unseen_prob))

            if log_prob > best_log_prob:
                best_log_prob = log_prob
                best_label = label

        return best_label


def load_and_prepare_data(local_path, categories=None):
    """Load a directory-per-category corpus into (documents, labels).

    local_path: root directory with one sub-directory per category.
    categories: optional iterable of category names to load; defaults to
        every sub-directory of local_path.
    Returns (data, labels): data is a list of lowercased token lists, labels
    is the parallel list of category names.
    """
    data = []
    labels = []

    # sorted() makes the load order deterministic across filesystems.
    for category in categories or sorted(os.listdir(local_path)):
        category_path = os.path.join(local_path, category)
        if not os.path.isdir(category_path):
            continue

        for file_name in sorted(os.listdir(category_path)):
            file_path = os.path.join(category_path, file_name)
            # Skip nested directories: open() would raise on them.
            if not os.path.isfile(file_path):
                continue
            # latin-1 decodes any byte sequence, which suits the mixed
            # encodings found in the 20 Newsgroups corpus.
            with open(file_path, 'r', encoding='latin-1') as file:
                words = file.read().lower().split()  # simple whitespace tokenization
            data.append(words)
            labels.append(category)

    return data, labels


if __name__ == "__main__":
    local_data_path = "D:\\xmk_py\\20_newsgroups"
    categories = ['sci.space', 'rec.autos']  # 只选择两个类别进行测试

    try:
        data, labels = load_and_prepare_data(local_data_path, categories=categories)

        # 假设我们已经把数据集划分成了训练集和测试集
        split_point = int(len(data) * 0.8)
        X_train, X_test = data[:split_point], data[split_point:]
        y_train, y_test = labels[:split_point], labels[split_point:]

        model = SimpleNaiveBayes()
        model.train(X_train, y_train)

        predictions = [model.predict(doc) for doc in X_test]

        accuracy = sum(1 for pred, true in zip(predictions, y_test) if pred == true) / len(y_test)
        print(f"测试集准确率: {accuracy:.4f}")

    except Exception as e:
        print(f"An error occurred: {e}")