# naive_bayes.py
import os
import random
import re
from collections import Counter

import numpy as np

# 添加一个简单的英文停用词列表
STOP_WORDS = set([
    'a', 'about', 'above', 'after', 'again', 'against', 'all', 'am', 'an', 'and', 'any', 'are', "aren't", 'as', 'at',
    'be', 'because', 'been', 'before', 'being', 'below', 'between', 'both', 'but', 'by', 'can', "can't", 'cannot',
    'could', "couldn't", 'did', "didn't", 'do', 'does', "doesn't", 'doing', "don't", 'down', 'during', 'each', 'few',
    'for', 'from', 'further', 'had', "hadn't", 'has', "hasn't", 'have', "haven't", 'having', 'he', "he'd", "he'll",
    "he's", 'her', 'here', "here's", 'hers', 'herself', 'him', 'himself', 'his', 'how', "how's", 'i', "i'd", "i'll",
    "i'm", "i've", 'if', 'in', 'into', 'is', "isn't", 'it', "it's", 'its', 'itself', "let's", 'me', 'more', 'most',
    "mustn't", 'my', 'myself', 'no', 'nor', 'not', 'of', 'off', 'on', 'once', 'only', 'or', 'other', 'ought', 'our',
    'ours', 'ourselves', 'out', 'over', 'own', 'same', "shan't", 'she', "she'd", "she'll", "she's", 'should',
    "shouldn't", 'so', 'some', 'such', 'than', 'that', "that's", 'the', 'their', 'theirs', 'them', 'themselves',
    'then', 'there', "there's", 'these', 'they', "they'd", "they'll", "they're", "they've", 'this', 'those',
    'through', 'to', 'too', 'under', 'until', 'up', 'very', 'was', "wasn't", 'we', "we'd", "we'll", "we're",
    "we've", 'were', "weren't", 'what', "what's", 'when', "when's", 'where', "where's", 'which', 'while', 'who',
    "who's", 'whom', 'why', "why's", 'with', "won't", 'would', "wouldn't", 'you', "you'd", "you'll", "you're",
    "you've", 'your', 'yours', 'yourself', 'yourselves'
])


class NaiveBayes:
    """Multinomial Naive Bayes text classifier with Laplace (add-one) smoothing.

    Call fit(X, y) with raw document strings and their labels, then
    predict(X) to classify new documents.  Classification is done in log
    space to avoid floating-point underflow on long documents.
    """

    def __init__(self):
        # P(class): prior probability of each class label.
        self.class_probabilities = {}
        # word_probabilities[cls][word] = smoothed P(word | cls).
        self.word_probabilities = {}
        # Class labels seen during fit().
        self.classes = set()
        # All distinct tokens seen across the training corpus.
        self.vocabulary = set()

    def _tokenize(self, text):
        """Lower-case *text*, split into word tokens, and drop stop words
        and purely numeric tokens."""
        words = re.findall(r'\b\w+\b', text.lower())
        return [word for word in words if word not in STOP_WORDS and not word.isdigit()]

    def fit(self, X, y):
        """Estimate class priors and smoothed per-class word likelihoods.

        X: sequence of raw document strings.
        y: sequence of class labels, parallel to X.
        """
        num_docs = len(X)
        self.classes = set(y)

        # Tokenize each document exactly once and build the vocabulary in the
        # same pass.  (The previous implementation tokenized every document
        # twice: once for the global vocabulary and once again per class for
        # the word counts — tokenization dominates training time here.)
        tokens_by_class = {cls: [] for cls in self.classes}
        self.vocabulary = set()
        for doc, cls in zip(X, y):
            tokens = self._tokenize(doc)
            tokens_by_class[cls].append(tokens)
            self.vocabulary.update(tokens)

        vocab_size = len(self.vocabulary)
        docs_per_class = Counter(y)

        for cls in self.classes:
            self.class_probabilities[cls] = docs_per_class[cls] / num_docs

            # Aggregate token counts over all documents of this class.
            word_counts = Counter()
            for tokens in tokens_by_class[cls]:
                word_counts.update(tokens)
            total_words_in_class = sum(word_counts.values())

            # Laplace (add-one) smoothing: every vocabulary word gets a
            # non-zero probability even if unseen in this class.
            denominator = total_words_in_class + vocab_size
            self.word_probabilities[cls] = {
                word: (word_counts[word] + 1) / denominator
                for word in self.vocabulary
            }

    def predict(self, X):
        """Return the most probable class label for each document in X.

        Tokens not present in the training vocabulary are ignored.
        Returns a list of labels parallel to X (None entries if the model
        was never fitted).
        """
        predictions = []
        for doc in X:
            tokens = self._tokenize(doc)
            max_log_prob = -np.inf
            best_class = None
            for cls in self.classes:
                log_prob = np.log(self.class_probabilities[cls])
                # Hoist the per-class dict lookup out of the token loop.
                word_probs = self.word_probabilities[cls]
                for word in tokens:
                    if word in self.vocabulary:
                        log_prob += np.log(word_probs[word])

                if log_prob > max_log_prob:
                    max_log_prob = log_prob
                    best_class = cls
            predictions.append(best_class)
        return predictions


def load_20newsgroups_data(path):
    """Load the 20 Newsgroups corpus from *path*.

    Expects one sub-directory per class; every regular file inside a class
    directory is read as one document.  Directories and files are visited in
    sorted order so repeated runs yield the documents in a deterministic
    order (os.listdir order is filesystem-dependent, which previously made
    the downstream random sample non-reproducible across machines).

    Returns:
        (X, y): list of document texts and the parallel list of class labels.
    Raises:
        FileNotFoundError: if *path* is not an existing directory.
    """
    X = []
    y = []
    if not os.path.isdir(path):
        raise FileNotFoundError(f"提供的路径不是一个有效的目录: {path}")

    for class_name in sorted(os.listdir(path)):
        class_path = os.path.join(path, class_name)
        if not os.path.isdir(class_path):
            continue
        for file_name in sorted(os.listdir(class_path)):
            file_path = os.path.join(class_path, file_name)
            # latin-1 maps every byte, so decoding cannot fail;
            # errors='ignore' is kept as a harmless extra guard.
            with open(file_path, 'r', errors='ignore', encoding='latin1') as f:
                X.append(f.read())
                y.append(class_name)
    return X, y


if __name__ == '__main__':
    # Hard-coded local path to the 20 Newsgroups training split
    # ("bydate" variant) — adjust to your own machine.
    data_path = r'D:\a_shiyan4\20news-bydate-train'

    try:
        X_data, y_data = load_20newsgroups_data(data_path)

        if not X_data:
            print(f"错误：在路径 '{data_path}' 下没有找到任何数据。")
        else:
            # Number of documents to sample; larger samples improve accuracy
            # at the cost of training time.
            SAMPLE_SIZE = 8000

            # Shuffle documents and labels together so the sample is random
            # while each document keeps its own label.
            combined = list(zip(X_data, y_data))
            random.shuffle(combined)

            # Take the sample (the full dataset if it has fewer documents).
            sample_data = combined[:SAMPLE_SIZE]
            sample_X, sample_y = zip(*sample_data)

            # 80% / 20% train/test split of the sampled documents.
            split_point = int(len(sample_X) * 0.8)
            X_train = sample_X[:split_point]
            y_train = sample_y[:split_point]
            X_test = sample_X[split_point:]
            y_test_true = sample_y[split_point:]

            print(f"使用 {len(X_train)} 个样本进行训练, {len(X_test)} 个样本进行测试。")
            print("开始训练模型（数据量较大，可能需要一些时间）...")
            nb = NaiveBayes()
            nb.fit(X_train, y_train)
            print("模型训练完成。")

            predictions = nb.predict(X_test)

            # Show the first few predictions next to the true labels.
            print("\n实验结果（展示前5个预测）：")
            for i in range(min(5, len(X_test))):
                doc_preview = X_test[i].replace('\n', ' ').replace('\r', '')
                print(f"文档: {doc_preview[:80]}...")
                print(f"  -> 预测类别: {predictions[i]}")
                print(f"  -> 真实类别: {y_test_true[i]}\n")

            # Overall accuracy on the held-out test sample.
            accuracy = np.mean(np.array(predictions) == np.array(y_test_true))
            print(f"在 {len(X_test)} 个测试样本上的预测准确率: {accuracy * 100:.2f}%")

    except FileNotFoundError as e:
        print(f"错误：数据集路径 '{data_path}' 不存在或不是一个目录。")
        print(e)