import os
import re
import random
import numpy as np
from collections import defaultdict
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer

# Fetch the NLTK stopword corpus (no-op if it is already cached locally).
nltk.download('stopwords')
# English stopwords filtered out during preprocessing.
stop_words = set(stopwords.words('english'))
# Porter stemmer shared by all classifier instances to reduce tokens to stems.
stemmer = PorterStemmer()


class NaiveBayesClassifier:
    """Multinomial Naive Bayes text classifier with Laplace smoothing.

    Statistics are accumulated incrementally by `train`; `predict` scores
    every class seen during training in log-space and returns the arg-max
    class label (or None if the model is untrained).
    """

    def __init__(self):
        # Per-class word frequencies: label -> {word -> count}.
        self.class_word_counts = defaultdict(lambda: defaultdict(int))
        # Per-class total token count.
        self.class_total_words = defaultdict(int)
        # Per-class document count.
        self.class_doc_counts = defaultdict(int)
        # Number of training documents seen overall.
        self.total_docs = 0
        # Every distinct (stemmed) token observed during training.
        self.vocabulary = set()

    def preprocess(self, text):
        """Tokenize `text`: lowercase, strip non-letters, drop short and
        stop words, then Porter-stem the survivors.

        Returns a list of stemmed tokens.
        """
        # Replace anything that is not a letter or whitespace with a space,
        # then split on whitespace.
        cleaned = re.sub(r'[^a-zA-Z\s]', ' ', text.lower())
        tokens = []
        for token in cleaned.split():
            # Keep tokens longer than two characters that are not stopwords;
            # the filter applies to the raw token, stemming happens after.
            if len(token) > 2 and token not in stop_words:
                tokens.append(stemmer.stem(token))
        return tokens

    def train(self, texts, labels):
        """Accumulate word and document statistics from labelled documents.

        `texts` and `labels` are parallel sequences; may be called multiple
        times to train incrementally.
        """
        for document, label in zip(texts, labels):
            self.class_doc_counts[label] += 1
            self.total_docs += 1
            for token in self.preprocess(document):
                self.vocabulary.add(token)
                self.class_word_counts[label][token] += 1
                self.class_total_words[label] += 1

    def predict(self, text, laplace=1):
        """Return the class label with the highest log-posterior for `text`.

        Uses log(P(Y)) + sum(log(P(x_i|Y))) with `laplace` pseudo-counts so
        that words unseen in a class never yield a zero probability.
        Returns None when no training data has been seen.
        """
        tokens = self.preprocess(text)
        vocab_size = len(self.vocabulary)
        best_class = None
        best_score = -float('inf')

        for label in self.class_doc_counts:
            # Log prior: log P(label).
            log_prior = np.log(self.class_doc_counts[label] / self.total_docs)

            # Log likelihood, accumulated separately from the prior.
            # The smoothed denominator is identical for every token of a
            # given class, so compute it once.
            denom = self.class_total_words[label] + laplace * vocab_size
            log_likelihood = 0.0
            for token in tokens:
                count = self.class_word_counts[label].get(token, 0)
                log_likelihood += np.log((count + laplace) / denom)

            score = log_prior + log_likelihood
            # Strict '>' keeps the first-seen class on an exact tie.
            if score > best_score:
                best_score = score
                best_class = label

        return best_class


def load_20newsgroups(data_dir, categories=None, max_docs_per_category=200):
    """Load raw documents from a 20-newsgroups-style directory tree.

    Each sub-directory of `data_dir` is treated as a category and each
    regular file inside it as one document (read as latin-1, which never
    fails to decode).

    Args:
        data_dir: root directory containing one folder per category.
        categories: iterable of category folder names to load; falsy means
            auto-discover every sub-directory of `data_dir`.
        max_docs_per_category: cap on documents read per category.

    Returns:
        (texts, labels): parallel lists of document strings and their
        category names.
    """
    texts = []
    labels = []

    if not categories:
        # Sort so auto-discovered categories load in a stable order.
        categories = sorted(
            d for d in os.listdir(data_dir)
            if os.path.isdir(os.path.join(data_dir, d))
        )

    print(f"开始加载类别：{categories}")
    for category in categories:
        cat_dir = os.path.join(data_dir, category)
        if not os.path.isdir(cat_dir):
            print(f"警告：类别文件夹不存在 {cat_dir}，跳过")
            continue

        # os.listdir order is OS-dependent, so sort to make the
        # `[:max_docs_per_category]` subset deterministic and reproducible.
        # Also skip non-file entries (e.g. nested directories).
        files = sorted(
            f for f in os.listdir(cat_dir)
            if os.path.isfile(os.path.join(cat_dir, f))
        )
        files = files[:max_docs_per_category]
        print(f"加载类别 {category}：{len(files)} 篇文档")

        for file in files:
            file_path = os.path.join(cat_dir, file)
            try:
                with open(file_path, 'r', encoding='latin-1') as f:
                    texts.append(f.read())
                    labels.append(category)
            except OSError as e:
                # I/O errors only: latin-1 decoding cannot raise, so a
                # broader catch would just hide programming errors.
                print(f"警告：读取文件 {file_path} 失败，错误：{e}")
                continue

    print(f"数据加载完成：共 {len(texts)} 篇文档，{len(set(labels))} 个类别")
    return texts, labels


if __name__ == "__main__":
    # --- Configuration --------------------------------------------------
    data_path = "20news-18828"
    categories = ['sci.space', 'rec.sport.baseball', 'talk.politics.misc', 'comp.graphics']
    train_ratio = 0.8  # fraction of documents used for training
    max_docs_per_category = 200  # cap on documents loaded per category

    # --- Load data ------------------------------------------------------
    texts, labels = load_20newsgroups(data_path, categories, max_docs_per_category)

    # Guard: zip(*combined) below raises an opaque ValueError on an empty
    # dataset (e.g. when data_path does not exist or holds no documents).
    if not texts:
        raise SystemExit(f"错误：未在 {data_path} 中加载到任何文档，请检查数据目录")

    # --- Shuffle and split train/test -----------------------------------
    combined = list(zip(texts, labels))
    random.shuffle(combined)
    texts, labels = zip(*combined)
    split_idx = int(train_ratio * len(texts))
    train_texts, train_labels = texts[:split_idx], labels[:split_idx]
    test_texts, test_labels = texts[split_idx:], labels[split_idx:]

    print(f"训练集：{len(train_texts)} 篇，测试集：{len(test_texts)} 篇")
    print("开始训练模型...")
    classifier = NaiveBayesClassifier()
    classifier.train(train_texts, train_labels)

    print("\n训练后类别文档分布：")
    for cls, count in classifier.class_doc_counts.items():
        print(f"类别 {cls}：{count} 篇文档，总词数 {classifier.class_total_words[cls]}")
    print(f"词汇表大小：{len(classifier.vocabulary)}")

    print("\n开始测试模型...")
    correct = 0
    total = len(test_texts)
    error_cases = []  # kept for post-hoc inspection of misclassifications

    # Iterate document/label pairs directly instead of indexing
    # via range(len(...)).
    for text, true_label in zip(test_texts, test_labels):
        pred_label = classifier.predict(text)
        if pred_label == true_label:
            correct += 1
        else:
            error_cases.append((text, true_label, pred_label))

    # Avoid ZeroDivisionError when the test split is empty
    # (tiny corpus combined with a high train_ratio).
    accuracy = correct / total if total else 0.0
    print(f"\n分类准确率：{accuracy:.4f}")
    print(f"正确分类：{correct}/{total}")
