import os
import re
from collections import defaultdict, Counter
import math
# Load data
def load_20newsgroups_data(data_dir):
    """Read the 20 Newsgroups corpus from disk.

    Each subdirectory of *data_dir* is treated as one class; every file
    inside it becomes one document. Files that cannot be read are reported
    and skipped.

    Returns:
        (documents, labels, label_names) — documents is a list of raw
        texts, labels the matching integer class indices, and label_names
        the sorted class directory names those indices point into.
    """
    documents = []
    labels = []
    label_names = sorted(os.listdir(data_dir))
    for class_idx, class_name in enumerate(label_names):
        class_path = os.path.join(data_dir, class_name)
        if not os.path.isdir(class_path):
            continue
        for entry in os.listdir(class_path):
            path = os.path.join(class_path, entry)
            try:
                # latin1 never fails to decode, so only I/O errors are expected here.
                with open(path, 'r', encoding='latin1') as handle:
                    text = handle.read()
            except Exception as e:
                print(f"Error reading file {path}: {e}")
            else:
                documents.append(text)
                labels.append(class_idx)
    return documents, labels, label_names
# Text preprocessing
def preprocess_text(text):
    """Normalize *text*: lower-case, drop punctuation, collapse whitespace.

    Any character that is neither a word character nor whitespace is
    removed, then runs of whitespace become a single space.
    """
    lowered = text.lower()
    no_punct = re.sub(r'[^\w\s]', '', lowered)
    return re.sub(r'\s+', ' ', no_punct)
# Build vocabulary
def build_vocab(data, vocab_size=5000):
    """Build a token-to-index map over the *vocab_size* most frequent words.

    Tokens are whitespace-split words from every document in *data*;
    indices are assigned in descending frequency order (ties keep first
    occurrence order, per Counter.most_common).
    """
    counts = Counter()
    for doc in data:
        counts.update(doc.split())
    ranked = counts.most_common(vocab_size)
    return {word: rank for rank, (word, _) in enumerate(ranked)}
# Convert text to a term-frequency vector
def text_to_vector(text, vocab):
    """Return the bag-of-words count vector of *text* over *vocab*.

    Position vocab[w] holds the number of occurrences of w; words
    outside the vocabulary are ignored.
    """
    counts = Counter(text.split())
    vec = [0] * len(vocab)
    for word, freq in counts.items():
        idx = vocab.get(word)
        if idx is not None:
            vec[idx] = freq
    return vec

# Naive Bayes training
def train_naive_bayes(X_train, y_train, vocab_size):
    """Fit a multinomial Naive Bayes model with add-one (Laplace) smoothing.

    Args:
        X_train: list of per-document word-count vectors of length vocab_size.
        y_train: matching list of integer class labels.
        vocab_size: vocabulary dimension (length of each count vector).

    Returns:
        (prior_prob, cond_prob): class priors P(C) keyed by label, and
        per-class smoothed conditional probabilities P(w_i|C) as lists.
    """
    n_docs = len(y_train)
    # Prior P(C) = fraction of training documents in class C.
    prior_prob = {cls: cnt / n_docs for cls, cnt in Counter(y_train).items()}

    # Accumulate per-class word counts and per-class total token counts.
    word_count = defaultdict(lambda: [0] * vocab_size)
    total_words = defaultdict(int)
    for vec, cls in zip(X_train, y_train):
        row = word_count[cls]
        for i, c in enumerate(vec):
            row[i] += c
            total_words[cls] += c

    # Laplace smoothing: +1 per word, +vocab_size to the denominator.
    cond_prob = {
        cls: [(row[i] + 1) / (total_words[cls] + vocab_size)
              for i in range(vocab_size)]
        for cls, row in word_count.items()
    }
    return prior_prob, cond_prob

# Naive Bayes prediction
def predict_naive_bayes(X_test, prior_prob, cond_prob):
    """Predict a class label for each count vector in X_test.

    Scores each class with log P(C) + sum_i count_i * log P(w_i|C) and
    returns the arg-max class per document.

    Args:
        X_test: list of word-count vectors.
        prior_prob: class priors P(C), as returned by train_naive_bayes.
        cond_prob: per-class conditional probabilities P(w_i|C).

    Returns:
        List of predicted class labels, one per input vector.
    """
    # Hoist the log() calls out of the per-document loop: the model is fixed,
    # so computing log P(C) and log P(w|C) once saves O(docs * vocab) log
    # evaluations while producing numerically identical scores.
    log_prior = {cls: math.log(p) for cls, p in prior_prob.items()}
    log_cond = {cls: [math.log(p) for p in probs]
                for cls, probs in cond_prob.items()}

    predictions = []
    for vector in X_test:
        scores = {}
        for cls, base in log_prior.items():
            log_probs = log_cond[cls]
            score = base
            for idx, count in enumerate(vector):
                if count > 0:  # absent words contribute nothing to the sum
                    score += count * log_probs[idx]
            scores[cls] = score
        predictions.append(max(scores, key=scores.get))  # arg-max class
    return predictions

# Main entry point
def main():
    """Train and evaluate the Naive Bayes classifier on 20 Newsgroups data."""
    # Load the training set
    # NOTE(review): this path points inside the ...\20news-bydate-test folder
    # but ends in ...-train — confirm it is the intended training directory.
    train_dir =  r'D:\Users\wjh\OneDrive\桌面\py-project\20news-bydate-test\20news-bydate-train' # replace with the actual training-set path
    train_data, train_labels, label_names = load_20newsgroups_data(train_dir)
    # Load the test set
    test_dir = r'D:\Users\wjh\OneDrive\桌面\py-project\20news-bydate-test\20news-bydate-test' # replace with the actual test-set path
    test_data, test_labels, _ = load_20newsgroups_data(test_dir)
    # Preprocess the raw text (lower-case, strip punctuation, collapse spaces)
    train_data_preprocessed = [preprocess_text(doc) for doc in train_data]
    test_data_preprocessed = [preprocess_text(doc) for doc in test_data]
    # Build the vocabulary from the training set only, then vectorize both sets
    vocab = build_vocab(train_data_preprocessed, vocab_size=5000)
    X_train = [text_to_vector(doc, vocab) for doc in train_data_preprocessed]
    X_test = [text_to_vector(doc, vocab) for doc in test_data_preprocessed]
    # Train the Naive Bayes model
    prior_prob, cond_prob = train_naive_bayes(X_train, train_labels, len(vocab))
    # Run the model on the test set
    predictions = predict_naive_bayes(X_test, prior_prob, cond_prob)
    # Print per-sample classification results
    print("分类结果：")
    for i, (prediction, actual) in enumerate(zip(predictions, test_labels)):
        print(f"样本 {i + 1}: 实际类别 = {label_names[actual]}, 预测类别 = {label_names[prediction]}")
    # Print overall classification accuracy
    accuracy = sum(p == t for p, t in zip(predictions, test_labels)) / len(test_labels)
    print(f"\n分类准确率：{accuracy:.2%}")

if __name__ == "__main__":
    main()