import os
import numpy as np
import re
import math
from collections import defaultdict, Counter
import random

class NaiveBayesClassifier:
    """Multinomial naive Bayes text classifier with Laplace smoothing.

    Documents are represented as bags of words; posteriors are combined
    in log space during prediction to avoid floating-point underflow.
    """

    def __init__(self, alpha=1.0):
        """Initialize the classifier.

        Parameters:
        alpha: Laplace (additive) smoothing parameter; must be > 0 so
               unseen words receive a non-zero probability.
        """
        self.alpha = alpha
        self.class_priors = {}   # class label -> P(class)
        self.feature_probs = {}  # class label -> {word: P(word | class)}
        self.vocab = set()       # words kept after min-count filtering
        self.classes = []        # distinct class labels seen during fit()

    def preprocess_text(self, text):
        """Lowercase the text, strip punctuation, and tokenize.

        Parameters:
        text: raw document string

        Returns:
        list of word tokens
        """
        text = text.lower()
        # Replace every non-letter character with a space so punctuation
        # never glues adjacent words together.
        text = re.sub(r'[^a-zA-Z\s]', ' ', text)
        return text.split()

    def build_vocabulary(self, documents, min_count=5):
        """Build the vocabulary from the training corpus.

        Parameters:
        documents: list of all training documents
        min_count: minimum total occurrences a word needs to be kept;
                   dropping rare words shrinks the model and cuts noise
        """
        word_counts = Counter()
        for doc in documents:
            word_counts.update(self.preprocess_text(doc))

        self.vocab = {word for word, count in word_counts.items()
                      if count >= min_count}
        print(f"词汇表大小: {len(self.vocab)}")

    def extract_features(self, document):
        """Extract bag-of-words features from one document.

        Parameters:
        document: a single document string

        Returns:
        dict mapping in-vocabulary words to their occurrence counts
        """
        counts = Counter(self.preprocess_text(document))
        # Discard out-of-vocabulary words.
        return {word: count for word, count in counts.items()
                if word in self.vocab}

    def fit(self, documents, labels, min_count=5):
        """Train the model: estimate class priors and word likelihoods.

        Parameters:
        documents: training documents
        labels: class label for each document
        min_count: forwarded to build_vocabulary(); the default of 5
                   preserves the previous hard-coded behavior
        """
        self.classes = list(set(labels))
        print(f"类别数量: {len(self.classes)}")

        self.build_vocabulary(documents, min_count=min_count)

        # Prior P(c) = fraction of training documents labelled c.
        # Counter makes this one pass instead of one pass per class.
        total_docs = len(documents)
        label_counts = Counter(labels)
        for c in self.classes:
            self.class_priors[c] = label_counts[c] / total_docs

        # Accumulate per-class counts of in-vocabulary tokens.
        class_word_counts = {c: defaultdict(int) for c in self.classes}
        class_total_words = {c: 0 for c in self.classes}

        for doc, label in zip(documents, labels):
            for word, count in self.extract_features(doc).items():
                class_word_counts[label][word] += count
                class_total_words[label] += count

        # Likelihood with Laplace smoothing:
        #   P(w|c) = (count(w,c) + alpha) / (total(c) + alpha * |V|)
        vocab_size = len(self.vocab)
        self.feature_probs = {}
        for c in self.classes:
            denom = class_total_words[c] + self.alpha * vocab_size
            self.feature_probs[c] = {
                word: (class_word_counts[c].get(word, 0) + self.alpha) / denom
                for word in self.vocab
            }

    def predict(self, document):
        """Predict the most likely class for one document.

        Parameters:
        document: document string to classify

        Returns:
        the class label with the highest log-posterior score
        """
        features = self.extract_features(document)

        class_scores = {}
        for c in self.classes:
            # Log prior; likelihoods are added in log space to avoid
            # underflow on long documents.
            score = math.log(self.class_priors[c])
            for word, count in features.items():
                if word in self.feature_probs[c]:
                    score += count * math.log(self.feature_probs[c][word])
            class_scores[c] = score

        return max(class_scores.items(), key=lambda x: x[1])[0]

    def evaluate(self, test_documents, test_labels):
        """Compute accuracy on a labelled test set.

        Parameters:
        test_documents: test documents
        test_labels: true label for each test document

        Returns:
        accuracy in [0, 1]; 0.0 for an empty test set
        """
        total = len(test_documents)
        if total == 0:
            # Fix: previously raised ZeroDivisionError on an empty set.
            return 0.0

        correct = sum(
            1 for doc, true_label in zip(test_documents, test_labels)
            if self.predict(doc) == true_label
        )
        return correct / total

def load_20newsgroups_data(data_path, test_ratio=0.2):
    """Load the 20_newsgroups corpus from a directory tree.

    Expects one sub-directory per category under *data_path*, each
    containing plain-text document files.

    Parameters:
    data_path: root directory of the dataset
    test_ratio: fraction of documents held out for the test set

    Returns:
    train_docs, train_labels, test_docs, test_labels, idx_to_class
    """
    # Sort so the category -> index mapping is deterministic across runs
    # (os.listdir order is platform/filesystem dependent).
    categories = sorted(d for d in os.listdir(data_path)
                        if os.path.isdir(os.path.join(data_path, d)))

    all_documents = []
    all_labels = []
    class_to_idx = {category: i for i, category in enumerate(categories)}
    idx_to_class = {i: category for category, i in class_to_idx.items()}

    print("加载数据...")

    # Read every file in every category folder.
    for category in categories:
        category_path = os.path.join(data_path, category)

        files = [f for f in os.listdir(category_path)
                 if os.path.isfile(os.path.join(category_path, f))]

        print(f"类别 '{category}': {len(files)} 个文档")

        for filename in files:
            file_path = os.path.join(category_path, filename)

            try:
                # errors='ignore': the corpus contains non-UTF-8 bytes.
                with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                    content = f.read()

                all_documents.append(content)
                all_labels.append(class_to_idx[category])

            except OSError as e:
                # Skip unreadable files but keep loading the rest.
                print(f"读取文件 {file_path} 时出错: {e}")

    print(f"总共加载了 {len(all_documents)} 个文档")

    # Fix: zip(*[]) raises ValueError, so guard the empty-corpus case.
    if not all_documents:
        return [], [], [], [], idx_to_class

    # Shuffle documents and labels together, then split.
    combined = list(zip(all_documents, all_labels))
    random.shuffle(combined)
    shuffled_docs, shuffled_labels = zip(*combined)

    split_idx = int(len(shuffled_docs) * (1 - test_ratio))

    train_docs = list(shuffled_docs[:split_idx])
    train_labels = list(shuffled_labels[:split_idx])
    test_docs = list(shuffled_docs[split_idx:])
    test_labels = list(shuffled_labels[split_idx:])

    print(f"训练集大小: {len(train_docs)}")
    print(f"测试集大小: {len(test_docs)}")

    return train_docs, train_labels, test_docs, test_labels, idx_to_class

def main():
    """Entry point: load the corpus, train the classifier, report results."""
    # Dataset location -- adjust to wherever 20_newsgroups was unpacked.
    dataset_dir = "20_newsgroups"

    if not os.path.exists(dataset_dir):
        print(f"错误: 数据路径 '{dataset_dir}' 不存在!")
        print("请确保20_newsgroups数据集已下载并解压到当前目录")
        return

    # Load and split the corpus.
    train_docs, train_labels, test_docs, test_labels, class_names = \
        load_20newsgroups_data(dataset_dir)

    # Build and train the classifier.
    print("\n训练朴素贝叶斯分类器...")
    model = NaiveBayesClassifier(alpha=1.0)
    model.fit(train_docs, train_labels)

    # Measure accuracy on the held-out split.
    print("\n评估模型性能...")
    accuracy = model.evaluate(test_docs, test_labels)
    print(f"测试集准确率: {accuracy:.4f} ({accuracy*100:.2f}%)")

    # Show a handful of example predictions.
    print("\n预测示例:")
    print("-" * 50)

    for idx, (doc, label_idx) in enumerate(zip(test_docs[:5], test_labels[:5])):
        actual = class_names[label_idx]
        guessed = class_names[model.predict(doc)]

        # First 100 characters of the document, flattened to one line.
        snippet = doc[:100].replace('\n', ' ') + "..."

        print(f"文档 {idx+1}:")
        print(f"  预览: {snippet}")
        print(f"  真实类别: {actual}")
        print(f"  预测类别: {guessed}")
        print(f"  是否正确: {'✓' if actual == guessed else '✗'}")
        print()

if __name__ == "__main__":
    main()
