import os
import re
import math
from collections import defaultdict

class NaiveBayesTextClassifier:
    """Multinomial naive Bayes text classifier with Laplace smoothing.

    Expects the 20-newsgroups "bydate" directory layout: one sub-directory
    per class, one document per file. Each word's log-likelihood is scaled
    by an IDF-like weight ``log(total_classes / classes_containing_word)``.
    """

    def __init__(self):
        # class -> word -> occurrence count
        self.class_word_count = defaultdict(lambda: defaultdict(int))
        # class -> total token count (likelihood denominator)
        self.class_total_words = defaultdict(int)
        # class -> document count (prior numerator)
        self.class_sample_count = defaultdict(int)
        self.vocab = set()
        self.total_samples = 0
        # word -> number of distinct classes the word occurs in
        self.word_class_count = defaultdict(int)
        self.total_classes = 0

        # Compiled once here instead of per preprocess() call (hot path).
        self._non_word_re = re.compile(r'[^\w\s]')
        self._digit_re = re.compile(r'\d+')

        self.STOP_WORDS = {
            "the", "a", "an", "and", "or", "but", "is", "are", "was", "were", 
            "in", "on", "at", "to", "for", "of", "with", "by", "from", "about",
            "i", "you", "he", "she", "it", "we", "they", "me", "him", "her",
            "my", "your", "his", "its", "our", "their", "this", "that", "these",
            "those", "be", "been", "being", "have", "has", "had", "do", "does",
            "did", "will", "would", "shall", "should", "may", "might", "can",
            "could", "must", "let", "subject", "re", "writes", "write", "said",
            "get", "make", "know", "like", "go", "take", "see", "come", "think"
        }

    def preprocess(self, text):
        """Tokenize a raw document into lowercase content words.

        Drops everything before the first blank line (assumed to be the
        message header), removes punctuation and digits, and filters out
        stop words and tokens of length <= 2.

        :param text: raw document text.
        :return: list of filtered word tokens.
        """
        # The header (if any) ends at the first blank line; keep only the body.
        _, sep, body = text.partition('\n\n')
        if sep:
            text = body

        text = text.lower()
        text = self._non_word_re.sub('', text)
        text = self._digit_re.sub('', text)

        return [
            word for word in text.split()
            if len(word) > 2 and word not in self.STOP_WORDS
        ]

    def _iter_documents(self, root_dir):
        """Yield ``(class_name, text)`` for every readable file under root_dir.

        Each immediate sub-directory of root_dir is treated as a class label;
        unreadable files are reported and skipped. Shared by train/evaluate
        so the directory-walking logic exists in one place.
        """
        for class_name in os.listdir(root_dir):
            class_dir = os.path.join(root_dir, class_name)
            if not os.path.isdir(class_dir):
                continue
            for filename in os.listdir(class_dir):
                file_path = os.path.join(class_dir, filename)
                try:
                    # latin-1 maps every byte, so reading never raises a
                    # decode error on arbitrary news files.
                    with open(file_path, 'r', encoding='latin-1') as f:
                        text = f.read()
                except OSError as e:
                    print(f"读取文件{file_path}失败：{e}")
                    continue
                yield class_name, text

    def train(self, train_dir):
        """Accumulate word/class statistics from train_dir.

        :param train_dir: directory with one sub-directory per class.
        """
        for class_name, text in self._iter_documents(train_dir):
            words = self.preprocess(text)

            self.class_sample_count[class_name] += 1
            self.total_samples += 1
            for word in words:
                self.class_word_count[class_name][word] += 1
                self.class_total_words[class_name] += 1
                self.vocab.add(word)

        self.total_classes = len(self.class_sample_count)
        # Class-level document frequency: in how many classes each word occurs.
        for class_name in self.class_word_count:
            for word in self.class_word_count[class_name]:
                self.word_class_count[word] += 1

        print(f"训练完成！")
        print(f"总类别数：{self.total_classes}，总样本数：{self.total_samples}，词汇表大小：{len(self.vocab)}")

    def predict(self, text):
        """Return the most probable class for ``text``, or None if untrained."""
        # Robustness: an untrained model has no classes; the original loop
        # would simply fall through and return None — make that explicit and
        # avoid computing weights with total_classes == 0.
        if not self.class_sample_count:
            return None

        words = self.preprocess(text)
        vocab_size = len(self.vocab)

        # The IDF-like weight does not depend on the class, so compute it once
        # per unique word instead of once per (class, word) pair.
        # NOTE(review): unseen words fall back to a class count of 1, giving
        # out-of-vocabulary words the *maximum* weight — confirm intended.
        weights = {
            word: math.log(self.total_classes / self.word_class_count.get(word, 1))
            for word in set(words)
        }

        max_log_prob = -float('inf')
        pred_class = None

        for class_name in self.class_sample_count:
            prior = self.class_sample_count[class_name] / self.total_samples
            log_prior = math.log(prior)

            log_cond_prob = 0.0
            class_total = self.class_total_words[class_name]
            counts = self.class_word_count[class_name]

            for word in words:
                # Laplace (add-one) smoothing over the class vocabulary.
                prob = (counts.get(word, 0) + 1) / (class_total + vocab_size)
                log_cond_prob += math.log(prob) * weights[word]

            total_log_prob = log_prior + log_cond_prob
            if total_log_prob > max_log_prob:
                max_log_prob = total_log_prob
                pred_class = class_name
        return pred_class

    def evaluate(self, test_dir):
        """Classify every document under test_dir.

        :param test_dir: directory with one sub-directory per class.
        :return: ``(accuracy, correct, total)``; ``(0.0, 0, 0)`` if no samples.
        """
        print("\n=== 测试集结构检查 ===")
        class_count = 0
        total_files = 0
        for class_name in os.listdir(test_dir):
            class_dir = os.path.join(test_dir, class_name)
            if os.path.isdir(class_dir):
                class_count += 1
                file_num = len(os.listdir(class_dir))
                total_files += file_num
                print(f"类别{class_name}：{file_num}个文件")
        # NOTE(review): the standard 20news-bydate test split has 7532 files —
        # verify the "7539" in the message below against the dataset in use.
        print(f"测试集总类别数：{class_count}（标准应为20），总文件数：{total_files}（标准应为7539）")

        print("\n=== 开始评估 ===")
        correct = 0
        total = 0
        for class_name, text in self._iter_documents(test_dir):
            if self.predict(text) == class_name:
                correct += 1
            total += 1

        if total == 0:
            print("无有效测试样本！")
            return 0.0, 0, 0
        accuracy = correct / total
        return accuracy, correct, total

def _main():
    """Train on the 20-newsgroups train split and report test-set accuracy."""
    train_dir = "./20news-bydate-train"
    test_dir = "./20news-bydate-test"

    classifier = NaiveBayesTextClassifier()
    classifier.train(train_dir)

    accuracy, correct, total = classifier.evaluate(test_dir)
    print(f"\n=== 评估结果 ===")
    print(f"测试集准确率：{accuracy:.4f}")
    print(f"正确预测数：{correct}/{total}")


if __name__ == "__main__":
    _main()
