import math
import re
import random
from collections import defaultdict, Counter

class NaiveBayesTextClassifier:
    """Multinomial naive Bayes text classifier with Laplace smoothing.

    Documents are tokenized on whitespace after punctuation and digits are
    removed. Tokenization keeps word characters from any script, so the
    classifier works both for Latin-alphabet text and for pre-segmented
    (space-separated) CJK text such as the corpus built by
    ``generate_sample_data``.
    """

    def __init__(self):
        self.class_word_counts = defaultdict(Counter)  # per-class word frequencies
        self.class_total_words = defaultdict(int)      # per-class total token count
        self.class_doc_counts = defaultdict(int)       # per-class document count
        self.vocab = set()                             # vocabulary over all classes
        self.total_docs = 0                            # number of training documents
        self.alpha = 1.0                               # Laplace smoothing constant

    def train(self, texts, labels):
        """Fit the model on parallel lists of documents and class labels.

        Args:
            texts: list of raw document strings.
            labels: list of class labels, one per document.
        """
        self.total_docs = len(texts)

        for text, label in zip(texts, labels):
            self.class_doc_counts[label] += 1
            words = self._preprocess(text)
            self.vocab.update(words)

            # Accumulate per-class word frequencies and token totals.
            for word in words:
                self.class_word_counts[label][word] += 1
                self.class_total_words[label] += 1

    def predict(self, text):
        """Return the most probable class label for ``text``.

        Scores are computed in log space to avoid floating-point underflow
        on long documents. Returns None if the model has not been trained.
        """
        words = self._preprocess(text)
        max_log_prob = -float('inf')
        best_class = None

        for label in self.class_doc_counts:
            # Log prior: log P(class) = log(docs in class / total docs).
            log_prior = math.log(self.class_doc_counts[label] / self.total_docs)

            # Log likelihood: sum of log P(word | class) over the tokens,
            # Laplace-smoothed so unseen words never yield log(0).
            log_likelihood = 0.0
            vocab_size = len(self.vocab)
            total_words_in_class = self.class_total_words[label]

            for word in words:
                # P(word|class) = (count + alpha) / (class tokens + alpha * |V|)
                word_count = self.class_word_counts[label].get(word, 0)
                prob = (word_count + self.alpha) / (total_words_in_class + self.alpha * vocab_size)
                log_likelihood += math.log(prob)

            total_log_prob = log_prior + log_likelihood

            # Track the argmax over classes.
            if total_log_prob > max_log_prob:
                max_log_prob = total_log_prob
                best_class = label

        return best_class

    def evaluate(self, texts, labels):
        """Return classification accuracy over a labelled dataset (0.0 if empty)."""
        correct = sum(
            1 for text, label in zip(texts, labels) if self.predict(text) == label
        )
        return correct / len(texts) if texts else 0.0

    def _preprocess(self, text):
        r"""Lowercase, strip punctuation and digits, and split on whitespace.

        BUG FIX: the previous pattern ``[^a-z\s]`` deleted every non-ASCII
        character, which reduced any non-Latin corpus (including this file's
        own Chinese sample data) to empty token lists, leaving predictions
        driven by class priors alone. ``\w`` keeps letters from all scripts.
        """
        text = text.lower()
        # Drop punctuation/symbols; keep word characters from any script.
        text = re.sub(r'[^\w\s]', '', text)
        # Drop digits and underscores to preserve the original letters-only intent.
        text = re.sub(r'[\d_]', '', text)
        # str.split() already discards empty tokens.
        return text.split()


# 生成模拟文本数据（用于测试）
def generate_sample_data(n_samples=1000):
    """Build a synthetic labelled corpus spanning four topic categories.

    Each document is 10-20 space-separated tokens: roughly 70% are drawn
    from the category's keyword pool and the remainder are filler words
    shared by every category (so they carry no class signal).

    Args:
        n_samples: number of (text, label) pairs to generate.

    Returns:
        A ``(texts, labels)`` pair of equal-length lists.
    """
    # Keyword pool for each topic.
    class_features = {
        "科技": ["人工智能", "编程", "算法", "计算机", "数据", "网络", "芯片", "软件"],
        "体育": ["足球", "比赛", "运动员", "教练", "进球", "奥运会", "篮球", "训练"],
        "娱乐": ["电影", "明星", "音乐", "演唱会", "电视剧", "票房", "导演", "演员"],
        "政治": ["政府", "政策", "选举", "法律", "会议", "领导人", "议会", "改革"]
    }
    # Filler tokens common to all topics.
    common_words = ["的", "是", "在", "有", "和", "就", "也", "都", "而", "及"]

    categories = list(class_features.keys())
    texts = []
    labels = []

    for _ in range(n_samples):
        topic = random.choice(categories)
        size = random.randint(10, 20)
        n_topical = int(size * 0.7)  # ~70% topical keywords

        tokens = [random.choice(class_features[topic]) for _ in range(n_topical)]
        tokens += [random.choice(common_words) for _ in range(size - n_topical)]
        random.shuffle(tokens)  # mix topical and filler words

        labels.append(topic)
        texts.append(" ".join(tokens))

    return texts, labels


# 测试代码
if __name__ == "__main__":
    # Build a synthetic labelled corpus.
    print("生成模拟文本数据...")
    all_texts, all_labels = generate_sample_data(n_samples=1000)

    # 70/30 train/test split (the generated data is already in random order).
    cut = int(0.7 * len(all_texts))
    tr_texts, te_texts = all_texts[:cut], all_texts[cut:]
    tr_labels, te_labels = all_labels[:cut], all_labels[cut:]

    # Fit the classifier on the training split.
    print("训练朴素贝叶斯分类器...")
    model = NaiveBayesTextClassifier()
    model.train(tr_texts, tr_labels)

    # Report accuracy on both splits.
    acc_train = model.evaluate(tr_texts, tr_labels)
    acc_test = model.evaluate(te_texts, te_labels)
    print(f"训练集准确率: {acc_train:.4f}")
    print(f"测试集准确率: {acc_test:.4f}")

    # Classify a few hand-written sentences.
    demo_sentences = [
        "人工智能算法在计算机领域的应用越来越广泛",
        "足球比赛中运动员表现出色，最终赢得了胜利",
        "这部电影由著名导演执导，票房突破十亿"
    ]
    print("\n单条文本预测结果:")
    for sentence in demo_sentences:
        print(f"文本: {sentence}")
        print(f"预测类别: {model.predict(sentence)}\n")
