"""
朴素贝叶斯算法（适配自定义.data/.label文件）
- 需将train.data、train.label、test.data、test.label与代码放在同一目录
- 功能：加载预处理特征数据，训练朴素贝叶斯分类器并评估准确率
"""

import numpy as np

# ---------------------- 1. Data loading ----------------------
def load_data(data_path, label_path):
    """Load a .data features file and a parallel .label file.

    Each line of the data file holds one sample's features as
    whitespace-separated tokens (e.g. word indices / term counts); the
    corresponding line of the label file holds that sample's integer
    class label.

    Args:
        data_path: path to the features file.
        label_path: path to the labels file.

    Returns:
        (features, labels, num_classes) where features is a list of token
        lists, labels is a parallel list of ints, and num_classes is the
        number of distinct labels observed.
    """
    features = []
    labels = []
    # Explicit encoding so results do not depend on the platform locale.
    with open(data_path, 'r', encoding='utf-8') as f_data, \
         open(label_path, 'r', encoding='utf-8') as f_label:
        for data_line, label_line in zip(f_data, f_label):
            label_text = label_line.strip()
            if not label_text:
                # Skip blank label lines (e.g. a trailing newline at EOF)
                # that would otherwise crash int() below.
                continue
            features.append(data_line.strip().split())
            labels.append(int(label_text))
    return features, labels, len(set(labels))

# ---------------------- 2. Naive Bayes classifier (discrete features) ----------------------
class NaiveBayesClassifier:
    """Multinomial naive Bayes for discrete (token) features.

    Likelihoods are estimated with Laplace (add-alpha) smoothing, and
    prediction works in log space to avoid floating-point underflow.
    """

    def __init__(self, alpha=1.0):
        self.alpha = alpha              # Laplace smoothing coefficient
        self.class_prior = {}           # P(C) per class
        self.feature_prob = {}          # P(feature | C): class -> {token: prob}
        self.classes = None             # set of class labels seen in fit()
        self.vocab = set()              # all feature tokens seen in training
        self.class_feature_total = {}   # total token count per class (for smoothing)

    def fit(self, features, labels):
        """Estimate priors P(C) and smoothed likelihoods P(f|C).

        Args:
            features: list of samples, each a list of hashable tokens.
            labels: parallel list of integer class labels.
        """
        self.classes = set(labels)
        total_samples = len(labels)

        # Priors: relative frequency of each class in the training set.
        class_count = {c: 0 for c in self.classes}
        for label in labels:
            class_count[label] += 1
        for c in self.classes:
            self.class_prior[c] = class_count[c] / total_samples

        # Token occurrence counts per class, plus total tokens per class.
        feature_class_count = {c: {} for c in self.classes}
        self.class_feature_total = {c: 0 for c in self.classes}
        for feat, label in zip(features, labels):
            counts = feature_class_count[label]
            for f in feat:
                self.vocab.add(f)
                counts[f] = counts.get(f, 0) + 1
                self.class_feature_total[label] += 1

        vocab_size = len(self.vocab)

        # Laplace-smoothed conditional probabilities over the full vocabulary.
        for c in self.classes:
            total = self.class_feature_total[c] + self.alpha * vocab_size
            self.feature_prob[c] = {
                f: (feature_class_count[c].get(f, 0) + self.alpha) / total
                for f in self.vocab
            }

    def predict(self, features):
        """Return the most probable class for one sample (a token list)."""
        max_log_prob = -np.inf
        best_class = None
        vocab_size = len(self.vocab)

        for c in self.classes:
            log_prob = np.log(self.class_prior[c])  # log of the prior
            # Hoisted out of the token loop: the smoothed probability of a
            # token never seen during training is identical for every such
            # token, so compute its log once per class.
            unseen_log = np.log(
                self.alpha / (self.class_feature_total[c] + self.alpha * vocab_size)
            )
            probs = self.feature_prob[c]
            for f in features:
                if f in probs:
                    log_prob += np.log(probs[f])
                else:
                    log_prob += unseen_log
            if log_prob > max_log_prob:
                max_log_prob = log_prob
                best_class = c
        return best_class

# ---------------------- 3. Model training and evaluation ----------------------
if __name__ == "__main__":
    # Load the training and test splits (files must sit next to this script).
    train_features, train_labels, num_classes = load_data('train.data', 'train.label')
    test_features, test_labels, _ = load_data('test.data', 'test.label')

    print(f"训练集规模：{len(train_features)}条记录，{num_classes}个类别")
    print(f"测试集规模：{len(test_features)}条记录")

    # Initialise and train the model.
    nb_model = NaiveBayesClassifier(alpha=1.0)
    nb_model.fit(train_features, train_labels)

    # Test-set accuracy; guard against an empty test set so we never
    # divide by zero.
    correct = sum(
        1
        for feat, label in zip(test_features, test_labels)
        if nb_model.predict(feat) == label
    )
    accuracy = correct / len(test_labels) if test_labels else 0.0
    print(f"测试集准确率：{accuracy:.4f}")