import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report

# 自定义高斯朴素贝叶斯类
class GaussianNB:
    """Gaussian Naive Bayes classifier.

    Models each feature within each class as an independent 1-D Gaussian
    and classifies by the maximum log-posterior:

        argmax_c  log P(y=c) + sum_j log N(x_j | mean_cj, var_cj)

    All probability work is done in log space to avoid numerical underflow.
    """

    def __init__(self):
        self.classes = None   # unique class labels seen during fit
        self.priors = {}      # class label -> prior probability P(y=c)
        self.means = {}       # class label -> per-feature mean vector
        self.variances = {}   # class label -> per-feature variance vector

    def fit(self, X, y):
        """Estimate per-class priors, feature means and feature variances.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training feature matrix.
        y : array-like of shape (n_samples,)
            Class labels.

        Returns
        -------
        self : fitted estimator (enables ``GaussianNB().fit(X, y)`` chaining).
        """
        X = np.asarray(X)
        y = np.asarray(y)
        self.classes = np.unique(y)
        n_samples = X.shape[0]

        for c in self.classes:
            X_c = X[y == c]
            self.priors[c] = X_c.shape[0] / n_samples  # empirical P(y=c)
            self.means[c] = X_c.mean(axis=0)
            self.variances[c] = X_c.var(axis=0)
        return self

    def _log_gaussian_pdf(self, x, mean, var):
        # Log of the Gaussian density, computed entirely in log space.
        # The 1e-9 variance smoothing guards against zero variance
        # (constant features within a class).
        var = var + 1e-9
        return -0.5 * (np.log(2 * np.pi * var) + ((x - mean) ** 2) / var)

    def _gaussian_pdf(self, x, mean, var):
        # Probability-space density, kept for backward compatibility with
        # any caller of the old private helper. Prefer _log_gaussian_pdf
        # internally: exponentiating can underflow to 0.0 for distant x.
        return np.exp(self._log_gaussian_pdf(x, mean, var))

    def predict(self, X):
        """Return the predicted class label for each row of X.

        Fix vs. the original: the old code exponentiated the log-density
        and then immediately re-applied ``np.log`` — for samples far from a
        class mean the density underflows to 0.0, every class scores -inf,
        and ``argmax`` degenerates to always picking the first class. We
        now sum log-densities directly, so scores stay finite and ordered.
        """
        X = np.asarray(X)
        log_posteriors = []
        for c in self.classes:
            log_prior = np.log(self.priors[c])
            # Naive-Bayes independence: sum per-feature log-densities.
            log_likelihood = np.sum(
                self._log_gaussian_pdf(X, self.means[c], self.variances[c]),
                axis=1,
            )
            log_posteriors.append(log_prior + log_likelihood)
        # Row = class, column = sample; pick the best class per sample.
        return self.classes[np.argmax(log_posteriors, axis=0)]

# 实例化模型并训练
# Train the custom Gaussian NB model on the scaled training data
# (fit returns self, so construction and fitting chain into one line).
gnb = GaussianNB().fit(X_train_scaled, y_train)

# Predict labels for the scaled test set
y_pred = gnb.predict(X_test_scaled)

# Emit the evaluation report: accuracy, confusion matrix, per-class metrics
for section in (
    "\n=== 测试集评估结果 ===",
    f"准确率: {accuracy_score(y_test, y_pred):.4f}",
    "\n混淆矩阵:",
    confusion_matrix(y_test, y_pred),
    "\n分类报告:",
    classification_report(y_test, y_pred, target_names=target_names),
):
    print(section)