import numpy as np
from ucimlrepo import fetch_ucirepo

# Download the Iris dataset from the UCI repository (dataset id 53);
# this performs a network request on import of the script.
iris = fetch_ucirepo(id=53)

# Split the fetched bundle into the feature matrix and the target
# column (both are pandas DataFrames).
X, y = iris.data.features, iris.data.targets

class NaiveBayes:
    """Multinomial naive Bayes with Laplace (add-one) smoothing.

    NOTE(review): a multinomial model assumes count-like features; the
    iris measurements are continuous, so this is only an approximation
    of a proper Gaussian NB — confirm this is intended.
    """

    def __init__(self):
        self.classes_ = None
        self.class_log_prior_ = None
        self.feature_log_prob_ = None

    def fit(self, X, y):
        """Estimate log priors and per-class feature log-probabilities.

        Handles arbitrary label values, not just 0..n_classes-1: rows
        are selected by comparing y against the actual class labels
        (the original used np.bincount and ``y == i``, which silently
        breaks for non-contiguous labels and can take log(0)).
        """
        n_samples, n_features = X.shape
        # Sorted unique labels and how often each occurs.
        self.classes_, class_counts = np.unique(y, return_counts=True)
        n_classes = len(self.classes_)

        # log P(class): relative frequency of each label in y.
        self.class_log_prior_ = np.log(class_counts / n_samples)

        # log P(feature | class) with add-one smoothing so a feature
        # never seen in a class does not yield log(0).
        self.feature_log_prob_ = np.empty((n_classes, n_features))
        for i, cls in enumerate(self.classes_):
            X_class = X[y == cls]
            self.feature_log_prob_[i] = np.log(
                (X_class.sum(axis=0) + 1) / (X_class.sum() + n_features)
            )

    def predict(self, X):
        """Return the most probable class label for each row of X."""
        return self.classes_[np.argmax(self.predict_log_proba(X), axis=1)]

    def predict_log_proba(self, X):
        """Unnormalized joint log-probability, shape (n_samples, n_classes)."""
        return self.class_log_prior_ + X @ self.feature_log_prob_.T

def accuracy(y_true, y_pred):
    """Return the fraction of positions where y_true equals y_pred."""
    matches = np.equal(y_true, y_pred)
    return np.mean(matches)

class LabelEncoder:
    """Map arbitrary hashable labels to integer codes 0..n_classes-1."""

    def __init__(self):
        self.classes_ = []   # sorted unique labels seen by fit
        self.mapping_ = {}   # label -> integer code

    def fit(self, y):
        """Learn the sorted unique labels of y and their integer codes.

        Rebuilds ``mapping_`` from scratch so refitting on a new label
        set does not keep stale keys (the original only added entries,
        merging old and new mappings across successive fits).
        """
        self.classes_ = np.unique(y)
        self.mapping_ = {cls: i for i, cls in enumerate(self.classes_)}

    def transform(self, y):
        """Encode labels; raises KeyError for a label unseen during fit."""
        return np.array([self.mapping_[cls] for cls in y])

    def fit_transform(self, y):
        """Convenience wrapper: fit on y, then return its encoding."""
        self.fit(y)
        return self.transform(y)

# Map the string class names to integer codes 0..n_classes-1.
encoder = LabelEncoder()
y_encoded = encoder.fit_transform(y.values.ravel())

# Train the naive Bayes classifier and predict on the training data
# (training-set accuracy only — no train/test split here).
nb = NaiveBayes()
nb.fit(X.values, y_encoded)
predictions = nb.predict(X.values)

# Map the integer labels back to the original class names.
predicted_classes = [encoder.classes_[label] for label in predictions]

# Print the per-sample predictions and the overall accuracy.
print("分类结果:", predicted_classes)
# Bind the score to a new name instead of shadowing the `accuracy`
# function — the original `accuracy = accuracy(...)` rebinding made
# any later call to the function fail.
acc = accuracy(y_encoded, predictions)
print(f'准确率: {acc * 100:.2f}%')