import pandas as pd
import numpy as np

# Load the raw Iris data (CSV with no header row) and attach column names.
iris_path = 'iris/iris.data'
column_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class']
data = pd.read_csv(iris_path, header=None, names=column_names)

# Split the DataFrame into a feature matrix X and a label vector y.
X = data.iloc[:, :-1].values
y = data['class'].values

# 朴素贝叶斯分类器类
# Gaussian Naive Bayes classifier
class NaiveBayes:
    """Gaussian Naive Bayes classifier.

    Models each feature within each class as an independent 1-D Gaussian
    and predicts the class with the highest log-posterior
    (log prior + sum of per-feature log-likelihoods).
    """

    # Variance floor added to every per-class feature variance.  Without it,
    # a feature that is constant within a class has std == 0, which makes
    # the original pdf divide by zero and poisons the posteriors with nan/inf.
    _VAR_EPS = 1e-9

    def fit(self, X, y):
        """Estimate per-class priors, means, and standard deviations.

        X: 2-D array of shape (n_samples, n_features).
        y: 1-D array of class labels, length n_samples.
        """
        self.classes = np.unique(y)
        self.class_priors = {}
        self.params = {}

        # Prior = class frequency; parameters = per-feature mean/std.
        for cls in self.classes:
            X_cls = X[y == cls]
            self.class_priors[cls] = len(X_cls) / len(y)
            self.params[cls] = {
                'mean': X_cls.mean(axis=0),
                # Floor the variance before taking the square root so that
                # std is always strictly positive.
                'std': np.sqrt(X_cls.var(axis=0) + self._VAR_EPS)
            }

    def predict(self, X):
        """Return the predicted class label for each row of X."""
        predictions = []
        for x in X:
            posteriors = []
            for cls in self.classes:
                prior = np.log(self.class_priors[cls])
                # Sum log-densities directly instead of log(pdf(...)):
                # avoids exp() underflowing to 0 (and hence log(0) = -inf)
                # for points far from the class mean.
                likelihood = np.sum(self._log_pdf(cls, x))
                posteriors.append(prior + likelihood)
            predictions.append(self.classes[np.argmax(posteriors)])
        return np.array(predictions)

    def _log_pdf(self, cls, x):
        """Per-feature log of the Gaussian density N(mean, std^2) at x."""
        mean = self.params[cls]['mean']
        std = self.params[cls]['std']
        return -0.5 * np.log(2 * np.pi * std ** 2) - ((x - mean) ** 2) / (2 * std ** 2)

    def _pdf(self, cls, x):
        """Per-feature Gaussian density (kept for backward compatibility)."""
        return np.exp(self._log_pdf(cls, x))

# Hold out 20% of the samples for evaluation.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Fit the Naive Bayes classifier on the training split.
model = NaiveBayes()
model.fit(X_train, y_train)

# Score predictions on the held-out split.
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)

print(f"朴素贝叶斯分类器准确率: {accuracy:.2f}")