import numpy as np
import requests

# 定义贝叶斯分类器类
class NaiveBayes:
    """Gaussian Naive Bayes classifier.

    Each feature is modeled per class as an independent 1-D Gaussian;
    prediction picks the class maximizing log-prior + sum of log-likelihoods.
    """

    def fit(self, X, y):
        """Estimate per-class feature means, variances, and priors.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
        y : ndarray of shape (n_samples,) — class labels (any hashable values)
        """
        n_samples, n_features = X.shape
        self.classes = np.unique(y)
        n_classes = len(self.classes)

        self.mean = np.zeros((n_classes, n_features), dtype=np.float64)
        self.var = np.zeros((n_classes, n_features), dtype=np.float64)
        self.prior = np.zeros(n_classes, dtype=np.float64)

        # Index by enumeration position, NOT by the label value itself.
        # The original code used the label as a row index, which only works
        # when labels happen to be exactly 0..n_classes-1.
        for idx, c in enumerate(self.classes):
            X_c = X[y == c]
            self.mean[idx, :] = X_c.mean(axis=0)
            # Small smoothing term keeps the variance strictly positive so
            # _pdf never divides by zero on a within-class-constant feature.
            self.var[idx, :] = X_c.var(axis=0) + 1e-9
            self.prior[idx] = X_c.shape[0] / float(n_samples)

    def predict(self, X):
        """Return a list with the predicted class label for each row of X."""
        y_pred = [self._predict(x) for x in X]
        return y_pred

    def _predict(self, x):
        """Predict the label of a single sample via maximum log-posterior."""
        posteriors = []

        for idx, c in enumerate(self.classes):
            prior = np.log(self.prior[idx])
            # Sum of per-feature log-likelihoods (features assumed independent).
            class_conditional = np.sum(np.log(self._pdf(idx, x)))
            posterior = prior + class_conditional
            posteriors.append(posterior)

        return self.classes[np.argmax(posteriors)]

    def _pdf(self, class_idx, x):
        """Gaussian probability density of sample x under class `class_idx`."""
        mean = self.mean[class_idx]
        var = self.var[class_idx]
        numerator = np.exp(- (x - mean) ** 2 / (2 * var))
        denominator = np.sqrt(2 * np.pi * var)
        return numerator / denominator


# 加载鸢尾花数据集
def load_iris(file_path='D:\\系统默认\\桌面\\机器学习作业\\贝叶斯\\iris.data'):
    """Load the iris dataset from a CSV file.

    Each non-empty line is `f1,f2,f3,f4,label`. Returns a tuple
    (features, labels): a float ndarray of shape (n, 4) and an int
    ndarray of shape (n,) with labels mapped to {0, 1, 2}.

    Raises KeyError for an unknown species name and ValueError for a
    non-numeric feature field.
    """
    with open(file_path, 'r') as file:
        data = file.read().split('\n')

    # Skip blank lines; strip each line so CRLF files ('\r' residue) and
    # stray whitespace don't break float() or the label lookup below.
    data = [row.strip().split(',') for row in data if row.strip()]

    features = np.array([list(map(float, row[:-1])) for row in data])
    labels = np.array([row[-1].strip() for row in data])

    label_mapping = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
    labels = np.array([label_mapping[label] for label in labels])

    return features, labels


# 定义训练集和测试集划分函数
def train_test_split(features, labels, test_size=0.2, random_state=None):
    """Shuffle the data and split it into train/test partitions.

    Returns (train_features, test_features, train_labels, test_labels),
    where the test partition holds the last `test_size` fraction of the
    shuffled order. Seeding with `random_state` makes the split repeatable.
    """
    np.random.seed(random_state)
    # permutation(n) == shuffle of arange(n) under the seeded global state.
    order = np.random.permutation(len(labels))
    cut = int(len(labels) * (1 - test_size))
    train_idx, test_idx = order[:cut], order[cut:]
    return (
        features[train_idx],
        features[test_idx],
        labels[train_idx],
        labels[test_idx],
    )


# 定义特征标准化函数
def standardize_features(features):
    """Z-score each column: subtract its mean and divide by its std.

    A constant column has std == 0; substituting 1 for those entries
    avoids the division-by-zero that would otherwise fill the column
    with NaN (the centered values are already 0, so they stay 0).
    """
    mean = np.mean(features, axis=0)
    std = np.std(features, axis=0)
    std = np.where(std == 0, 1.0, std)
    standardized_features = (features - mean) / std
    return standardized_features


# Load the iris dataset.
features, labels = load_iris()

# Split into training and test sets.
train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size=0.2, random_state=42)

# Standardize using statistics fitted on the TRAINING split only, then
# apply the same transform to the test split. Standardizing each split
# independently (the previous behavior) leaks test-set statistics and
# puts the two splits on slightly different scales.
train_mean = np.mean(train_features, axis=0)
train_std = np.std(train_features, axis=0)
train_features = (train_features - train_mean) / train_std
test_features = (test_features - train_mean) / train_std

# Train the model.
model = NaiveBayes()
model.fit(train_features, train_labels)

# Predict on the held-out test set.
y_pred = model.predict(test_features)

# Print each sample's predicted vs. actual class.
for i in range(len(y_pred)):
    print(f"样本 {i+1}: 预测类别为 {y_pred[i]}, 实际类别为 {test_labels[i]}")

# Accuracy = fraction of exact matches (elementwise ndarray comparison).
accuracy = np.mean(y_pred == test_labels)
print(f'准确率: {accuracy}')
