import numpy as np
class SimpleNaiveBayes:
    """A categorical naive Bayes classifier for discrete (e.g. string) features.

    After ``fit``, ``class_prob`` holds the prior P(label) for each class and
    ``feature_prob`` maps ``(label, feature_index)`` to a dict of
    ``{feature_value: P(value | label)}``.
    """

    def __init__(self):
        # P(label) for each class label observed in fit().
        self.class_prob = {}
        # (label, feature_index) -> {feature_value: conditional probability}.
        self.feature_prob = {}

    def fit(self, X, y):
        """Estimate class priors and per-feature conditional probabilities.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Discrete feature values.
        y : array-like of shape (n_samples,)
            Class labels, one per row of X.
        """
        X = np.asarray(X)
        y = np.asarray(y)
        n_samples = len(y)
        classes, counts = np.unique(y, return_counts=True)
        for label, count in zip(classes, counts):
            self.class_prob[label] = count / n_samples
        # Select each class's rows once up front; the original rescanned the
        # whole dataset for every (feature, class) pair.
        class_rows = {label: X[y == label] for label in classes}
        # Guard against empty X (the original crashed on X[0] with no rows).
        n_features = X.shape[1] if X.ndim > 1 else 0
        for feature in range(n_features):
            for label in classes:
                values, value_counts = np.unique(
                    class_rows[label][:, feature], return_counts=True
                )
                total = value_counts.sum()
                self.feature_prob[(label, feature)] = {
                    v: c / total for v, c in zip(values, value_counts)
                }

    def predict(self, X):
        """Return the most probable class label for each row of X.

        Accumulates log-probabilities to avoid floating-point underflow.
        """
        predictions = []
        for x in X:
            probs = {}
            for label in self.class_prob:
                log_prob = np.log(self.class_prob[label])
                for i, value in enumerate(x):
                    # Unseen feature values get a small fixed floor
                    # probability. NOTE: this is NOT Laplace (add-one)
                    # smoothing, despite what the original comment claimed.
                    p = self.feature_prob[(label, i)].get(value, 1e-10)
                    log_prob += np.log(p)
                probs[label] = log_prob
            predictions.append(max(probs, key=probs.get))
        return predictions
# --- Demo: classify fruit from (shape, colour) features -------------------
X = np.array([
    ['圆的', '红色'],
    ['圆的', '黄色'],
    ['圆的', '绿色'],
    ['长的', '黄色'],
    ['长的', '绿色'],
    ['圆的', '红色'],
])
y = np.array(['苹果', '橘子', '苹果', '香蕉', '香蕉', '苹果'])

# Fit the classifier on the toy dataset above.
nb = SimpleNaiveBayes()
nb.fit(X, y)

# Classify one unseen sample and print the predicted label.
test_X = np.array([['圆的', '红色']])
print("预测结果:", nb.predict(test_X))
