from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import numpy as np

# 创建综合数据集
from sklearn.datasets import make_classification

# Build a synthetic binary-classification dataset shared by all three models.
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2, 
                          n_informative=15, random_state=42)

# MultinomialNB cannot accept negative input values. Shift each feature so
# its minimum is 0 instead of taking np.abs(X): a per-feature shift keeps the
# class-conditional distributions intact (GaussianNB is shift-invariant),
# whereas abs folds the distributions around 0 and biases the comparison
# against the continuous-data models.
X = X - X.min(axis=0)

# Candidate naive Bayes variants to compare on the same dataset.
models = dict(
    GaussianNB=GaussianNB(),        # suited to continuous features
    MultinomialNB=MultinomialNB(),  # suited to count features
    BernoulliNB=BernoulliNB(),      # suited to binary features
)

# Score every model with 5-fold cross-validation and keep the mean accuracy.
results = {}
for model_name, estimator in models.items():
    fold_scores = cross_val_score(estimator, X, y, cv=5, scoring='accuracy')
    mean_acc = fold_scores.mean()
    results[model_name] = mean_acc
    print(f"{model_name} 平均准确率: {mean_acc:.3f} (+/- {fold_scores.std() * 2:.3f})")

# Bar chart comparing mean cross-validated accuracy across the variants.
plt.figure(figsize=(10, 6))
bar_colors = ['skyblue', 'lightcoral', 'lightgreen']
plt.bar(results.keys(), results.values(), color=bar_colors)
plt.title('不同朴素贝叶斯变体性能比较')
plt.ylabel('准确率')
plt.ylim(0, 1)
# Annotate each bar with its exact score just above the bar top.
for idx, (model_name, acc) in enumerate(results.items()):
    plt.text(idx, acc + 0.01, f'{acc:.3f}', ha='center')
plt.show()