
from sklearn.metrics import confusion_matrix, recall_score


from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import label_binarize, LabelBinarizer
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score, precision_score, f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
# Load the dataset (expected file: vice.csv; first column is the
# ad-conversion count, the remaining columns are features).
data = pd.read_csv('vice.csv')

# Binarize the target: any non-zero conversion count becomes 1, zero stays 0.
data.iloc[:, 0] = (data.iloc[:, 0] != 0).astype(int)

# Feature matrix: every column except the first.
X = data.iloc[:, 1:]

# Target vector: the (now binary) first column.
y = data.iloc[:, 0]

# Hold out 20% of the rows as the test set; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)

# Base learners shared by both ensembles.
svm_clf = SVC(kernel='linear', probability=True)  # probability=True is required for soft voting
bayes_clf = GaussianNB()
fisher_clf = LinearDiscriminantAnalysis(n_components=1)  # only used by the stacking ensemble below
tree_clf = DecisionTreeClassifier(max_depth=4, random_state=42)
knn_clf = KNeighborsClassifier(n_neighbors=15)

# Soft-voting ensemble: averages the predicted class probabilities of
# the base learners (fisher_clf is deliberately left out here).
voting_estimators = [
    ('svm', svm_clf),
    ('bayes', bayes_clf),
    ('tree', tree_clf),
    ('knn', knn_clf),
]
voting_clf = VotingClassifier(estimators=voting_estimators, voting='soft')
# Fit the voting ensemble on the training split.
voting_clf.fit(X_train, y_train)

# Also fit each base learner on its own so their individual
# test-set performance can be reported below.
for clf in [svm_clf, bayes_clf, tree_clf, knn_clf]:
    clf.fit(X_train, y_train)

# Report held-out performance for each individual (non-ensemble) model.
models = {'SVM': svm_clf, 'Naive Bayes': bayes_clf,
          'Decision Tree': tree_clf, 'KNN': knn_clf}

for model_name, model in models.items():
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    # Weighted averaging weights each class's score by its support,
    # which accounts for class imbalance in the binary target.
    recall = recall_score(y_test, y_pred, average='weighted')
    precision = precision_score(y_test, y_pred, average='weighted')
    f1 = f1_score(y_test, y_pred, average='weighted')
    # Fix: the original format string lacked spaces after "Recall:" and the
    # comma before "F1", producing inconsistent output vs. the other metrics.
    print(f'{model_name} - Accuracy: {accuracy:.4f}, Precision: {precision:.4f}, '
          f'Recall: {recall:.4f}, F1 Score: {f1:.4f}', '\n')

# Evaluate the soft-voting ensemble on the same held-out test set.
y_pred = voting_clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred, average='weighted')
# Fix: recall was computed and printed for every base model but omitted
# here, leaving the ensemble/base-model comparison incomplete.
recall = recall_score(y_test, y_pred, average='weighted')
f1 = f1_score(y_test, y_pred, average='weighted')
print('\n')
print("voting_clf", f' - Accuracy: {accuracy:.4f}, Precision: {precision:.4f}, '
      f'Recall: {recall:.4f}, F1 Score: {f1:.4f}')

# Stacking ensemble: out-of-fold predictions from the base learners
# (including the Fisher/LDA model) feed a logistic-regression meta-learner.
stacking_clf = StackingClassifier(
    estimators=[
        ('svm', svm_clf),
        ('bayes', bayes_clf),
        ('fisher', fisher_clf),
        ('tree', tree_clf),
        ('knn', knn_clf)
    ],
    final_estimator=LogisticRegression(),
    cv=5  # 5-fold CV generates the out-of-fold meta-features
)

stacking_clf.fit(X_train, y_train)

# Evaluate the stacking ensemble on the held-out test set.
y_pred = stacking_clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred, average='weighted')
# Fix: recall was reported for the base models but omitted here;
# added for a like-for-like comparison across all models.
recall = recall_score(y_test, y_pred, average='weighted')
f1 = f1_score(y_test, y_pred, average='weighted')
print("stacking_clf", f' - Accuracy: {accuracy:.4f}, Precision: {precision:.4f}, '
      f'Recall: {recall:.4f}, F1 Score: {f1:.4f}')