# import numpy as np
# import matplotlib.pyplot as plt
# from sklearn.datasets import load_iris
# from sklearn.linear_model import SGDClassifier
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import accuracy_score
#
#
# plt.rcParams['font.sans-serif'] = ['SimHei']
# plt.rcParams['axes.unicode_minus'] = False
#
# # 定义sign函数
# def function_sign(w,x):
#     result=np.sum(w*x)
#     if result>=0:
#         return 1
#     else:
#         return -1
#
# # 权值更新函数
# def mySGD(x,y,w,rate):
#     t=function_sign(w,x)
#     f=True
#     if y!=t:
#         f=False
#     return w+rate*(y-t)*x,f
#
# # 调用主函数
# def function_main(x,y,w,rate):
#     cols=x.shape[0]
#     epoch=0
#     while True:
#         epoch+=1
#         flag = 1
#         i=0
#         while i<cols:
#             w,f= mySGD(x[i],y[i],w,rate)
#             i += 1
#             if f!=True:
#                 flag=0
#         if 1==flag:
#             return w,epoch
#
# # 获取正确率
# def getAccuracyRate(w,x,y):
#     cols=x.shape[0]
#     count=0
#     i=0
#     while i< cols:
#         if function_sign(w,x[i])==y[i]:
#             count+=1
#         i+=1
#     return count/cols
#
# # 加载Iris数据集
# iris = load_iris()
# data = iris.data
# target = iris.target
#
# # 筛选Setosa和Versicolour这两类数据的样本
# # 0表示Setosa，1表示Versicolour
# mask = np.isin(target, [0, 1])
# filtered_data = data[mask]
# filtered_target = target[mask]
# filtered_target[:50] = -1
# # 选择只包含sepal length (cm)和sepal width (cm)这两个属性的数据
# # 假设这些属性在数据集中的列索引分别为0和1
# selected_features = filtered_data[:, [0, 1]]
#
# # 输出结果以验证
# # print("筛选后的数据（只包含sepal length和sepal width）：")
# # print(selected_features)
# # print("筛选后的类别标签：")
# # print(filtered_target)
#
# #在每一个输入样本后面补-1
# num_rows, num_cols = selected_features.shape
# new_column = np.full((num_rows, 1), -1)
# selected_features = np.hstack((selected_features, new_column))
#
# #划分训练集和测试集
# rows_to_extract1 = np.r_[0:40, 50:90]
# rows_to_extract2 = np.r_[40:50, 90:100]
# trainingSet_x=selected_features[rows_to_extract1,:]
# testSet_x=selected_features[rows_to_extract2,:]
# trainingSet_y=np.zeros(80)
# testSet_y=np.zeros(20)
# trainingSet_y[0:40]=filtered_target[0:40]
# trainingSet_y[40:80]=filtered_target[50:90]
# testSet_y[0:10]=filtered_target[40:50]
# testSet_y[10:20]=filtered_target[90:100]
# rate=0.01
# w=[1,1,0]
#
# w,epoch=function_main(trainingSet_x,trainingSet_y,w,rate)
#
# print("错误率: ",1-getAccuracyRate(w,testSet_x,testSet_y))
# #绘图
# slope=-w[0]/w[1]
# intercept=w[2]/w[1]
# line_x = np.linspace(4, 7, 20)  # 生成用于绘制直线的x值
# line_y = slope * line_x + intercept
#
# plt.figure(figsize=(6, 4))
# plt.scatter(selected_features[:, 0],selected_features[:, 1], c=filtered_target, cmap='viridis', edgecolor='k', s=50)
# plt.xlabel('Sepal Length (cm)')
# plt.ylabel('Sepal Width (cm)')
# plt.title('原始数据散点图')
# plt.plot(line_x, line_y, color='red')
#
# fig, axs = plt.subplots(1, 2, figsize=(12, 4))
# axs[0].scatter(trainingSet_x[:,0],trainingSet_x[:,1], c=trainingSet_y, cmap='viridis', edgecolor='k', s=50)
# axs[0].set_title('训练集分类散点图')
# axs[0].set_xlabel('Sepal Length (cm)')
# axs[0].set_ylabel('Sepal Width (cm)')
# axs[0].plot(line_x, line_y, color='red')
#
# axs[1].scatter(testSet_x[:,0],testSet_x[:,1], c=testSet_y, cmap='viridis', edgecolor='k', s=50)
# axs[1].set_title('测试集分类散点图')
# axs[1].set_xlabel('Sepal Length (cm)')
# axs[1].set_ylabel('Sepal Width (cm)')
# axs[1].plot(line_x, line_y, color='red')
#
# #----------------------------------------------------------- (二)网络调参
# w=[1,1,0]
# rate=0.01
# w,epoch=function_main(trainingSet_x,trainingSet_y,w,rate)
# print("学习率:",rate," 正确率：",getAccuracyRate(w,testSet_x,testSet_y)," 训练轮数: ",epoch,"\n")
#
# w=[1,1,0]
# rate=0.05
# w,epoch=function_main(trainingSet_x,trainingSet_y,w,rate)
# print("学习率:",rate," 正确率：",getAccuracyRate(w,testSet_x,testSet_y)," 训练轮数: ",epoch,"\n")
#
# w=[1,1,0]
# rate=0.1
# w,epoch=function_main(trainingSet_x,trainingSet_y,w,rate)
# print("学习率:",rate," 正确率：",getAccuracyRate(w,testSet_x,testSet_y)," 训练轮数: ",epoch,"\n")
#
# w=[1,1,0]
# rate=0.5
# w,epoch=function_main(trainingSet_x,trainingSet_y,w,rate)
# print("学习率:",rate," 正确率：",getAccuracyRate(w,testSet_x,testSet_y)," 训练轮数: ",epoch,"\n")
#
# #---------------------------------------------------------- (三)Scikit-learn机器学习库的调用
# X = selected_features
# y = filtered_target
#
# # 将数据集拆分为训练集和测试集
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
#
# # 初始化SGDClassifier，使用perceptron损失函数
# clf = SGDClassifier(loss='perceptron', max_iter=1000, tol=1e-3, random_state=42)
#
# # 训练模型
# clf.fit(X_train, y_train)
#
# # 在测试集上进行预测
# y_pred = clf.predict(X_test)
#
# # 计算准确率
# accuracy = accuracy_score(y_test, y_pred)
# print(f"正确率: {accuracy:.2f}")
#
# # 绘图可视化结果
# # 绘制分类直线（决策边界）
# coef = clf.coef_[0]
# intercept = clf.intercept_
#
# # 计算决策边界的x坐标
# x_vals = np.linspace(min(X[:, 0]) - 1, max(X[:, 0]) + 1, 100)
# y_vals = -(coef[0] * x_vals + intercept) / coef[1]
#
# fig, axs = plt.subplots(1, 2, figsize=(12, 4))
# axs[0].scatter(X_train[:, 0], X_train[:, 1], c=y_train,  cmap='viridis', edgecolor='k', s=50)
# axs[0].set_title('使用Scikit-learn的训练集分类散点图')
# axs[0].set_xlabel('Sepal Length (cm)')
# axs[0].set_ylabel('Sepal Width (cm)')
# axs[0].plot(line_x, line_y, color='red')
#
#
# axs[1].scatter(X_test[:, 0], X_test[:, 1], c=y_test,  cmap='viridis', edgecolor='k', s=50)
# axs[1].set_title('使用Scikit-learn的测试集分类散点图')
# axs[1].set_xlabel('Sepal Length (cm)')
# axs[1].set_ylabel('Sepal Width (cm)')
# axs[1].plot(line_x, line_y, color='red')
#
# # 显示图形
# plt.show()

# import numpy as np
# import matplotlib.pyplot as plt
# from sklearn.svm import SVC
# from sklearn.metrics import accuracy_score
# from sklearn.preprocessing import StandardScaler
# from sklearn import datasets
# from sklearn.model_selection import train_test_split
# from sklearn.inspection import DecisionBoundaryDisplay
# from sklearn.datasets import make_moons
# from sklearn.pipeline import make_pipeline
#
# plt.rcParams['font.sans-serif'] = ['SimHei']
# plt.rcParams['axes.unicode_minus'] = False
#
# #------------------------------------------（一）
# #-----------------------------（一）.1
# iris = datasets.load_iris()
#
# # 提取出sepal length和sepal width特征，以及目标变量
# X = iris.data[:, [0, 1]]
# y = iris.target
# mask = y < 2
# X, y = X[mask], y[mask]
#
# # 为每一类选择前30个作为训练集，剩余的20个作为测试集
# # 先划分类别，再划分训练/测试集
# n_samples_per_class = 30
# X_class0 = X[y == 0][:n_samples_per_class]
# y_class0 = y[y == 0][:n_samples_per_class]
# X_class1 = X[y == 1][:n_samples_per_class]
# y_class1 = y[y == 1][:n_samples_per_class]
#
# X_train = np.vstack((X_class0, X_class1))
# y_train = np.hstack((y_class0, y_class1))
#
# X_test_class0 = X[y == 0][n_samples_per_class:]
# y_test_class0 = y[y == 0][n_samples_per_class:]
# X_test_class1 = X[y == 1][n_samples_per_class:]
# y_test_class1 = y[y == 1][n_samples_per_class:]
#
# X_test = np.vstack((X_test_class0, X_test_class1))
# y_test = np.hstack((y_test_class0, y_test_class1))
#
# scaler = StandardScaler()
# X_train = scaler.fit_transform(X_train)
# X_test = scaler.transform(X_test)
#
# #-----------------------------（一）.2
# # 绘制散点图
# colors = ['b' if label == 0 else 'orange' for label in y]
# plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=None, edgecolor='k', s=40)
# plt.title('原始数据散点图')
# plt.xlabel('Sepal Length (cm)')
# plt.ylabel('Sepal Width (cm)')
# legend_handles = [plt.Line2D([0], [0], color='b', marker='o', label='Setosa'),
#                   plt.Line2D([0], [0], color='orange', marker='o', label='Versicolour')]
# plt.legend(handles=legend_handles, loc='upper left')
# plt.show()
#
# #-----------------------------（一）.3
# # 特征缩放
# scaler = StandardScaler()
# X_train = scaler.fit_transform(X_train)
# X_test = scaler.transform(X_test)
#
# # 定义C值和核函数的组合
# C_values = [0.01, 0.1, 1.0, 10, 100]
# kernels = ['linear', 'poly']
#
# # 存储测试正确率
# test_accuracies = {}
#
# # 训练模型并评估
# for C in C_values:
#     for kernel in kernels:
#         # 训练SVM模型
#         svm_model = SVC(C=C, kernel=kernel)
#         svm_model.fit(X_train, y_train)
#
#         # 预测测试集
#         y_pred = svm_model.predict(X_test)
#
#         # 计算测试正确率
#         accuracy = accuracy_score(y_test, y_pred)
#         test_accuracies[(C, kernel)] = accuracy
#
#         # 可视化决策边界和决策间隔
#         fig, ax = plt.subplots()
#         DecisionBoundaryDisplay.from_estimator(svm_model, X_train, ax=ax, response_method='predict')
#
#         # 绘制测试数据点
#         scatter = ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap='coolwarm', edgecolors='k')
#         ax.set_title(f'C={C}, kernel={kernel}, Test Accuracy={accuracy:.2f}')
#         ax.set_xlabel('Sepal Length (cm)')
#         ax.set_ylabel('Sepal Width (cm)')
#         ax.legend(*scatter.legend_elements(), title="Classes")
#         plt.show()
#
# # 打印所有组合的测试正确率
# for (C, kernel), accuracy in test_accuracies.items():
#     print(f'C={C}, kernel={kernel}, Test Accuracy={accuracy:.2f}')

# #------------------------------------------（二）
#
# #-----------------------------（二）.1. 生成非线性数据
# x, y = make_moons(n_samples=100, noise=0.2, random_state=0)
#
# #-----------------------------（二）.2. 画出生成非线性数据的散点图
# plt.scatter(x[:, 0], x[:, 1], c=y, cmap='coolwarm', edgecolors='k')
# plt.title('Generated Non-linear Data')
# plt.xlabel('Feature 1')
# plt.ylabel('Feature 2')
# plt.show()
#
#
#
# def plot_decision_boundary(clf, x, y, ax=None, title=None):
#     if ax is None:
#         fig, ax = plt.subplots()
#
#     x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
#     y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1
#     xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
#                          np.arange(y_min, y_max, 0.01))
#
#     Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
#     Z = Z.reshape(xx.shape)
#
#     ax.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.coolwarm)
#     scatter = ax.scatter(x[:, 0], x[:, 1], c=y, cmap='coolwarm', edgecolors='k')
#
#     if title:
#         ax.set_title(title)
#     ax.set_xlabel('Feature 1')
#     ax.set_ylabel('Feature 2')
#
#     ax.legend(*scatter.legend_elements(), title="Classes")
#
#     return ax
#
#
# #-----------------------------（二）.3. 训练SVM模型并绘制决策平面
# kernels = ['linear', 'poly', 'rbf']
# params = {
#     'poly': {'degree': 3, 'coef0': 1, 'C': 1.0},
#     'rbf': {'gamma': 0.1, 'C': 1.0}
# }
#
# for kernel in kernels:
#     if kernel == 'linear':
#         clf = make_pipeline(StandardScaler(), SVC(kernel=kernel, C=1.0))
#     else:
#         clf = make_pipeline(StandardScaler(), SVC(kernel=kernel, **params[kernel]))
#
#     clf.fit(x, y)
#     ax = plot_decision_boundary(clf, x, y, title=f'Decision Boundary with {kernel} Kernel')
#     plt.show()

import numpy as np
from sklearn.datasets import make_moons, load_iris
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import VotingClassifier
from collections import Counter
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier

# Use SimHei so Chinese labels render, and keep the minus sign displayable.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

#------------------------------------------ Part (1)
#----------------------------- Part (1).1

# Generate a noisy non-linear (two-moons) dataset; 75/25 train/test split.
x, y = make_moons(n_samples=1000, noise=0.4, random_state=0)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=0)

#----------------------------- Part (1).2
# Load the Iris dataset (all 4 features, all 3 classes); 70/30 train/test split.
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
X_iris_train, X_iris_test, y_iris_train, y_iris_test = train_test_split(X_iris, y_iris, test_size=0.3, random_state=42)

#------------------------------------------ Part (2)
#----------------------------- Part (2).1
# Three heterogeneous base classifiers for the ensemble experiments.
knn = KNeighborsClassifier(n_neighbors=5)
logreg = LogisticRegression(max_iter=10000)  # raise the iteration cap so the solver converges
gnb = GaussianNB()

#----------------------------- Part (2).2
# Fit each base classifier on the moons training split.
knn.fit(x_train, y_train)
logreg.fit(x_train, y_train)
gnb.fit(x_train, y_train)

# Predict on the test split and score each classifier individually.
y_pred_knn = knn.predict(x_test)
y_pred_logreg = logreg.predict(x_test)
y_pred_gnb = gnb.predict(x_test)

acc_knn = accuracy_score(y_test, y_pred_knn)
acc_logreg = accuracy_score(y_test, y_pred_logreg)
acc_gnb = accuracy_score(y_test, y_pred_gnb)

print(f"KNN 准确率: {acc_knn:.4f}")
print(f"逻辑回归 准确率: {acc_logreg:.4f}")
print(f"高斯朴素贝叶斯 准确率: {acc_gnb:.4f}")

#----------------------------- Part (2).3
# Hand-rolled majority-vote ensemble over the three fitted base classifiers.
classifiers = [knn, logreg, gnb]
class MajorityVotingClassifier:
    """Hard-voting ensemble: every base classifier predicts a label per
    sample and the most common label wins.

    Parameters
    ----------
    classifiers : list
        Estimators implementing ``fit(X, y)`` and ``predict(X)``.
    """

    def __init__(self, classifiers):
        self.classifiers = classifiers

    def fit(self, X, y):
        """Fit every base classifier on (X, y).

        Returns ``self`` so calls can be chained, matching the
        scikit-learn estimator convention (the original returned None).
        """
        for clf in self.classifiers:
            clf.fit(X, y)
        return self

    def predict(self, X):
        """Return the majority-voted label for each row of X.

        Ties are broken by first-seen order within the sample's votes
        (``Counter.most_common`` ordering).
        """
        # Shape (n_samples, n_classifiers): one column per base classifier.
        predictions = np.array([clf.predict(X) for clf in self.classifiers]).T
        # Majority vote across each row (one row per sample).
        majority_votes = np.apply_along_axis(
            lambda row: Counter(row).most_common(1)[0][0], axis=1, arr=predictions
        )
        return majority_votes



# Fit the hand-rolled ensemble and score it on the moons test split.
majority_voting_clf = MajorityVotingClassifier(classifiers)
majority_voting_clf.fit(x_train, y_train)
y_pred = majority_voting_clf.predict(x_test)
accuracy = accuracy_score(y_test, y_pred)
print(f'多数投票 准确率: {accuracy:.2f}')

#----------------------------- Part (2).4
# Hard and soft voting via sklearn's VotingClassifier.
voting_clf_hard = VotingClassifier(
    estimators=[('knn', knn), ('logreg', logreg), ('gnb', gnb)],
    voting='hard'
)
# NOTE(review): the soft-voting ensemble omits gnb even though GaussianNB
# exposes predict_proba — confirm this exclusion is intentional.
voting_clf_soft = VotingClassifier(
    estimators=[('knn', knn), ('logreg', logreg)],
    voting='soft'
)

# Fit both voting ensembles on the moons training split.
voting_clf_hard.fit(x_train, y_train)
voting_clf_soft.fit(x_train, y_train)
# Predict on the test split and compute accuracies.
y_pred_voting_hard = voting_clf_hard.predict(x_test)
y_pred_voting_soft = voting_clf_soft.predict(x_test)

acc_voting_hard = accuracy_score(y_test, y_pred_voting_hard)
acc_voting_soft = accuracy_score(y_test, y_pred_voting_soft)

print(f"sklearn硬投票集成 准确率: {acc_voting_hard:.4f}")
print(f"sklearn软投票集成 准确率: {acc_voting_soft:.4f}")


# Optional visualisation helper
def plot_decision_boundaries(clf, x, y, title):
    """Shade the classifier's decision regions over a dense 0.01-step grid
    covering the data (padded by 1 on each side), overlay the points
    coloured by label, and show the figure."""
    pad = 1
    x0_lo, x0_hi = x[:, 0].min() - pad, x[:, 0].max() + pad
    x1_lo, x1_hi = x[:, 1].min() - pad, x[:, 1].max() + pad
    grid_x, grid_y = np.meshgrid(np.arange(x0_lo, x0_hi, 0.01),
                                 np.arange(x1_lo, x1_hi, 0.01))
    # Predict on every grid node, then fold back to the grid's shape.
    flat_points = np.c_[grid_x.ravel(), grid_y.ravel()]
    regions = clf.predict(flat_points).reshape(grid_x.shape)
    plt.contourf(grid_x, grid_y, regions, alpha=0.8)
    plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', marker='o')
    plt.title(title)
    plt.show()

# Draw each fitted model's decision boundary over the moons test split.
plot_decision_boundaries(knn, x_test, y_test, "KNN Decision Boundary")
plot_decision_boundaries(logreg, x_test, y_test, "Logistic Regression Decision Boundary")
plot_decision_boundaries(gnb, x_test, y_test, "Gaussian Naive Bayes Decision Boundary")
plot_decision_boundaries(majority_voting_clf, x_test, y_test, "Majority Voting Classifier Decision Boundary")
plot_decision_boundaries(voting_clf_hard, x_test, y_test, "Hard Voting Classifier Decision Boundary")
plot_decision_boundaries(voting_clf_soft, x_test, y_test, "Soft Voting Classifier Decision Boundary")

# ------------------------------------------ Part (3)
# ----------------------------- Part (3).1
# Bagging ensemble with a decision tree as the base learner, on moons data.
base_clf = DecisionTreeClassifier(random_state=42)
bagging_clf = BaggingClassifier(base_estimator=base_clf, n_estimators=100, random_state=42, max_samples=1.0,
                                max_features=1.0)

# Train the bagging ensemble and predict on the moons test split.
bagging_clf.fit(x_train, y_train)
y_pred = bagging_clf.predict(x_test)

# BUG FIX: the original refit the standalone base_clf on the training data and
# reported that single tree's importances as the ensemble's. The ensemble's
# importance is the mean over its fitted members (the same approach the iris
# section of this file already uses).
tree_feature_importances = np.mean(
    [tree.feature_importances_ for tree in bagging_clf.estimators_], axis=0)
feature_names = ['Feature 1', 'Feature 2']
feature_importances = pd.DataFrame({
    'Feature': feature_names,
    'Importance': tree_feature_importances
})

# Print and plot the per-feature importances as a horizontal bar chart.
print("BaggingClassifier: Make Moons Dataset 属性重要性: ",tree_feature_importances)

plt.figure(figsize=(8, 6))
plt.barh(feature_importances['Feature'], feature_importances['Importance'], color='lightcoral')
plt.xlabel('Feature Importance')
plt.ylabel('Feature')
plt.title('BaggingClassifier: Feature Importance in Make Moons Dataset')
plt.gca().invert_yaxis()
plt.show()
plot_decision_boundaries(bagging_clf, x_test, y_test, 'BaggingClassifier Classification Result on Test Set')


feature_names = iris.feature_names
target_names = iris.target_names

# Refit the same bagging ensemble on the iris training split.
bagging_clf.fit(X_iris_train,y_iris_train)
y_pred = bagging_clf.predict(X_iris_test)


# Ensemble feature importance = mean of the fitted member trees' importances.
importances = np.mean([tree.feature_importances_ for tree in bagging_clf.estimators_], axis=0)

print("\nBaggingClassifier: iris Dataset 属性重要性:")
for idx, importance in enumerate(importances):
    print(f"{iris.feature_names[idx]}: {importance:.4f}")
plt.figure(figsize=(10, 6))
plt.bar(feature_names, importances, color='skyblue')
plt.xlabel('Feature')
plt.ylabel('Importance')
plt.title('BaggingClassifier: Feature Importance in Iris Dataset')
plt.show()
#----------------------------- Part (3).2

# Create random forest classifiers, one per dataset.
rf_moons = RandomForestClassifier(n_estimators=100, random_state=42)
rf_iris = RandomForestClassifier(n_estimators=100, random_state=42)

# Train the models.
rf_moons.fit(x_train, y_train)
rf_iris.fit(X_iris_train, y_iris_train)

# Predict on each test split.
y_moons_pred = rf_moons.predict(x_test)
y_iris_pred = rf_iris.predict(X_iris_test)

# Forest-level feature importances (exposed directly by RandomForest).
importances_moons = rf_moons.feature_importances_
importances_iris = rf_iris.feature_importances_

feature_names = ['Feature 1', 'Feature 2']
feature_importances = pd.DataFrame({
    'Feature': feature_names,
    'Importance': importances_moons
})
print("RandomForestClassifier: Make_moons Feature Importances: ",importances_moons)

plt.figure(figsize=(8, 6))
plt.barh(feature_importances['Feature'], feature_importances['Importance'], color='lightcoral')
plt.xlabel('Feature Importance')
plt.ylabel('Feature')
plt.title('RandomForestClassifier: Feature Importance in Make Moons Dataset')
plt.gca().invert_yaxis()
plt.show()
plot_decision_boundaries(rf_moons, x_test, y_test, 'RandomForestClassifier Classification Result on Test Set')


print("\nRandomForestClassifier: Iris Feature Importances:")
for idx, importance in enumerate(importances_iris):
    print(f"{iris.feature_names[idx]}: {importance:.4f}")

plt.figure(figsize=(10, 6))
plt.bar(iris.feature_names, importances_iris, color='skyblue')
plt.xlabel('Feature')
plt.ylabel('Importance')
plt.title('RandomForestClassifier: Feature Importance in Iris Dataset')
plt.show()

#------------------------------------------ Part (4)
#----------------------------- Part (4).1

# AdaBoost with decision stumps (depth-1 trees) as the weak learner.
# NOTE(review): `base_estimator` was renamed to `estimator` in scikit-learn 1.2
# and removed in 1.4 — confirm the installed version before upgrading sklearn.
base_estimator = DecisionTreeClassifier(max_depth=1)
ada_clf = AdaBoostClassifier(base_estimator=base_estimator, n_estimators=50, random_state=42)

ada_clf.fit(x_train, y_train)

# Decision boundary of the AdaBoost model on the test set.
plt.figure(figsize=(8, 6))
plot_decision_boundaries(ada_clf, x_test, y_test, title='AdaBoostClassifier: AdaBoost on Test Set')
plt.show()

#----------------------------- Part (4).2
# Gradient boosting classifier.
gbc = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, max_depth=3, random_state=42)
gbc.fit(x_train, y_train)

# Decision boundary of the gradient boosting model on the test set.
plt.figure(figsize=(8, 6))
# BUG FIX: the title previously said "AdaBoost" — a copy-paste from the section above.
plot_decision_boundaries(gbc, x_test, y_test, title='GradientBoostingClassifier: GradientBoosting on Test Set')
plt.show()