import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler, PolynomialFeatures
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, roc_curve, auc
from sklearn.decomposition import PCA

# Load the iris dataset and wrap it in a DataFrame for convenient handling.
iris = datasets.load_iris()

df_iris = pd.DataFrame(iris.data, columns=iris.feature_names)
df_iris['target'] = iris.target

dataX = df_iris.drop(columns=["target"]).values
dataY = df_iris['target'].values

# Hold out 20% of the samples for testing; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(dataX, dataY, test_size=0.2, random_state=123)

# Project the features onto the first two principal components.
# BUG FIX: the test set must be projected with the PCA fitted on the
# training set (transform), NOT re-fitted with fit_transform — re-fitting
# on x_test leaks test data and produces a projection that is
# inconsistent with the one the model was trained on.
pca = PCA(n_components=2)
x_train_pca = pca.fit_transform(x_train)
x_test_pca = pca.transform(x_test)

# Sanity-check the split shapes.
print("训练集特征形状:", x_train.shape)
print("训练集目标形状:", y_train.shape)
print("测试集特征形状:", x_test.shape)
print("测试集目标形状:", y_test.shape)

# Standardize the PCA-projected training features.
# NOTE(review): x_scaled is only used for plotting later in the file; the
# pipeline below re-fits this same StandardScaler object during
# pipeline.fit, so this pre-fit does not affect the model.
ss = StandardScaler()
x_scaled = ss.fit_transform(x_train_pca)

# saga supports all penalty types, but its default max_iter=100 does not
# converge on the expanded polynomial feature set (ConvergenceWarning).
# Raise the iteration budget so the solver actually converges.
model = LogisticRegression(solver="saga", max_iter=10000)

# Polynomial feature expansion: generate all interaction/power terms of
# the two PCA components up to degree 5 (no constant column — the model
# fits its own intercept).
poly = PolynomialFeatures(degree=5, include_bias=False)

# Chain standardization -> polynomial expansion -> classifier so the
# exact same preprocessing is applied at fit and predict time.
pipeline = Pipeline([
    ('scaler', ss),
    ('poly', poly),
    ('model', model)
])

pipeline.fit(x_train_pca, y_train)
y_pred = pipeline.predict(x_test_pca)

# Report test-set performance: accuracy, per-class metrics, confusion matrix.
acc = accuracy_score(y_test, y_pred)
report = classification_report(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
print("模型准确率：", acc)
print("分类报告：\n", report)
print("混淆矩阵：\n", cm)


# Scatter plot of the PCA-projected training data (disabled)
# plt.scatter(x_train_pca[:, 0], x_train_pca[:, 1], c=y_train, cmap=plt.cm.Set1, edgecolor='k')
# plt.title("Iris Dataset")
# plt.show()

# Histogram of the first training feature (disabled)
# plt.hist(x_train[:, 0], bins=30, color='skyblue', edgecolor='black')
# plt.ylabel("Frequency")
# plt.title("Histogram of Sepal Length")
# plt.show()

# Visualize the decision boundary.
def plot_decision_boundary(X, y, model):
    """Plot the 2-D decision regions of *model* together with the data.

    Parameters
    ----------
    X : array of shape (n_samples, 2)
        Two-dimensional (e.g. PCA-projected) feature matrix.
    y : array of shape (n_samples,)
        Class labels used to colour the scatter points.
    model : fitted estimator
        Any fitted object with a ``predict`` method accepting 2-D input.
    """
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 500), np.linspace(y_min, y_max, 500))

    # Predict the class of every grid point.
    # BUG FIX: use the *model* argument rather than the global `pipeline`,
    # so the function actually honours the estimator passed to it.
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # Filled contours for the decision regions, scatter for the samples.
    plt.contourf(xx, yy, Z, alpha=0.3, cmap='viridis')
    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', marker='o', cmap='viridis')
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.title('Logistic Regression Decision Boundary on Iris Dataset')
    plt.colorbar()


# Draw the boundary in the raw PCA space the pipeline was fitted on.
# BUG FIX: previously the already-standardized x_scaled was passed while
# the pipeline standardizes internally, so grid points were scaled twice
# and the boundary did not line up with the scatter points. Pass the
# un-scaled PCA features together with the full pipeline instead.
plt.figure(figsize=(10, 6))
plot_decision_boundary(x_train_pca, y_train, pipeline)
plt.show()
