import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Matplotlib setup so Chinese labels/titles render correctly
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font supports CJK glyphs
plt.rcParams['axes.unicode_minus'] = False    # render minus signs correctly with this font

# Sigmoid function
def sigmoid(z):
    """
    Map a linear score (or array of scores) to a probability in (0, 1).

    z: scalar or ndarray, typically X @ W + b

    Returns 1 / (1 + exp(-z)). The input is clipped to [-500, 500] so
    np.exp never overflows; within that range the clipped result is
    numerically identical to the unclipped one (exp(500) is still a
    finite double, and the sigmoid saturates long before +/-500).
    """
    # np.exp(-z) overflows to inf (with a RuntimeWarning) once -z
    # exceeds ~709; clipping keeps training warning-free without
    # changing any returned value.
    z = np.clip(z, -500, 500)
    return 1 / (1 + np.exp(-z))

# Loss function (log loss / binary cross-entropy)
def compute_loss(y, y_pred):
    """
    Mean log loss between true labels and predicted probabilities.

    y: ground-truth labels (0 or 1)
    y_pred: predicted probabilities

    Returns the average of -[y*log(p) + (1-y)*log(1-p)] over all samples.
    """
    # Clamp probabilities away from exactly 0 and 1 so log() stays finite.
    eps = 1e-15
    p = np.clip(y_pred, eps, 1 - eps)
    per_sample = y * np.log(p) + (1 - y) * np.log(1 - p)
    return -np.sum(per_sample) / len(y)

# Parameter fitting via batch gradient descent
def gradient_descent(X, y, W, b, learning_rate, num_iterations):
    """
    Fit logistic-regression parameters with full-batch gradient descent.

    X: feature matrix, shape (m, n)
    y: true labels (0 or 1), shape (m,)
    W: initial weight vector, shape (n,)
    b: initial bias (scalar)
    learning_rate: step size for each update
    num_iterations: number of full-batch updates

    Returns (W, b, losses) where losses holds the log loss recorded at
    every iteration. Fix over the original version: W is copied to a
    float array up front, so `W -= ...` can no longer mutate the
    caller's array in place (and an int-dtype W no longer breaks the
    in-place float update).
    """
    m = len(y)
    # Work on a private float copy — never mutate the caller's W.
    W = np.array(W, dtype=float, copy=True)
    b = float(b)
    losses = []  # loss history, one entry per iteration

    for i in range(num_iterations):
        # Forward pass: linear score, then probability via sigmoid.
        z = np.dot(X, W) + b
        y_pred = sigmoid(z)
        # Track the current log loss.
        loss = compute_loss(y, y_pred)
        losses.append(loss)

        # Gradients of the mean log loss w.r.t. W and b.
        dz = y_pred - y
        dW = (1/m) * np.dot(X.T, dz)
        db = (1/m) * np.sum(dz)

        # Gradient step.
        W -= learning_rate * dW
        b -= learning_rate * db

        # Progress report every 100 iterations.
        if (i + 1) % 100 == 0:
            print(f"迭代 {i + 1}, 损失: {loss:.4f}")

    return W, b, losses

# Prediction
def predict(X, W, b):
    """
    Classify samples with trained logistic-regression parameters.

    X: feature matrix
    W: weight vector
    b: bias

    Returns an int array of 0/1 labels obtained by thresholding the
    predicted probability at 0.5 (probabilities of exactly 0.5 map to 1).
    """
    probabilities = sigmoid(np.dot(X, W) + b)
    labels = probabilities >= 0.5
    return labels.astype(int)

# 1. Input data: load the iris dataset
iris = load_iris()
X = iris.data
y = iris.target

# Reduce to a binary problem: keep only setosa (0) and versicolor (1)
mask = (y == 0) | (y == 1)
X = X[mask]
y = y[mask]

# 2. Preprocessing: train/test split, then feature standardization.
#    The scaler is fit on the training set only, so no test-set
#    statistics leak into training.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# 3. Initialize parameters W and b
n_features = X_train_scaled.shape[1]
W = np.zeros(n_features)  # weights start at zero
b = 0  # bias starts at zero

# 4. Train the model with gradient descent
learning_rate = 0.01
num_iterations = 1000
W, b, losses = gradient_descent(X_train_scaled, y_train, W, b, learning_rate, num_iterations)

# 5. Predict on the held-out (scaled) test set
y_pred = predict(X_test_scaled, W, b)

# 6. Evaluate: fraction of correctly classified test samples
accuracy = np.mean(y_pred == y_test)
print(f"\n测试集准确率: {accuracy:.4f}")

# 7. Plot the training loss curve
plt.figure(figsize=(8, 6))
plt.plot(losses)
plt.title('训练过程中的损失变化')
plt.xlabel('迭代次数')
plt.ylabel('对数损失')
plt.grid(True)
plt.show()

# 8. Visualize the decision boundary (using the first two features:
#    sepal length and sepal width)
def plot_decision_boundary(X, y, W, b, scaler):
    """
    Plot the model's decision boundary over sepal length/width.

    X: UNscaled feature matrix (all four iris features)
    y: binary labels (0 = setosa, 1 = versicolor)
    W, b: trained parameters (valid in the scaled feature space)
    scaler: the fitted StandardScaler used during training

    The grid varies only the first two features while the remaining two
    are held at their mean over X, so the drawn boundary is a 2-D slice
    of the 4-D decision surface.
    """
    X_2d = X[:, [0, 1]]  # sepal length and sepal width only
    x_min, x_max = X_2d[:, 0].min() - 1, X_2d[:, 0].max() + 1
    y_min, y_max = X_2d[:, 1].min() - 1, X_2d[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
                         np.arange(y_min, y_max, 0.1))

    # Fill the two non-plotted features with their mean over X, then
    # scale the grid the same way the training data was scaled.
    other_features = np.mean(X[:, 2:], axis=0)
    X_grid = np.c_[xx.ravel(), yy.ravel(), np.full_like(xx.ravel(), other_features[0]), np.full_like(xx.ravel(), other_features[1])]
    X_grid_scaled = scaler.transform(X_grid)
    Z = predict(X_grid_scaled, W, b)
    Z = Z.reshape(xx.shape)

    # Shade the two predicted regions, then overlay the raw samples.
    plt.figure(figsize=(8, 6))
    plt.contourf(xx, yy, Z, alpha=0.3, cmap='coolwarm')
    plt.scatter(X_2d[y == 0, 0], X_2d[y == 0, 1], label='山鸢尾 (setosa)', color='blue', edgecolor='k')
    plt.scatter(X_2d[y == 1, 0], X_2d[y == 1, 1], label='变色鸢尾 (versicolor)', color='red', edgecolor='k')
    plt.xlabel('花萼长度 (cm)')
    plt.ylabel('花萼宽度 (cm)')
    plt.title('逻辑回归决策边界')
    plt.legend()
    plt.show()

# Draw the decision boundary over the (unscaled) binary iris data
plot_decision_boundary(X, y, W, b, scaler)

# 9. Print the final learned parameters
print("\n最终权重 W:", W)
print("最终偏置 b:", b)