import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d import Axes3D

# Matplotlib font configuration for Chinese text, targeted at Windows.
# Only fonts that ship with Windows are listed so CJK axis labels/titles
# render correctly there.  (The redundant re-import of matplotlib.pyplot
# and the dead commented-out example code were removed; plt is already
# imported at the top of the file.)
plt.rcParams["font.family"] = ["SimHei", "Microsoft YaHei", "SimSun"]
plt.rcParams["axes.unicode_minus"] = False  # keep the minus sign renderable with CJK fonts
class LogisticRegression:
    """
    Logistic regression classifier trained with batch gradient descent.

    Besides the fitted parameters, the model records the loss, weight
    vector, bias and training accuracy at every iteration so the whole
    optimisation process can be replayed/animated afterwards.
    """
    def __init__(self, learning_rate=0.01, iterations=100):
        """
        Initialise the model hyper-parameters.

        :param learning_rate: gradient-descent step size
        :param iterations: number of gradient-descent iterations
        """
        self.learning_rate = learning_rate
        self.iterations = iterations
        self.weights = None           # (n_features,) weight vector, set by fit()
        self.bias = None              # scalar bias, set by fit()
        self.costs = []               # cross-entropy loss per iteration
        self.weights_history = []     # copy of the weight vector per iteration
        self.bias_history = []        # bias value per iteration
        self.train_accuracies = []    # training-set accuracy per iteration

    def sigmoid(self, z):
        """Sigmoid activation function: 1 / (1 + e^(-z))."""
        return 1 / (1 + np.exp(-z))

    def fit(self, X, y, X_val=None, y_val=None, verbose=True):
        """
        Train the model with batch gradient descent, recording the loss,
        parameters and training accuracy at every iteration.

        :param X: (n_samples, n_features) training inputs
        :param y: (n_samples,) binary labels (0/1)
        :param X_val: accepted for API compatibility; currently unused
        :param y_val: accepted for API compatibility; currently unused
        :param verbose: print progress every 10 iterations when True
        """
        n_samples, n_features = X.shape
        # Small random initialisation (rather than all zeros); fixed seed
        # so runs are reproducible.
        np.random.seed(42)
        self.weights = np.random.randn(n_features) * 0.1
        self.bias = np.random.randn() * 0.1

        for i in range(self.iterations):
            # Forward pass
            linear_model = np.dot(X, self.weights) + self.bias
            y_predicted = self.sigmoid(linear_model)

            # Cross-entropy loss.  Clip the probabilities exactly like
            # compute_cost() does so log(0) can never yield inf/NaN
            # (previously fit() skipped this clipping).
            y_safe = np.clip(y_predicted, 1e-10, 1 - 1e-10)
            cost = -np.mean(y * np.log(y_safe) + (1 - y) * np.log(1 - y_safe))
            self.costs.append(cost)

            # Training accuracy at the 0.5 threshold (pure-numpy
            # equivalent of sklearn's accuracy_score).
            y_train_pred = (y_predicted >= 0.5).astype(int)
            train_accuracy = float(np.mean(y_train_pred == y))
            self.train_accuracies.append(train_accuracy)

            # Record the parameter trajectory for later animation
            self.weights_history.append(self.weights.copy())
            self.bias_history.append(self.bias)

            # Gradients of the mean cross-entropy loss
            dw = (1 / n_samples) * np.dot(X.T, (y_predicted - y))
            db = (1 / n_samples) * np.sum(y_predicted - y)

            # Parameter update
            self.weights -= self.learning_rate * dw
            self.bias -= self.learning_rate * db

            # Progress report
            if verbose and (i % 10 == 0 or i == self.iterations - 1):
                print(f"迭代 {i+1}/{self.iterations}: 损失 = {cost:.6f}, 权重 = {self.weights}, 偏置 = {self.bias:.6f}, 训练准确率 = {train_accuracy:.4f}")

    def predict(self, X, threshold=0.5):
        """
        Predict class labels for X.

        :param X: (n_samples, n_features) inputs
        :param threshold: probability cut-off for class 1
        :return: list of 0/1 predictions (plain Python ints, as before)
        """
        probabilities = self.sigmoid(np.dot(X, self.weights) + self.bias)
        return [1 if prob >= threshold else 0 for prob in probabilities]

    def compute_cost(self, X, y, weights, bias):
        """
        Cross-entropy loss for an arbitrary (weights, bias) pair.
        Used to sample the loss surface for the 3D visualisation.
        """
        linear_model = np.dot(X, weights) + bias
        y_predicted = self.sigmoid(linear_model)
        # Clip to avoid log(0)
        y_predicted = np.clip(y_predicted, 1e-10, 1 - 1e-10)
        cost = -np.mean(y * np.log(y_predicted) + (1 - y) * np.log(1 - y_predicted))
        return cost

def main():
    # 1. 加载数据
    print("加载鸢尾花数据集...")
    iris = load_iris()
    X = iris.data[:, 2:4]  # 取花瓣长度和宽度两个特征
    y = iris.target
    
    # 修改为只保留山鸢尾(Setosa, 0)和维吉尼亚鸢尾(Virginica, 2)，并重新映射标签为0和1
    X = X[(y == 0) | (y == 2)]
    y = y[(y == 0) | (y == 2)]
    # 重新映射标签：将山鸢尾(0)保持为0，维吉尼亚鸢尾(2)设为1
    y = y // 2  # 0保持为0，2变为1
    
    # 2. 划分训练集和测试集
    print("划分训练集和测试集...")
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42
    )
    print(f"训练集大小: {X_train.shape[0]}，测试集大小: {X_test.shape[0]}")
    
    # 3. 初始化模型 - 增加学习率以获得更明显的梯度下降轨迹
    print("初始化逻辑回归模型...")
    model = LogisticRegression(learning_rate=0.1, iterations=100)
    
    # 4. 训练模型
    print("开始训练模型...")
    model.fit(X_train, y_train, X_test, y_test)
    
    # 5. 创建可视化图表
    # 主窗口 - 用于显示决策边界、权重、偏置和准确率等图表
    main_fig = plt.figure(figsize=(16, 10))
    main_gs = main_fig.add_gridspec(2, 2, hspace=0.4)
    
    # 单独窗口 - 用于显示损失函数曲面和训练轨迹
    loss_fig = plt.figure(figsize=(10, 8))
    
    # 初始化损失函数窗口的子图
    loss_ax = loss_fig.add_subplot(111, projection='3d')
    
    # 添加数据集信息显示
    # 获取数据集的详细信息
    total_samples = X.shape[0]
    feature_count = X.shape[1]
    train_samples = X_train.shape[0]
    test_samples = X_test.shape[0]
    class_0_count = np.sum(y == 0)
    class_1_count = np.sum(y == 1)
    
    # 准备数据集信息文本
    dataset_info = (
        "数据集信息:\n"
        "总样本数: {}\n"
        "特征数量: {}\n"
        "训练集样本: {}\n"
        "测试集样本: {}\n"
        "特征: 花瓣长度 (cm), 花瓣宽度 (cm)\n"
        "类别分布:\n"
        "  - 山鸢尾 (Setosa): {}个样本 (标签: 0)\n"
        "  - 维吉尼亚鸢尾 (Virginica): {}个样本 (标签: 1)"
    ).format(total_samples, feature_count, train_samples, test_samples, class_0_count, class_1_count)
    
    # 在第二个窗口中添加数据集信息文本框
    # 使用add_axes创建一个独立的文本区域
    info_ax = loss_fig.add_axes([0.02, 0.02, 0.25, 0.3])
    info_ax.axis('off')  # 关闭坐标轴
    # 为文本单独设置字体
    info_ax.text(0, 0.95, dataset_info, fontsize=10, \
                transform=info_ax.transAxes, \
                bbox=dict(facecolor='white', alpha=0.8, edgecolor='gray'), \
                verticalalignment='top', \
                fontproperties={'family': ['SimHei', 'Microsoft YaHei', 'SimSun']})  # 移除'Arial Unicode MS'
    # 删除重复的文本显示
    # info_ax.text(0, 0.95, dataset_info, fontsize=10, \
    #            transform=info_ax.transAxes, \
    #            bbox=dict(facecolor='white', alpha=0.8, edgecolor='gray'), \
    #            verticalalignment='top', \
    #            family='monospace')

    # 调整主3D图的位置，为信息框留出空间
    loss_ax.set_position([0.3, 0.05, 0.65, 0.9])
    
    # 初始化主窗口的子图
    main_axes = {
        'decision': main_fig.add_subplot(main_gs[0, 0]),
        'weights': main_fig.add_subplot(main_gs[0, 1]),
        'bias': main_fig.add_subplot(main_gs[1, 0]),
        'accuracy': main_fig.add_subplot(main_gs[1, 1]),
        'metrics': main_fig.add_axes([0.85, 0.1, 0.15, 0.8])  # 使用add_axes来放置在右侧
    }
    
    # 准备决策边界的数据
    x1_min, x1_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
    x2_min, x2_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, 0.01),
                          np.arange(x2_min, x2_max, 0.01))
    
    # 初始化主窗口的各个子图
    # 子图1: 训练数据散点图和决策边界
    scatter0 = main_axes['decision'].scatter(X_train[y_train==0, 0], X_train[y_train==0, 1], 
                                      color='red', marker='o', label='山鸢尾 (Setosa)')
    scatter1 = main_axes['decision'].scatter(X_train[y_train==1, 0], X_train[y_train==1, 1], 
                                      color='blue', marker='x', label='维吉尼亚鸢尾 (Virginica)')
    main_axes['decision'].set_xlabel('花瓣长度 (cm)')
    main_axes['decision'].set_ylabel('花瓣宽度 (cm)')
    main_axes['decision'].set_title('决策边界变化 (显示概率值)')
    main_axes['decision'].legend()
    main_axes['decision'].grid(True)
    contour = None
    contour_lines = None
    contour_labels = None
    text_iter = main_axes['decision'].text(0.02, 0.95, '', transform=main_axes['decision'].transAxes)
    
    # 子图2: 权重变化
    weights_line1, = main_axes['weights'].plot([], [], 'r-', label='权重1')
    weights_line2, = main_axes['weights'].plot([], [], 'g-', label='权重2')
    # 将权重历史转换为numpy数组以便正确计算极值
    weights_array = np.array(model.weights_history)
    main_axes['weights'].set_xlim(0, model.iterations)
    main_axes['weights'].set_ylim(weights_array.min() * 1.1, weights_array.max() * 1.1)
    main_axes['weights'].set_xlabel('迭代次数')
    main_axes['weights'].set_ylabel('权重值')
    main_axes['weights'].set_title('权重参数变化')
    main_axes['weights'].legend()
    main_axes['weights'].grid(True)
    
    # 子图3: 偏置变化
    bias_line, = main_axes['bias'].plot([], [], 'b-')
    main_axes['bias'].set_xlim(0, model.iterations)
    main_axes['bias'].set_ylim(min(model.bias_history) * 1.1, max(model.bias_history) * 1.1)
    main_axes['bias'].set_xlabel('迭代次数')
    main_axes['bias'].set_ylabel('偏置值')
    main_axes['bias'].set_title('偏置参数变化')
    main_axes['bias'].grid(True)
    
    # 子图4: 训练准确率变化
    accuracy_line, = main_axes['accuracy'].plot([], [], 'g-')
    main_axes['accuracy'].set_xlim(0, model.iterations)
    main_axes['accuracy'].set_ylim(0.5, 1.0)
    main_axes['accuracy'].set_xlabel('迭代次数')
    main_axes['accuracy'].set_ylabel('准确率')
    main_axes['accuracy'].set_title('训练集准确率变化')
    main_axes['accuracy'].grid(True)
    
    # 子图5: 模型评估指标和公式显示
    main_axes['metrics'].axis('off')  # 关闭坐标轴
    model_metrics_text = main_axes['metrics'].text(-0.5, 0.7, '', fontsize=10, transform=main_axes['metrics'].transAxes)
    
    # 添加逻辑回归公式和交叉熵损失函数显示
    formula_text = """
    逻辑回归核心公式：
    1. 线性预测: z = w·x + b
    2. Sigmoid激活: σ(z) = 1/(1+e^(-z))
    3. 预测概率: P(y=1|x) = σ(w·x + b)
    4. 决策边界: w·x + b = 0
    
    交叉熵损失函数:
    1. 单样本损失: L(y, ŷ) = -[y·log(ŷ) + (1-y)·log(1-ŷ)]
    2. 总体损失: J(w,b) = -1/m · Σ[y·log(ŷ) + (1-y)·log(1-ŷ)]
    其中，y为真实标签，ŷ为预测概率，m为样本数量
    """
    # 将公式文本的y坐标从0.1调整为0.05，使其再向下移动两行
    main_axes['metrics'].text(-0.9, 0.05, formula_text, fontsize=11, transform=main_axes['metrics'].transAxes, 
                   bbox=dict(facecolor='wheat', alpha=0.5))
    
    main_axes['metrics'].set_title('实时模型评估指标与公式')
    main_axes['metrics'].axis('off')  # 隐藏坐标轴
    
    # 初始化损失函数曲面和训练轨迹图
    # 计算权重空间网格 - 使用更小的范围，使显示比例更大
    # 减小权重范围，让参数和损失显示更清晰
    w1_range = np.linspace(-1, 1, 100)  # 将权重1范围从(-20, 20)缩小到(-10, 10)
    w2_range = np.linspace(-1, 1, 100)    # 将权重2范围从(-15, 15)缩小到(-8, 8)
    W1, W2 = np.meshgrid(w1_range, w2_range)
    
    # 计算损失函数值 - 使用当前训练后的偏置值，而不是平均值
    # 从训练历史中获取最终的偏置值，这样曲面能更准确反映实际损失地形
    final_bias = model.bias_history[-1] if model.bias_history else 0
    Z_surface = np.zeros(W1.shape)
    for i in range(W1.shape[0]):
        for j in range(W1.shape[1]):
            Z_surface[i, j] = model.compute_cost(X_train, y_train, 
                                                np.array([W1[i, j], W2[i, j]]), 
                                                final_bias)
    
    # 裁剪Z_surface值，确保不超出0-1范围
    Z_surface = np.clip(Z_surface, 0, 1)
    
    # 绘制损失函数曲面
    surf = loss_ax.plot_surface(W1, W2, Z_surface, cmap='viridis', alpha=0.7, linewidth=0, antialiased=False)
    
    # 初始化训练轨迹线
    # 确保有数据才初始化轨迹线和点
    if len(model.weights_history) > 0:
        weights_data = np.array(model.weights_history)
        costs_data = np.array(model.costs)
        trajectory_line, = loss_ax.plot(weights_data[:, 0], weights_data[:, 1], costs_data, 'r-', linewidth=2)
        current_point, = loss_ax.plot([model.weights_history[0][0]], [model.weights_history[0][1]], [model.costs[0]], 'go', markersize=8)
    else:
        trajectory_line, = loss_ax.plot([], [], [], 'r-', linewidth=2)
        current_point, = loss_ax.plot([], [], [], 'go', markersize=8)

    loss_ax.set_xlabel('权重1')
    loss_ax.set_ylabel('权重2')
    loss_ax.set_zlabel('损失值')
    loss_ax.set_title('损失函数曲面与训练轨迹')
    
    # 设置3D图视角，使轨迹显示更清晰
    loss_ax.view_init(elev=30, azim=45)  # 微调视角以突出曲面特征
    
    # 限制损失值的显示范围，最高为1
    loss_ax.set_zlim(0, 1)  # 设置Z轴范围从0到1
    
    # 确保在3D图中正确显示坐标轴
    loss_ax.margins(0)  # 移除边距
    
    # 添加颜色条
    cbar = loss_fig.colorbar(surf, ax=loss_ax, shrink=0.5, aspect=5)
    cbar.set_label('损失值')
    
    # 更新函数
    def update(frame):
        # 检查动画是否应该运行
        nonlocal contour, contour_lines, contour_labels
        
        # 如果动画暂停，返回空列表，不更新任何元素
        if not animation_running:
            return []
        
        # 使用传入的frame参数
        current_frame = frame
        
        # 更新主窗口的决策边界
        # 移除之前的等高线和标签
        if contour is not None:
            for c in contour.collections:
                c.remove()
        if contour_lines is not None:
            for c in contour_lines.collections:
                c.remove()
        if contour_labels is not None:
            for text in contour_labels:
                text.remove()
        
        # 使用当前帧的参数来绘制决策边界
        weights = model.weights_history[current_frame]
        bias = model.bias_history[current_frame]
        
        # 计算决策边界
        Z = model.sigmoid(np.dot(np.c_[xx1.ravel(), xx2.ravel()], weights) + bias)
        Z = Z.reshape(xx1.shape)
        
        # 绘制决策边界颜色填充（概率分布）
        contour = main_axes['decision'].contourf(xx1, xx2, Z, alpha=0.3, cmap='coolwarm')
        
        # 绘制概率等高线
        contour_levels = [0.1, 0.3, 0.5, 0.7, 0.9]  # 设置要显示的概率值
        contour_lines = main_axes['decision'].contour(xx1, xx2, Z, levels=contour_levels, colors='black', linewidths=1)
        
        # 添加概率数值标签
        contour_labels = main_axes['decision'].clabel(contour_lines, inline=True, fontsize=8, fmt='%.1f')
        
        # 更新迭代次数文本
        text_iter.set_text(f'迭代次数: {current_frame+1}')
        
        # 更新权重曲线
        weights_data = np.array(model.weights_history[:current_frame+1])
        weights_line1.set_data(range(current_frame+1), weights_data[:, 0])
        weights_line2.set_data(range(current_frame+1), weights_data[:, 1])
        
        # 更新偏置曲线
        bias_line.set_data(range(current_frame+1), model.bias_history[:current_frame+1])
        
        # 更新准确率曲线
        accuracy_line.set_data(range(current_frame+1), model.train_accuracies[:current_frame+1])
        
        # 在测试集上评估当前模型并更新指标显示
        current_model = LogisticRegression()
        current_model.weights = weights
        current_model.bias = bias
        y_pred = current_model.predict(X_test)
        current_accuracy = accuracy_score(y_test, y_pred)
        
        # 准备模型评估指标文本
        metrics_text = (
            f"当前迭代: {current_frame+1}\n"+
            f"损失值: {model.costs[current_frame]:.6f}\n"+
            f"训练准确率: {model.train_accuracies[current_frame]:.4f}\n"+
            f"测试准确率: {current_accuracy:.4f}\n"+
            f"权重1: {weights[0]:.4f}\n"+
            f"权重2: {weights[1]:.4f}\n"+
            f"偏置: {bias:.4f}\n"+
            f"决策边界方程: {weights[0]:.2f}*x1 + {weights[1]:.2f}*x2 + {bias:.2f} = 0"
        )
        model_metrics_text.set_text(metrics_text)
        
        # 更新损失函数窗口中的训练轨迹
        weights_history_array = np.array(model.weights_history[:current_frame+1])
        costs_history_array = np.array(model.costs[:current_frame+1])
        # 裁剪costs_history_array，确保不超出0-1范围
        costs_history_array = np.clip(costs_history_array, 0, 1)
        trajectory_line.set_data(weights_history_array[:, 0], weights_history_array[:, 1])
        trajectory_line.set_3d_properties(costs_history_array)
        
        # 更新当前点 - 确保显示当前迭代的位置
        current_point.set_data([weights[0]], [weights[1]])
        current_point.set_3d_properties([min(model.costs[current_frame], 1)])  # 裁剪当前点的损失值为最大1
        
        # 强制刷新两个窗口，确保同步更新
        loss_fig.canvas.draw_idle()
        main_fig.canvas.draw_idle()
        
        # 重新应用Z轴范围限制，防止自动调整
        loss_ax.set_zlim(0, 1)
        
        # 返回所有需要更新的元素
        updated_elements = (list(contour.collections) + list(contour_lines.collections) + 
                           contour_labels + [text_iter, weights_line1, weights_line2, 
                           bias_line, accuracy_line, model_metrics_text, trajectory_line, current_point])
        
        return updated_elements
    
    # 创建动画
    print("创建训练过程动画...")
    # 全局变量，控制动画是否运行
    animation_running = True
    # 关键优化：设置blit=False，这样Matplotlib会重绘整个图形，确保两个窗口都能正确更新
    ani = FuncAnimation(main_fig, update, frames=model.iterations, interval=1000, blit=False)
    
     
    # 添加颜色条，显示概率对应的颜色
    cbar_main = main_fig.colorbar(contour, ax=main_axes['decision'])
    cbar_main.set_label('类别1概率值')
    
    # 6. 模型最终评估
    print("\n评估模型最终性能...")
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    print(f"测试集最终准确率: {accuracy:.4f}")
    print("\n混淆矩阵:")
    print(confusion_matrix(y_test, y_pred))
    print("\n分类报告:")
    print(classification_report(y_test, y_pred))
    
    # 调整布局
    main_fig.tight_layout(rect=[0, 0, 0.85, 1.0])  # 为右侧的metrics留出空间，不再为顶部按钮预留空间
    loss_fig.tight_layout()
    
    # 设置窗口最大化 - 使用兼容性更好的方法
    # 主窗口
    main_mng = plt.get_current_fig_manager()
    try:
        # 尝试不同的窗口最大化方法，适配不同的后端
        if hasattr(main_mng, 'window'):
            # Qt后端
            if hasattr(main_mng.window, 'showMaximized'):
                main_mng.window.showMaximized()
            # Tk后端
            elif hasattr(main_mng.window, 'state'):
                main_mng.window.state('zoomed')
            # WX后端
            elif hasattr(main_mng.window, 'Maximize'):
                main_mng.window.Maximize(True)
    except Exception as e:
        print(f"无法最大化主窗口: {e}")
        # 如果最大化失败，继续显示窗口
        pass
    
    # 显示两个窗口
    plt.show()

# Script entry point: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()