import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.font_manager as fm
import os

# Configure matplotlib fonts with CJK support and correct minus-sign rendering
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts

# Configure GPU memory growth (if a GPU is available)
try:
    # Enable memory growth so TensorFlow does not grab all GPU memory up front
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print(f"检测到 {len(gpus)} 个GPU设备，已设置内存增长模式")
    else:
        print("未检测到GPU设备，将使用CPU运行")
except Exception as e:
    # Best-effort: fall back to CPU if GPU configuration fails
    print(f"GPU配置失败: {e}")
    print("将使用CPU运行")

# Reduce TensorFlow C++ log verbosity (2 = suppress INFO and WARNING).
# NOTE(review): this is set AFTER `import tensorflow` above, so it is too late
# to silence messages emitted during import — consider moving it before the import.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def create_sample_data():
    """Generate a toy non-linear binary classification dataset.

    Returns:
        (features, labels): ``features`` is a (1000, 2) standard-normal array;
        ``labels`` is 1 when a point lies outside the unit circle, else 0.
    """
    np.random.seed(42)  # fixed seed so every run produces the same data
    features = np.random.randn(1000, 2)
    labels = ((features ** 2).sum(axis=1) > 1).astype(int)
    return features, labels


def create_model():
    """Build a small fully-connected binary classifier (2 -> 64 -> 32 -> 1)."""
    model = keras.Sequential()
    model.add(layers.Dense(64, activation='relu', input_shape=(2,)))
    model.add(layers.Dense(32, activation='relu'))
    # Sigmoid output pairs with the binary cross-entropy loss used by callers
    model.add(layers.Dense(1, activation='sigmoid'))
    return model


def train_with_optimizer(optimizer_name, x_train, y_train, x_val, y_val, epochs=100):
    """Train a fresh model with the named optimizer and return its history.

    Args:
        optimizer_name: one of 'SGD', 'SGD_momentum', 'Adam', 'RMSprop',
            'Adagrad', 'Adadelta'; any unknown name falls back to plain SGD.
        x_train, y_train: training features and labels.
        x_val, y_val: validation features and labels.
        epochs: number of training epochs.

    Returns:
        The keras ``History`` object, or ``None`` if training raised.
    """
    try:
        model = create_model()

        # Dispatch table of optimizer factories; unknown names use plain SGD.
        factories = {
            'SGD': lambda: keras.optimizers.SGD(learning_rate=0.01),
            'SGD_momentum': lambda: keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
            'Adam': lambda: keras.optimizers.Adam(learning_rate=0.001),
            'RMSprop': lambda: keras.optimizers.RMSprop(learning_rate=0.001),
            'Adagrad': lambda: keras.optimizers.Adagrad(learning_rate=0.01),
            'Adadelta': lambda: keras.optimizers.Adadelta(learning_rate=1.0),
        }
        make_optimizer = factories.get(optimizer_name, factories['SGD'])

        model.compile(
            optimizer=make_optimizer(),
            loss='binary_crossentropy',
            metrics=['accuracy'],
        )

        # Train silently and hand the per-epoch history back to the caller
        return model.fit(
            x_train, y_train,
            validation_data=(x_val, y_val),
            epochs=epochs,
            batch_size=32,
            verbose=0,
        )
    except Exception as e:
        # Best-effort: report the failure and let callers skip this optimizer
        print(f"训练 {optimizer_name} 优化器时发生错误: {e}")
        return None


def visualize_optimizers():
    """Compare training curves of several optimizers on the toy dataset."""
    try:
        features, labels = create_sample_data()

        # 80/20 train/validation split
        cut = int(0.8 * len(features))
        x_train, x_val = features[:cut], features[cut:]
        y_train, y_val = labels[:cut], labels[cut:]

        # Optimizer keys and their display names for the plot legends
        optimizers = ['SGD', 'SGD_momentum', 'Adam', 'RMSprop', 'Adagrad', 'Adadelta']
        optimizer_names = ['SGD', 'SGD+动量', 'Adam', 'RMSprop', 'Adagrad', 'Adadelta']
        display_name = dict(zip(optimizers, optimizer_names))

        histories = {}
        for opt in optimizers:
            print(f"正在训练使用 {opt} 优化器的模型...")
            histories[opt] = train_with_optimizer(opt, x_train, y_train, x_val, y_val)

        # Keep only optimizers whose training succeeded (history is not None)
        successful = [opt for opt in optimizers if histories[opt] is not None]
        if not successful:
            print("所有优化器训练都失败了，请检查环境配置")
            return

        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle('不同优化器的性能比较', fontsize=16)

        # (history key, subplot title, y-axis label) per panel, in reading order
        panels = [
            ('loss', '训练损失', '损失'),
            ('val_loss', '验证损失', '损失'),
            ('accuracy', '训练准确率', '准确率'),
            ('val_accuracy', '验证准确率', '准确率'),
        ]
        for ax, (metric, title, ylabel) in zip(axes.flat, panels):
            for opt in successful:
                ax.plot(histories[opt].history[metric], label=display_name[opt])
            ax.set_title(title)
            ax.set_xlabel('训练轮数')
            ax.set_ylabel(ylabel)
            ax.legend()
            ax.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()
    except Exception as e:
        print(f"可视化优化器性能时发生错误: {e}")


def visualize_optimizer_paths():
    """Trace SGD, momentum-SGD and a simplified Adam on the loss x^2 + 2y^2."""
    try:
        def loss_function(x, y):
            # Elliptical bowl: twice as steep along y as along x
            return x ** 2 + 2 * y ** 2

        def gradient(px, py):
            # Analytic gradient of the loss above
            return 2 * px, 4 * py

        # Contour grid over [-2, 2] x [-2, 2]
        grid = np.linspace(-2, 2, 100)
        X, Y = np.meshgrid(grid, grid)
        Z = loss_function(X, Y)

        # Shared hyperparameters for all three trajectories
        start_x, start_y = 1.5, 1.5
        learning_rate = 0.1
        momentum = 0.9
        epochs = 20

        def sgd_path():
            # Plain gradient descent
            px, py = start_x, start_y
            xs, ys = [px], [py]
            for _ in range(epochs):
                gx, gy = gradient(px, py)
                px -= learning_rate * gx
                py -= learning_rate * gy
                xs.append(px)
                ys.append(py)
            return xs, ys

        def momentum_path():
            # Gradient descent with classical momentum
            px, py = start_x, start_y
            vx = vy = 0
            xs, ys = [px], [py]
            for _ in range(epochs):
                gx, gy = gradient(px, py)
                vx = momentum * vx - learning_rate * gx
                vy = momentum * vy - learning_rate * gy
                px += vx
                py += vy
                xs.append(px)
                ys.append(py)
            return xs, ys

        def adam_path():
            # Simplified scalar Adam with standard bias correction
            px, py = start_x, start_y
            mx = my = vx = vy = 0  # first and second moment estimates
            beta1, beta2, epsilon = 0.9, 0.999, 1e-8
            xs, ys = [px], [py]
            for t in range(1, epochs + 1):
                gx, gy = gradient(px, py)
                mx = beta1 * mx + (1 - beta1) * gx
                my = beta1 * my + (1 - beta1) * gy
                vx = beta2 * vx + (1 - beta2) * gx ** 2
                vy = beta2 * vy + (1 - beta2) * gy ** 2
                # Bias-corrected moment estimates
                mx_hat = mx / (1 - beta1 ** t)
                my_hat = my / (1 - beta1 ** t)
                vx_hat = vx / (1 - beta2 ** t)
                vy_hat = vy / (1 - beta2 ** t)
                px -= learning_rate * mx_hat / (np.sqrt(vx_hat) + epsilon)
                py -= learning_rate * my_hat / (np.sqrt(vy_hat) + epsilon)
                xs.append(px)
                ys.append(py)
            return xs, ys

        paths = {
            'SGD': sgd_path(),
            'SGD_momentum': momentum_path(),
            'Adam': adam_path(),
        }
        # Legend labels matching the path keys, in plotting order
        optimizer_names = ['SGD', 'SGD+动量', 'Adam']
        keys = ['SGD', 'SGD_momentum', 'Adam']
        colors = ['blue', 'red', 'green']

        fig, ax = plt.subplots(1, 1, figsize=(10, 8))

        # Loss landscape as labeled contour lines
        contour = ax.contour(X, Y, Z, levels=20, alpha=0.6)
        ax.clabel(contour, inline=True, fontsize=8)

        for key, label, color in zip(keys, optimizer_names, colors):
            xs, ys = paths[key]
            ax.plot(xs, ys, 'o-', color=color, label=label, markersize=4)
            # Circle marks the start, square marks the end of each trajectory
            ax.plot(xs[0], ys[0], 'o', color=color, markersize=8, markeredgecolor='black')
            ax.plot(xs[-1], ys[-1], 's', color=color, markersize=8, markeredgecolor='black')

        ax.set_xlabel('参数 x')
        ax.set_ylabel('参数 y')
        ax.set_title('优化器在损失函数空间中的路径')
        ax.legend()
        ax.grid(True, alpha=0.3)

        # Marker key for start/end points
        ax.text(0.05, 0.95, '○ 起始点\n■ 最终点', transform=ax.transAxes,
                verticalalignment='top', bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))

        plt.tight_layout()
        plt.show()
    except Exception as e:
        print(f"可视化优化器路径时发生错误: {e}")


def visualize_learning_rates():
    """Show how different SGD learning rates affect training on the toy data."""
    try:
        features, labels = create_sample_data()

        # 80/20 train/validation split
        cut = int(0.8 * len(features))
        x_train, x_val = features[:cut], features[cut:]
        y_train, y_val = labels[:cut], labels[cut:]

        learning_rates = [0.001, 0.01, 0.1, 1.0]

        # Train one fresh model per learning rate; None marks a failed run
        histories = {}
        for lr in learning_rates:
            try:
                model = create_model()
                model.compile(
                    optimizer=keras.optimizers.SGD(learning_rate=lr),
                    loss='binary_crossentropy',
                    metrics=['accuracy'],
                )
                histories[lr] = model.fit(
                    x_train, y_train,
                    validation_data=(x_val, y_val),
                    epochs=50,
                    batch_size=32,
                    verbose=0,
                )
            except Exception as e:
                print(f"学习率 {lr} 训练失败: {e}")
                histories[lr] = None

        successful = [lr for lr in learning_rates if histories[lr] is not None]
        if not successful:
            print("所有学习率训练都失败了，请检查环境配置")
            return

        fig, axes = plt.subplots(1, 2, figsize=(15, 6))
        fig.suptitle('不同学习率对SGD优化器的影响', fontsize=16)

        # Left panel: training loss on a log scale
        for lr in successful:
            axes[0].plot(histories[lr].history['loss'], label=f'学习率={lr}')
        axes[0].set_title('训练损失')
        axes[0].set_xlabel('训练轮数')
        axes[0].set_ylabel('损失')
        axes[0].set_yscale('log')
        axes[0].legend()
        axes[0].grid(True, alpha=0.3)

        # Right panel: validation accuracy
        for lr in successful:
            axes[1].plot(histories[lr].history['val_accuracy'], label=f'学习率={lr}')
        axes[1].set_title('验证准确率')
        axes[1].set_xlabel('训练轮数')
        axes[1].set_ylabel('准确率')
        axes[1].legend()
        axes[1].grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()
    except Exception as e:
        print(f"可视化学习率影响时发生错误: {e}")


if __name__ == '__main__':
    # Run the three demos in sequence, announcing each before it starts
    demos = (
        ("正在演示不同优化器的性能...", visualize_optimizers),
        ("正在演示优化器在损失函数空间中的路径...", visualize_optimizer_paths),
        ("正在演示不同学习率的影响...", visualize_learning_rates),
    )
    for banner, demo in demos:
        print(banner)
        demo()

    print("所有演示完成！")