import numpy as np
import os
import json
import logging
from datetime import datetime
import matplotlib.pyplot as plt
from base_nn import Dense, Flatten, Dropout, ReLU, Sigmoid, Tanh, MSE, CrossEntropy
from cnn import Conv2D, MaxPool2D
from rnn import RNN, LSTM, GRU
from model import NeuralNetwork, CNNVisualizer, RNNVisualizer

# Configure matplotlib to render Chinese text (CJK-capable fonts first)
plt.rcParams["font.family"] = ["Microsoft YaHei", "SimHei", "sans-serif"]
plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly with CJK fonts

# Configure logging - explicit UTF-8 file encoding avoids garbled Chinese log text
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("training.log", encoding='utf-8'),
        logging.StreamHandler()
    ]
)

# Model hyperparameters - includes learning-rate decay and early-stopping settings.
# Keys consumed by train_model: epochs, batch_size, learning_rate, lr_decay,
# decay_step, patience; the remaining keys parameterize each example's data.
CONFIG = {
    "cnn": {
        "epochs": 10,
        "batch_size": 32,
        "learning_rate": 0.001,
        "lr_decay": 0.95,  # multiplicative LR decay factor
        "decay_step": 3,  # apply decay every N epochs
        "patience": 3,  # early-stopping patience (epochs without val improvement)
        "n_train_samples": 2000,
        "n_val_samples": 400
    },
    "rnn": {
        "epochs": 15,
        "batch_size": 32,
        "learning_rate": 0.001,
        "lr_decay": 0.9,
        "decay_step": 5,
        "patience": 4,
        "n_train_samples": 1000,
        "n_val_samples": 200,
        "seq_length": 10,
        "input_dim": 2,
        "output_dim": 2
    },
    "lstm": {
        "epochs": 15,
        "batch_size": 32,
        "learning_rate": 0.001,
        "lr_decay": 0.9,
        "decay_step": 5,
        "patience": 4,
        "n_train_samples": 1000,
        "n_val_samples": 200,
        "seq_length": 20,
        "vocab_size": 100,
        "num_classes": 3
    },
    "gru": {
        "epochs": 15,
        "batch_size": 32,
        "learning_rate": 0.001,
        "lr_decay": 0.9,
        "decay_step": 5,
        "patience": 4,
        "n_train_samples": 1000,
        "n_val_samples": 200,
        "seq_length": 20,
        "input_dim": 1,
        "output_dim": 1
    }
}


# Helper to guarantee a results directory exists
def ensure_dir(directory):
    """Ensure *directory* exists, creating it (and any parents) if missing.

    Logs only when the directory was actually created. Uses
    ``exist_ok=True`` so a concurrent creation between the existence
    check and ``makedirs`` cannot raise ``FileExistsError`` (the original
    exists-then-makedirs sequence was race-prone).
    """
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)
        logging.info(f"创建目录: {directory}")


# Timestamped run directory so results from different runs never collide
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
base_results_dir = os.path.join("results", timestamp)

# One results directory per model type
model_dirs = {
    name: os.path.join(base_results_dir, name)
    for name in ("cnn", "rnn", "lstm", "gru")
}

# Create each model directory plus its plots/data/models subdirectories
for dir_path in model_dirs.values():
    ensure_dir(dir_path)
    for subdir in ("plots", "data", "models"):
        ensure_dir(os.path.join(dir_path, subdir))


def save_figure(fig, save_path, dpi=300, bbox_inches='tight'):
    """Write *fig* to *save_path*, logging the outcome.

    The figure is closed unconditionally afterwards so failed saves do
    not leak figures. Returns True on success, False on failure.
    """
    saved = False
    try:
        fig.savefig(save_path, dpi=dpi, bbox_inches=bbox_inches)
        logging.info(f"成功保存图片至: {save_path}")
        saved = True
    except Exception as e:
        logging.error(f"保存图片失败: {str(e)}")
    finally:
        # Always release the figure's memory
        plt.close(fig)
    return saved


def train_model(model, model_name, x_train, y_train, x_val, y_val, config, data_dir, models_dir):
    """Generic mini-batch training loop with LR decay and early stopping.

    Args:
        model: network object exposing ``forward(x, training)``,
            ``backward(grad, lr)``, a ``loss`` object with
            ``compute``/``gradient``, ``_compute_accuracy``, the four
            ``*_history`` lists, ``training_history`` and ``save_model(path)``.
        model_name: label used in log messages and checkpoint filenames.
        x_train, y_train: training arrays; the first axis indexes samples.
        x_val, y_val: optional validation arrays; pass None for either to
            skip validation, early stopping and best-model checkpointing.
        config: dict with "epochs", "batch_size", "learning_rate",
            "lr_decay", "decay_step" and "patience".
        data_dir: directory receiving training_history.json.
        models_dir: directory receiving model checkpoints.

    Returns:
        The trained model, or None if training raised. Every caller tests
        ``if model is None`` to bail out of its example gracefully.
    """
    try:
        logging.info(f"开始训练{model_name}模型...")

        best_val_loss = float('inf')
        patience_counter = 0
        current_lr = config["learning_rate"]

        for epoch in range(config["epochs"]):
            # Step-wise learning-rate decay
            if epoch > 0 and epoch % config["decay_step"] == 0:
                current_lr *= config["lr_decay"]
                logging.info(f"学习率衰减至: {current_lr:.6f}")

            # Reshuffle the training set each epoch
            indices = np.random.permutation(x_train.shape[0])
            x_train_shuffled = x_train[indices]
            y_train_shuffled = y_train[indices]

            epoch_loss = 0
            epoch_acc = 0
            # Trailing samples that don't fill a batch are dropped; max(1, ...)
            # guards a fewer-samples-than-batch_size config against a
            # ZeroDivisionError (one partial batch is processed instead).
            num_batches = max(1, x_train.shape[0] // config["batch_size"])

            # Mini-batch loop
            for i in range(num_batches):
                # Slice out the current mini-batch
                start = i * config["batch_size"]
                end = start + config["batch_size"]
                x_batch = x_train_shuffled[start:end]
                y_batch = y_train_shuffled[start:end]

                # Forward pass (training=True enables layers like Dropout)
                y_pred = model.forward(x_batch, training=True)

                # Accumulate loss and accuracy for epoch averages
                loss = model.loss.compute(y_batch, y_pred)
                epoch_loss += loss
                acc = model._compute_accuracy(y_batch, y_pred)
                epoch_acc += acc

                # Backward pass / parameter update
                output_gradient = model.loss.gradient(y_batch, y_pred)
                model.backward(output_gradient, current_lr)

            # Epoch averages over the processed batches
            avg_loss = epoch_loss / num_batches
            avg_acc = epoch_acc / num_batches

            model.train_loss_history.append(avg_loss)
            model.train_acc_history.append(avg_acc)

            # Validation pass (inference mode)
            val_loss = None
            val_acc = None
            if x_val is not None and y_val is not None:
                y_val_pred = model.forward(x_val, training=False)
                val_loss = model.loss.compute(y_val, y_val_pred)
                val_acc = model._compute_accuracy(y_val, y_val_pred)

                model.val_loss_history.append(val_loss)
                model.val_acc_history.append(val_acc)

            # Progress report
            print(f"Epoch {epoch + 1}/{config['epochs']}")
            print(f"Train Loss: {avg_loss:.4f}, Train Acc: {avg_acc:.4f}, LR: {current_lr:.6f}")
            if val_loss is not None and val_acc is not None:
                print(f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}")
            print("-" * 50)

            # Early stopping and best-model checkpointing
            if val_loss is not None:
                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    patience_counter = 0
                    # Checkpoint the best model so far
                    model_path = os.path.join(models_dir, f"{model_name}_best.npz")
                    model.save_model(model_path)
                    logging.info(f"保存最佳模型至: {model_path}")
                else:
                    patience_counter += 1
                    if patience_counter >= config["patience"]:
                        logging.info(f"早停机制触发，在第{epoch + 1}轮停止训练")
                        break

        # Save the final model state (may differ from the best checkpoint)
        final_model_path = os.path.join(models_dir, f"{model_name}_final.npz")
        model.save_model(final_model_path)
        logging.info(f"保存最终模型至: {final_model_path}")

        # Persist the training history for later plotting/analysis
        history_path = os.path.join(data_dir, "training_history.json")
        with open(history_path, "w", encoding="utf-8") as f:
            json.dump(model.training_history, f, indent=4)
        logging.info(f"训练历史已保存至: {history_path}")

        return model
    except Exception as e:
        logging.error(f"{model_name}模型训练失败: {str(e)}", exc_info=True)
        # Fix: return None instead of re-raising. Every caller checks
        # "if model is None" to skip the rest of its example; re-raising
        # made that check dead code and aborted the whole run on a single
        # model's failure.
        return None


def plot_training_history(history, save_path, title):
    """Plot the loss and accuracy curves from *history* and save the figure.

    history: dict holding 'train_loss', 'val_loss', 'train_acc' and
    'val_acc' lists (one entry per epoch).
    """
    fig, axes = plt.subplots(1, 2, figsize=(14, 6))

    # (axis, train key, train label, val key, val label, panel title, y label)
    panels = (
        (axes[0], 'train_loss', '训练损失', 'val_loss', '验证损失',
         f'{title} - 损失曲线', 'Loss'),
        (axes[1], 'train_acc', '训练准确率', 'val_acc', '验证准确率',
         f'{title} - 准确率曲线', 'Accuracy'),
    )
    for ax, train_key, train_label, val_key, val_label, panel_title, y_label in panels:
        ax.plot(history[train_key], label=train_label)
        ax.plot(history[val_key], label=val_label)
        ax.set_title(panel_title)
        ax.set_xlabel('Epoch')
        ax.set_ylabel(y_label)
        ax.legend()
        ax.grid(True)

    plt.tight_layout()
    save_figure(fig, save_path)

def example_cnn_mnist():
    """CNN example: classify simulated MNIST-style digit images.

    Generates random data with MNIST's pixel statistics and random one-hot
    labels, trains a small two-conv-block CNN, then saves filter and
    feature-map visualizations plus the training history. Returns the
    trained model, or None if any stage fails.
    """
    model_name = "CNN"
    task_name = "MNIST手写数字分类"
    logging.info(f"\n=== {model_name} {task_name} Example ===")

    config = CONFIG["cnn"]
    model_dir = model_dirs["cnn"]
    plots_dir = os.path.join(model_dir, "plots")
    data_dir = os.path.join(model_dir, "data")
    models_dir = os.path.join(model_dir, "models")

    # Generate simulated MNIST data (batch_size, channels, height, width)
    try:
        # Sample pixels from MNIST's empirical mean/std so the fake data
        # roughly matches the real distribution
        x_train = np.random.normal(0.1307, 0.3081,
                                   (config["n_train_samples"], 1, 28, 28))  # MNIST mean and std
        x_train = np.clip(x_train, 0, 1)  # keep pixel values within [0, 1]

        # Uniformly random one-hot labels over the 10 digit classes
        y_train = np.zeros((config["n_train_samples"], 10))
        y_train[np.arange(config["n_train_samples"]),
        np.random.randint(0, 10, config["n_train_samples"])] = 1

        x_val = np.random.normal(0.1307, 0.3081,
                                 (config["n_val_samples"], 1, 28, 28))
        x_val = np.clip(x_val, 0, 1)

        y_val = np.zeros((config["n_val_samples"], 10))
        y_val[np.arange(config["n_val_samples"]),
        np.random.randint(0, 10, config["n_val_samples"])] = 1
        logging.info("MNIST模拟数据生成完成")
    except Exception as e:
        logging.error(f"MNIST数据生成失败: {str(e)}")
        return None

    # Build the CNN: two conv/pool blocks followed by two dense layers
    try:
        model = NeuralNetwork()
        model.add(Conv2D(in_channels=1, out_channels=8, kernel_size=3, padding=1))
        model.add(ReLU())
        model.add(MaxPool2D(pool_size=2, stride=2))
        model.add(Dropout(rate=0.25))  # Dropout to curb overfitting

        model.add(Conv2D(in_channels=8, out_channels=16, kernel_size=3, padding=1))
        model.add(ReLU())
        model.add(MaxPool2D(pool_size=2, stride=2))
        model.add(Dropout(rate=0.25))  # Dropout to curb overfitting

        model.add(Flatten())
        # After two 2x2 pools a 28x28 input is 7x7 with 16 channels
        model.add(Dense(input_size=16 * 7 * 7, output_size=64))
        model.add(ReLU())
        model.add(Dropout(rate=0.5))  # Dropout to curb overfitting

        model.add(Dense(input_size=64, output_size=10))
        # NOTE(review): Sigmoid + CrossEntropy on a 10-class task — softmax
        # is the conventional pairing; confirm CrossEntropy here expects
        # sigmoid outputs.
        model.add(Sigmoid())

        # Attach the loss function
        model.set_loss(CrossEntropy())
        logging.info("CNN模型创建完成")
    except Exception as e:
        logging.error(f"CNN模型创建失败: {str(e)}")
        return None

    # Train the model, checkpointing into models_dir
    model = train_model(
        model, model_name,
        x_train, y_train, x_val, y_val,
        config, data_dir, models_dir
    )
    if model is None:
        return None

    # Visualize and save the first conv layer's kernels
    try:
        conv1_params = model.layers[0].get_params()
        if conv1_params and 'kernel' in conv1_params and conv1_params['kernel'] is not None:
            fig = plt.figure(figsize=(10, 6))
            CNNVisualizer.plot_filters(conv1_params['kernel'], title="First Convolutional Layer Filters")
            save_figure(fig, os.path.join(plots_dir, "conv1_filters.png"))
    except Exception as e:
        logging.warning(f"卷积核可视化失败: {str(e)}")

    # Visualize and save intermediate feature maps
    try:
        # Indices 1 and 3 — presumably the activations after each conv
        # block; verify against NeuralNetwork.get_layer_outputs
        feature_maps = model.get_layer_outputs(x_val[:1], layer_indices=[1, 3])

        if len(feature_maps) > 0 and feature_maps[0] is not None:
            fig = plt.figure(figsize=(12, 8))
            CNNVisualizer.plot_feature_maps(feature_maps[0], title="Feature Maps after First Conv Layer")
            save_figure(fig, os.path.join(plots_dir, "feature_maps_after_first_conv.png"))

        if len(feature_maps) > 1 and feature_maps[1] is not None:
            fig = plt.figure(figsize=(12, 8))
            CNNVisualizer.plot_feature_maps(feature_maps[1], title="Feature Maps after Second Conv Layer")
            save_figure(fig, os.path.join(plots_dir, "feature_maps_after_second_conv.png"))
    except Exception as e:
        logging.warning(f"特征图可视化失败: {str(e)}")

    # Plot and save the training history
    try:
        plot_training_history(
            model.training_history,
            os.path.join(plots_dir, "training_history.png"),
            "CNN训练历史"
        )
    except Exception as e:
        logging.warning(f"训练历史可视化失败: {str(e)}")

    logging.info(f"{model_name}结果已保存至: {model_dir}")
    return model


def example_rnn_sequence():
    """RNN example: per-timestep regression on synthetic sequences.

    The target at each timestep is half the running sum of the inputs
    plus a linear trend. Trains a single RNN layer, saves prediction
    plots, the training history, and example arrays. Returns the trained
    model, or None if any stage fails.
    """
    model_name = "RNN"
    task_name = "序列预测"
    logging.info(f"\n=== {model_name} {task_name} Example ===")

    config = CONFIG["rnn"]
    model_dir = model_dirs["rnn"]
    plots_dir = os.path.join(model_dir, "plots")
    data_dir = os.path.join(model_dir, "data")
    models_dir = os.path.join(model_dir, "models")

    # Generate simulated sequence data
    def generate_sequence_data(n_samples, seq_length, input_dim, output_dim):
        """Return (x, y) where y[i, t] = 0.5 * sum(x[i, :t+1]) + trend[t]."""
        # Random inputs, scaled down for stability
        x = np.random.randn(n_samples, seq_length, input_dim) * 0.5
        # Target: scaled cumulative sum of the inputs plus a trend term
        y = np.zeros((n_samples, seq_length, output_dim))
        for i in range(n_samples):
            # Linear 0 -> 0.5 trend over the sequence, shape (seq_length, 1);
            # note it is the same for every sample despite being rebuilt here
            trend = np.linspace(0, 0.5, seq_length)[:, np.newaxis]
            for t in range(seq_length):
                y[i, t] = np.sum(x[i, :t + 1], axis=0) * 0.5 + trend[t]
        return x, y

    try:
        x_train, y_train = generate_sequence_data(
            config["n_train_samples"],
            config["seq_length"],
            config["input_dim"],
            config["output_dim"]
        )
        x_val, y_val = generate_sequence_data(
            config["n_val_samples"],
            config["seq_length"],
            config["input_dim"],
            config["output_dim"]
        )
        logging.info("RNN序列数据生成完成")
    except Exception as e:
        logging.error(f"RNN序列数据生成失败: {str(e)}")
        return None

    # Build the RNN model
    try:
        model = NeuralNetwork()
        model.add(RNN(
            input_size=config["input_dim"],
            hidden_size=32,
            output_size=config["output_dim"],
            return_sequences=True
        ))
        model.add(Tanh())
        model.add(Dropout(rate=0.2))  # Dropout to curb overfitting

        # Attach the loss function (regression -> MSE)
        model.set_loss(MSE())
        logging.info("RNN模型创建完成")
    except Exception as e:
        logging.error(f"RNN模型创建失败: {str(e)}")
        return None

    # Train the model
    model = train_model(
        model, model_name,
        x_train, y_train, x_val, y_val,
        config, data_dir, models_dir
    )
    if model is None:
        return None

    # Visualize and save predictions on a few validation sequences
    try:
        y_pred = model.predict(x_val[:5])
        fig = plt.figure(figsize=(12, 8))
        RNNVisualizer.plot_sequence_prediction(
            y_val[:5], y_pred[:5],
            title="RNN序列预测"
        )
        save_figure(fig, os.path.join(plots_dir, "sequence_prediction.png"))
    except Exception as e:
        logging.warning(f"RNN预测结果可视化失败: {str(e)}")

    # Plot and save the training history
    try:
        plot_training_history(
            model.training_history,
            os.path.join(plots_dir, "training_history.png"),
            "RNN训练历史"
        )
    except Exception as e:
        logging.warning(f"RNN训练历史可视化失败: {str(e)}")

    # Save example prediction arrays for offline inspection
    try:
        np.save(os.path.join(data_dir, "y_val_examples.npy"), y_val[:5])
        # NOTE: y_pred is only bound if model.predict above succeeded; a
        # predict failure surfaces here as a NameError, caught below.
        np.save(os.path.join(data_dir, "y_pred_examples.npy"), y_pred[:5])
        logging.info("RNN预测示例数据保存完成")
    except Exception as e:
        logging.warning(f"RNN预测示例数据保存失败: {str(e)}")

    logging.info(f"{model_name}结果已保存至: {model_dir}")
    return model


def example_lstm_text():
    """LSTM example: 3-class classification of synthetic token sequences.

    Generates random integer sequences whose class label is derived from
    the fraction of high-id tokens, one-hot encodes them, trains a single
    LSTM layer, and saves the training history plus example predictions.
    Returns the trained model, or None if any stage fails.
    """
    model_name = "LSTM"
    task_name = "文本分类"
    logging.info(f"\n=== {model_name} {task_name} Example ===")

    config = CONFIG["lstm"]
    model_dir = model_dirs["lstm"]
    plots_dir = os.path.join(model_dir, "plots")
    data_dir = os.path.join(model_dir, "data")
    models_dir = os.path.join(model_dir, "models")

    # Generate simulated text data
    def generate_text_data(n_samples, seq_length, vocab_size, num_classes):
        """Random token sequences with labels tied to a sequence statistic."""
        # Labels depend on the tokens so classes are actually learnable
        x = np.random.randint(0, vocab_size, (n_samples, seq_length))
        y = np.zeros((n_samples, num_classes))

        for i in range(n_samples):
            # Fraction of tokens whose id exceeds vocab_size * 0.7
            # (token *id*, not frequency, despite the variable name)
            high_freq_ratio = np.mean(x[i] > vocab_size * 0.7)

            # Map the fraction to one of three classes
            if high_freq_ratio < 0.3:
                class_idx = 0
            elif high_freq_ratio < 0.6:
                class_idx = 1
            else:
                class_idx = 2 % num_classes  # % guards configs with num_classes < 3

            y[i, class_idx] = 1
        return x, y

    # Generate the datasets
    try:
        x_train, y_train = generate_text_data(
            config["n_train_samples"],
            config["seq_length"],
            config["vocab_size"],
            config["num_classes"]
        )
        x_val, y_val = generate_text_data(
            config["n_val_samples"],
            config["seq_length"],
            config["vocab_size"],
            config["num_classes"]
        )
        logging.info("文本数据生成完成")
    except Exception as e:
        logging.error(f"文本数据生成失败: {str(e)}")
        return None

    # Convert integer sequences to one-hot vectors (vectorized for speed)
    def one_hot_encode(x, vocab_size):
        """Return a (n_samples, seq_length, vocab_size) float32 one-hot tensor."""
        n_samples, seq_length = x.shape
        x_one_hot = np.zeros((n_samples, seq_length, vocab_size), dtype=np.float32)
        # Fancy indexing sets one position per (sample, timestep) pair
        x_one_hot[np.arange(n_samples)[:, None], np.arange(seq_length), x] = 1
        return x_one_hot

    try:
        x_train = one_hot_encode(x_train, config["vocab_size"])
        x_val = one_hot_encode(x_val, config["vocab_size"])
        logging.info("文本数据独热编码完成")
    except Exception as e:
        logging.error(f"文本数据独热编码失败: {str(e)}")
        return None

    # Build the LSTM model (last hidden state only -> classification head)
    try:
        model = NeuralNetwork()
        model.add(LSTM(
            input_size=config["vocab_size"],
            hidden_size=64,
            output_size=config["num_classes"],
            return_sequences=False
        ))
        model.add(Dropout(rate=0.5))  # Dropout to curb overfitting
        # NOTE(review): Sigmoid + CrossEntropy for multi-class — softmax is
        # the conventional pairing; confirm the loss's expectations.
        model.add(Sigmoid())

        # Attach the loss function
        model.set_loss(CrossEntropy())
        logging.info("LSTM模型创建完成")
    except Exception as e:
        logging.error(f"LSTM模型创建失败: {str(e)}")
        return None

    # Train the model
    model = train_model(
        model, model_name,
        x_train, y_train, x_val, y_val,
        config, data_dir, models_dir
    )
    if model is None:
        return None

    # Plot and save the training history
    try:
        plot_training_history(
            model.training_history,
            os.path.join(plots_dir, "training_history.png"),
            "LSTM训练历史"
        )
    except Exception as e:
        logging.warning(f"LSTM训练历史可视化失败: {str(e)}")

    # Save a few prediction examples for offline inspection
    try:
        y_pred = model.predict(x_val[:10])
        np.save(os.path.join(data_dir, "y_val_examples.npy"), y_val[:10])
        np.save(os.path.join(data_dir, "y_pred_examples.npy"), y_pred[:10])
        logging.info("LSTM预测示例数据保存完成")
    except Exception as e:
        logging.warning(f"LSTM预测示例数据保存失败: {str(e)}")

    logging.info(f"{model_name}结果已保存至: {model_dir}")
    return model


def example_gru_time_series():
    """GRU example: one-step-ahead prediction of noisy sine series.

    Each sample is a sine wave with random phase/frequency/amplitude plus
    a linear trend and noise; the target is the next-step value. Trains a
    single GRU layer and saves prediction plots, the training history, and
    example arrays. Returns the trained model, or None if any stage fails.
    """
    model_name = "GRU"
    task_name = "时间序列预测"
    logging.info(f"\n=== {model_name} {task_name} Example ===")

    config = CONFIG["gru"]
    model_dir = model_dirs["gru"]
    plots_dir = os.path.join(model_dir, "plots")
    data_dir = os.path.join(model_dir, "data")
    models_dir = os.path.join(model_dir, "models")

    # Generate simulated time-series data
    def generate_time_series(n_samples, seq_length, input_dim, output_dim):
        """Return (x, y): noisy sine inputs and their next-step targets."""
        x = np.zeros((n_samples, seq_length, input_dim))
        y = np.zeros((n_samples, seq_length, output_dim))

        # Mix sine waves with noise and a linear trend for richer series
        for i in range(n_samples):
            phase = np.random.rand() * 2 * np.pi
            freq = np.random.uniform(0.1, 0.5)
            amplitude = np.random.uniform(0.5, 1.5)

            for t in range(seq_length):
                # Per-step noise; the target reuses half the same noise so it
                # is correlated with (but not identical to) the input noise
                noise = np.random.normal(0, 0.05)
                trend = 0.01 * t
                x[i, t] = amplitude * np.sin(phase + freq * t) + trend + noise
                y[i, t] = amplitude * np.sin(phase + freq * (t + 1)) + trend + 0.01 + noise * 0.5
        return x, y

    try:
        x_train, y_train = generate_time_series(
            config["n_train_samples"],
            config["seq_length"],
            config["input_dim"],
            config["output_dim"]
        )
        x_val, y_val = generate_time_series(
            config["n_val_samples"],
            config["seq_length"],
            config["input_dim"],
            config["output_dim"]
        )
        logging.info("GRU时间序列数据生成完成")
    except Exception as e:
        logging.error(f"GRU时间序列数据生成失败: {str(e)}")
        return None

    # Build the GRU model
    try:
        model = NeuralNetwork()
        model.add(GRU(
            input_size=config["input_dim"],
            hidden_size=32,
            output_size=config["output_dim"],
            return_sequences=True
        ))
        model.add(Tanh())
        model.add(Dropout(rate=0.2))  # Dropout to curb overfitting

        # Attach the loss function (regression -> MSE)
        model.set_loss(MSE())
        logging.info("GRU模型创建完成")
    except Exception as e:
        logging.error(f"GRU模型创建失败: {str(e)}")
        return None

    # Train the model
    model = train_model(
        model, model_name,
        x_train, y_train, x_val, y_val,
        config, data_dir, models_dir
    )
    if model is None:
        return None

    # Visualize and save predictions on a few validation sequences
    try:
        y_pred = model.predict(x_val[:5])
        fig = plt.figure(figsize=(12, 8))
        RNNVisualizer.plot_sequence_prediction(
            y_val[:5], y_pred[:5],
            title="GRU时间序列预测"
        )
        save_figure(fig, os.path.join(plots_dir, "time_series_prediction.png"))
    except Exception as e:
        logging.warning(f"GRU预测结果可视化失败: {str(e)}")

    # Plot and save the training history
    try:
        plot_training_history(
            model.training_history,
            os.path.join(plots_dir, "training_history.png"),
            "GRU训练历史"
        )
    except Exception as e:
        logging.warning(f"GRU训练历史可视化失败: {str(e)}")

    # Save example prediction arrays for offline inspection
    try:
        np.save(os.path.join(data_dir, "y_val_examples.npy"), y_val[:5])
        # NOTE: y_pred is only bound if model.predict above succeeded; a
        # predict failure surfaces here as a NameError, caught below.
        np.save(os.path.join(data_dir, "y_pred_examples.npy"), y_pred[:5])
        np.save(os.path.join(data_dir, "x_val_examples.npy"), x_val[:5])
        logging.info("GRU预测示例数据保存完成")
    except Exception as e:
        logging.warning(f"GRU预测示例数据保存失败: {str(e)}")

    logging.info(f"{model_name}结果已保存至: {model_dir}")
    return model


def main():
    """Run every model example in sequence and log where results landed."""
    logging.info(f"所有结果将保存至: {base_results_dir}\n")

    # Each example returns its trained model, or None when it failed.
    runners = {
        # "cnn": example_cnn_mnist,
        "rnn": example_rnn_sequence,
        "lstm": example_lstm_text,
        "gru": example_gru_time_series,
    }
    models = {name: runner() for name, runner in runners.items()}

    logging.info("\n=== 所有示例运行完成 ===")
    logging.info(f"所有结果已保存至: {base_results_dir}")
    logging.info(f"可用模型: {', '.join([k for k, v in models.items() if v is not None])}")


if __name__ == "__main__":
    main()