import numpy as np
import matplotlib.pyplot as plt
from base_nn import Layer, Activation, Loss, Dropout
from cnn import Conv2D, MaxPool2D, AvgPool2D
from rnn import RNN, LSTM, GRU


class NeuralNetwork:
    """A sequential neural-network model built from stacked layers.

    Layers run in insertion order on the forward pass and in reverse order
    during backpropagation.  Recurrent layers (RNN/LSTM/GRU) return extra
    state tuples and Dropout needs a training flag, so both receive special
    dispatch in `forward`.
    """

    def __init__(self, layers=None):
        """Create a network, optionally from an existing list of layers."""
        # Avoid the mutable-default-argument trap: each instance owns its list.
        self.layers = layers if layers is not None else []
        self.loss = None        # loss object exposing compute() and gradient()
        self.accuracy = None
        self.train_loss_history = []
        self.train_acc_history = []
        self.val_loss_history = []
        self.val_acc_history = []
        # The dict aliases the same list objects, so it always reflects the
        # most recent metrics without any extra bookkeeping.
        self.training_history = {
            'train_loss': self.train_loss_history,
            'train_acc': self.train_acc_history,
            'val_loss': self.val_loss_history,
            'val_acc': self.val_acc_history
        }

    def add(self, layer):
        """Append a layer to the end of the network."""
        self.layers.append(layer)

    def set_loss(self, loss):
        """Set the loss function (must expose compute() and gradient())."""
        self.loss = loss

    def forward(self, x, training=True):
        """Propagate `x` through every layer and return the final output.

        `training` is forwarded to Dropout layers so they can be disabled at
        inference time; the extra state returned by recurrent layers is
        discarded.
        """
        for layer in self.layers:
            if isinstance(layer, LSTM):
                # LSTM returns (output, hidden_state, cell_state).  Checked
                # before RNN so the 3-tuple unpacks correctly even if LSTM
                # happens to subclass RNN.
                x, _, _ = layer.forward(x)
            elif isinstance(layer, (RNN, GRU)):
                # RNN / GRU return (output, hidden_state).
                x, _ = layer.forward(x)
            elif isinstance(layer, Dropout):
                # Dropout must know whether we are training or predicting.
                x = layer.forward(x, training)
            else:
                x = layer.forward(x)
        return x

    def backward(self, output_gradient, learning_rate):
        """Backpropagate the loss gradient through all layers in reverse."""
        for layer in reversed(self.layers):
            output_gradient = layer.backward(output_gradient, learning_rate)
        return output_gradient

    def train(self, x_train, y_train, x_val=None, y_val=None, epochs=10, batch_size=32, learning_rate=0.01):
        """Train the network with mini-batch gradient descent.

        Samples that do not fill a whole batch are dropped each epoch.

        Raises:
            ValueError: if no loss function was set, or if `batch_size`
                exceeds the number of training samples (which would
                otherwise divide by zero when averaging).
        """
        if self.loss is None:
            raise ValueError("Loss function not set")

        num_samples = x_train.shape[0]
        num_batches = num_samples // batch_size
        if num_batches == 0:
            raise ValueError(
                "batch_size (%d) exceeds the number of training samples (%d)"
                % (batch_size, num_samples)
            )

        for epoch in range(epochs):
            # Reshuffle every epoch so batch composition varies.
            indices = np.random.permutation(num_samples)
            x_train_shuffled = x_train[indices]
            y_train_shuffled = y_train[indices]

            epoch_loss = 0.0
            epoch_acc = 0.0

            for i in range(num_batches):
                # Slice out the current mini-batch.
                start = i * batch_size
                x_batch = x_train_shuffled[start:start + batch_size]
                y_batch = y_train_shuffled[start:start + batch_size]

                # Forward pass (training mode enables Dropout).
                y_pred = self.forward(x_batch, training=True)

                # Accumulate batch loss and accuracy for the epoch average.
                epoch_loss += self.loss.compute(y_batch, y_pred)
                epoch_acc += self._compute_accuracy(y_batch, y_pred)

                # Backward pass updates layer parameters in place.
                output_gradient = self.loss.gradient(y_batch, y_pred)
                self.backward(output_gradient, learning_rate)

            avg_loss = epoch_loss / num_batches
            avg_acc = epoch_acc / num_batches
            self.train_loss_history.append(avg_loss)
            self.train_acc_history.append(avg_acc)

            # Optional validation pass (inference mode: Dropout disabled).
            val_loss = None
            val_acc = None
            if x_val is not None and y_val is not None:
                y_val_pred = self.forward(x_val, training=False)
                val_loss = self.loss.compute(y_val, y_val_pred)
                val_acc = self._compute_accuracy(y_val, y_val_pred)
                self.val_loss_history.append(val_loss)
                self.val_acc_history.append(val_acc)

            # Per-epoch progress report.
            print(f"Epoch {epoch + 1}/{epochs}")
            print(f"Train Loss: {avg_loss:.4f}, Train Acc: {avg_acc:.4f}")
            if val_loss is not None and val_acc is not None:
                print(f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}")
            print("-" * 50)

    def predict(self, x):
        """Run inference (training=False, so Dropout is disabled)."""
        return self.forward(x, training=False)

    def evaluate(self, x_test, y_test):
        """Return (loss, accuracy) on a held-out test set."""
        y_pred = self.predict(x_test)
        loss = self.loss.compute(y_test, y_pred)
        acc = self._compute_accuracy(y_test, y_pred)
        return loss, acc

    def _compute_accuracy(self, y_true, y_pred):
        """Return the fraction of correctly classified samples.

        1-D targets are treated as binary labels with a 0.5 threshold on
        `y_pred`; 2-D targets are treated as one-hot and compared via
        argmax along axis 1.
        """
        if y_true.ndim == 1:
            # Binary classification.  Reshape guards against predictions
            # arriving as (n, 1) instead of (n,), which would otherwise
            # broadcast the comparison to an (n, n) matrix.
            y_pred_classes = (np.asarray(y_pred).reshape(y_true.shape) > 0.5).astype(int)
            # Fix: the original never defined y_true_classes on this path,
            # so binary accuracy raised NameError.
            y_true_classes = y_true.astype(int)
        else:
            # Multi-class: compare argmax of prediction vs one-hot target.
            y_pred_classes = np.argmax(y_pred, axis=1)
            y_true_classes = np.argmax(y_true, axis=1)

        return np.mean(y_pred_classes == y_true_classes)

    def get_layer_outputs(self, x, layer_indices=None):
        """Return the outputs of selected layers (all layers if None).

        NOTE(review): layers are invoked with their default forward
        arguments here, so Dropout runs in whatever mode its forward()
        defaults to — confirm against the Dropout implementation if exact
        inference outputs matter.
        """
        outputs = []
        current_output = x

        for i, layer in enumerate(self.layers):
            if isinstance(layer, LSTM):
                # See forward(): LSTM yields a 3-tuple, RNN/GRU a 2-tuple.
                current_output, _, _ = layer.forward(current_output)
            elif isinstance(layer, (RNN, GRU)):
                current_output, _ = layer.forward(current_output)
            else:
                current_output = layer.forward(current_output)

            if layer_indices is None or i in layer_indices:
                outputs.append(current_output)

        return outputs

    def get_layer_params(self, layer_indices=None):
        """Return get_params() of selected layers (all layers if None)."""
        return [
            layer.get_params()
            for i, layer in enumerate(self.layers)
            if layer_indices is None or i in layer_indices
        ]

    def plot_training_history(self):
        """Plot loss and accuracy curves recorded during training."""
        plt.figure(figsize=(12, 4))

        # Loss curves (validation shown only if it was recorded).
        plt.subplot(1, 2, 1)
        plt.plot(self.train_loss_history, label='Train Loss')
        if self.val_loss_history:
            plt.plot(self.val_loss_history, label='Val Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Training Loss History')
        plt.legend()

        # Accuracy curves.
        plt.subplot(1, 2, 2)
        plt.plot(self.train_acc_history, label='Train Acc')
        if self.val_acc_history:
            plt.plot(self.val_acc_history, label='Val Acc')
        plt.xlabel('Epoch')
        plt.ylabel('Accuracy')
        plt.title('Training Accuracy History')
        plt.legend()

        plt.tight_layout()
        plt.show()

    def save_model(self, filename):
        """Save each layer's parameters to an .npz archive (arr_0, arr_1, ...)."""
        model_params = [layer.get_params() for layer in self.layers]
        np.savez(filename, *model_params)

    def load_model(self, filename):
        """Load parameters saved by `save_model` back into the layers.

        Arrays are matched to layers by position, so the current
        architecture must match the one that was saved.
        """
        data = np.load(filename, allow_pickle=True)
        for i, layer in enumerate(self.layers):
            layer.set_params(data[f'arr_{i}'].item())

class CNNVisualizer:
    """Visualization helpers for convolutional networks."""

    @staticmethod
    def plot_filters(filters, title="Convolutional Filters"):
        """Plot convolution kernels as a near-square grid of grayscale images.

        Args:
            filters: ndarray of shape (out_channels, in_channels, height,
                width).  Only the first input channel of each kernel is shown.
            title: figure title.

        Returns:
            The created matplotlib Figure, or None if `filters` is not 4-D
            (non-4-D input is silently ignored, as before).
        """
        if len(filters.shape) != 4:
            return None

        out_channels, in_channels, height, width = filters.shape

        # Always slice out the first input channel so each kernel is a 2-D
        # (height, width) image.  The original only sliced when
        # in_channels > 1, leaving a 3-D (1, h, w) array that plt.imshow
        # rejects for single-channel filters.
        filters = filters[:, 0, :, :]

        # Near-square grid layout.
        n_filters = out_channels
        n_cols = int(np.ceil(np.sqrt(n_filters)))
        n_rows = int(np.ceil(n_filters / n_cols))

        fig = plt.figure(figsize=(n_cols * 2, n_rows * 2))
        plt.suptitle(title)

        for i in range(n_filters):
            plt.subplot(n_rows, n_cols, i + 1)
            plt.imshow(filters[i], cmap='gray')
            plt.axis('off')
            plt.title(f'Filter {i + 1}')

        plt.tight_layout()
        plt.show()
        return fig

    @staticmethod
    def plot_feature_maps(feature_maps, title="Feature Maps"):
        """Plot the feature maps of the first sample in a batch.

        Args:
            feature_maps: ndarray of shape (batch_size, channels, height,
                width); only the first sample is visualized.
            title: figure title.

        Returns:
            The created matplotlib Figure, or None if `feature_maps` is not
            4-D (non-4-D input is silently ignored, as before).
        """
        if len(feature_maps.shape) != 4:
            return None

        batch_size, channels, height, width = feature_maps.shape

        # Visualize the first sample only; each map is then 2-D (h, w).
        feature_maps = feature_maps[0]

        # Near-square grid layout.
        n_maps = channels
        n_cols = int(np.ceil(np.sqrt(n_maps)))
        n_rows = int(np.ceil(n_maps / n_cols))

        fig = plt.figure(figsize=(n_cols * 2, n_rows * 2))
        plt.suptitle(title)

        for i in range(n_maps):
            plt.subplot(n_rows, n_cols, i + 1)
            plt.imshow(feature_maps[i], cmap='viridis')
            plt.axis('off')
            plt.title(f'Map {i + 1}')

        plt.tight_layout()
        plt.show()
        return fig

class RNNVisualizer:
    """Plotting helpers for inspecting recurrent networks."""

    @staticmethod
    def plot_hidden_states(hidden_states, title="Hidden States"):
        """Plot the activations of the final time step's hidden state."""
        if len(hidden_states) == 0:
            return

        final_state = hidden_states[-1]
        # 3-D states are assumed batched — visualize the first sample only.
        if final_state.ndim == 3:
            final_state = final_state[0]

        plt.figure(figsize=(10, 6))
        plt.plot(final_state.flatten())
        plt.xlabel('Hidden Unit')
        plt.ylabel('Activation')
        plt.title(title)
        plt.grid(True)
        plt.show()

    @staticmethod
    def plot_weights(weights, title="Weights"):
        """Show a weight matrix as a diverging-colormap heatmap."""
        plt.figure(figsize=(10, 8))
        plt.imshow(weights, cmap='coolwarm')
        plt.colorbar()
        plt.title(title)
        plt.xlabel('Input Units')
        plt.ylabel('Output Units')
        plt.show()

    @staticmethod
    def plot_sequence_prediction(y_true, y_pred, title="Sequence Prediction"):
        """Overlay true and predicted sequences for the first sample."""
        plt.figure(figsize=(12, 6))

        # Multi-feature outputs: keep only the first output dimension.
        if y_true.ndim == 3:
            y_true = y_true[:, :, 0]
            y_pred = y_pred[:, :, 0]

        true_seq, pred_seq = y_true[0], y_pred[0]

        plt.plot(true_seq, label='True')
        plt.plot(pred_seq, label='Predicted')
        plt.title(title)
        plt.xlabel('Time Step')
        plt.ylabel('Value')
        plt.legend()
        plt.grid(True)
        plt.show()
