import numpy as np
import matplotlib.pyplot as plt
from abc import ABC, abstractmethod


class Layer(ABC):
    """Abstract base class for a neural-network layer.

    Subclasses must implement ``forward`` and ``backward``; layers with
    no trainable parameters inherit the no-op ``get_params`` /
    ``set_params`` defaults below.
    """

    def __init__(self):
        # Cached in forward so backward can reuse them.
        self.input = None
        self.output = None

    @abstractmethod
    def forward(self, input_data):
        """Run the forward pass and return the layer's output."""

    @abstractmethod
    def backward(self, output_gradient, learning_rate):
        """Run the backward pass and return the gradient w.r.t. the input."""

    def get_params(self):
        """Return the layer's trainable parameters (empty by default)."""
        return {}

    def set_params(self, params):
        """Load trainable parameters into the layer (no-op by default)."""


class Activation(ABC):
    """Abstract base class for activation functions."""

    @abstractmethod
    def forward(self, x):
        """Apply the activation elementwise to ``x`` and return the result."""

    @abstractmethod
    def backward(self, x, grad_output):
        """Return ``grad_output`` scaled by the activation's derivative at ``x``."""

    def get_params(self):
        """Activations are parameterless; return an empty dict."""
        return {}


class Loss(ABC):
    """Abstract base class for loss functions."""

    @abstractmethod
    def compute(self, y_true, y_pred):
        """Return the scalar loss for predictions ``y_pred`` against ``y_true``."""

    @abstractmethod
    def gradient(self, y_true, y_pred):
        """Return the gradient of the loss w.r.t. the predictions."""


# Activation function implementations
class ReLU(Activation):
    """Rectified linear unit: max(0, x)."""

    def forward(self, x):
        return np.where(x > 0, x, 0)

    def backward(self, x, grad_output):
        # Derivative is 1 where the input was positive, 0 elsewhere
        # (the subgradient at exactly 0 is taken as 0).
        return np.where(x > 0, grad_output, 0.0)


class Sigmoid(Activation):
    """Sigmoid activation: 1 / (1 + exp(-x)).

    Implemented in the standard numerically stable form that only ever
    exponentiates non-positive values, so large-magnitude inputs cannot
    overflow ``np.exp`` (the naive ``1/(1+np.exp(-x))`` overflows and
    emits a RuntimeWarning for large negative ``x``).
    """

    def forward(self, x):
        x = np.asarray(x, dtype=float)
        # exp(-|x|) is always in (0, 1], so it can never overflow.
        z = np.exp(-np.abs(x))
        # x >= 0:  1 / (1 + exp(-x))
        # x <  0:  exp(x) / (1 + exp(x))   (same value, rearranged)
        return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))

    def backward(self, x, grad_output):
        # d(sigmoid)/dx = sigmoid(x) * (1 - sigmoid(x))
        s = self.forward(x)
        return grad_output * s * (1.0 - s)


class Tanh(Activation):
    """Hyperbolic tangent activation."""

    def forward(self, x):
        return np.tanh(x)

    def backward(self, x, grad_output):
        # d(tanh)/dx = 1 - tanh(x)^2
        t = np.tanh(x)
        return grad_output * (1 - t ** 2)


# Loss function implementations
class MSE(Loss):
    """Mean squared error loss."""

    def compute(self, y_true, y_pred):
        diff = y_true - y_pred
        return np.mean(diff ** 2)

    def gradient(self, y_true, y_pred):
        # d/dy_pred of mean((y_true - y_pred)^2) = 2 * (y_pred - y_true) / N
        residual = y_pred - y_true
        return 2 * residual / y_true.size


class CrossEntropy(Loss):
    """Cross-entropy loss.

    NOTE(review): ``gradient`` returns ``(y_pred - y_true) / N``, which is
    the fused softmax + cross-entropy gradient w.r.t. the softmax *logits*,
    not the derivative of ``compute`` w.r.t. ``y_pred`` itself (that would
    be ``-y_true / (N * y_pred)``). Presumably ``y_pred`` is meant to be
    softmax output and the gradient is fed back past it — confirm against
    the network's callers before changing.
    """

    def compute(self, y_true, y_pred):
        # Clip predictions away from 0 and 1 to avoid log(0).
        eps = 1e-12
        p = np.clip(y_pred, eps, 1 - eps)
        return -np.mean(y_true * np.log(p))

    def gradient(self, y_true, y_pred):
        eps = 1e-12
        p = np.clip(y_pred, eps, 1 - eps)
        return (p - y_true) / y_true.size


# Basic neural-network layers
class Dense(Layer):
    """Fully connected (dense) layer: output = input @ weights + bias.

    Weights use He initialization (scaled by sqrt(2 / input_size)), which
    matches the ReLU activations provided in this module.
    """

    def __init__(self, input_size, output_size):
        super().__init__()
        # He initialization for the weights; biases start at zero.
        self.weights = np.random.randn(input_size, output_size) * np.sqrt(2 / input_size)
        self.bias = np.zeros((1, output_size))

    def forward(self, input_data):
        """Compute the affine transform for a (batch, input_size) array."""
        self.input = input_data
        self.output = np.dot(input_data, self.weights) + self.bias
        return self.output

    def backward(self, output_gradient, learning_rate):
        """SGD step on weights/bias; return the gradient w.r.t. the input.

        Bug fix: the input gradient must be computed with the weights that
        produced the forward pass. The previous version updated
        ``self.weights`` first and then used the *post-update* weights for
        the returned gradient, biasing backpropagation to earlier layers.
        """
        weights_gradient = np.dot(self.input.T, output_gradient)
        # Sum over the batch axis; keepdims preserves the (1, output_size) shape.
        bias_gradient = np.sum(output_gradient, axis=0, keepdims=True)

        # Input gradient BEFORE the parameter update (uses forward-pass weights).
        input_gradient = np.dot(output_gradient, self.weights.T)

        # Plain SGD update.
        self.weights -= learning_rate * weights_gradient
        self.bias -= learning_rate * bias_gradient

        return input_gradient

    def get_params(self):
        return {
            'weights': self.weights.copy(),
            'bias': self.bias.copy()
        }

    def set_params(self, params):
        self.weights = params['weights']
        self.bias = params['bias']


class Flatten(Layer):
    """Flattening layer: collapses all dimensions after the batch axis."""

    def forward(self, input_data):
        # Remember the incoming shape so backward can undo the reshape.
        self.input_shape = input_data.shape
        batch_size = input_data.shape[0]
        self.output = input_data.reshape(batch_size, -1)
        return self.output

    def backward(self, output_gradient, learning_rate):
        # Gradients pass through a reshape unchanged, just re-shaped.
        return output_gradient.reshape(self.input_shape)


class Dropout(Layer):
    """Inverted-dropout layer.

    During training each unit is zeroed with probability ``rate`` and the
    survivors are scaled by 1 / (1 - rate), so the expected activation is
    unchanged and inference needs no rescaling.
    """

    def __init__(self, rate):
        """
        Args:
            rate: drop probability; must satisfy 0 <= rate < 1
                  (rate == 1 would divide by zero in ``forward``).

        Raises:
            ValueError: if ``rate`` is outside [0, 1).
        """
        super().__init__()
        if not 0.0 <= rate < 1.0:
            raise ValueError(f"dropout rate must be in [0, 1), got {rate}")
        self.rate = rate
        self.mask = None

    def forward(self, input_data, training=True):
        if training:
            keep_prob = 1 - self.rate
            # Inverted dropout: scale kept units now so inference is identity.
            self.mask = np.random.binomial(1, keep_prob, size=input_data.shape) / keep_prob
            return input_data * self.mask
        # Inference mode: identity; clear any stale training mask so a
        # subsequent backward call cannot silently reuse it.
        self.mask = None
        return input_data

    def backward(self, output_gradient, learning_rate):
        if self.mask is None:
            # No training-mode forward has run (inference); the previous
            # version crashed here with a TypeError on the None mask.
            return output_gradient
        return output_gradient * self.mask
