from abc import ABC, abstractmethod
import numpy as np
from method import Xavier, He

from tools import *


class LayerBase(ABC):
    """Abstract base class for all network layers.

    Every concrete layer must override:
        forward(x)                 -- forward pass
        backward(dout)             -- backward pass (gradient w.r.t. the input)
        update(learning_object)    -- apply a parameter update through an
                                      optimizer object (e.g. SGD, AdaGrad)
        saveParams()               -- return the layer's parameters as a dict
        loadParams(layer_params)   -- restore parameters from such a dict
    """

    def __init__(self) -> None:
        pass

    # Calling the layer instance directly is shorthand for forward().
    def __call__(self, x):
        return self.forward(x)

    @abstractmethod
    def forward(self, x):
        pass

    @abstractmethod
    def backward(self, dout):
        pass

    @abstractmethod
    def update(self, learning_object):
        pass

    @abstractmethod
    def saveParams(self):
        pass

    @abstractmethod
    def loadParams(self, layer_params):
        pass


class AffineLayer(LayerBase):
    """Fully connected (affine) layer: y = x @ w + b.

    Args:
        output_size: number of output units.
        method: name of the weight-initialization class (e.g. "Xavier", "He").
            Parameters are created lazily on the first forward pass, once the
            input shape is known.
    """

    def __init__(self, output_size, method) -> None:
        super().__init__()
        self.method = method
        self.output_size = output_size
        self.is_init_params = False  # parameters are created lazily in forward()

        # gradients, filled in by backward()
        self.db = None
        self.dW = None

    # Parameter shapes are derived dynamically from x and the output size.
    def initParams(self, x, output_size, method):
        """Create w and b with the named initialization method."""
        # NOTE(review): eval() resolves the method name; assumes trusted,
        # internally generated configuration — never user input.
        self.init_method = eval(method)()
        self.w, self.b = self.init_method.getAffineParams(
            x.shape, output_size)
        self.is_init_params = True

    # Backward-compatible alias for the original misspelled method name.
    ininParams = initParams

    def forward(self, x):
        # First call initializes the parameters.
        if not self.is_init_params:
            self.initParams(x, self.output_size, self.method)

        # Remember the original shape so backward() can restore it.
        self.x_shape = x.shape

        # Flatten any higher-dimensional input to 2-D (batch, features).
        x = x.reshape(x.shape[0], -1)
        self.x = x

        return np.dot(self.x, self.w)+self.b

    def backward(self, dout):
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)

        # Reshape dx back to the shape the forward input had.
        return np.dot(dout, self.w.T).reshape(self.x_shape)  # dx

    # Delegates the actual update rule to the optimizer object.
    def update(self, learning_object: object):
        # NOTE(review): the key embeds repr(self) (includes the object id), so
        # per-parameter optimizer state is only stable within a single run.
        self.w = learning_object(self.w, self.dW, self.__str__()+"w")
        self.b = learning_object(self.b, self.db, self.__str__()+"b")

    def saveParams(self):
        """Return {class_name: {...}} with weights and construction arguments."""
        layer_params = {
            'w': self.w,
            'b': self.b,
            'method': self.method,
            'output_size': self.output_size,
        }
        return {self.__class__.__name__: layer_params}

    def loadParams(self, layer_params):
        """Restore weights; skips lazy initialization on the next forward()."""
        self.w = layer_params['w']
        self.b = layer_params['b']
        self.is_init_params = True

class ReluLayer(LayerBase):
    """ReLU activation layer: out = max(x, 0) element-wise."""

    def __init__(self) -> None:
        super().__init__()
        self.mask = None  # boolean mask of the inputs that were <= 0

    def forward(self, x):
        self.mask = (x <= 0)
        # BUGFIX: operate on a copy; the original zeroed the caller's array
        # in place, corrupting the previous layer's cached output.
        out = x.copy()
        out[self.mask] = 0
        return out

    def backward(self, dout):
        # BUGFIX: same in-place-mutation fix for the upstream gradient.
        dx = dout.copy()
        dx[self.mask] = 0
        return dx

    def update(self, learning_object: object):
        # ReLU has no learnable parameters.
        pass

    def saveParams(self):
        return {self.__class__.__name__: {}}

    def loadParams(self, layer_params):
        pass


class SoftMaxLayer(LayerBase):
    """Softmax layer with numeric-overflow protection."""

    def forward(self, x):
        # Subtracting the per-column max before exp() avoids overflow and
        # does not change the softmax result.
        if x.ndim == 2:
            x = x.T
            x = x - np.max(x, axis=0)
            y = np.exp(x) / np.sum(np.exp(x), axis=0)
            return y.T

        x = x - np.max(x)  # overflow guard for the 1-D case
        return np.exp(x) / np.sum(np.exp(x))

    # BUGFIX: accept dout so the signature matches LayerBase.backward();
    # the default keeps old zero-argument calls working.
    def backward(self, dout=None):
        pass

    def update(self, learning_object: object):
        pass

    def saveParams(self):
        # BUGFIX: return a dict like every other layer instead of None, so
        # code that aggregates layer params does not crash on this layer.
        return {self.__class__.__name__: {}}

    def loadParams(self, layer_params):
        pass


class SoftMaxLossLayer(LayerBase):
    """Combined softmax + cross-entropy loss layer (used as the last layer)."""

    def __init__(self) -> None:
        self.train_result = None           # softmax output cached by forward()
        self.target = None                 # labels, supplied through __call__
        self.softmax_layer = SoftMaxLayer()

    def cross_entropy_error(self, y, t):
        """Mean cross-entropy of predictions y against labels t."""
        if y.ndim == 1:
            # promote a single sample to a batch of one
            t = t.reshape(1, t.size)
            y = y.reshape(1, y.size)

        # one-hot labels -> index labels
        if t.size == y.size:
            t = t.argmax(axis=1)

        n = y.shape[0]
        picked = y[np.arange(n), t]
        # the small epsilon guards against log(0)
        return -np.sum(np.log(picked + 1e-7)) / n

    def forward(self, x):
        self.train_result = self.softmax_layer(x)
        self.loss = self.cross_entropy_error(self.train_result, self.target)
        return self.loss

    def backward(self, dout):
        n = self.target.shape[0]
        if self.target.size == self.train_result.size:
            # one-hot labels: gradient is (softmax - target) / batch
            return (self.train_result - self.target) / n
        # index labels: subtract 1 at each true-class position
        grad = self.train_result.copy()
        grad[np.arange(n), self.target] -= 1
        return grad / n

    def __call__(self, x, t):
        self.target = t
        return self.forward(x)

    def update(self, learning_object: object):
        pass

    def saveParams(self):
        return {self.__class__.__name__: {'is_last_layer': 1}}

    def loadParams(self, layer_params):
        pass


class ConvolutionLayer(LayerBase):
    """Convolution layer implemented via im2col.

    Args:
        stride: stride of the sliding window.
        pad: zero padding added to each spatial border.
        method: name of the weight-initialization class (e.g. "Xavier", "He").
        filter_size: spatial size of each (square) filter.
        filter_num: number of filters (= output channels).
        channels: number of input channels.
    """

    def __init__(self, stride, pad, method, filter_size, filter_num, channels):
        super().__init__()
        self.stride = stride
        self.pad = pad
        self.method = method
        self.filter_size = filter_size
        self.filter_num = filter_num
        self.channels = channels

        self.initParams(method, filter_size, filter_num, channels)

        # intermediate data cached by forward() for use in backward()
        self.x = None
        self.col = None
        self.col_W = None

        # gradients of the weights and bias
        self.dW = None
        self.db = None

    def initParams(self, method, filter_size, filter_num, channels):
        """Create W and b via the named init method (fixes the 'fiter_size' typo)."""
        # NOTE(review): eval() resolves the method name; assumes trusted,
        # internally generated configuration — never user input.
        self.init_method = eval(method)()
        self.W, self.b = self.init_method.getParams(
            filter_size, filter_num, channels)

    def forward(self, x):
        FN, C, FH, FW = self.W.shape
        N, C, H, W = x.shape
        out_h = 1 + int((H + 2*self.pad - FH) / self.stride)
        out_w = 1 + int((W + 2*self.pad - FW) / self.stride)

        # Unfold input patches into rows so the convolution becomes a matmul.
        col = im2col(x, FH, FW, self.stride, self.pad)
        col_W = self.W.reshape(FN, -1).T

        out = np.dot(col, col_W) + self.b

        # (N, out_h, out_w, FN) -> (N, FN, out_h, out_w)
        out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)

        self.x = x
        self.col = col
        self.col_W = col_W

        return out

    def backward(self, dout):
        FN, C, FH, FW = self.W.shape
        # (N, FN, out_h, out_w) -> rows matching the im2col layout
        dout = dout.transpose(0, 2, 3, 1).reshape(-1, FN)

        self.db = np.sum(dout, axis=0)
        self.dW = np.dot(self.col.T, dout)
        self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW)

        dcol = np.dot(dout, self.col_W.T)
        # Fold the column gradients back into image shape.
        dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad)

        return dx

    # Delegates the actual update rule to the optimizer object.
    def update(self, learning_object: object):
        # NOTE(review): the key embeds repr(self) (includes the object id), so
        # per-parameter optimizer state is only stable within a single run.
        self.W = learning_object(self.W, self.dW, self.__str__()+"w")
        self.b = learning_object(self.b, self.db, self.__str__()+"b")

    def saveParams(self):
        """Return {class_name: {...}} with weights and construction arguments."""
        layer_params = {
            'w': self.W,
            'b': self.b,
            'stride': self.stride,
            'pad': self.pad,
            'method': self.method,
            'filter_size': self.filter_size,
            'filter_num': self.filter_num,
            'channels': self.channels,
        }
        return {self.__class__.__name__: layer_params}

    def loadParams(self, layer_params):
        self.W = layer_params['w']
        self.b = layer_params['b']


class PoolingLayer(LayerBase):
    """Max-pooling layer implemented via im2col.

    Args:
        pool_h: pooling window height.
        pool_w: pooling window width.
        stride: stride of the window.
        pad: zero padding added to each spatial border.
    """

    def __init__(self, pool_h, pool_w, stride, pad):
        super().__init__()
        self.pool_h = pool_h
        self.pool_w = pool_w
        self.stride = stride
        self.pad = pad

        self.x = None        # cached input for backward()
        self.arg_max = None  # index of the max inside each pooling window

    def forward(self, x):
        N, C, H, W = x.shape
        # BUGFIX: include the padding in the output size; im2col below pads
        # the input, but the original formula ignored self.pad, so the
        # reshape was wrong for any pad != 0. Unchanged when pad == 0.
        out_h = int(1 + (H + 2*self.pad - self.pool_h) / self.stride)
        out_w = int(1 + (W + 2*self.pad - self.pool_w) / self.stride)

        col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
        col = col.reshape(-1, self.pool_h*self.pool_w)
        # index of the max within each window (needed for backward)
        arg_max = np.argmax(col, axis=1)
        out = np.max(col, axis=1)  # max of each window
        out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)

        self.x = x
        self.arg_max = arg_max

        return out

    def backward(self, dout):
        dout = dout.transpose(0, 2, 3, 1)

        pool_size = self.pool_h * self.pool_w
        # Route each upstream gradient to the position that was the max.
        dmax = np.zeros((dout.size, pool_size))
        dmax[np.arange(self.arg_max.size),
             self.arg_max.flatten()] = dout.flatten()
        dmax = dmax.reshape(dout.shape + (pool_size,))

        dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)
        dx = col2im(dcol, self.x.shape, self.pool_h,
                    self.pool_w, self.stride, self.pad)

        return dx

    def update(self, learning_object: object):
        # Pooling has no learnable parameters.
        pass

    def saveParams(self):
        """Return {class_name: {...}} with the construction arguments."""
        layer_params = {
            'pool_h': self.pool_h,
            'pool_w': self.pool_w,
            'stride': self.stride,
            'pad': self.pad,
        }
        return {self.__class__.__name__: layer_params}

    def loadParams(self, layer_params):
        # Nothing to restore: the constructor arguments define the layer.
        pass


class DropOutLayer(LayerBase):
    """Dropout layer.

    Args:
        dropout_ratio: fraction of activations zeroed out during training.
    """

    def __init__(self, dropout_ratio=0.5):
        self.dropout_ratio = dropout_ratio
        self.mask = None  # boolean keep-mask drawn on each training forward pass

    def forward(self, x, train_flg=True):
        # Inference: no units are dropped; outputs are scaled down instead.
        if not train_flg:
            return x * (1.0 - self.dropout_ratio)
        # Training: randomly zero a dropout_ratio fraction of the activations.
        self.mask = np.random.rand(*x.shape) > self.dropout_ratio
        return x * self.mask

    def backward(self, dout):
        # Gradients flow only through the units that were kept.
        return dout * self.mask

    def update(self, learning_object):
        pass

    def saveParams(self):
        return {self.__class__.__name__: {'dropout_ratio': self.dropout_ratio}}

    def loadParams(self, layer_params):
        pass

# Builds the requested layer, returning an object constructed dynamically from the given parameters.


class LayerFactory(object):
    """Factory that builds layer objects by class name.

    produce(layer, params_set):
        layer: the layer's class name as a string.
        params_set: dict of constructor keyword arguments, or None for none.
    """

    def produce(self, layer: str, params_set) -> object:
        if params_set is None:
            params_set = {}
        # SECURITY NOTE(review): eval() executes the given name as code; only
        # pass trusted, internally generated layer names — never user input.
        return eval(layer)(**params_set)
