__all__ = [
    "Layer",
    "ReshapeLayer",
    "FuncLayer",
    "FullConnectedLayer",
    "MeanPoolingLayer",
    "ConvolutionLayer",
]

import functools
import warnings
from abc import ABC, abstractmethod
from typing import Iterable, Optional

import numpy as np

from funcs import Func


class Layer(ABC):
    """Abstract base class for all network layers.

    A layer transforms its input in ``__call__`` (forward pass) and
    propagates gradients in ``backward`` (backward pass).

    Inheriting from ``ABC`` is required for ``@abstractmethod`` to be
    enforced; without it the decorators are inert and an incomplete
    subclass could be instantiated silently.
    """

    @abstractmethod
    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Forward pass: map the input tensor to this layer's output."""

    @abstractmethod
    def backward(self, d: np.ndarray) -> np.ndarray:
        """Backward pass: map the gradient w.r.t. this layer's output to
        the gradient w.r.t. its input (updating parameters on the way,
        where the layer has any)."""


# 变形层


class ReshapeLayer(Layer):
    """Reshapes its input on the forward pass and restores the original
    shape on the backward pass.

    ``from_shape`` may be None, in which case it is captured lazily from
    the first input seen.
    """

    def __init__(self, from_shape, to_shape):
        self.from_shape = from_shape
        self.to_shape = to_shape

    def __call__(self, x: np.ndarray) -> np.ndarray:
        # Record the source shape lazily so backward() can undo the
        # reshape on the gradient.
        if self.from_shape is None:
            self.from_shape = x.shape
        return np.reshape(x, self.to_shape)

    def backward(self, d: np.ndarray) -> np.ndarray:
        # Invert the forward reshape for the incoming gradient.
        return np.reshape(d, self.from_shape)


# 激活函数层


class FuncLayer(Layer):
    """Activation layer: applies an activation function element-wise (or
    as a vector function) to its input.

    The pre-activation input is cached so the derivative can be
    evaluated during back-propagation.
    """

    def __init__(self, activate_fn: Func):
        self.f = activate_fn
        # Pre-activation input, cached by the forward pass.
        self.z: Optional[np.ndarray] = None

    def __call__(self, x: np.ndarray) -> np.ndarray:
        self.z = x
        return self.f(x)

    def backward(self, dc_da: np.ndarray) -> np.ndarray:
        da_dz = self.f.derivate(self.z)
        # When the derivative is only expressible as a full Jacobian
        # matrix, a matrix product is required.
        if self.f.jacobin:
            return dc_da.dot(da_dz.T)
        # Otherwise the Jacobian is diagonal and the Hadamard
        # (element-wise) product is a cheaper equivalent.
        return dc_da * da_dz


# 全连接层


class FullConnectedLayer(Layer):
    """Dense layer computing ``z = x @ w + b`` on a row-vector input.

    ``input_size`` may be None, in which case the parameters are
    allocated lazily from the length of the first input.
    """

    def __init__(self, input_size, output_size):
        self.i_size = input_size
        self.o_size = output_size
        if input_size is not None:
            self.__init(input_size)

    def __init(self, input_size):
        # Allocate parameters from a standard normal distribution.
        self.i_size = input_size
        self.w = np.random.normal(
            loc=0.0, scale=1.0, size=(input_size, self.o_size))
        self.b = np.random.normal(loc=0.0, scale=1.0, size=(1, self.o_size))
        self.x: Optional[np.ndarray] = None  # input cached for backward()

    def __call__(self, x: np.ndarray) -> np.ndarray:
        x = x.reshape(1, -1)
        # Deferred initialization: size the weights from the first input.
        if self.i_size is None:
            self.__init(x.shape[1])
        self.x = x
        self.z = x @ self.w + self.b
        return self.z

    def backward(self, dc_dz: np.ndarray) -> np.ndarray:
        # Gradient w.r.t. the input, computed before the weights change.
        dc_dx = dc_dz @ self.w.T
        # NOTE(review): parameters are updated with "+="; this assumes
        # the caller has already folded the (negative) learning rate
        # into dc_dz — confirm against the training loop.
        self.w += self.x.T @ dc_dz
        self.b += dc_dz
        return dc_dx


# 池化层
# 池化层的难点在于处理正反向传播时索引的对应关系
# 均值池化层实现起来比最大值池化层更简单（尤其是涉及到多个channel的）


class MeanPoolingLayer(Layer):
    """Channel-wise mean pooling with a square kernel.

    The hard part of any pooling layer is the index bookkeeping between
    the forward and backward passes; mean pooling is simpler than max
    pooling (especially with multiple channels) because the gradient
    spreads uniformly over each window.
    """

    def __init__(self, kernel_size: int, stride: int):
        self.ks = kernel_size
        self.kernel_shape = (kernel_size, kernel_size)
        self.channels: Optional[int] = None
        self.stride = stride
        self.input_shape: Optional[tuple] = None  # (rows, cols, channels)
        self.target_shape: Optional[tuple] = None  # pooled output shape

    def __call__(self, mat: np.ndarray) -> np.ndarray:
        self.input_shape = mat.shape
        self.channels = mat.shape[2]
        rows, cols = mat.shape[0], mat.shape[1]
        (kr, kc), s = self.kernel_shape, self.stride
        out_rows = (rows - kr) // s + 1
        out_cols = (cols - kc) // s + 1
        self.target_shape = (out_rows, out_cols, self.channels)
        pooled = np.zeros(self.target_shape)
        for i in range(out_rows):
            r = i * s
            for j in range(out_cols):
                c = j * s
                # Average each window over the two spatial axes only,
                # keeping channels separate.
                pooled[i, j] = np.average(mat[r:r + kr, c:c + kc],
                                          axis=(0, 1))
        return pooled

    def backward(self, d_out: np.ndarray) -> np.ndarray:
        d_input = np.zeros(self.input_shape)
        (kr, kc), s = self.kernel_shape, self.stride
        # The derivative of a mean over n elements is exactly 1/n for
        # every element of the window.
        d_mat = d_out / (kr * kc)
        for i in range(self.target_shape[0]):
            r = i * s
            for j in range(self.target_shape[1]):
                c = j * s
                d_input[r:r + kr, c:c + kc] += d_mat[i, j]
        return d_input


# 卷积层

class ConvolutionLayer(Layer):
    """Multi-channel 2-D convolution layer.

    Inputs are (row, col, in_channels) tensors; the kernel tensor is
    (k_row, k_col, in_channels, out_channels), i.e. there are
    ``out_channels`` kernels of shape (k_row, k_col, in_channels).
    Each output channel shares a single bias.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride):
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.ks = kernel_size
        self.kernel_shape = (kernel_size, kernel_size)
        self.stride = stride
        self.x: Optional[np.ndarray] = None  # input cached for backward()
        # Kernel tensor in (row, col, channel) order:
        # out_channels kernels, each of shape (row, col, in_channel).
        self.kernel = np.random.normal(loc=0.0, scale=1.0, size=(
            kernel_size, kernel_size, in_channels, out_channels,))
        # One shared bias per output channel: out_channels biases total.
        self.bias = np.random.normal(loc=0.0, scale=1.0, size=(out_channels,))

    def check_x_mat_shape(self, x_mat):
        '''
            The kernel must tile the input exactly while striding (the
            gaps produced by the stride itself do not count): no spare
            columns (rows) may remain on the right (bottom).
            E.g. a 28x28 input cannot use a (5x5, stride=2) kernel,
            since such a kernel only covers 27x27.
        '''
        row, col = x_mat.shape[0], x_mat.shape[1]
        k, s = self.ks, self.stride
        assert (row - k) // s * s + k == row
        assert (col - k) // s * s + k == col

    def __call__(self, x_mat: np.ndarray) -> np.ndarray:
        self.check_x_mat_shape(x_mat)
        self.x = x_mat
        return self.__conv(
            stride=self.stride,
            mat=x_mat,
            kernel=self.kernel,
            bias=None,
            einsum_formula="ijk,ijkl->l",
            out_ele_shape=[self.out_channels]
        )

    def backward(self, dc_dz: np.ndarray) -> np.ndarray:
        # The backward convolutions operate on dc_dz after zero
        # insertion (padding + dilation):
        #   - pad kernel_size-1 rings of zeros around the border;
        #   - insert stride-1 zeros between neighbouring elements.
        # Looking at columns only: if dc_dz has m columns, the padded
        # tensor has 2*kernel_size + (m-1)*stride - 1 columns.
        # The backward convolutions always use stride 1.
        (kr, kc, in_ch, out_ch), s = self.kernel.shape, self.stride
        dc_dz_with_zeros_shape = (
            2 * kr + (dc_dz.shape[0] - 1) * s - 1,
            2 * kc + (dc_dz.shape[1] - 1) * s - 1,
            dc_dz.shape[2]
        )
        D = np.zeros(dc_dz_with_zeros_shape)  # D = dc_dz with zeros inserted
        for i in range(dc_dz.shape[0]):
            for j in range(dc_dz.shape[1]):
                D[kr + i * s - 1, kc + j * s - 1] = dc_dz[i, j]
        # dc_da: gradient w.r.t. this layer's input self.x (called "a"
        # because the previous layer's activation is conventionally
        # a[l-1]).  Note the stride is 1 here.
        # In the forward pass kernel[i,j,k,l] has i=row, j=col, k=in_ch,
        # l=out_ch; in the backward pass the roles of k and l swap
        # (l indexes the input, k the output) but the computation is
        # otherwise identical.
        dc_da = self.__conv(
            stride=1,
            mat=D,
            kernel=self.kernel[::-1, ::-1],  # rot180(w): the 180-degree kernel rotation must not be omitted
            bias=None,
            einsum_formula="ijl,ijkl->k",
            out_ele_shape=[in_ch])
        # dc_dw (the kernel gradient): also a convolution, this time of
        # rot180(input) over D.
        dc_dw = self.__conv(
            stride=1,
            mat=D,
            kernel=self.x[::-1, ::-1],
            bias=None,
            einsum_formula="ijl,ijk->kl",
            out_ele_shape=[in_ch, out_ch])
        # dc_db: sum the output gradient over the spatial axes.
        dc_db = np.einsum("ijk->k", dc_dz)
        # Update w (kernel) and b (bias), then return dc_da.
        # NOTE(review): updates use "+="; this assumes the caller folded
        # the (negative) learning rate into dc_dz — confirm against the
        # training loop.
        self.kernel += dc_dw
        self.bias += dc_db
        return dc_da

    def __conv(self,
               stride: int,
               mat: np.ndarray,  # shape=(row, col, in_ch)
               kernel: np.ndarray,  # shape=(k_row, k_col, in_ch, out_ch)
               bias: np.ndarray = None,  # shape=(out_ch,)
               einsum_formula: str = "ijk,ijkl->l",
               out_ele_shape: Iterable[int] = None) -> np.ndarray:
        '''
            einsum_formula:
                Einstein-summation expression used to fully convolve the
                kernel with one sub-matrix of ``mat``.  The convolution
                yields a result matrix of shape (I, J) whose elements are
                not necessarily scalars — depending on the formula each
                element may itself be a tensor, so the result is in
                general a tensor of shape (I, J, ...).

            out_ele_shape:
                Must correspond to ``einsum_formula``: it is the shape of
                each element of the result matrix and is used to build
                the result tensor.
                -------------------------------------------------------------
                Example:
                    with "ijk,ijl->kl" every element of the result is a
                    (K, L) matrix, so the overall result is a 4-D tensor
                    of shape (I, J, K, L) and one should pass
                    out_ele_shape=[K, L].
                -------------------------------------------------------------
                For single-channel convolution each element is a scalar:
                pass out_ele_shape=[].
                The defaults target the forward pass, where
                out_ele_shape may be left as None purely for convenience.
        '''
        # Convolution step: sub_np_tensor * kernel_np_tensor + bias
        if bias is None:
            def f(m):
                return np.einsum(
                    einsum_formula, m, kernel)
        else:
            def f(m):
                return np.einsum(einsum_formula, m, kernel) + bias
        row, col = mat.shape[0], mat.shape[1]
        (kr, kc, *omit), s = kernel.shape, stride

        # out_ele_shape defaults to (kernel.shape[2],) — a forward-style
        # call with a 3-D kernel.
        # NOTE(review): this branch requires a 3-D kernel, yet the
        # default einsum formula expects a 4-D one; every call site in
        # this file passes out_ele_shape explicitly, so the branch looks
        # like dead legacy code — confirm before relying on it.
        if out_ele_shape is None:
            assert len(kernel.shape) == 3
            out_ch = kernel.shape[-1]
            out_ele_shape = (out_ch,)

        target_shape = ((row - kr) // s + 1, (col - kc) // s + 1, *out_ele_shape)
        target = np.zeros(target_shape)
        for i in range(target_shape[0]):
            for j in range(target_shape[1]):
                r, c = i * s, j * s
                target[i, j] = f(mat[r:r + kr, c:c + kc])
        return target


# @deprecated:

# 2019-9-29 更新：
#
# 发现我以前对多通道卷积的理解有误

# 我之前以为输入 size 为 (inCh, width, height) ，输出通道为 outCh 的卷积层
# 使用 outCh 个 (kernelWidth,kernelHeight) 的卷积核，其中输入的的每一个通道
# 共享同样的卷积核参数。在 MNIST 数据集上做测试，这样虽然参数少，但确实有一定效果。
#
# 但实际上，正确的理解是“使用 outCh 个 (inCh, kernelWidth, kernelHeight) 的
# 卷积核”（Tensorflow，BigDL 里都是这样实现的，最近才注意到，于是我不得不重新思考），
# 参数总数整整多了 inCh 倍。
# ————————————————
# 版权声明：本文为CSDN博主「bitosky」的原创文章，遵循CC 4.0 BY-SA版权协议，转载请附上原文出处链接及本声明。
# 原文链接：https://blog.csdn.net/qq_36393962/article/details/99354969

class Deprecated_ConvolutionLayer(Layer):
    """Deprecated single-kernel convolution layer.

    Uses only ``out_channels`` 2-D kernels, with every input channel
    sharing the same kernel parameters — see the changelog comment above
    for why this interpretation of multi-channel convolution is wrong.
    Kept for reference only; use ConvolutionLayer instead.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride):
        warnings.warn("{} is deprecated".format(self.__class__.__name__), DeprecationWarning)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.ks = kernel_size
        self.kernel_shape = (kernel_size, kernel_size)
        self.stride = stride
        self.x: Optional[np.ndarray] = None  # input cached for backward()
        # Kernels in (row, col, channel) order; all input channels share
        # the same parameters.
        self.kernel = np.random.normal(loc=0.0, scale=1.0, size=(
            kernel_size, kernel_size, out_channels))
        self.bias = np.random.normal(loc=0.0, scale=1.0, size=(out_channels,))

    def check_x_mat_shape(self, x_mat):
        '''
            The kernel must tile the input exactly while striding (the
            gaps produced by the stride itself do not count): no spare
            columns (rows) may remain on the right (bottom).
            E.g. a 28x28 input cannot use a (5x5, stride=2) kernel,
            since such a kernel only covers 27x27.
        '''
        row, col = x_mat.shape[0], x_mat.shape[1]
        k, s = self.ks, self.stride
        assert (row - k) // s * s + k == row
        assert (col - k) // s * s + k == col

    def __call__(self, x_mat: np.ndarray) -> np.ndarray:
        self.check_x_mat_shape(x_mat)
        self.x = x_mat
        return self.__conv(
            stride=self.stride,
            mat=x_mat,
            kernel=self.kernel,
            bias=None)

    def backward(self, dc_dz: np.ndarray) -> np.ndarray:
        # The backward convolution targets dc_dz after zero insertion
        # (padding + dilation):
        #   - pad kernel_size-1 rings of zeros around the border;
        #   - insert stride-1 zeros between neighbouring elements.
        # Column-wise: if dc_dz has m columns, the padded tensor has
        # 2*kernel_size + (m-1)*stride - 1 columns.
        # The backward convolution always uses stride 1.
        (kr, kc, ch), s = self.kernel.shape, self.stride
        dc_dz_with_zeros_shape = (
            2 * kr + (dc_dz.shape[0] - 1) * s - 1,
            2 * kc + (dc_dz.shape[1] - 1) * s - 1,
            dc_dz.shape[2]
        )
        D = np.zeros(dc_dz_with_zeros_shape)  # D = dc_dz with zeros inserted
        for i in range(dc_dz.shape[0]):
            for j in range(dc_dz.shape[1]):
                D[kr + i * s - 1, kc + j * s - 1] = dc_dz[i, j]
        # dc_da: gradient w.r.t. this layer's input self.x ("a" because
        # the previous layer's activation is conventionally a[l-1]).
        # Stride is 1; each channel's kernel convolves its corresponding
        # channel of mat and the results are summed into one channel.
        # The resulting map dc_da_map is then replicated across every
        # input channel of dc_da (easy to verify on paper).
        dc_da_map = self.__conv(
            stride=1,
            mat=D,
            kernel=self.kernel[::-1, ::-1],  # rot180(w): the 180-degree kernel rotation must not be omitted
            bias=None,
            einsum_formula="ijk,ijk->",
            out_channels=1)
        dc_da = np.repeat(dc_da_map.reshape(
            (dc_da_map.shape[0], -1, 1)), self.in_channels, axis=2)
        # dc_dw (the kernel gradient): also a convolution, this time of
        # rot180(input) over D.
        dc_dw = self.__conv(
            stride=1,
            mat=D,
            kernel=self.x[::-1, ::-1],
            bias=None,
            einsum_formula="ijk,ijl->k",
            out_channels=self.kernel.shape[2])
        # dc_db: sum the output gradient over the spatial axes.
        dc_db = np.einsum("ijk->k", dc_dz)
        # Update w (kernel) and b (bias), then return dc_da.
        self.kernel += dc_dw
        self.bias += dc_db
        return dc_da

    def __conv(self,
               stride: int,
               mat: np.ndarray,
               kernel: np.ndarray,
               bias: np.ndarray = None,
               einsum_formula: str = "ijk,ijl->l",
               out_channels: int = None):
        '''
            Note: out_channels must correspond to einsum_formula.
        '''
        # Convolution step: sub_np_tensor * kernel_np_tensor + bias
        if bias is None:
            def f(m):
                return np.einsum(
                    einsum_formula, m, kernel)
        else:
            def f(m):
                return np.einsum(einsum_formula, m, kernel) + bias
        row, col = mat.shape[0], mat.shape[1]
        (kr, kc, ch), s = kernel.shape, stride
        # out_channels defaults to kernel.shape[2]
        out_channels = ch if out_channels is None else out_channels
        target_shape = ((row - kr) // s + 1, (col - kc) // s + 1, out_channels)
        target = np.zeros(target_shape)
        for i in range(target_shape[0]):
            for j in range(target_shape[1]):
                r, c = i * s, j * s
                target[i, j] = f(mat[r:r + kr, c:c + kc])
        return target


if __name__ == "__main__":
    # Smoke test: train a small conv + dense network on one (4, 4, 3)
    # input until its output approaches the 4-element target row vector.
    # (Previous revisions carried a large amount of commented-out
    # scratch code here; it has been removed for readability.)
    a = np.array(
        [
            [[1, 1, 3], [2, 2, 3], [3, 3, 5], [4, 4, 5]],
            [[0, 0, 3], [1, 1, 3], [0, 0, 5], [1, 1, 5]],
            [[5, 5, 3], [0, 0, 3], [9, 9, 5], [1, 1, 5]],
            [[6, 6, 3], [3, 3, 3], [7, 7, 5], [1, 1, 5]]
        ]
    )

    label = np.array([[1, 0, 1, 1]])

    from funcs import sigmoid
    from lossfuncs import sse
    from nn import NN

    my_nn = NN((4, 4, 3), (1, 4))
    my_nn.set_layers([
        ConvolutionLayer(3, 6, 1, 1),
        FuncLayer(sigmoid),
        ConvolutionLayer(6, 2, 2, 1),
        FuncLayer(sigmoid),
        ReshapeLayer(None, (1, -1)),
        FullConnectedLayer(None, 4),
        FuncLayer(sigmoid),
    ])
    y1 = my_nn.forward(a)
    for i in range(20000):
        my_nn.train(a, label, sse, 0.1)
    y2 = my_nn.forward(a)
    print("before:", y1)
    print("after:", y2)
