# coding:utf-8
# Author : hiicy redldw
# Date : 2019/03/07
import numpy as np

"""
"""
'''
conv
pool
conv
pool
dense
dense
'''


class CNNBP(object):
    """Thin container for a layered-network specification.

    Records the per-layer unit counts and how many layers there are.
    """

    def __init__(self, units: list, types: list):
        """
        units: unit count per layer.
        types: layer-type tags (accepted but not stored yet).
        """
        self.units = units
        self.numOflayers = len(self.units)


class Conv2D(object):
    """Naive NHWC 2-D convolution layer built on im2col + matrix multiply.

    Weights have shape (kh, kw, in_channels, filters); one bias per filter.
    """

    def __init__(self, filters, kernel_size=(3, 3), shape=None, stride=1, pad="valid"):
        """
        filters: number of output channels.
        kernel_size: (kh, kw) spatial extent of each filter.
        shape: input shape (batch, height, width, channels); must be provided.
        stride: window step in both spatial dimensions.
        pad: "valid" (no padding) or "same" (zero-pad by kernel // 2 per side).
        """
        self.shape = shape
        self.batchsize = shape[0]
        self.filters = filters
        self.kernel_size = kernel_size
        self.pad = pad
        self.stride = stride

        # Initialize weights and biases from a standard normal distribution.
        self.weights = np.random.standard_normal((kernel_size[0], kernel_size[1], shape[-1], self.filters))
        self.biass = np.random.standard_normal(self.filters)

        if self.pad == "valid":
            # BUGFIX: output size is (in - k) // stride + 1 — the old
            # (in - k + 1) // stride undercounts for stride > 1 — and the
            # width dimension must use kernel_size[1], not kernel_size[0].
            self.eta = np.zeros((shape[0],
                                 (shape[1] - kernel_size[0]) // stride + 1,
                                 (shape[2] - kernel_size[1]) // stride + 1,
                                 self.filters))
        if self.pad == "same":
            # BUGFIX: derive the shape from the actual zero padding (k // 2 per
            # side) so it always matches what forward() produces; the old
            # shape // stride disagreed with im2col for odd input sizes.
            ph, pw = kernel_size[0] // 2, kernel_size[1] // 2
            self.eta = np.zeros((shape[0],
                                 (shape[1] + 2 * ph - kernel_size[0]) // stride + 1,
                                 (shape[2] + 2 * pw - kernel_size[1]) // stride + 1,
                                 self.filters))
        print('self.eta.shape', self.eta.shape)
        self.dw = np.zeros(self.weights.shape)
        self.db = np.zeros(self.biass.shape)
        self.output_shape = self.eta.shape

        if (shape[1] - kernel_size[0]) % stride != 0:
            print('input tensor width can\'t fit stride')
        if (shape[2] - kernel_size[1]) % stride != 0:
            print('input tensor height can\'t fit stride')

    def forward(self, x):
        """Convolve batch x (N, H, W, C) and return a (N, oh, ow, filters) tensor."""
        # Each column of col_weights is one flattened filter.
        col_weights = self.weights.reshape([-1, self.filters])
        print("x.shape", x.shape)
        if self.pad == 'same':
            x = np.pad(x, (
                (0, 0), (self.kernel_size[0] // 2, self.kernel_size[0] // 2),
                (self.kernel_size[1] // 2, self.kernel_size[1] // 2), (0, 0)),
                       'constant', constant_values=0)
        print("pad x shape", x.shape)
        self.col_image = []
        conv_out = np.zeros(self.eta.shape)
        for i in range(self.batchsize):
            img_i = x[i][np.newaxis, :]
            # im2col flattens every window so the convolution becomes one matmul.
            self.col_image_i = im2col(img_i, self.kernel_size, self.stride)
            print('self.col_image_i.shape', self.col_image_i.shape)
            conv_out[i] = np.reshape(np.dot(self.col_image_i, col_weights) + self.biass, self.eta[0].shape)
            self.col_image.append(self.col_image_i)
        self.col_image = np.array(self.col_image)
        return conv_out

    def gradient(self, eta):
        """Accumulate dw/db from upstream gradient `eta` and return the gradient
        w.r.t. this layer's input.

        NOTE(review): the transposed-convolution below assumes stride == 1;
        for larger strides eta would need dilation first — confirm before use.
        """
        self.eta = eta
        col_eta = np.reshape(eta, [self.batchsize, -1, self.filters])
        for i in range(self.batchsize):
            self.dw += np.dot(self.col_image[i].T, col_eta[i]).reshape(self.weights.shape)
        # BUGFIX: sum over batch AND positions to get a (filters,) bias gradient.
        # Previously this line ran outside the loop on the stale index i and
        # collapsed a 2-D slice to a scalar.
        self.db += np.sum(col_eta, axis=(0, 1))

        if self.pad == 'valid':
            # Full padding so convolving with the flipped kernel restores the input size.
            pad_eta = np.pad(self.eta, (
                (0, 0), (self.kernel_size[0] - 1, self.kernel_size[0] - 1),
                (self.kernel_size[1] - 1, self.kernel_size[1] - 1), (0, 0)),
                             'constant', constant_values=0)
        if self.pad == 'same':
            # BUGFIX: kernel_size is a tuple; index its components (was kernel_size // 2,
            # which raises TypeError).
            pad_eta = np.pad(self.eta, (
                (0, 0), (self.kernel_size[0] // 2, self.kernel_size[0] // 2),
                (self.kernel_size[1] // 2, self.kernel_size[1] // 2), (0, 0)),
                             'constant', constant_values=0)

        # Backprop through a convolution = convolution with the 180-degree
        # rotated kernels, with the in/out channel axes swapped.
        flip_weights = np.flipud(np.fliplr(self.weights))
        flip_weights = flip_weights.swapaxes(2, 3)
        col_flip_weights = flip_weights.reshape([-1, self.shape[-1]])
        col_pad_eta = np.array(
            [im2col(pad_eta[i][np.newaxis, :], self.kernel_size, self.stride) for i in range(self.batchsize)])
        next_eta = np.dot(col_pad_eta, col_flip_weights)
        next_eta = np.reshape(next_eta, self.shape)
        return next_eta

    def backward(self, lr=0.00001, weight_decay=0.0004):
        """Apply accumulated gradients with L2 weight decay, then reset them."""
        self.weights *= (1 - weight_decay)
        self.biass *= (1 - weight_decay)
        self.weights -= lr * self.dw
        self.biass -= lr * self.db

        self.dw = np.zeros(self.weights.shape)
        self.db = np.zeros(self.biass.shape)


# Unfold an image into flattened convolution windows (one window per row).
def im2col(image, ksize, stride):
    """Slide a (kh, kw) window over a (1, H, W, C) image and stack each
    flattened patch as one row of the returned 2-D array."""
    kh, kw = ksize
    patches = [
        image[:, r:r + kh, c:c + kw, :].reshape([-1])
        for r in range(0, image.shape[1] - kh + 1, stride)
        for c in range(0, image.shape[2] - kw + 1, stride)
    ]
    return np.array(patches)


class AvgPooling:
    """Average pooling over NHWC input.

    forward() records a 0/1 coverage mask in self.index that gradient() uses
    to route the upstream error back to the pooled positions.
    """

    def __init__(self, shape, ksize=3, stride=2):
        """
        shape: expected input shape (batch, height, width, channels).
        ksize: pooling window size.
        stride: pooling step.
        """
        self.input_shape = shape
        self.ksize = ksize
        self.stride = stride
        self.output_channels = shape[-1]
        # Kept for backward compatibility; no longer used by forward().
        self.integral = np.zeros(shape)
        self.index = np.zeros(shape)

    def gradient(self, eta):
        """Upsample eta by the stride and spread it evenly over each window.

        NOTE(review): exact only when ksize == stride and the spatial dims
        divide evenly — same assumption the original code made.
        """
        next_eta = np.repeat(eta, self.stride, axis=1)
        next_eta = np.repeat(next_eta, self.stride, axis=2)
        next_eta = next_eta * self.index
        return next_eta / (self.ksize * self.ksize)

    def forward(self, x):
        """Return the (N, H//stride, W//stride, C) window averages of x.

        BUGFIX: the previous summed-area-table extraction hard-coded row/col
        index 1 instead of ksize-1 (only valid for ksize == 2, default is 3),
        subtracted the integral[i-1, j-1] corner instead of adding it, and
        floor-divided the result (`out //=`). Windows are now averaged directly
        with np.mean, and out-of-range output indices can no longer occur.
        """
        out = np.zeros((x.shape[0], x.shape[1] // self.stride,
                        x.shape[2] // self.stride, self.output_channels))
        # Reset the coverage mask so repeated forward passes don't accumulate.
        self.index = np.zeros(x.shape)
        for b in range(x.shape[0]):
            for c in range(self.output_channels):
                for oi in range(out.shape[1]):
                    for oj in range(out.shape[2]):
                        i, j = oi * self.stride, oj * self.stride
                        self.index[b, i:i + self.ksize, j:j + self.ksize, c] = 1
                        out[b, oi, oj, c] = np.mean(x[b, i:i + self.ksize, j:j + self.ksize, c])
        return out


class MaxPooling:
    """Max pooling over NHWC input; remembers argmax positions for backprop."""

    def __init__(self, shape, ksize=2, stride=2):
        """
        shape: expected input shape (batch, height, width, channels).
        ksize: pooling window size.
        stride: pooling step.
        """
        self.input_shape = shape
        self.ksize = ksize
        self.stride = stride
        self.output_channels = shape[-1]
        self.index = np.zeros(shape)
        # BUGFIX: integer division — `/` produced float shape entries.
        self.output_shape = [shape[0], shape[1] // self.stride, shape[2] // self.stride, self.output_channels]

    def forward(self, x):
        """Return the (N, H//stride, W//stride, C) window maxima of x and set
        self.index to 1 at each winning position.

        BUGFIX: all indices now use integer division (`/` raised TypeError on
        Python 3), the flat argmax is decoded with the window's actual width
        (was stride, wrong whenever ksize != stride), and the mask is reset so
        repeated forward passes don't accumulate stale hits.
        """
        out_h = x.shape[1] // self.stride
        out_w = x.shape[2] // self.stride
        out = np.zeros([x.shape[0], out_h, out_w, self.output_channels])
        self.index = np.zeros(x.shape)
        for b in range(x.shape[0]):
            for c in range(self.output_channels):
                for oi in range(out_h):
                    for oj in range(out_w):
                        i, j = oi * self.stride, oj * self.stride
                        window = x[b, i:i + self.ksize, j:j + self.ksize, c]
                        out[b, oi, oj, c] = np.max(window)
                        flat = np.argmax(window)
                        # Decode the flattened argmax into (row, col) within the window.
                        self.index[b, i + flat // window.shape[1], j + flat % window.shape[1], c] = 1
        return out

    def gradient(self, eta):
        """Route upstream gradient eta back to the recorded argmax positions."""
        return np.repeat(np.repeat(eta, self.stride, axis=1), self.stride, axis=2) * self.index

class Dense:
    """Fully-connected layer — placeholder only; no behavior implemented yet."""

    def __init__(self, units, activation):
        # Stub: `units` and `activation` are accepted but currently ignored.
        pass


# Smoke test: one forward/backward pass through a single Conv2D layer.
img = np.ones((1, 32, 32, 3))
img *= 2
conv = Conv2D(12, (3, 3), img.shape)
# BUGFIX: renamed from `next`, which shadowed the builtin of the same name.
feature_map = conv.forward(img)
shifted = feature_map.copy() + 1
# shifted - feature_map is an all-ones error signal.
conv.gradient(shifted - feature_map)
print('conv.dw', conv.dw, conv.dw.shape, '\n\t\n')
print('conv.db', conv.db)
conv.backward()
print('conv.weights', conv.weights, '\n\n\n')
print("conv.biass", conv.biass, '\n\n\n')
