'''
 * @author      : 廖传港
 * @date        : Created in 2020/12/4 19:01
 * @description : Minimal NumPy implementation of dense / convolution / pooling
 *                layers and loss functions for a small neural network.
 * @modified By :
 * @version     :
 * @File        : model.py
'''

import numpy as np

from numpy.linalg import norm


# Debug switch: set to 1 to print tensor shapes inside the Forward passes.
myDebug = 0
# Global step size (learning rate) read by the Learn() methods of the layers.
step = 0.1
# learn = 0.000000001
# NOTE(review): despite the name, `learn` is used as a small epsilon added to
# predictions in the cross-entropy losses to keep log() away from zero.
learn = 0.0000001
# learn = 0.01

# Activation function wrapper: pairs a forward transform with its derivative.
class ActivationFunc:

    def __init__(self, funcName):
        """Select the forward/backward pair by name: 'linear', 'sigmoid' or 'relu'."""
        self.funcName = funcName
        if funcName == 'linear':
            self.forwardFunc = self.forward_linear
            self.backwardFunc = self.backward_linear
        elif funcName == 'sigmoid':
            self.forwardFunc = self.forward_sigmoid
            self.backwardFunc = self.backward_sigmoid
        elif funcName == 'relu':
            self.forwardFunc = self.forward_relu
            self.backwardFunc = self.backward_relu
        else:
            # BUG FIX: an unknown name used to leave forwardFunc/backwardFunc
            # unset, deferring the failure to the first use; fail fast instead.
            raise ValueError("unknown activation: %r" % (funcName,))

    def forward_linear(self, y0):
        """Identity activation."""
        return y0

    def backward_linear(self, y):
        """Derivative of the identity (shape-matched ones for arrays)."""
        if isinstance(y, np.ndarray):
            return np.ones(y.shape)
        return 1

    def forward_sigmoid(self, y0):
        """Logistic sigmoid; NumPy ops make it element-wise on arrays."""
        return 1 / (1 + np.exp(-y0))

    def backward_sigmoid(self, y):
        """Sigmoid derivative expressed through the *output* y: y * (1 - y)."""
        return y * (1 - y)

    def forward_relu(self, y0):
        """ReLU, vectorized so arrays of any shape are supported.

        (The original scalar `if y0 > 0` raised on multi-dimensional input.)
        """
        if isinstance(y0, np.ndarray):
            return np.maximum(y0, 0)
        return y0 if y0 > 0 else 0

    def backward_relu(self, y):
        """ReLU derivative: 1 where the output is positive, else 0."""
        if isinstance(y, np.ndarray):
            return np.where(y > 0, 1.0, 0.0)
        return 1 if y > 0 else 0

    def y0_y(self, y0):
        """Apply the activation to a scalar or an array of any shape."""
        # The element functions above are vectorized, so the per-element
        # Python loop of the original (first axis only) is unnecessary.
        return self.forwardFunc(y0)

    def dy_y0(self, y):
        """Derivative of the activation, computed from the activation output y."""
        return self.backwardFunc(y)

# Fully connected (dense) layer.
class DDense:

    def __init__(self, outShape, activateFunc='linear'):
        """outShape: number of output units; activateFunc: activation name."""
        self.activate = ActivationFunc(activateFunc)
        self.outShape = outShape
        self.bInitilize = False

    def InitlizeWeights(self, inShape):
        """Lazily allocate W (inShape x outShape) and the bias vector b."""
        self.bInitilize = True
        self.inShape = inShape
        self.W = np.random.rand(self.inShape, self.outShape)
        # Normalize each output column so its weights sum to 1.
        self.W = self.W / np.sum(self.W, axis=0)
        self.b = 0.1 * np.random.rand(self.outShape)

    def Forward(self, X):
        """Forward pass: Y = activation(X @ W + b); caches X, Y0 and Y."""
        if not self.bInitilize:
            self.InitlizeWeights(X.shape[0])
        self.X = X
        self.Y0 = np.dot(X, self.W) + self.b
        self.Y = self.activate.y0_y(self.Y0)
        return self.Y

    def FixedBackward_dy0(self, dy0):
        """Clamp the gradient to 0.1 wherever the pre-activation Y0 exceeds 2."""
        if self.outShape > 1:
            dy0[np.where(self.Y0 > 2)] = 0.1
            return dy0
        if self.Y0 > 2:
            return 0.1
        return dy0

    def Backward(self, dy):
        """Backprop: caches dw, db and dy0, returns dLoss/dX."""
        grad = self.activate.dy_y0(self.Y) * dy
        self.dy0 = grad
        self.dw = np.outer(self.X, grad)
        self.db = grad
        self.dx = np.dot(self.W, grad)
        return self.dx

    def Learn(self):
        """Gradient step using the module-level `step` (bias uses a 0.1 factor)."""
        self.W = self.W - step * self.dw
        self.b = self.b - step * 0.1 * self.db

# A single 2-D convolution kernel (filter) with a shared scalar bias.
class DFilter:

    def __init__(self, fW, fH, stride):
        self.fW = fW
        self.fH = fH
        self.stride = stride
        self.bInitilize = False

    def InitlizeWeights(self, xShape):
        """Lazily allocate kernel weights once the input size is known."""
        self.bInitilize = True
        self.xW, self.xH = xShape[0], xShape[1]
        self.yW = int((self.xW - self.fW) / self.stride + 1)
        self.yH = int((self.xH - self.fH) / self.stride + 1)
        self.Y = np.random.rand(self.yW, self.yH)
        self.W = np.random.rand(self.fW, self.fH)
        self.W = self.W / np.sum(self.W)
        self.b = 0.05 * np.random.random()
        # Gradient accumulators; zeroed again by Learn().
        self.db = 0
        self.dW = np.zeros((self.fW, self.fH))

    def Forward(self, X):
        """Valid cross-correlation of X with the kernel, plus bias."""
        if not self.bInitilize:
            self.InitlizeWeights(X.shape)
        s = self.stride
        for r in range(self.yW):
            for c in range(self.yH):
                window = X[r * s:r * s + self.fW, c * s:c * s + self.fH]
                # Y[r, c] = receptive field * kernel + bias
                self.Y[r, c] = np.sum(window * self.W) + self.b
        return self.Y

    def Backward(self, dy, X):
        """Accumulate kernel/bias gradients and return dLoss/dX."""
        self.dx = np.zeros((self.xW, self.xH))
        s = self.stride
        for r in range(self.yW):
            for c in range(self.yH):
                g = dy[r][c]
                rows = slice(r * s, r * s + self.fW)
                cols = slice(c * s, c * s + self.fH)
                self.db += g
                self.dW += g * X[rows, cols]
                self.dx[rows, cols] += g * self.W
        return self.dx

    def Learn(self):
        """SGD step using the module-level `step`; reset the accumulators."""
        self.W = self.W - step * self.dW
        self.b = self.b - step * 0.1 * self.db
        self.db = 0
        self.dW = np.zeros((self.fW, self.fH))

# 2-D CNN over a single input feature map: applies nFilter independent kernels.
class CNN2D:

    def __init__(self, fW, fH, stride, nFilter):
        """Build nFilter DFilter kernels of size fW x fH with the given stride."""
        self.bInitilize = False
        self.fW = fW
        self.fH = fH
        self.stride = stride
        self.nFilter = nFilter
        self.filters = []
        for i in range(nFilter):
            self.filters.append(DFilter(fW, fH, stride))

    def InitlizeWeights(self, xShape):
        """Lazily size the output tensors once the input shape is known."""
        self.bInitilize = True
        self.xW = xShape[0]
        self.xH = xShape[1]
        self.yW = int((self.xW - self.fW) / self.stride + 1)
        self.yH = int((self.xH - self.fH) / self.stride + 1)
        self.Y = np.random.rand(self.nFilter, self.yW, self.yH)
        self.dy = np.random.rand(self.nFilter, self.yW, self.yH)

    def Forward(self, X):
        """Convolve X with every filter; returns (nFilter, yW, yH)."""
        if not self.bInitilize:
            self.InitlizeWeights(X.shape)
        self.X = X
        for i in range(self.nFilter):
            self.Y[i] = self.filters[i].Forward(X)
        return self.Y

    def Backward(self, dy):
        """Sum each filter's input gradient; returns dLoss/dX of shape (xW, xH)."""
        self.dx = np.zeros((self.xW, self.xH))
        for i in range(self.nFilter):
            # BUG FIX: DFilter.Backward requires the forward input X as its
            # second argument; the original call omitted it and always raised
            # TypeError. Pass the input cached by Forward().
            self.dx += self.filters[i].Backward(dy[i], self.X)
        return self.dx

    def Learn(self):
        """Apply the accumulated gradients on every filter."""
        for i in range(self.nFilter):
            self.filters[i].Learn()

# Convolution layer that accepts a stack of input images (multiple layers);
# every input layer is convolved with every filter.
class CNN2D_MultiLayer:

    # e.g. CNN2D_MultiLayer(4, 4, 2, 10) -> (kernel W, kernel H, stride, nFilter)
    def __init__(self, fW, fH, stride, nFilter):
        self.bInitilize = False
        self.fW = fW
        self.fH = fH
        self.stride = stride
        self.nFilter = nFilter
        # One DFilter per output channel, shared across the input layers.
        self.filters = []
        for i in range(nFilter):
            self.filters.append(DFilter(fW, fH, stride))

    def InitlizeWeights(self, xShape):
        """Lazily size the output: (nLayer * nFilter, yW, yH)."""
        self.bInitilize = True
        print("xShape", xShape)
        self.nLayer = xShape[0]
        self.xW = xShape[1]
        self.xH = xShape[2]
        self.yW = int((self.xW - self.fW) / self.stride + 1)
        self.yH = int((self.xH - self.fH) / self.stride + 1)
        self.Y = np.random.rand(self.nLayer * self.nFilter, self.yW, self.yH)
        self.dy = np.random.rand(self.nLayer * self.nFilter, self.yW, self.yH)

    def Forward(self, X):
        """Convolve every input layer with every filter.

        A 2-D input is treated as a single-layer stack; the original shape is
        remembered so Backward can return a gradient of matching shape.
        """
        self.X = X
        self.originalXShape = X.shape
        if len(X.shape) == 2:
            self.X = X.reshape(1, X.shape[0], X.shape[1])
        if not self.bInitilize:
            self.InitlizeWeights(self.X.shape)
        for n in range(self.nLayer):
            for i in range(self.nFilter):
                self.Y[n * self.nFilter + i] = self.filters[i].Forward(self.X[n])
        return self.Y

    def Backward(self, dy):
        """Backprop; returns dLoss/dX reshaped to the original input shape."""
        self.dx = np.zeros((self.nLayer, self.xW, self.xH))
        for n in range(self.nLayer):
            for i in range(self.nFilter):
                # BUG FIX: accumulate into the slice for input layer n only.
                # The original `self.dx += ...` broadcast each filter's
                # (xW, xH) gradient onto every input layer.
                self.dx[n] += self.filters[i].Backward(dy[n * self.nFilter + i], self.X[n])
        self.dx = self.dx.reshape(self.originalXShape)
        return self.dx

    def Learn(self):
        """Apply the accumulated gradients on every filter."""
        for i in range(self.nFilter):
            self.filters[i].Learn()

# Flatten layer: collapses a multi-dimensional tensor to 1-D (and back).
class DFlatten:

    def Forward(self, X):
        """Flatten X to 1-D, remembering its shape for Backward."""
        if myDebug == 1:
            print('flatten forward', X.shape)
        self.xShape = X.shape
        self.Y = X.reshape(-1)
        return self.Y

    def Backward(self, dy):
        """Reshape the incoming gradient back to the original input shape."""
        self.dx = dy.reshape(self.xShape)
        return self.dx

    def Learn(self):
        # Flattening has no trainable parameters.
        pass

# Max pooling for a single 2-D feature map.
class DMaxPooling2D_OneLayer:

    def __init__(self, nW, nH):
        # Pooling window size: nW along axis 0 (rows), nH along axis 1 (cols).
        self.nW = nW
        self.nH = nH

    def Forward(self, X):
        """Max-pool X with a (nW, nH) window; returns an array of (yW, yH)."""
        self.X = X
        self.yW = int(X.shape[0] / self.nW)
        self.yH = int(X.shape[1] / self.nH)
        self.Y = np.zeros((self.yW, self.yH))
        for i in range(self.yW):
            for j in range(self.yH):
                # BUG FIX: the column slice started at j * nW in the original,
                # selecting the wrong window whenever nW != nH (Backward below
                # already indexes columns with nH, so the two disagreed).
                self.Y[i, j] = X[i * self.nW:(i + 1) * self.nW,
                                 j * self.nH:(j + 1) * self.nH].max()
        return self.Y

    def Backward(self, dy):
        """Route each dy entry back to the max position(s) of its window."""
        self.dx = np.zeros(self.X.shape)
        for i in range(self.yW):
            for j in range(self.yH):
                for m in range(self.nW):
                    for n in range(self.nH):
                        # NOTE: if several elements tie for the window maximum,
                        # all of them receive the gradient.
                        if self.X[i * self.nW + m, j * self.nH + n] == self.Y[i, j]:
                            self.dx[i * self.nW + m, j * self.nH + n] = dy[i, j]
        return self.dx

    def Learn(self):
        # Pooling has no trainable parameters.
        pass

# Max-pooling layer that pools a whole stack of feature maps at once,
# delegating each layer to a DMaxPooling2D_OneLayer instance.
class DMaxPooling2D:

    def __init__(self, nW, nH):
        self.nW = nW
        self.nH = nH

    def Forward(self, X):
        """Pool every layer of X; returns (nLayer, yW, yH)."""
        self.X = X
        self.nLayer = X.shape[0]
        self.yW = int(X[0].shape[0] / self.nW)
        self.yH = int(X[0].shape[1] / self.nH)
        self.Y = np.random.rand(self.nLayer, self.yW, self.yH)
        self.poolingLayers = []
        for k in range(self.nLayer):
            layer = DMaxPooling2D_OneLayer(self.nW, self.nH)
            self.poolingLayers.append(layer)
            self.Y[k, :] = layer.Forward(X[k])
        return self.Y

    def Backward(self, dy):
        """Route each layer's gradient through its own pooling instance."""
        self.dx = np.zeros(self.X.shape)
        for k, layer in enumerate(self.poolingLayers):
            self.dx[k, :] = layer.Backward(dy[k, :])
        return self.dx

    def Learn(self):
        # Pooling has no trainable parameters.
        pass

# Cross-entropy loss: measures the distance between the predicted probability
# distribution and the target distribution — the smaller, the closer.
class CrossEntropy:
    def __init__(self):
        self.nx = None   # cached prediction (with epsilon added) for backward()
        self.ny = None   # cached target for backward()
        self.dnx = None  # cached gradient dLoss/dnx

    def loss(self, nx, ny):
        """Return -sum(ny * log(nx + eps)); caches inputs for backward()."""
        # A tiny epsilon keeps log() away from zero. NOTE(review): the
        # module-level constant `learn` doubles as this epsilon.
        # BUG FIX: the original `nx += learn` mutated the caller's array in
        # place; the out-of-place add leaves the caller's data untouched.
        nx = nx + learn
        self.nx = nx
        self.ny = ny
        loss = np.sum(- ny * np.log(nx))
        return loss

    def backward(self):
        """Gradient of the loss w.r.t. the (epsilon-shifted) prediction."""
        self.dnx = - self.ny / self.nx
        return self.dnx

# Cross-entropy loss applied to a softmax of the raw network outputs (logits).
class SoftmaxCrossEntropy:
    def __init__(self):
        self.nx = None   # cached (epsilon-shifted) logits
        self.px = None   # cached softmax probabilities
        self.ny = None   # cached target distribution
        self.dnx = None  # cached gradient dLoss/dnx

    def loss(self, nx, ny):
        """Return -sum(ny * log(softmax(nx))); caches px/ny for backward()."""
        # BUG FIX: the original `nx += learn` mutated the caller's array in
        # place; use an out-of-place add (the `learn` constant is a tiny
        # epsilon here, not a learning rate).
        nx = nx + learn
        self.nx = nx
        # Numerically stable softmax: subtracting the max leaves the
        # probabilities unchanged but prevents exp() overflow on large logits.
        e = np.exp(nx - np.max(nx))
        self.px = e / np.sum(e)
        self.ny = ny
        loss = np.sum(- ny * np.log(self.px))
        return loss

    def backward(self):
        """Combined softmax + cross-entropy gradient: px - ny."""
        self.dnx = self.px - self.ny
        return self.dnx

# Euclidean-distance loss. NOTE: despite the class name, loss() returns the
# L2 norm ||nx - ny||, not the mean of squared errors.
class MSE:
    def __init__(self):
        self.nx = None   # cached prediction for backward()
        self.ny = None   # cached target for backward()
        self.dnx = None  # cached gradient dLoss/dnx

    def loss(self, nx, ny):
        """Return the Euclidean norm of (nx - ny); caches inputs for backward()."""
        self.nx = nx
        self.ny = ny
        return norm(nx - ny)

    def backward(self):
        """Gradient used for backprop: simply nx - ny."""
        self.dnx = self.nx - self.ny
        return self.dnx


# Sequential network container: layers run in the order they were Add()ed.
class DNN:

    def __init__(self):
        self.layers = []

    def Add(self, layer):
        """Append a layer (any object exposing Forward / Backward / Learn)."""
        self.layers.append(layer)

    def Forward(self, X):
        """Run one sample through every layer and return the final output."""
        if myDebug == 1:
            print('dnn forward', X.shape)
        y = X
        for layer in self.layers:
            y = layer.Forward(y)
        return y

    def BatchPredict(self, X):
        """Forward every sample along X's first axis; returns a stacked array."""
        self.predictY = []
        for k in range(X.shape[0]):
            self.predictY.append(self.Forward(X[k]))
        self.predictY = np.array(self.predictY)
        return self.predictY

    def Compile(self, lossMethod='MSE'):
        """Select the loss: 'MSE', 'CrossEntropy' or 'SoftmaxCrossEntropy'."""
        if lossMethod == 'MSE':
            self.lossModel = MSE()
        if lossMethod == 'CrossEntropy':
            self.lossModel = CrossEntropy()
        if lossMethod == 'SoftmaxCrossEntropy':
            self.lossModel = SoftmaxCrossEntropy()

    def FitOneRound(self, X, Y, iRound, epochs):
        """One pass (epoch) over the training set (X, Y, epoch index, total epochs)."""
        # BUG FIX: the decayed step below was assigned to a *local* variable in
        # the original, so the schedule never reached the layers' Learn()
        # methods — they read the module-level `step`. Declare it global so
        # the decay actually takes effect.
        global step
        loss = 0
        nL = len(self.layers)  # number of layers in the model

        for k in range(X.shape[0]):  # iterate over the samples of X
            y = self.Forward(X[k])   # one forward pass

            # The compiled loss model decides how the error is computed...
            loss += self.lossModel.loss(nx=y, ny=Y[k])
            # ...and how its gradient is produced.
            dy = self.lossModel.backward()

            # For a single scalar output, clip an exploding gradient
            # (L1 norm > 1) to +/-1 depending on the sign of the error.
            if (y.shape[0] == 1) and (np.linalg.norm(dy, 1) > 1):
                if y > Y[k]:
                    dy = 1
                else:
                    dy = -1

            # Linearly decaying learning rate: 0.76 at round 0 down to ~0.01.
            step = 0.75 * (epochs - iRound) / epochs + 0.01
            # Backpropagate through the layers in reverse order.
            for i in range(nL):
                dy = self.layers[nL - i - 1].Backward(dy)
                self.layers[nL - i - 1].Learn()

        # BUG FIX: int(epochs / 10) is 0 for epochs < 10 (including the default
        # epochs=1), so the original modulo raised ZeroDivisionError; clamp the
        # reporting interval to at least 1.
        if (iRound % max(1, epochs // 10) == 0) or (iRound == epochs - 1):
            print('round=', iRound, 'loss=', loss)

    def Fit(self, X, Y, epochs=1):
        """Train for `epochs` passes over (X, Y)."""
        for i in range(epochs):
            self.FitOneRound(X, Y, i, epochs)
