'''
 * @ author     ：廖传港
 * @ date       ：Created in 2020/11/26 15:20
 * @ description：
 * @ modified By：
 * @ version    : 
 * @File        : model.py 
'''

import numpy as np
from com.lcg.version6 import Loading_pictures as lp

from numpy.linalg import norm


# Debug flag: 1 enables the shape prints in DNN.Forward.
myDebug = 1
# Global step size (learning rate) read by DFilter.Learn.
step = 0.1
# learn = 0.000000001
# NOTE(review): named like a learning rate but used by the loss classes as a
# small epsilon added before log() — confirm the intended role.
learn = 0.0000001

# region Activation Function
# Activation functions, selected by name, with forward and derivative forms.
class ActivationFunc:
    """Pairs a forward activation with its derivative, selected by name.

    Supported names: 'linear', 'sigmoid', 'relu'.  The wrappers y0_y / dy_y0
    accept scalars or numpy arrays of any shape (the original per-element
    loop only handled 1-D arrays, and 'relu' raised on higher ranks).
    """

    def __init__(self, funcName):
        self.funcName = funcName
        if funcName == 'linear':
            self.forwardFunc = self.forward_linear
            self.backwardFunc = self.backward_linear
        elif funcName == 'sigmoid':
            self.forwardFunc = self.forward_sigmoid
            self.backwardFunc = self.backward_sigmoid
        elif funcName == 'relu':
            self.forwardFunc = self.forward_relu
            self.backwardFunc = self.backward_relu
        else:
            # Previously an unknown name silently left forwardFunc unset and
            # failed with AttributeError only at first use; fail fast instead.
            raise ValueError('unknown activation function: %r' % (funcName,))

    def forward_linear(self, y0):
        # Identity activation.
        return y0

    def backward_linear(self, y):
        # d(identity)/dy0 = 1; match the input's shape for arrays so that
        # dy_y0 returns a full gradient array, not a scalar.
        if isinstance(y, np.ndarray):
            return np.ones(y.shape)
        return 1

    def forward_sigmoid(self, y0):
        # 1 / (1 + e^-x); numpy broadcasts element-wise over arrays.
        return 1 / (1 + np.exp(-y0))

    def backward_sigmoid(self, y):
        # Derivative expressed in terms of the *output* y = sigmoid(y0).
        return y * (1 - y)

    def forward_relu(self, y0):
        # max(0, x), element-wise for arrays.
        if isinstance(y0, np.ndarray):
            return np.maximum(y0, 0)
        return y0 if y0 > 0 else 0

    def backward_relu(self, y):
        # 1 where the unit was active, else 0.
        if isinstance(y, np.ndarray):
            return np.where(y > 0, 1.0, 0.0)
        return 1 if y > 0 else 0

    def y0_y(self, y0):
        """Forward pass: activation of y0 (scalar or array of any shape)."""
        if isinstance(y0, np.ndarray):
            # np.array(...) copies, preserving the old behaviour of returning
            # a fresh float array rather than aliasing the input.
            return np.array(self.forwardFunc(y0), dtype=float)
        return self.forwardFunc(y0)

    def dy_y0(self, y):
        """Backward pass: derivative dy/dy0 evaluated from the output y."""
        if isinstance(y, np.ndarray):
            return np.array(self.backwardFunc(y), dtype=float)
        return self.backwardFunc(y)
# Cross-entropy: measures how close the predicted distribution nx is to the
# expected distribution ny — the smaller the value, the closer the two.
class CrossEntropy:
    # Small epsilon added to predictions so log(0) cannot occur.  The original
    # reused the module-level learning-rate global `learn` for this; decoupled
    # here with the same value, 1e-7.
    EPS = 1e-7

    def __init__(self):
        self.nx = None   # last (epsilon-shifted) prediction
        self.ny = None   # last target distribution
        self.dnx = None  # last gradient w.r.t. nx

    def loss(self, nx, ny):
        """Return sum(-ny * log(nx + EPS)) and cache inputs for backward()."""
        # Copy instead of `nx += EPS`: the old in-place add mutated the
        # caller's array (the previous layer's output buffer).
        nx = nx + self.EPS
        self.nx = nx
        self.ny = ny
        return np.sum(-ny * np.log(nx))

    def backward(self):
        # d(loss)/d(nx) = -ny / nx, element-wise.
        self.dnx = -self.ny / self.nx
        return self.dnx

# Cross-entropy preceded by a softmax over the raw logits.
class SoftmaxCrossEntropy:
    """Softmax + cross-entropy loss, with the combined gradient px - ny."""

    def __init__(self):
        self.nx = None   # last raw logits
        self.px = None   # softmax probabilities from the last loss() call
        self.ny = None   # last target distribution
        self.dnx = None  # last gradient w.r.t. the logits

    def loss(self, nx, ny):
        """Return sum(-ny * log(softmax(nx))) and cache px for backward()."""
        self.nx = nx
        # Subtract the max before exponentiating: softmax is invariant to a
        # constant shift, and this prevents exp() overflow for large logits.
        # (The original's `nx += learn` shift was likewise a softmax no-op,
        # but it mutated the caller's array in place — both removed.)
        e = np.exp(nx - np.max(nx))
        self.px = e / np.sum(e)
        self.ny = ny
        return np.sum(-ny * np.log(self.px))

    def backward(self):
        # Combined softmax + cross-entropy gradient w.r.t. the logits.
        self.dnx = self.px - self.ny
        return self.dnx

# Euclidean-norm loss (called "MSE" in this project; see
# https://zhuanlan.zhihu.com/p/37217242): loss = ||nx - ny||_2, with the
# gradient taken as (nx - ny), i.e. the gradient of 0.5 * ||nx - ny||^2.
class MSE:
    def __init__(self):
        # Operands of the most recent loss() call, cached for backward().
        self.nx = None
        self.ny = None
        self.dnx = None

    def loss(self, nx, ny):
        """Cache the prediction/target pair and return ||nx - ny||."""
        self.nx, self.ny = nx, ny
        return norm(nx - ny)

    def backward(self):
        """Gradient of the last loss w.r.t. nx: the difference vector."""
        self.dnx = self.nx - self.ny
        return self.dnx

class DNN:
    """Minimal sequential network: an ordered list of layers, each exposing
    Forward(x), Backward(dy) and Learn()."""

    def __init__(self):
        # Layers in forward order.
        self.layers = []

    # Append a layer after the existing ones.
    def Add(self, layer):
        self.layers.append(layer)

    def Forward(self, X):
        """Run one sample X through every layer and return the final output."""
        if myDebug == 1:
            print('dnn forward==>X.shape=', X.shape)

        nL = len(self.layers)
        if myDebug == 1:
            print('dnn forward==>nL.shape=', nL)

        y = X
        for i in range(nL):
            y = self.layers[i].Forward(y)
        return y

    # Batch prediction over the first axis of X.
    def BatchPredict(self, X):
        self.predictY = []
        for k in range(X.shape[0]):
            self.predictY.append(self.Forward(X[k]))
        self.predictY = np.array(self.predictY)
        return self.predictY

    def Compile(self, lossMethod='MSE'):
        """Select the loss model: 'MSE', 'CrossEntropy' or 'SoftmaxCrossEntropy'."""
        if lossMethod == 'MSE':
            self.lossModel = MSE()

        if lossMethod == 'CrossEntropy':
            self.lossModel = CrossEntropy()

        if lossMethod == 'SoftmaxCrossEntropy':
            self.lossModel = SoftmaxCrossEntropy()

    # One training round: forward, loss, backward and learn for every sample.
    def FitOneRound(self, X, Y, iRound, epochs):
        loss = 0
        nL = len(self.layers)

        for k in range(X.shape[0]):
            y = self.Forward(X[k])
            loss += self.lossModel.loss(nx=y, ny=Y[k])
            dy = self.lossModel.backward()

            # Clip the gradient of a scalar output to +/-1 to avoid blow-up.
            if (y.shape[0] == 1) and (np.linalg.norm(dy, 1) > 1):
                if y > Y[k]:
                    dy = 1
                else:
                    dy = -1

            # NOTE(review): the original computed a local decaying
            # `step = 0.75 * (epochs - iRound) / epochs + 0.01` here, but it
            # shadowed the module-level `step` that the layers actually read,
            # so it had no effect; removed. `global step` was probably intended.

            for i in range(nL):
                dy = self.layers[nL - i - 1].Backward(dy)
                self.layers[nL - i - 1].Learn()

        # Report roughly 10 times per run.  Was `int(epochs / 10)`, which
        # raised ZeroDivisionError whenever epochs < 10.
        if (iRound % max(1, epochs // 10) == 0) or (iRound == epochs - 1):
            print('round=', iRound, 'loss=', loss, '准确率=')

    # Train for `epochs` rounds over the whole data set, e.g. Fit(X, Y, 100).
    def Fit(self, X, Y, epochs=1):
        for i in range(epochs):
            self.FitOneRound(X, Y, i, epochs)

# A single 2-D convolution kernel (filter) with a scalar bias.
class DFilter(object):
    def __init__(self, fW, fH, fC, stride):
        self.fW = fW          # kernel width
        self.fH = fH          # kernel height
        self.fC = fC          # channel count (kept for the caller's API;
                              # the kernel itself convolves one 2-D plane)
        self.stride = stride  # step between neighbouring windows
        self.bInitilize = False  # weights are created lazily on first Forward

    # Forward pass.
    def Forward(self, X):
        """Valid cross-correlation of the 2-D array X with self.W plus bias.

        Returns the cached (yW, yH) output array self.Y.
        """
        if self.bInitilize == False:
            self.InitlizeWeights(X.shape)

        stride = self.stride

        for i in range(self.yW):
            for j in range(self.yH):
                # Core of the convolution: multiply the window by the kernel
                # element-wise and sum.  (The original additionally looped
                # over `self.yC`, which is never assigned anywhere, and
                # indexed the 2-D self.Y with three indices — that raised
                # AttributeError on first use.  This filter is strictly 2-D,
                # matching InitlizeWeights, Backward and its caller.)
                self.Y[i, j] = np.sum(
                    X[i * stride:i * stride + self.fW,
                      j * stride:j * stride + self.fH] * self.W) + self.b

        return self.Y

    # Lazy initialisation: size the output and randomise kernel and bias.
    def InitlizeWeights(self, xShape):
        self.bInitilize = True
        self.xW = xShape[0]
        self.xH = xShape[1]
        self.yW = (int)((self.xW - self.fW) / self.stride + 1)  # windows along width
        self.yH = (int)((self.xH - self.fH) / self.stride + 1)  # windows along height

        self.Y = np.random.rand(self.yW, self.yH)
        self.W = np.random.rand(self.fW, self.fH)  # kernel weights
        self.W = self.W / np.sum(self.W)           # normalise to sum 1
        self.b = 0.05 * np.random.random()         # small random bias
        self.db = 0                                # accumulated bias gradient
        self.dW = np.zeros((self.fW, self.fH))     # accumulated kernel gradient

    # Backward pass.
    def Backward(self, dy, X):
        """Accumulate dW/db from upstream gradient dy; return dL/dX."""
        self.dx = np.zeros((self.xW, self.xH))

        stride = self.stride

        for i in range(self.yW):
            for j in range(self.yH):
                self.db += dy[i][j]
                # Each output cell contributes its window, scaled by dy.
                self.dW += dy[i][j] * X[i * stride:i * stride + self.fW,
                                        j * stride:j * stride + self.fH]

                self.dx[i * stride:i * stride + self.fW,
                        j * stride:j * stride + self.fH] += dy[i][j] * self.W

        return self.dx

    # Apply one gradient step (module-level `step`) and reset accumulators.
    def Learn(self):
        self.W = self.W - step * self.dW
        self.b = self.b - step * 0.1 * self.db

        self.db = 0
        self.dW = np.zeros((self.fW, self.fH))

# 3-D convolution layer: applies nFilter independent 2-D kernels to every
# input plane, producing nLayer * nFilter output planes.
class CNN3D_MultiLayer(object):
    # e.g. CNN3D_MultiLayer(4, 4, 3, 2, 10) -> (w, h, channels, stride, kernels)
    def __init__(self, fW, fH, fC, stride, nFilter):
        self.bInitilize = False
        self.fW = fW              # kernel width
        self.fH = fH              # kernel height
        self.fC = fC              # channel count
        self.stride = stride      # window step
        self.nFilter = nFilter    # number of kernels
        # One independent kernel per output feature map.
        self.filters = [DFilter(fW, fH, fC, stride) for _ in range(nFilter)]

    def InitlizeWeights(self, xShape):
        """Derive output sizes from the first input's shape (lazy init)."""
        self.bInitilize = True
        self.nLayer = xShape[0]   # number of input planes
        self.xW = xShape[1]
        self.xH = xShape[2]
        self.yW = int((self.xW - self.fW) / self.stride + 1)
        self.yH = int((self.xH - self.fH) / self.stride + 1)
        outShape = (self.nLayer * self.nFilter, self.yW, self.yH)
        self.Y = np.random.rand(*outShape)
        self.dy = np.random.rand(*outShape)

    def Forward(self, X):
        """Convolve every input plane with every filter; returns self.Y."""
        self.X = X
        self.originalXShape = X.shape

        # Promote a single 2-D plane to a one-plane 3-D stack.
        if len(X.shape) == 2:
            self.X = X.reshape(1, X.shape[0], X.shape[1])

        if not self.bInitilize:
            self.InitlizeWeights(self.X.shape)

        for n in range(self.nLayer):
            for i, flt in enumerate(self.filters):
                self.Y[n * self.nFilter + i] = flt.Forward(self.X[n])

        return self.Y

    def Backward(self, dy):
        """Route each output plane's gradient to its filter; sum up dL/dX."""
        self.dx = np.zeros((self.nLayer, self.xW, self.xH))

        for n in range(self.nLayer):
            for i, flt in enumerate(self.filters):
                self.dx += flt.Backward(dy[n * self.nFilter + i], self.X[n])

        # Hand the gradient back in the caller's original shape.
        self.dx = self.dx.reshape(self.originalXShape)
        return self.dx

    def Learn(self):
        """Let every filter apply its accumulated gradients."""
        for flt in self.filters:
            flt.Learn()


if __name__ == '__main__':

    # Data preparation.
    n = 20
    # Random samples from the standard normal distribution:
    # 10 samples, each (n, n, 3).
    X = np.random.randn(10, n, n, 3)
    # print("X.shape:",X.shape)
    # print("X:",X.shape)
    # One scalar target per sample (0.0, 0.1, ..., 0.9).
    Y = np.zeros((10,))
    for i in range(Y.shape[0]):
        Y[i] = i / 10
    # print("Y:",Y)
    # print("Y:", Y.shape)

    # Training.
    dnn = DNN()
    # Conv layer: 4x4 kernels, 3 channels (R, G, B), stride 2, 10 filters.
    # NOTE(review): CNN3D_MultiLayer treats axis 0 of each sample as the
    # plane axis, but X[k] is (n, n, 3) channels-last — confirm the layout.
    dnn.Add(CNN3D_MultiLayer(4, 4, 3, 2, 10))
    # Pooling layer, 4x4 window.
    # NOTE(review): DMaxPooling2D is not defined or imported anywhere in this
    # file — this line raises NameError at runtime; confirm the missing import.
    dnn.Add(DMaxPooling2D(4, 4))
    dnn.Compile()  # default loss model: MSE
    dnn.Fit(X, Y, 100)
