# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 15:25:10 2020

@author: 77994
"""
import numpy as np
from com.lcg.version4 import Loading_pictures as lp

from numpy.linalg import norm


# Debug switch: 1 enables verbose shape printing inside Forward passes.
myDebug = 0

# Global learning rate used by every layer's Learn() step.
step = 0.1
# learn = 0.000000001
# Small epsilon added to predictions before log() in the entropy losses.
learn = 0.0000001


# region Activation Function

# Activation function
class ActivationFunc:
    """Activation function: a forward transform paired with its derivative.

    The derivative is expressed in terms of the forward *output* y
    (e.g. sigmoid' = y * (1 - y)), which is what the layers cache.
    """

    def __init__(self, funcName):
        """Select the forward/backward pair: 'linear', 'sigmoid' or 'relu'."""
        self.funcName = funcName
        # Identity: pass values through unchanged.
        if funcName == 'linear':
            self.forwardFunc = self.forward_linear
            self.backwardFunc = self.backward_linear
        # S-shaped squashing into (0, 1).
        if funcName == 'sigmoid':
            self.forwardFunc = self.forward_sigmoid
            self.backwardFunc = self.backward_sigmoid
        if funcName == 'relu':
            self.forwardFunc = self.forward_relu
            self.backwardFunc = self.backward_relu

    def forward_linear(self, y0):
        return y0

    def backward_linear(self, y):
        return 1

    def forward_sigmoid(self, y0):
        return 1 / (1 + np.exp(-y0))

    def backward_sigmoid(self, y):
        # Sigmoid derivative in terms of its output y.
        return y * (1 - y)

    def forward_relu(self, y0):
        # np.maximum works for scalars and for arrays of any shape alike;
        # the original `if y0 > 0` only handled scalars.
        return np.maximum(y0, 0)

    def backward_relu(self, y):
        return np.where(y > 0, 1, 0)

    def y0_y(self, y0):
        """Apply the activation to pre-activation y0 (scalar or ndarray)."""
        if isinstance(y0, np.ndarray):
            # Vectorized: replaces the original per-element Python loop and
            # generalizes to any rank (the loop only handled the first axis).
            # np.array(...) copies, preserving the original fresh-array contract.
            return np.array(self.forwardFunc(y0), dtype=float)
        return self.forwardFunc(y0)

    def dy_y0(self, y):
        """Return d(activation)/d(pre-activation), given the output y."""
        if isinstance(y, np.ndarray):
            d = np.asarray(self.backwardFunc(y), dtype=float)
            # broadcast_to handles backward_linear's scalar 1; copy() keeps
            # the contract of returning a fresh, writable array.
            return np.broadcast_to(d, y.shape).copy()
        return self.backwardFunc(y)


# endregion

# Fully-connected (dense) layer
class DDense:
    """Fully-connected layer with lazily initialized weights.

    Weight matrix W is (inShape, outShape); the input size is discovered
    on the first Forward() call.
    """

    def __init__(self, outShape, activateFunc='linear', bFixRange=False):
        """outShape: output units; activateFunc: activation name."""
        self.activate = ActivationFunc(activateFunc)
        self.outShape = outShape
        self.bFixRange = bFixRange
        # Flipped to True once InitlizeWeights() has run.
        self.bInitilize = False

    def InitlizeWeights(self, inShape):
        """Allocate W (inShape x outShape) and bias b for this input size."""
        self.bInitilize = True
        self.inShape = inShape
        self.W = np.random.rand(self.inShape, self.outShape)
        # Normalize each output column so its weights sum to 1.
        self.W = self.W / np.sum(self.W, axis=0)
        self.b = 0.1 * np.random.rand(self.outShape)

    def Forward(self, X):
        """Compute Y = activate(X . W + b); caches X, Y0 and Y for backprop."""
        if not self.bInitilize:
            self.InitlizeWeights(X.shape[0])
        self.X = X
        self.Y0 = np.dot(X, self.W) + self.b
        self.Y = self.activate.y0_y(self.Y0)
        return self.Y

    def FixedBackward_dy0(self, dy0):
        """Clamp the pre-activation gradient where Y0 is large (currently unused)."""
        if self.outShape > 1:
            dy0[np.where(self.Y0 > 2)] = 0.1
            return dy0
        if self.Y0 > 2:
            return 0.1
        return dy0

    def Backward(self, dy):
        """Backpropagate dy; caches dw/db and returns the gradient w.r.t. X."""
        self.dy0 = self.activate.dy_y0(self.Y) * dy
        self.dw = np.outer(self.X, self.dy0)
        self.db = self.dy0
        self.dx = np.dot(self.W, self.dy0)
        return self.dx

    def Learn(self):
        """One gradient-descent step using the module-level learning rate."""
        self.W = self.W - step * self.dw
        # Bias moves with a 10x smaller rate than the weights.
        self.b = self.b - step * 0.1 * self.db

# Convolution kernel (a single filter)
class DFilter:
    """A single 2-D convolution kernel with a scalar bias.

    Weights are allocated lazily on the first Forward() call, once the
    input map's size is known.
    """

    def __init__(self, fW, fH, stride):
        """fW, fH: kernel height/width; stride: step between windows."""
        self.fW = fW
        self.fH = fH
        self.stride = stride
        self.bInitilize = False

    def InitlizeWeights(self, xShape):
        """Size the output map and draw random, sum-normalized weights."""
        self.bInitilize = True
        self.xW, self.xH = xShape[0], xShape[1]
        # Standard "valid" convolution output size.
        self.yW = int((self.xW - self.fW) / self.stride + 1)
        self.yH = int((self.xH - self.fH) / self.stride + 1)
        self.Y = np.random.rand(self.yW, self.yH)
        self.W = np.random.rand(self.fW, self.fH)
        self.W = self.W / np.sum(self.W)
        self.b = 0.05 * np.random.random()
        # Gradient accumulators, cleared by Learn().
        self.db = 0
        self.dW = np.zeros((self.fW, self.fH))

    def Forward(self, X):
        """Cross-correlate X with the kernel, add the bias, return Y."""
        if not self.bInitilize:
            self.InitlizeWeights(X.shape)
        s = self.stride
        for r in range(self.yW):
            for c in range(self.yH):
                window = X[r * s:r * s + self.fW, c * s:c * s + self.fH]
                self.Y[r, c] = np.sum(window * self.W) + self.b
        return self.Y

    def Backward(self, dy, X):
        """Accumulate db/dW from dy and return the gradient w.r.t. X."""
        self.dx = np.zeros((self.xW, self.xH))
        s = self.stride
        for r in range(self.yW):
            for c in range(self.yH):
                g = dy[r][c]
                self.db += g
                self.dW += g * X[r * s:r * s + self.fW, c * s:c * s + self.fH]
                self.dx[r * s:r * s + self.fW, c * s:c * s + self.fH] += g * self.W
        return self.dx

    def Learn(self):
        """Apply the accumulated gradients, then reset the accumulators."""
        self.W = self.W - step * self.dW
        # Bias uses a 10x smaller rate than the weights.
        self.b = self.b - step * 0.1 * self.db
        self.db = 0
        self.dW = np.zeros((self.fW, self.fH))


class CNN2D:
    """Single-input 2-D convolution layer: nFilter kernels over one 2-D map."""

    def __init__(self, fW, fH, stride, nFilter):
        """fW, fH: kernel size; stride: window step; nFilter: kernel count."""
        self.bInitilize = False
        self.fW = fW
        self.fH = fH
        self.stride = stride
        self.nFilter = nFilter
        self.filters = []
        for i in range(nFilter):
            self.filters.append(DFilter(fW, fH, stride))

    def InitlizeWeights(self, xShape):
        """Allocate the stacked output/gradient buffers for this input size."""
        self.bInitilize = True
        self.xW = xShape[0]
        self.xH = xShape[1]
        self.yW = (int)((self.xW - self.fW) / self.stride + 1)
        self.yH = (int)((self.xH - self.fH) / self.stride + 1)
        self.Y = np.random.rand(self.nFilter, self.yW, self.yH)
        self.dy = np.random.rand(self.nFilter, self.yW, self.yH)

    def Forward(self, X):
        """Run every filter over X; caches X and returns (nFilter, yW, yH)."""
        if self.bInitilize == False:
            self.InitlizeWeights(X.shape)
        self.X = X
        for i in range(self.nFilter):
            self.Y[i] = self.filters[i].Forward(X)
        return self.Y

    def Backward(self, dy):
        """Sum each filter's input gradient; returns (xW, xH)."""
        self.dx = np.zeros((self.xW, self.xH))
        for i in range(self.nFilter):
            # BUG FIX: DFilter.Backward requires the forward input X as its
            # second argument; the original call omitted it and raised a
            # TypeError as soon as this layer was backpropagated through.
            self.dx += self.filters[i].Backward(dy[i], self.X)
        return self.dx

    def Learn(self):
        """One gradient step for every filter."""
        for i in range(self.nFilter):
            self.filters[i].Learn()

# # 2-D CNN
# class CNN2D:
# Accepts a multi-layer stack of images as input
class CNN2D_MultiLayer:
    """Convolution layer over a stack of input maps (nLayer, W, H).

    Each of the nFilter kernels is applied to every input map, producing
    nLayer * nFilter output maps; a 2-D input is treated as a 1-layer stack.
    """

    # CNN2D_MultiLayer(4, 4, 2, 10) --> (kernel W, kernel H, stride, filter count)
    def __init__(self, fW, fH, stride, nFilter):
        """fW, fH: kernel size; stride: window step; nFilter: kernel count."""
        self.bInitilize = False
        self.fW = fW
        self.fH = fH
        self.stride = stride
        self.nFilter = nFilter
        self.filters = []
        for i in range(nFilter):
            self.filters.append(DFilter(fW, fH, stride))

    def InitlizeWeights(self, xShape):
        """Derive output-map sizes from the (nLayer, xW, xH) input shape."""
        self.bInitilize = True
        self.nLayer = xShape[0]
        self.xW = xShape[1]
        self.xH = xShape[2]
        self.yW = (int)((self.xW - self.fW) / self.stride + 1)
        self.yH = (int)((self.xH - self.fH) / self.stride + 1)
        self.Y = np.random.rand(self.nLayer * self.nFilter, self.yW, self.yH)
        self.dy = np.random.rand(self.nLayer * self.nFilter, self.yW, self.yH)

    def Forward(self, X):
        """Convolve every input map with every filter.

        Returns (nLayer * nFilter, yW, yH); output n*nFilter+i is filter i
        applied to input layer n.
        """
        self.X = X
        self.originalXShape = X.shape
        if len(X.shape) == 2:
            self.X = X.reshape(1, X.shape[0], X.shape[1])
        if self.bInitilize == False:
            self.InitlizeWeights(self.X.shape)
        for n in range(self.nLayer):
            for i in range(self.nFilter):
                self.Y[n * self.nFilter + i] = self.filters[i].Forward(self.X[n])
        return self.Y

    def Backward(self, dy):
        """Backpropagate; returns the gradient reshaped to the original input shape."""
        self.dx = np.zeros((self.nLayer, self.xW, self.xH))
        for n in range(self.nLayer):
            for i in range(self.nFilter):
                # BUG FIX: the gradient of filter i applied to layer n belongs
                # only to input layer n. The original `self.dx += ...`
                # broadcast that (xW, xH) gradient onto EVERY layer of the
                # stack whenever nLayer > 1.
                self.dx[n] += self.filters[i].Backward(dy[n * self.nFilter + i], self.X[n])
        self.dx = self.dx.reshape(self.originalXShape)
        return self.dx

    def Learn(self):
        """One gradient step per filter (gradients were accumulated over layers)."""
        for i in range(self.nFilter):
            self.filters[i].Learn()


# df=DFlatten()

# x=np.random.rand(10,5,3)
# y=df.Forward(x)
# Flatten a tensor (stretch multi-dimensional image data into one dimension)
class DFlatten:
    """Flattens its input to 1-D on the way forward and restores the
    original shape on the way back. Holds no trainable parameters."""

    def Forward(self, X):
        """Cache X's shape and return X flattened to one dimension."""
        if myDebug == 1:
            print('flatten forward', X.shape)
        self.xShape = X.shape
        self.Y = X.reshape(-1)
        return self.Y

    def Backward(self, dy):
        """Reshape the incoming gradient back to the cached input shape."""
        self.dx = dy.reshape(self.xShape)
        return self.dx

    def Learn(self):
        # Nothing to train in a flatten layer.
        pass


class DMaxPooling2D_OneLayer:
    """Max pooling over non-overlapping nW x nH windows of one 2-D map."""

    def __init__(self, nW, nH):
        """nW: window height (rows); nH: window width (cols)."""
        self.nW = nW
        self.nH = nH

    def Forward(self, X):
        """Return the (yW, yH) map of per-window maxima; caches X and Y."""
        self.X = X
        self.yW = int(X.shape[0] / self.nW)
        self.yH = int(X.shape[1] / self.nH)
        # np.empty instead of np.random.rand: the buffer is fully overwritten
        # below, so there is no need to consume global RNG state for it.
        self.Y = np.empty((self.yW, self.yH))
        for i in range(self.yW):
            for j in range(self.yH):
                # BUG FIX: the column slice originally mixed nW and nH
                # (X[..., j*nW:(j+1)*nH]), selecting the wrong window — or an
                # empty one, crashing max() — whenever nW != nH.
                self.Y[i, j] = X[i * self.nW:(i + 1) * self.nW,
                                 j * self.nH:(j + 1) * self.nH].max()
        return self.Y

    def Backward(self, dy):
        """Route each window's gradient to the cell(s) holding the window max."""
        self.dx = np.zeros(self.X.shape)
        for i in range(self.yW):
            for j in range(self.yH):
                for m in range(self.nW):
                    for n in range(self.nH):
                        # Ties all receive the full gradient, matching the
                        # original behavior.
                        if self.X[i * self.nW + m, j * self.nH + n] == self.Y[i, j]:
                            self.dx[i * self.nW + m, j * self.nH + n] = dy[i, j]
        return self.dx

    def Learn(self):
        # No trainable parameters.
        pass

# Pooling layer (max); can pool several layers at once
class DMaxPooling2D:
    """Max-pooling layer that pools every map of a (nLayer, W, H) stack
    independently, delegating each map to a DMaxPooling2D_OneLayer."""

    def __init__(self, nW, nH):
        """nW, nH: pooling-window size."""
        self.nW = nW
        self.nH = nH

    def Forward(self, X):
        """Pool each layer on its own; returns (nLayer, yW, yH)."""
        self.X = X
        self.nLayer = X.shape[0]
        self.yW = int(X[0].shape[0] / self.nW)
        self.yH = int(X[0].shape[1] / self.nH)
        self.Y = np.random.rand(self.nLayer, self.yW, self.yH)
        # One single-layer pooler per input map, kept around for Backward().
        self.poolingLayers = []
        for idx in range(self.nLayer):
            pooler = DMaxPooling2D_OneLayer(self.nW, self.nH)
            self.Y[idx, :] = pooler.Forward(X[idx])
            self.poolingLayers.append(pooler)
        return self.Y

    def Backward(self, dy):
        """Hand each layer's gradient slice to its own pooler."""
        self.dx = np.zeros(self.X.shape)
        for idx in range(self.nLayer):
            self.dx[idx, :] = self.poolingLayers[idx].Backward(dy[idx, :])
        return self.dx

    def Learn(self):
        # Pooling has no trainable parameters.
        pass

# Cross-entropy measures how close the actual output distribution is to the
# expected one: the smaller the value, the closer the two distributions.
class CrossEntropy:
    """Cross-entropy loss between predicted probabilities nx and targets ny."""

    def __init__(self):
        self.nx = None   # last (epsilon-shifted) predictions
        self.ny = None   # last targets
        self.dnx = None  # last gradient w.r.t. nx

    def loss(self, nx, ny):
        """Return sum(-ny * log(nx + eps)); caches inputs for backward()."""
        # BUG FIX: out-of-place addition. The original `nx += + learn`
        # mutated the caller's array in place (e.g. the last layer's cached
        # output). The small epsilon keeps log() finite at nx == 0.
        nx = nx + learn
        self.nx = nx
        self.ny = ny
        loss = np.sum(- ny * np.log(nx))
        return loss

    def backward(self):
        """Gradient of the loss w.r.t. the (epsilon-shifted) predictions."""
        self.dnx = - self.ny / self.nx
        return self.dnx

# Cross-entropy fused with the Softmax activation
class SoftmaxCrossEntropy:
    """Softmax activation fused with a cross-entropy loss."""

    def __init__(self):
        self.nx = None   # raw scores from the last loss() call
        self.px = None   # softmax probabilities
        self.ny = None   # targets
        self.dnx = None  # gradient w.r.t. the scores

    def loss(self, nx, ny):
        """Softmax nx, then return the cross-entropy against ny."""
        # BUG FIX: out-of-place addition — the original `nx += + learn`
        # mutated the caller's array in place.
        nx = nx + learn
        self.nx = nx
        # Subtracting the max before exp() is the standard overflow-safe
        # softmax; the resulting probabilities are mathematically identical.
        e = np.exp(nx - np.max(nx))
        self.px = e / np.sum(e)
        self.ny = ny
        loss = np.sum(- ny * np.log(self.px))
        return loss

    def backward(self):
        """Fused softmax + cross-entropy gradient: px - ny."""
        self.dnx = self.px - self.ny
        return self.dnx

# MSE (mean squared error) loss; details: https://zhuanlan.zhihu.com/p/37217242
class MSE:
    """Squared-error style loss: the Euclidean distance ||nx - ny||."""

    def __init__(self):
        self.nx = None   # last predictions
        self.ny = None   # last targets
        self.dnx = None  # last gradient

    def loss(self, nx, ny):
        """Return the L2 norm of (nx - ny); caches both operands."""
        self.nx, self.ny = nx, ny
        return norm(nx - ny)

    def backward(self):
        """Gradient of the cached loss w.r.t. the predictions."""
        self.dnx = self.nx - self.ny
        return self.dnx


class DNN:
    """A minimal sequential network: an ordered list of layers plus a loss model."""

    def __init__(self):
        self.layers = []

    # Append a layer after the current last one.
    def Add(self, layer):
        self.layers.append(layer)

    def Forward(self, X):
        """Run X through every layer in order and return the final output."""
        if myDebug == 1:
            print('dnn forward', X.shape)
        y = X
        for layer in self.layers:
            y = layer.Forward(y)
        return y

    def BatchPredict(self, X):
        """Forward each sample X[k]; returns an array of predictions."""
        self.predictY = []
        for k in range(X.shape[0]):
            self.predictY.append(self.Forward(X[k]))
        self.predictY = np.array(self.predictY)
        return self.predictY

    def Compile(self, lossMethod='MSE'):
        """Select the loss model: 'MSE', 'CrossEntropy' or 'SoftmaxCrossEntropy'."""
        if lossMethod == 'MSE':
            self.lossModel = MSE()
        if lossMethod == 'CrossEntropy':
            self.lossModel = CrossEntropy()
        if lossMethod == 'SoftmaxCrossEntropy':
            self.lossModel = SoftmaxCrossEntropy()

    def FitOneRound(self, X, Y, iRound, epochs):
        """One pass over the training set: per-sample forward, backward, update."""
        loss = 0
        nL = len(self.layers)
        for k in range(X.shape[0]):
            y = self.Forward(X[k])
            loss += self.lossModel.loss(nx=y, ny=Y[k])
            dy = self.lossModel.backward()
            # Clip an exploding scalar gradient to +/-1.
            if (y.shape[0] == 1) and (np.linalg.norm(dy, 1) > 1):
                if y > Y[k]:
                    dy = 1
                else:
                    dy = -1
            # NOTE(review): this creates a *local* `step` that shadows — and
            # never reaches — the module-level learning rate the layers use,
            # so the intended decay schedule has no effect. Kept as-is to
            # preserve behavior; confirm intent before wiring it through.
            step = 0.75 * (epochs - iRound) / epochs + 0.01
            # Backpropagate from the last layer to the first, updating as we go.
            for i in range(nL):
                dy = self.layers[nL - i - 1].Backward(dy)
                self.layers[nL - i - 1].Learn()
        # BUG FIX: int(epochs / 10) is 0 when epochs < 10, which made the
        # modulo below raise ZeroDivisionError; clamp the print interval to >= 1.
        if (iRound % max(1, epochs // 10) == 0) or (iRound == epochs - 1):
            print('round=', iRound, 'loss=', loss)

    def Fit(self, X, Y, epochs=1):
        """Train for `epochs` full passes over (X, Y)."""
        for i in range(epochs):
            self.FitOneRound(X, Y, i, epochs)


if __name__ == '__main__':

    # Load images (k) and labels (j) from disk via the project-local loader.
    k, j = lp.loaddata("D:/python/data/")
    X = k
    print("Y:", j.shape)
    # Copy the labels into a flat float vector.
    Y = np.zeros((len(j),))
    for i in range(Y.shape[0]):
        Y[i] = j[i]
    # print("Y:", j.shape)
    # print("Y:", Y.shape)

    # n = 20
    # # Generate standard-normally distributed random matrices as fake input
    # X = np.random.randn(10, n, n)
    # # print("X:",X)
    # # Generate an all-zeros label vector
    # Y = np.zeros((10,))
    # for i in range(Y.shape[0]):
    #     Y[i] = i / 10
    # print("Y:",Y)
    # print("Y:", Y.shape)
    dnn = DNN()
    # Add a convolution layer: 4x4 kernels, stride 2, 10 filters.
    dnn.Add(CNN2D_MultiLayer(4, 4, 2, 10))
    # Add a max-pooling layer with a 2x2 window.
    dnn.Add(DMaxPooling2D(2, 2))
    # Forward pass (also triggers the layers' lazy weight initialization).
    yy = dnn.Forward(X[0])
    print(X[0].shape)
    dnn.Add(CNN2D_MultiLayer(3, 3, 1, 5))
    yy2 = dnn.Forward(X[0])
    dnn.Add(DFlatten())
    dnn.Add(DDense(10, 'relu', bFixRange=False))
    # dnn.Add(DDense(1))
    dnn.Add(DDense(1,'sigmoid'))

    dnn.Compile()   # Compile: selects the loss model (default MSE).
    # dnn.Fit(X[0:40], Y[0:40], 100) #None
    # dnn.Fit(X[0:150, :], Y[0:150,], 10)
    # dnn.Fit(X[0:150, :], Y[0:150, :], 200)
    # Train on the first 100 samples for 100 epochs.
    dnn.Fit(X[0:100,:], Y[0:100], 100)


    # # save model
    # f = open('D:/python/data/model2.h5', 'wb')
    # pickle.dump(dnn, f)
    # f.close()
    # # load model
    # f = open('D:/python/data/model2.h5', 'rb')
    # dnnnew = pickle.load(f)
    # f.close()
    # # print("dnnnew----------------->",dnnnew)

    # Batch-predict on the held-out samples 100..149.
    predictY = dnn.BatchPredict(X[100:150,])

    # print("predictY=", predictY)
    predictYY = np.array([np.argmax(one_hot) for one_hot in predictY])

    realY = Y[100:150,]

    realYY = np.array([np.argmax(one_hot) for one_hot in realY])

    from sklearn.metrics import accuracy_score

    # accuracy_score(predictYY, realYY)

    # The printed labels mean "correct count" and "accuracy" respectively.
    print("准确数=",accuracy_score(predictYY, realYY, normalize=False))
    print("准确率=",accuracy_score(predictYY, realYY))