# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 15:25:10 2020

@author: 77994
"""


from keras.models import Sequential
from keras.layers import Dense

from keras import losses

import numpy as np

# import mnist


import os 

import random
import sys


import struct


from keras.utils import to_categorical

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K


from sklearn import metrics

from scipy import stats



from keras.models import Sequential
from keras.layers import Dense

from keras import losses

import numpy as np
import joblib


import math 

from numpy.linalg import norm
            
# Module-level debug flag: set to 1 to print tensor shapes in the
# Forward passes (read by DFlatten.Forward and DNN.Forward).
myDebug=0


# Module-level SGD learning rate, read by the layers' Learn() methods.
step=0.1

#region Activation Function

# Activation function
class ActivationFunc:
    """Element-wise activation with forward and derivative evaluation.

    Supported names: 'linear', 'sigmoid', 'relu'.  The derivative methods
    take the *activated* output y (not the pre-activation y0): for sigmoid
    dy/dy0 = y*(1-y), and for relu/linear the sign of y suffices.
    """

    def __init__(self, funcName):
        self.funcName = funcName
        if funcName == 'linear':
            self.forwardFunc = self.forward_linear
            self.backwardFunc = self.backward_linear
        elif funcName == 'sigmoid':
            self.forwardFunc = self.forward_sigmoid
            self.backwardFunc = self.backward_sigmoid
        elif funcName == 'relu':
            self.forwardFunc = self.forward_relu
            self.backwardFunc = self.backward_relu
        else:
            # Fail fast: the original silently left forwardFunc/backwardFunc
            # unset, which surfaced later as a confusing AttributeError.
            raise ValueError('unknown activation function: %r' % (funcName,))

    def forward_linear(self, y0):
        # identity
        return y0

    def backward_linear(self, y):
        return 1

    def forward_sigmoid(self, y0):
        return 1 / (1 + np.exp(-y0))

    def backward_sigmoid(self, y):
        # derivative expressed in terms of the sigmoid output y
        return y * (1 - y)

    def forward_relu(self, y0):
        # scalar-only: y0_y feeds elements to this one at a time
        if y0 > 0:
            return y0
        return 0

    def backward_relu(self, y):
        if y > 0:
            return 1
        return 0

    def y0_y(self, y0):
        """Apply the activation element-wise (ndarray) or to a scalar."""
        if type(y0) is np.ndarray:
            y = np.zeros(y0.shape)
            for i in range(len(y)):
                y[i] = self.forwardFunc(y0[i])
            return y
        return self.forwardFunc(y0)

    def dy_y0(self, y):
        """Derivative dy/dy0 evaluated from the activated output y."""
        if type(y) is np.ndarray:
            dy0 = np.zeros(y.shape)
            for i in range(len(y)):
                dy0[i] = self.backwardFunc(y[i])
            return dy0
        return self.backwardFunc(y)

#endregion
    


class DDense:
    """Fully-connected layer: Y = activation(X @ W + b) for 1-D inputs."""

    def __init__(self, outShape, activateFunc='linear', bFixRange=False):
        self.activate = ActivationFunc(activateFunc)
        self.outShape = outShape
        self.bFixRange = bFixRange
        # weights are created lazily on the first Forward() call,
        # once the input size is known
        self.bInitilize = False

    def InitlizeWeights(self, inShape):
        """Allocate W (inShape x outShape) and b (outShape,) lazily."""
        self.bInitilize = True
        self.inShape = inShape
        self.W = np.random.rand(self.inShape, self.outShape)
        # normalise so each column of W sums to 1
        self.W = self.W / np.sum(self.W, axis=0)
        self.b = 0.1 * np.random.rand(self.outShape)

    # forward propagation
    def Forward(self, X):
        """Forward pass; caches X, Y0 and Y for the backward pass."""
        if not self.bInitilize:
            self.InitlizeWeights(X.shape[0])
        self.X = X
        self.Y0 = np.dot(X, self.W) + self.b
        self.Y = self.activate.y0_y(self.Y0)
        return self.Y

    def FixedBackward_dy0(self, dy0):
        # currently unused (the call in Backward is commented out);
        # clamps the pre-activation gradient where Y0 > 2
        if self.outShape > 1:
            dy0[np.where(self.Y0 > 2)] = 0.1
            return dy0
        if self.Y0 > 2:
            return 0.1
        return dy0

    # backward propagation
    def Backward(self, dy):
        """Backward pass: caches dw/db and returns the input gradient."""
        self.dy0 = self.activate.dy_y0(self.Y) * dy
        self.dw = np.outer(self.X, self.dy0)
        self.db = self.dy0
        self.dx = np.dot(self.W, self.dy0)
        return self.dx

    def Learn(self):
        # vanilla SGD using the module-level learning rate `step`;
        # the bias is updated with a 10x smaller rate
        self.W = self.W - step * self.dw
        self.b = self.b - step * 0.1 * self.db
        
        






class DFilter:
    """A single 2-D convolution kernel (valid padding, equal row/col stride)."""

    def __init__(self, fW, fH, stride):
        self.fW = fW
        self.fH = fH
        self.stride = stride
        # kernel and output map are sized lazily on first Forward()
        self.bInitilize = False

    def InitlizeWeights(self, xShape):
        """Size the output map and the kernel from the input shape."""
        self.bInitilize = True
        self.xW = xShape[0]
        self.xH = xShape[1]
        self.yW = (self.xW - self.fW) // self.stride + 1
        self.yH = (self.xH - self.fH) // self.stride + 1
        self.Y = np.random.rand(self.yW, self.yH)
        self.W = np.random.rand(self.fW, self.fH)
        # normalise the kernel so its entries sum to 1
        self.W = self.W / np.sum(self.W)
        self.b = 0.05 * np.random.random()
        # gradient accumulators, reset by Learn()
        self.db = 0
        self.dW = np.zeros((self.fW, self.fH))

    def Forward(self, X):
        """Slide the kernel over X and return the response map Y."""
        if not self.bInitilize:
            self.InitlizeWeights(X.shape)
        s = self.stride
        for r in range(self.yW):
            for c in range(self.yH):
                patch = X[r * s:r * s + self.fW, c * s:c * s + self.fH]
                self.Y[r, c] = np.sum(patch * self.W) + self.b
        return self.Y

    def Backward(self, dy, X):
        """Accumulate db/dW from dy and return the gradient w.r.t. X."""
        self.dx = np.zeros((self.xW, self.xH))
        s = self.stride
        for r in range(self.yW):
            for c in range(self.yH):
                g = dy[r][c]
                self.db += g
                patch = X[r * s:r * s + self.fW, c * s:c * s + self.fH]
                self.dW += g * patch
                self.dx[r * s:r * s + self.fW, c * s:c * s + self.fH] += g * self.W
        return self.dx

    def Learn(self):
        # SGD step with the module-level learning rate (bias 10x smaller),
        # then reset the accumulators so the next sample starts from zero
        self.W = self.W - step * self.dW
        self.b = self.b - step * 0.1 * self.db
        self.db = 0
        self.dW = np.zeros((self.fW, self.fH))






class CNN2D:
    """A bank of nFilter independent DFilter convolutions over one 2-D input."""

    def __init__(self, fW, fH, stride, nFilter):
        self.bInitilize = False
        self.fW = fW
        self.fH = fH
        self.stride = stride
        self.nFilter = nFilter
        self.filters = []
        for i in range(nFilter):
            self.filters.append(DFilter(fW, fH, stride))

    def InitlizeWeights(self, xShape):
        """Lazily size the stacked output (nFilter, yW, yH)."""
        self.bInitilize = True
        self.xW = xShape[0]
        self.xH = xShape[1]
        self.yW = (int)((self.xW - self.fW) / self.stride + 1)
        self.yH = (int)((self.xH - self.fH) / self.stride + 1)
        self.Y = np.random.rand(self.nFilter, self.yW, self.yH)
        self.dy = np.random.rand(self.nFilter, self.yW, self.yH)

    def Forward(self, X):
        """Run every filter over X; caches X for the backward pass."""
        if self.bInitilize == False:
            self.InitlizeWeights(X.shape)
        self.X = X
        for i in range(self.nFilter):
            self.Y[i] = self.filters[i].Forward(X)
        return self.Y

    def Backward(self, dy):
        """Sum the filters' input gradients; requires a prior Forward call."""
        self.dx = np.zeros((self.xW, self.xH))
        for i in range(self.nFilter):
            # BUG FIX: DFilter.Backward(dy, X) requires the input X; the
            # original call passed only dy[i] and raised a TypeError.
            self.dx += self.filters[i].Backward(dy[i], self.X)
        return self.dx

    def Learn(self):
        for i in range(self.nFilter):
            self.filters[i].Learn()
        
        






class CNN2D_MultiLayer:
    """Applies a shared bank of nFilter DFilters to every layer of a 3-D
    input stack, producing nLayer*nFilter output maps.  A 2-D input is
    treated as a single-layer stack."""

    def __init__(self, fW, fH, stride, nFilter):
        self.bInitilize = False
        self.fW = fW
        self.fH = fH
        self.stride = stride
        self.nFilter = nFilter
        # the same nFilter filters are shared across all input layers
        self.filters = []
        for i in range(nFilter):
            self.filters.append(DFilter(fW, fH, stride))

    def InitlizeWeights(self, xShape):
        """Lazily size the output stack (nLayer*nFilter, yW, yH)."""
        self.bInitilize = True
        self.nLayer = xShape[0]
        self.xW = xShape[1]
        self.xH = xShape[2]
        self.yW = (int)((self.xW - self.fW) / self.stride + 1)
        self.yH = (int)((self.xH - self.fH) / self.stride + 1)
        self.Y = np.random.rand(self.nLayer * self.nFilter, self.yW, self.yH)
        self.dy = np.random.rand(self.nLayer * self.nFilter, self.yW, self.yH)

    def Forward(self, X):
        """Convolve every filter with every input layer."""
        self.X = X
        self.originalXShape = X.shape
        # promote a 2-D input to a single-layer 3-D stack
        if len(X.shape) == 2:
            self.X = X.reshape(1, X.shape[0], X.shape[1])
        if self.bInitilize == False:
            self.InitlizeWeights(self.X.shape)
        for n in range(self.nLayer):
            for i in range(self.nFilter):
                self.Y[n * self.nFilter + i] = self.filters[i].Forward(self.X[n])
        return self.Y

    def Backward(self, dy):
        """Return the input gradient in the caller's original shape."""
        self.dx = np.zeros((self.nLayer, self.xW, self.xH))
        for n in range(self.nLayer):
            for i in range(self.nFilter):
                # BUG FIX: the gradient for input layer n belongs to dx[n]
                # only; the original `self.dx += ...` broadcast each 2-D
                # filter gradient onto ALL nLayer layers of dx.
                self.dx[n] += self.filters[i].Backward(dy[n * self.nFilter + i], self.X[n])
        self.dx = self.dx.reshape(self.originalXShape)
        return self.dx

    def Learn(self):
        for i in range(self.nFilter):
            self.filters[i].Learn()
        
        

# df=DFlatten()

# x=np.random.rand(10,5,3)
# y=df.Forward(x)

class DFlatten:
    """Flattens an N-D input to 1-D; Backward restores the original shape."""

    def Forward(self, X):
        if myDebug == 1:
            print('flatten forward', X.shape)
        # remember the incoming shape so Backward can undo the flatten
        self.xShape = X.shape
        self.Y = X.reshape(-1)
        return self.Y

    def Backward(self, dy):
        self.dx = dy.reshape(self.xShape)
        return self.dx

    def Learn(self):
        # no trainable parameters
        pass
        
        
       

class DMaxPooling2D_OneLayer:
    """Max pooling over non-overlapping nW x nH windows of one 2-D map."""

    def __init__(self, nW, nH):
        self.nW = nW  # pooling window size along rows
        self.nH = nH  # pooling window size along columns

    def Forward(self, X):
        """Return the (yW, yH) map of per-window maxima; caches X and Y."""
        self.X = X
        self.yW = int(X.shape[0] / self.nW)
        self.yH = int(X.shape[1] / self.nH)
        self.Y = np.zeros((self.yW, self.yH))
        for i in range(self.yW):
            for j in range(self.yH):
                # BUG FIX: the column slice started at j*nW instead of
                # j*nH, reading the wrong (possibly empty) window
                # whenever nW != nH.
                self.Y[i, j] = X[i * self.nW:(i + 1) * self.nW,
                                 j * self.nH:(j + 1) * self.nH].max()
        return self.Y

    def Backward(self, dy):
        """Route dy[i,j] to the max position(s); ties all receive it."""
        self.dx = np.zeros(self.X.shape)
        for i in range(self.yW):
            for j in range(self.yH):
                for m in range(self.nW):
                    for n in range(self.nH):
                        if self.X[i * self.nW + m, j * self.nH + n] == self.Y[i, j]:
                            self.dx[i * self.nW + m, j * self.nH + n] = dy[i, j]
        return self.dx

    def Learn(self):
        # no trainable parameters
        nothing = 1
        
        


class DMaxPooling2D:
    """Applies DMaxPooling2D_OneLayer independently to each layer of a stack."""

    def __init__(self, nW, nH):
        self.nW = nW
        self.nH = nH

    def Forward(self, X):
        """Pool every layer of the (nLayer, W, H) input stack."""
        self.X = X
        self.nLayer = X.shape[0]
        self.yW = int(X[0].shape[0] / self.nW)
        self.yH = int(X[0].shape[1] / self.nH)
        self.Y = np.random.rand(self.nLayer, self.yW, self.yH)
        # one pooling object per layer, kept so Backward can replay the maxima
        self.poolingLayers = []
        for idx in range(self.nLayer):
            layer = DMaxPooling2D_OneLayer(self.nW, self.nH)
            self.Y[idx, :] = layer.Forward(X[idx])
            self.poolingLayers.append(layer)
        return self.Y

    def Backward(self, dy):
        self.dx = np.zeros(self.X.shape)
        for idx in range(self.nLayer):
            self.dx[idx, :] = self.poolingLayers[idx].Backward(dy[idx, :])
        return self.dx

    def Learn(self):
        # pooling has no trainable parameters
        nothing = 1




class CrossEntropy:
    """Cross-entropy loss: L = -sum(ny * log(nx)) for probability vector nx."""

    def __init__(self):
        self.nx = None
        self.ny = None
        self.dnx = None

    def loss(self, nx, ny):
        # BUG FIX: the original used `nx += 1e-9`, silently mutating the
        # caller's array in place; build a shifted copy instead.
        nx = nx + 0.000000001  # epsilon keeps log() away from log(0)
        self.nx = nx
        self.ny = ny
        loss = np.sum(- ny * np.log(nx))
        return loss

    def backward(self):
        """Gradient w.r.t. nx (uses the epsilon-shifted copy from loss())."""
        self.dnx = - self.ny / self.nx
        return self.dnx
    



class SoftmaxCrossEntropy:
    def __init__(self):
        self.nx = None
        self.px=None
        self.ny = None
        self.dnx = None

    def loss(self, nx, ny):
        
        # nx=np.array([0.1, 0.3, 0.5])
        
        # ny=np.array([0, 1, 0])
        
        nx+=+0.000000001
        
        # print('nx=',nx)
        
        self.nx = nx       
        
    
        totalE=np.sum(np.exp(nx))
        
        self.px=np.exp(nx)/totalE
        self.ny = ny
        loss = np.sum(- ny * np.log(self.px))
        
        return loss

    def backward(self):
        self.dnx =  self.px-self.ny
        return self.dnx
    


class MSE:
    """Euclidean-norm loss.

    Note: loss() returns ||nx - ny|| (the L2 norm, not the squared error),
    while backward() returns nx - ny, the gradient of 0.5*||nx - ny||^2.
    """

    def __init__(self):
        self.nx = None
        self.ny = None
        self.dnx = None

    def loss(self, nx, ny):
        self.nx, self.ny = nx, ny
        return norm(nx - ny)

    def backward(self):
        self.dnx = self.nx - self.ny
        return self.dnx
    
    
    




class DNN:
    """A minimal sequential network: stacked layers trained by per-sample SGD."""

    def __init__(self):
        self.layers = []

    def Add(self, layer):
        """Append a layer; layers are applied in insertion order."""
        self.layers.append(layer)

    def Forward(self, X):
        """Run one sample through every layer and return the final output."""
        if myDebug == 1:
            print('dnn forward', X.shape)
        nL = len(self.layers)
        y = X
        for i in range(nL):
            y = self.layers[i].Forward(y)
        return y

    def BatchPredict(self, X):
        """Forward each sample X[k]; returns the stacked per-sample outputs."""
        self.predictY = []
        for k in range(X.shape[0]):
            self.predictY.append(self.Forward(X[k]))
        self.predictY = np.array(self.predictY)
        return self.predictY

    def Compile(self, lossMethod='MSE'):
        """Select the loss: 'MSE', 'CrossEntropy' or 'SoftmaxCrossEntropy'."""
        if lossMethod == 'MSE':
            self.lossModel = MSE()
        if lossMethod == 'CrossEntropy':
            self.lossModel = CrossEntropy()
        if lossMethod == 'SoftmaxCrossEntropy':
            self.lossModel = SoftmaxCrossEntropy()

    def FitOneRound(self, X, Y, iRound, epochs):
        """One epoch of per-sample SGD with backprop through all layers."""
        # BUG FIX: the original assigned the decayed learning rate to a
        # LOCAL variable `step` that was never read, so the layers' Learn()
        # methods (which read the module-level `step`) never saw the decay.
        global step

        loss = 0
        nL = len(self.layers)
        for k in range(X.shape[0]):
            y = self.Forward(X[k])
            loss += self.lossModel.loss(nx=y, ny=Y[k])
            dy = self.lossModel.backward()

            # clip the gradient of a scalar output to +/-1 to keep the
            # early SGD steps from blowing up
            if (y.shape[0] == 1) and (np.linalg.norm(dy, 1) > 1):
                if y > Y[k]:
                    dy = 1
                else:
                    dy = -1

            # linearly decaying learning rate: ~0.76 down to 0.01
            step = 0.75 * (epochs - iRound) / epochs + 0.01

            for i in range(nL):
                dy = self.layers[nL - i - 1].Backward(dy)
                self.layers[nL - i - 1].Learn()

        # BUG FIX: int(epochs/10) is 0 for epochs < 10, making the modulo
        # below raise ZeroDivisionError; clamp the print interval to >= 1.
        interval = max(1, int(epochs / 10))
        if (iRound % interval == 0) or (iRound == epochs - 1):
            print('round=', iRound, 'loss=', loss)

    def Fit(self, X, Y, epochs=1):
        """Train for `epochs` full passes over the dataset."""
        for i in range(epochs):
            self.FitOneRound(X, Y, i, epochs)
        
        
      




# ---- demo: build and train a tiny CNN on random data ----

n=20
X=np.random.randn(10,n,n)   # 10 random 20x20 "images"


Y=np.zeros((10,))

# regression targets 0.0, 0.1, ..., 0.9
for i in range(10):
    Y[i]=i/10


dnn=DNN()

dnn.Add(CNN2D_MultiLayer(4,4,2,10))

dnn.Add(DMaxPooling2D(2,2))

# a forward pass forces lazy weight initialization of the layers added so far
yy=dnn.Forward(X[0])

dnn.Add(CNN2D_MultiLayer(3,3,1,5))

yy2=dnn.Forward(X[0])

dnn.Add(DFlatten())

dnn.Add(DDense(10,'relu',bFixRange=False))

dnn.Add(DDense(1))

dnn.Compile()


dnn.Fit(X, Y,100)

import pickle

# Persist the trained model with pickle.  The original opened the file
# without a context manager; `with` guarantees the handle is closed even
# if dump() raises.
with open('picklemodel.pkl', 'wb') as f:
    pickle.dump(dnn, f)




# dnn=DNN()

# dnn.Add(CNN2D(3,3,2,10))

# dnn.Add(DFlatten())

# # yy=dnn.Forward(X[0])

# dnn.Add(DDense(10,'relu',bFixRange=True))

# dnn.Add(DDense(1))

# # dnn.Add(DDense(1,'relu'))

# # yy=dnn.BatchPredict(X)

# dnn.Compile()


# # ratio=dnn.AdjustWeightByComputingRatio(X,Y)

# # print(ratio)

# dnn.Fit(X, Y,500)



# dnn.Fit(X,Y,10)



# dF=DFilter(5,5,3,3,1)


# cnn2d=CNN2D(5,5,3,3,1,10)

# cY=cnn2d.Forward(X[0,:])



# filter0=DFilter(5,5,3,3,1)

# ccy=filter0.Forward(X[:,:,0])
# dnn=DNN()

# dnn.Add(cnn2d)

# yy=dnn.Forward(X)


# shapeX=np.array((2,3),int)

# print(shapeX)

# x=np.zeros(shapeX)



    
# W=np.array([[1,1],[2,3]])
        
# W=W/np.sum(W,axis=0)

# X=np.array([1,2])
# y=np.dot(X,W)
    
    
# xx=[1, -1, 3]

# xx=np.array(xx)

# len(xx)
# xx.shape
# xx=2

# type(xx)



# idx=np.where(xx>0 )

# xx[idx]=0
    

# if type(xx) is np.ndarray:
#     print('yes')


# xx=np.array([1, -2, 3])

# yy=np.array([1,2,3])

# xy=xx*yy

# X=np.array([1,1])
# W=np.array([[1,2,3],[2,3,3]])


# xw=np.dot(X,W)

# nnW=norm(W)
# # W=W/np.linalg.norm(W)

# nW=norm(W, axis=0, ord=2)

# W=W/nW

# ---- quick self-evaluation on freshly generated random data ----
n = 20
A = np.random.randn(10, n, n)

B = np.zeros((10,))

# same targets as training: 0.0, 0.1, ..., 0.9
for i in range(10):
    B[i] = i / 10

predictB = dnn.BatchPredict(A)

print("predictB=", predictB)
# NOTE(review): each prediction is a length-1 array and each realB entry is
# a scalar, so np.argmax(...) always returns 0 here; the accuracy printed
# below is therefore trivially 1.0 -- confirm this evaluation is intended.
predictBB = np.array([np.argmax(one_hot) for one_hot in predictB])
print("predictBB=", predictBB)
realB = B
print("realB",realB)
realBB = np.array([np.argmax(one_hot) for one_hot in realB])
print("realBB", realBB)
from sklearn.metrics import accuracy_score

# accuracy_score(predictYY, realYY)

print("准确数=",accuracy_score(predictBB, realBB, normalize=False))
print("准确率=",accuracy_score(predictBB, realBB))
