import math
import mymath
import numpy as np
import functools
import numpy.linalg as LA


# basic functions
def I(m, n):
    """Return an m-by-n np.matrix filled with ones."""
    ones = np.ones((m, n))
    return np.asmatrix(ones)

def R(m, n):
    """Return an m-by-n np.matrix of standard-normal random values."""
    samples = np.random.randn(m, n)
    return np.asmatrix(samples)

def O(m, n):
    """Return an m-by-n np.matrix filled with zeros."""
    zeros = np.zeros((m, n))
    return np.asmatrix(zeros)

def pm(*Ms):
    """Elementwise (Hadamard) product of same-shape matrices.

    Behaves like Matlab's ``.*`` chained over the arguments:
    pm(A, B, C) == A .* B .* C.
    """
    return functools.reduce(lambda acc, M: np.multiply(acc, M), Ms)

def mat_map(f, *As):
    """Apply f elementwise across several same-shape np.matrix arguments.

    The (k, j) entry of the result is f(A1[k, j], A2[k, j], ...).
    """
    rows, cols = As[0].shape
    out_rows = []
    for k in range(rows):
        out_rows.append([f(*(A[k, j] for A in As)) for j in range(cols)])
    return np.mat(out_rows)
    

def getdata(Data):
    """Split a dataset into input/target matrices.

    Data is a list of {'x': [...], 't': [...]} records, e.g.
        Data = [{'x': [1, 1, 1], 't': [1, 1]}, {'x': [1, 1, 0], 't': [0, 1]}]

    Returns (X, T, numData) where X is (numIn x numData) and T is
    (numOut x numData): one column per sample.
    """
    # transpose so each sample becomes a column
    X = np.mat([d['x'] for d in Data]).T
    T = np.mat([d['t'] for d in Data]).T
    return X, T, len(Data)

def makedata(X, T):
    """Inverse of getdata: pack column-sample matrices back into records.

    X and T have one column per sample; returns a list of
    {'x': ..., 't': ...} dicts, one per column.
    """
    xs = X.T.tolist()
    ts = T.T.tolist()
    return [{'x': x, 't': t} for x, t in zip(xs, ts)]

# neuron classes
class Neuron:
    """Generic single-layer neuron: Y = f(W*X - threshold).

    Used as a base class; subclasses supply train()/learn().
    Columns of X are samples, rows of Y are output units.
    """

    def __init__(self, numIn=2, numOut=2, af=mymath.hardlim, p=None):
        # p defaults to a fresh dict per instance (avoids a shared
        # mutable default being mutated across instances)
        self.InputNum = numIn
        self.OutputNum = numOut
        self.Weight = R(numOut, numIn)      # numOut x numIn random weights
        self.Threshold = R(numOut, 1)       # one threshold per output unit
        self.act = af                       # activation function
        self.param = {'lr': .2, 'tol': .001} if p is None else p

    def train(self, X, T):
        # training rule is subclass-specific
        raise NotImplementedError

    def simu(self, X):
        """Forward pass: Y = f(W*X - threshold), one column per sample."""
        y = self.Weight * X - np.tile(self.Threshold, X.shape[1])
        Y = mat_map(self.act, y)
        return Y

    def test(self, Data):
        """Evaluate on Data; return (Y, relative L2 error vs targets)."""
        X, T, N = getdata(Data)
        Y = self.simu(X)
        Err = LA.norm(Y - T, 2) / LA.norm(T, 2)
        return Y, Err

    def perturb(self):
        # small random jitter of the parameters
        # (dropped an unused local copy of Weight)
        self.Weight += R(self.OutputNum, self.InputNum) / 1000
        self.Threshold += R(self.OutputNum, 1) / 1000

        
class Perceptron(Neuron):
    '''Single-layer perceptron with hardlim activation.

    Usage:
        p = Perceptron()
        X, T, N = getdata(Data)
        p.train(X, T)
    '''

    def __init__(self, numIn=2, numOut=2, p=None):
        # reuse Neuron's setup; hardlim is the perceptron activation
        super(Perceptron, self).__init__(
            numIn, numOut, mymath.hardlim,
            {'lr': .2, 'tol': .001} if p is None else p)

    def train(self, X, Y, K=1000):
        """Train on inputs X against targets Y for at most K epochs.

        Stops early once the relative L2 error drops below param['tol'].
        """
        T = Y                       # targets (previously read an undefined global T)
        Y = self.simu(X)
        Err = LA.norm(T - Y, 2) / LA.norm(T, 2)      # relative error
        k = 0
        while Err > self.param['tol'] and k < K:
            Y = self.simu(X)
            self.learn(X, T - Y)    # error-driven update
            Err = LA.norm(T - Y, 2) / LA.norm(T, 2)
            k += 1

    def learn(self, X, D):
        """Hebb-rule update from error matrix D (one column per sample)."""
        lr = self.param['lr']
        DW = lr * D * X.T
        DT = lr * D.sum(1)
        self.Weight += DW
        self.Threshold -= DT


class BPnet:
    '''Multi-layer backpropagation network (a stack of BPnet1 layers).

    Usage:
        X, T, N = getdata(Data)
        bp = BPnet()
        bp.train(X, T)
    '''

    def __init__(self, q=(2, 3, 1), p=None):
        # q: layer sizes, input first; one BPnet1 per weight layer.
        # Immutable/None defaults avoid shared mutable default state.
        self.InputNum = q[0]
        self.OutputNum = q[-1]
        self.LayerNum = len(q) - 1
        self.neuron = [BPnet1(q[l], q[l + 1]) for l in range(self.LayerNum)]
        self.param = {'lr': (2, 2), 'tol': .001} if p is None else p

    def train(self, X, Y, K=200):
        """Train on inputs X against targets Y for at most K epochs."""
        T = Y                       # targets (previously read an undefined global T)
        Y = self.calc(X)
        Err = LA.norm(T - Y[-1], 2) / LA.norm(T, 2)  # relative error
        k = 0
        while Err > self.param['tol'] and k < K:
            Y = self.calc(X)
            self.learn(T, Y)        # backprop update
            Err = LA.norm(T - Y[-1], 2) / LA.norm(T, 2)
            k += 1

    def learn(self, T, Y):
        """Backpropagate one step; Y is the activation list from calc()."""
        # output-layer delta for sigmoid units: (T-Y) .* Y .* (1-Y)
        D = pm(T - Y[-1], Y[-1], 1 - Y[-1])
        self.neuron[-1].learn(Y[-2], D)
        for k in range(1, self.LayerNum):
            # propagate the delta back through layer -k's weights
            D = pm(self.neuron[-k].Weight.T * D, Y[-k - 1], 1 - Y[-k - 1])
            self.neuron[-k - 1].learn(Y[-k - 2], D)

    def test(self, X, T):
        """Forward pass plus relative L2 error against targets T."""
        Y = self.simu(X)
        Err = LA.norm(T - Y, 2) / LA.norm(T, 2)
        return Y, Err

    def calc(self, X):
        """Forward pass returning every layer's activations (input first)."""
        Y = [X]
        for l in range(self.LayerNum):
            X = self.neuron[l].simu(X)
            Y.append(X)
        return Y

    def simu(self, X):
        """Forward pass returning only the final layer's output."""
        for l in range(self.LayerNum):
            X = self.neuron[l].simu(X)
        return X

    def perturb(self):
        # jitter every layer (fixes the former 'self.neron' typo)
        for k in range(self.LayerNum):
            self.neuron[k].perturb()

    def demo(self, K=200):
        """Train on a tiny fixed dataset, printing progress each step.

        Example:
            an = BPnet([4, 3, 3])
            an.demo()
        """
        Data = [{'x': [1, 1, 1, 0], 't': [0, 1, 1]},
                {'x': [0, 0, 1, 0], 't': [0, 0, 1]},
                {'x': [1, 0, 1, 0], 't': [0, 1, 0]},
                {'x': [1, 1, 1, 1], 't': [1, 0, 0]}]
        X, T, N = getdata(Data)
        Y = self.calc(X)
        # was linalg.LA.norm (NameError): use the module-level LA alias
        Err = LA.norm(T - Y[-1], 2) / LA.norm(T, 2)
        k = 0
        while Err > self.param['tol'] and k < K:
            Y = self.calc(X)
            self.learn(T, Y)
            Err = LA.norm(T - Y[-1], 2) / LA.norm(T, 2)
            k += 1

            print('step ' + str(k))
            for l in range(self.LayerNum):
                print('Weight[%d]:' % l)
                print(self.neuron[l].Weight)
                print('Threshold[%d]:' % l)
                print(self.neuron[l].Threshold)
            print('Error:')
            print(Err)
            print('\n')


class BPnet1(Neuron):
    '''Single-layer BP (sigmoid) network.

    Usage:
        X, T, N = getdata(Data)
        bp = BPnet1()
        bp.train(X, T)
    '''

    def __init__(self, numIn=2, numOut=2, af=mymath.sigmoid, p=None):
        # None default gives each instance its own param dict
        super(BPnet1, self).__init__(
            numIn, numOut, af,
            {'lr': 2, 'tol': .001} if p is None else p)

    def train(self, X, Y, K=2000):
        """Train on inputs X against targets Y for at most K epochs."""
        # Targets come from the Y argument (previously this method
        # ignored its arguments and read a global Data via getdata).
        T = Y
        Y = self.simu(X)
        Err = LA.norm(Y - T, 2) / LA.norm(T, 2)      # relative error
        k = 0
        while Err > self.param['tol'] and k < K:
            Y = self.simu(X)
            # delta rule with sigmoid derivative Y*(1-Y)
            self.learn(X, pm(T - Y, Y, (1 - Y)))
            Err = LA.norm(Y - T, 2) / LA.norm(T, 2)
            k += 1

    def learn(self, X, D):
        """Gradient-style update from delta matrix D."""
        lr = self.param['lr']
        self.Weight += lr * D * X.T
        self.Threshold -= lr * D.sum(1)

    def demo(self, K=200):
        """Train on a tiny fixed dataset, printing progress each step.

        Example:
            an = BPnet1(4, 3)
            an.demo()
        """
        Data = [{'x': [1, 1, 1, 0], 't': [0, 1, 1]},
                {'x': [0, 0, 1, 0], 't': [0, 0, 1]},
                {'x': [1, 0, 1, 0], 't': [0, 1, 0]},
                {'x': [1, 1, 1, 1], 't': [1, 0, 0]}]
        X, T, N = getdata(Data)
        Y = self.simu(X)
        Err = LA.norm(T - Y, 2) / LA.norm(T, 2)
        k = 0
        while Err > self.param['tol'] and k < K:
            Y = self.simu(X)
            self.learn(X, pm(T - Y, Y, (1 - Y)))
            Err = LA.norm(T - Y, 2) / LA.norm(T, 2)
            k += 1

            print('step ' + str(k))
            print('Weight:')
            print(self.Weight)
            print('Threshold:')
            print(self.Threshold)
            print('Error:')
            print(Err)
            print('\n')


class BPnet1x(Neuron):
    '''Single-layer net with a ramp activation and learnable bounds.

    More flexible than BPnet1: besides the weights it also adapts the
    ramp's lower/upper breakpoints lb and ub.

    Usage:
        X, T, N = getdata(Data)
        bp = BPnet1x(m, n)
        bp.train(X, T)
    '''

    def __init__(self, numIn=2, numOut=2, p=None):
        self.InputNum = numIn
        self.OutputNum = numOut
        self.Weight = R(numOut, numIn)
        self.act = mymath.ramp          # ramp(x, lb, ub)
        self.dact = mymath.std_bump     # derivative of the ramp
        self.lb = R(numOut, 1)          # per-output lower breakpoints
        self.ub = self.lb + 10.5        # upper breakpoints, offset above lb
        self.param = {'lr': 2, 'tol': .001} if p is None else p

    def simu(self, X):
        """Forward pass: Y = ramp(W*X, lb, ub), one column per sample."""
        N = X.shape[1]
        Y = mat_map(self.act, self.Weight * X, np.tile(self.lb, N), np.tile(self.ub, N))
        return Y

    def train(self, X, Y, K=500):
        """Train on inputs X against targets Y for at most K epochs."""
        # Targets come from the Y argument (previously this method
        # ignored its arguments and read a global Data via getdata).
        T = Y
        N = X.shape[1]              # number of samples (columns)
        Y = self.simu(X)
        Err = LA.norm(Y - T, 2) / LA.norm(T, 2)      # relative error
        k = 0
        while Err > self.param['tol'] and k < K:
            Y = self.simu(X)
            # delta rule with the ramp derivative evaluated at W*X
            self.learn(X, pm(T - Y, mat_map(self.dact, self.Weight * X, np.tile(self.lb, N), np.tile(self.ub, N))))
            Err = LA.norm(Y - T, 2) / LA.norm(T, 2)
            k += 1

    def learn(self, X, D):
        """Update Weight, lb and ub from delta matrix D."""
        lr = self.param['lr']
        DW = lr * D * X.T
        N = X.shape[1]
        # gradient contributions w.r.t. the ramp's breakpoints
        Dlb = lr * pm(D, 2 * (self.Weight * X - np.tile(self.ub, N)) / np.power(np.tile(self.ub - self.lb, N), 2)).sum(1)
        Dub = lr * pm(D, 2 * (np.tile(self.lb, N) - self.Weight * X) / np.power(np.tile(self.ub - self.lb, N), 2)).sum(1)
        self.Weight += DW

        self.lb += Dlb
        self.ub += Dub

    def test(self, Data):
        """Evaluate on Data; return (Y, relative L2 error vs targets)."""
        X, T, N = getdata(Data)
        Y = self.simu(X)
        Err = LA.norm(Y - T, 2) / LA.norm(T, 2)
        return Y, Err

    def demo(self, K=200):
        """Train on a tiny fixed dataset, printing progress each step.

        Example:
            an = BPnet1x(4, 3)
            an.demo()
        """
        Data = [{'x': [1, 1, 1, 0], 't': [0, 1, 1]},
                {'x': [0, 0, 1, 0], 't': [0, 0, 1]},
                {'x': [1, 0, 1, 0], 't': [0, 1, 0]},
                {'x': [1, 1, 1, 1], 't': [1, 0, 0]}]
        X, T, N = getdata(Data)
        Y = self.simu(X)
        Err = LA.norm(T - Y, 2) / LA.norm(T, 2)
        k = 0
        while Err > self.param['tol'] and k < K:
            Y = self.simu(X)
            self.learn(X, pm(T - Y, mat_map(self.dact, self.Weight * X, np.tile(self.lb, N), np.tile(self.ub, N))))
            Err = LA.norm(T - Y, 2) / LA.norm(T, 2)
            k += 1

            print('step ' + str(k))
            print('Y:')
            print(Y)
            print('Weight:')
            print(self.Weight)
            print('lb, ub:')
            print(self.lb, self.ub, (self.ub - self.lb).all())
            print('Error:')
            print(Err)
            print('\n')


class BPnet1s(Neuron):
    '''Single-layer net with a fixed ramp on [-6, 6]; simpler than BPnet1x.

    Usage:
        X, T, N = getdata(Data)
        bp = BPnet1s()
        bp.train(X, T)
    '''

    def __init__(self, numIn=2, numOut=2, af=lambda x: mymath.ramp(x, -6, 6), p=None):
        super(BPnet1s, self).__init__(
            numIn, numOut, af,
            {'lr': 2, 'tol': .001} if p is None else p)
        # derivative of the fixed ramp
        self.dact = lambda x: mymath.std_bump(x, -6, 6)

    def learn(self, X, D):
        """Gradient-style update from delta matrix D."""
        lr = self.param['lr']
        self.Weight += lr * D * X.T
        self.Threshold -= lr * D.sum(1)

    def train(self, X, Y, K=1000):
        """Train on inputs X against targets Y for at most K epochs."""
        # Targets come from the Y argument (previously this method
        # ignored its arguments and read a global Data via getdata).
        T = Y
        N = X.shape[1]              # number of samples (columns)
        Y = self.simu(X)
        Err = LA.norm(Y - T, 2) / LA.norm(T, 2)      # relative error
        k = 0
        while Err > self.param['tol'] and k < K:
            Y = self.simu(X)
            # delta rule with the ramp derivative at the net input
            self.learn(X, pm(T - Y, mat_map(self.dact, self.Weight * X - np.tile(self.Threshold, N))))
            Err = LA.norm(Y - T, 2) / LA.norm(T, 2)
            k += 1

    def demo(self, K=100):
        """Train on a tiny fixed dataset, printing progress each step.

        Example:
            an = BPnet1s(4, 3)
            an.demo()
        """
        Data = [{'x': [1, 1, 1, 0], 't': [0, 1, 1]},
                {'x': [0, 0, 1, 0], 't': [0, 0, 1]},
                {'x': [1, 0, 1, 0], 't': [0, 1, 0]},
                {'x': [1, 1, 1, 1], 't': [1, 0, 0]}]
        X, T, N = getdata(Data)
        Y = self.simu(X)
        Err = LA.norm(T - Y, 2) / LA.norm(T, 2)
        k = 0
        while Err > self.param['tol'] and k < K:
            Y = self.simu(X)
            self.learn(X, pm(T - Y, mat_map(self.dact, self.Weight * X - np.tile(self.Threshold, N))))
            Err = LA.norm(T - Y, 2) / LA.norm(T, 2)
            k += 1

            print('step ' + str(k))
            print('Weight:')
            print(self.Weight)
            print('Threshold:')
            print(self.Threshold)
            print('Error:')
            print(Err)
            print('\n')

if __name__ == "__main__":
    # tiny regression demo: learn t = x/10 from four scalar samples
    Data = [{'x': [1], 't': [.1]},
            {'x': [2], 't': [.2]},
            {'x': [3], 't': [.3]},
            {'x': [4], 't': [.4]}]
    X, T, N = getdata(Data)
    an = BPnet([1, 2, 1])
    an.train(X, T, K=500)
    print(an.simu(X))
