'''
Created on Mar 30, 2013

@author: yongchao
'''
import numpy as np
import matplotlib.pyplot as plt

from ConstantObj import ConstantObj

class Recurrent(object):
    """Neural-network training driver.

    Implements single-layer perceptron learning and back-propagation
    for a one-hidden-layer network, plus helpers for min-max
    normalization, distance metrics and plotting.
    """

    def __init__(self):
        """Initialise the convergence threshold used by the training loops."""
        # When the error drops below this value, the recurrence may stop.
        self.stopper = 0.001
    def testDataByBackProp(self, inputData, network, testAmount):
        """Evaluate the back-propagation network over test records.

        inputData:  whole test data array; each row holds the feature
                    columns followed by a trailing column, with the
                    expected class in the second-to-last column.
        network:    trained network object (must provide the weight
                    getters used by backPropCalculate and
                    calcuSquareDistance).
        testAmount: number of records to evaluate; -1 means all rows.

        Returns a 2-D array of [expected_class, square_distance] rows.
        """
        inputDataArr = np.array(inputData)
        results = []

        if testAmount == -1:
            testAmount = np.inf  # effectively "no limit"

        for index, inputRow in enumerate(inputDataArr, start=1):
            if index > testAmount:
                break

            exRow = inputRow[0:-1]      # feature columns (drop the last column)
            resultValue = inputRow[-2]  # expected class label

            outValue = self.backPropCalculate(exRow, network)
            calcuRet = network.calcuSquareDistance(0.0, 1.0, outValue)

            # int() instead of np.int: the np.int alias was removed in NumPy 1.24.
            results.append(np.array([int(resultValue), calcuRet]))

        return np.array(results)

    def backPropCalculate(self, singleData, network):
        """Forward pass of one record through the one-hidden-layer network.

        singleData: feature vector for a single record.
        network:    provides the learned weight arrays and the sigmoid.

        Returns the activated value of the single output node.
        """
        inputWeights = network.getLastInputLayerWeightArr()
        hiddenWeights = network.getLastHiddenWeightArr()

        # Hidden layer: one weighted sum + sigmoid per hidden node.
        hiddenActivations = []
        for nodeIdx in range(hiddenWeights.shape[0]):
            weightedSum = np.dot(singleData,
                                 inputWeights[nodeIdx].reshape((-1, 1)))[0]
            hiddenActivations.append(network.activateFunSigmod(weightedSum))

        # Output node: weighted sum of the hidden activations, then sigmoid.
        outSum = np.dot(hiddenActivations, hiddenWeights.reshape((-1, 1)))[0]
        return network.activateFunSigmod(outSum)
        

    def BackPropLearing(self,example,network,sampleDateAmmount,repeatTimes,hiddenNodeAmount,learingRate):
# example: whole Training Data Array
# network: neural network class
# sampleDateAmmount: really training sample used amount, when sampleDateAmmount is -1, whole example will be training
# repeatTimes: training repeat times
# hiddenNodeAmount: hidden layer node amount 
        """Train a one-hidden-layer network with on-line back-propagation.

        Each training row holds the feature columns followed by a trailing
        column, with the target value in the second-to-last column.  The
        learned weights (plus their per-sample and per-epoch histories)
        are stored on `network` via setBackPropLearingWeight, and the
        network object is returned.
        """
        
        constantObj = ConstantObj()
        
        if sampleDateAmmount == -1:
           sampleDateAmmount = np.array(example).shape[0]  
                  
        print "training record amount is:", sampleDateAmmount
        print "traing repeate times is:",repeatTimes
        print "hidden layer node amount is:",hiddenNodeAmount        
        # NOTE(review): most of the locals below are never used
        # (weightArray, weightArr1, c, corNum, resultArr, indexArr, ...).
        weightArray = [] # weight array
        weightArr1 = []
        inputLayerWeight = [] 
        hiddenWeight = []
        inputLayerWeightAll = []
        hiddenWeightAll = []
        inputLayerWeightRepeat = []
        hiddenWeightRepeat = []        
        c = []
        
        exampleArr = np.array(example)
        colNum = exampleArr.shape[1]
        rowNum = exampleArr.shape[0]
        
        corNum = colNum-1
        inputLayerWeightIndex = 0
        
        # NOTE(review): the input-layer width is hard-coded to 41 features;
        # this silently assumes exampleArr has 42 columns -- confirm against
        # the training data, corNum is available but unused here.
        inputLayerWeight = np.random.rand(hiddenNodeAmount,41)
        hiddenWeight = np.random.rand(hiddenNodeAmount)
        
        # NOTE(review): these history lists store references to the same
        # mutable arrays that are updated in place below, so every entry
        # ends up aliasing the final weights rather than a snapshot.
        inputLayerWeightAll.append(inputLayerWeight)
        hiddenWeightAll.append(hiddenWeight)
        
        inputLayerWeightRepeat.append(inputLayerWeight)
        hiddenWeightRepeat.append(hiddenWeight)
        
        #print "the random weight matrix is:"
        #print weightMatrix
        resultArr = exampleArr[0:,-2:-1]
        
        sumWeightVariation = 10
        
        indexk = 0
        
        errLineQuan=0
        
        indexArr = []
        indexkArr = []
        weightDistanceTimeArr = []
        inputIndex = 0
        hiddenLayer = 1
        totalLarer = (hiddenLayer +1)
        hiddenDeltasArr = []


        # NOTE(review): counter is never reset between epochs, so when
        # sampleDateAmmount < rowNum the later epochs process no rows at all.
        counter = 0
        repeatIndex = 0
        while repeatIndex < repeatTimes:
            
            for inputRow in exampleArr:
                if counter > sampleDateAmmount:
                   break  
                # Target value sits in the second-to-last column.
                resultValue = inputRow[-2:-1][0]
                exRow = inputRow[0:-1]
                hiddenInputActivateArr = [] 
                hiddenInputArr = []  
                inputDeltasArr = []  
                  
                # Forward pass, hidden layer: weighted sum and sigmoid per node.
                for hidderLayerIndex in range(0,hiddenNodeAmount):
                    weightMatrix = inputLayerWeight[hidderLayerIndex]
                    
                    inputResult =  np.dot(exRow, weightMatrix.reshape((-1,1)))
                    inputResult = inputResult[0]
                    hiddenInputArr.append(inputResult)
                    activateResult = network.activateFunSigmod(inputResult)
                    hiddenInputActivateArr.append(activateResult)
                    
                # Forward pass, output node.
                hiddenResult = np.dot(np.array(hiddenInputActivateArr) , hiddenWeight.reshape((-1,1)))
                hiddenResult = hiddenResult[0]
                outActivateResult =  network.activateFunSigmod(hiddenResult)  

            
                # Backward pass: output delta, then update output weights.
                outErr = resultValue -  outActivateResult
                outDeltas = outErr * network.activateFunSigmodDerivative(hiddenResult)
                hiddenWeight = hiddenWeight +  learingRate * outDeltas * np.array(hiddenInputActivateArr)  
            
      
                # NOTE(review): the input-layer deltas are computed from the
                # already-updated hiddenWeight; textbook backprop uses the
                # pre-update output weights here -- confirm intent.
                for hidderLayerIndex in range(0,hiddenNodeAmount):
                    activateDerivativeValue = network.activateFunSigmodDerivative(hiddenInputArr[hidderLayerIndex])
                    inputDeltas = activateDerivativeValue * outDeltas * hiddenWeight[hidderLayerIndex]
                    inputDeltasArr.append(inputDeltas)
                
                    weightMatrix = inputLayerWeight[hidderLayerIndex] + learingRate * exRow * inputDeltas
                    inputLayerWeight[hidderLayerIndex] = weightMatrix
                
                counter = counter + 1
                inputLayerWeightAll.append(inputLayerWeight)
                hiddenWeightAll.append(hiddenWeight)
                
            repeatIndex = repeatIndex + 1
            inputLayerWeightRepeat.append(inputLayerWeight)
            hiddenWeightRepeat.append(hiddenWeight)
            
        network.setBackPropLearingWeight(inputLayerWeight,hiddenWeight,inputLayerWeightAll,hiddenWeightAll,inputLayerWeightRepeat,hiddenWeightRepeat)         
        return   network             
                           
        
    def testData(self, inputData, network, testAccount):
        """Evaluate the trained perceptron over test records.

        inputData:   whole test data array; each row holds the feature
                     columns followed by a trailing column, with the
                     expected class in the second-to-last column.
        network:     trained network (provides getWeightArray and
                     activateFun).
        testAccount: number of records to evaluate.

        Returns a 2-D array of [expected_class, activated_value] rows.
        """
        inputDataArr = np.array(inputData)
        weightArr = network.getWeightArray()
        results = []

        for index, inputRow in enumerate(inputDataArr, start=1):
            if index > testAccount:
                break

            ex = inputRow[0:-1]         # feature columns (drop the last column)
            resultValue = inputRow[-2]  # expected class label

            # Perceptron output: weighted sum of the features, then activation.
            sumValue = np.matrix(ex) * weightArr.reshape((-1, 1))
            activatedValue = network.activateFun(sumValue)

            # int() instead of np.int: the np.int alias was removed in NumPy 1.24.
            results.append(np.array([int(resultValue), activatedValue]))

        return np.array(results)

    
    def perceptionLearning(self, example, network, trainingAccount, repeatTimes):
        """Train a single-layer perceptron with the delta rule.

        example:         training data array; each row holds the feature
                         columns followed by the target value in the
                         last column.
        network:         network object (supplies random initial weights,
                         the activation function, the learning rate, and
                         stores the weight history).
        trainingAccount: number of rows processed per epoch.
        repeatTimes:     number of training epochs.

        Returns the network with the final weight vector installed.
        """
        exampleArr = np.array(example)
        colNum = exampleArr.shape[1]

        # One weight per feature column (the target column is excluded).
        weightArr = network.getRandomWeightArray(colNum - 1)
        weightMatrix = np.matrix(weightArr).reshape((1, -1))
        weightRevisedArr = weightMatrix

        indexk = 0
        indexkArr = []
        weightDistanceTimeArr = []  # per-epoch weight movement (for plotting)

        while indexk < repeatTimes:
            network.addWeigthArrays(weightRevisedArr)

            weightDistanceArr = []  # per-sample weight movement (for plotting)
            indexArr = []
            index = 0
            for inputRow in exampleArr:
                # BUGFIX: this previously read the undefined name
                # "trainAccount" and raised NameError on the first row.
                if index > trainingAccount:
                    break

                ex = inputRow[0:-1]        # feature columns
                resultValue = inputRow[-1]  # target value

                weightMatrix = weightRevisedArr
                sumValue = np.matrix(ex) * weightMatrix.reshape((-1, 1))
                activatedValue = network.activateFun(sumValue)
                errValue = resultValue - activatedValue

                # Delta rule: w <- w + eta * error * x
                weightRevisedArr = (weightMatrix.reshape((1, -1))
                                    + network.getLearningRate() * errValue * np.matrix(ex))

                weightDistanceArr.append(self.squareDistance(weightMatrix, weightRevisedArr))
                indexArr.append(index)
                index = index + 1

            # self.drawSquareDistance(indexArr, weightDistanceArr,
            #                         "Sample Count", "Mean Square Distance", "")

            # Euclidean distance between this epoch's final weights and the
            # previously stored weight vector.
            lastWeightArr = network.getLastWeigthArray()
            subWeightArr = (weightRevisedArr - lastWeightArr).reshape((1, -1))
            multiValue = subWeightArr * subWeightArr.reshape((-1, 1))
            sumWeightVariation = np.sqrt(multiValue)

            weightDistanceTimeArr.append(sumWeightVariation[0, 0])
            indexkArr.append(indexk)
            indexk = indexk + 1

        network.addWeigthArrays(weightRevisedArr)

        # self.drawSquareDistance(indexkArr, weightDistanceTimeArr,
        #                         "Train Times", "Mean Square Distance", "")

        return self.neuralNetHypothesis(weightRevisedArr, network)
          
    def drawSquareDistance(self, XValues, YValues, XLable, YLable, title,
                           label=r"$f(t)=e^{-t} \cdot \cos (2 \pi t)$"):
        """Plot YValues against XValues as a labelled line chart and show it.

        XValues, YValues: sequences of equal length.
        XLable, YLable:   axis labels.
        title:            chart title.
        label:            legend label; the default keeps the original
                          hard-coded demo string (now a raw string so the
                          LaTeX backslashes are not invalid escapes).
        """
        plt.figure()
        plt.plot(XValues, YValues, label=label)
        plt.axis()
        plt.xlabel(XLable)
        plt.ylabel(YLable)
        plt.title(title)
        plt.grid(True)
        plt.legend()
        plt.show()
        
    def squareDistanceSingleValue(self, input1, input2):
        """Element-wise square distance of two scalars or arrays.

        Computes sqrt((input1 - input2) ** 2), i.e. the absolute
        difference, element by element.
        """
        diff = input1 - input2
        return np.sqrt(diff ** 2)
                   
    def squareDistance(self, input1, input2):
        """Euclidean (root of sum of squares) distance between two
        equally-shaped weight matrices, returned as a scalar float."""
        # Flatten the difference to a row vector, then take the inner
        # product with its own column form to sum the squares.
        flatDiff = (input1 - input2).reshape((1, -1))
        sumOfSquares = flatDiff * flatDiff.reshape((-1, 1))
        return np.sqrt(sumOfSquares)[0, 0]
            
    def normalize(self, inputData):
        """Min-max normalize the feature columns of inputData.

        Each feature column x is mapped to (x - min) / (max - min); the
        last column (the target) is left untouched.  Constant columns
        (max == min) are divided by 1 to avoid division by zero.

        NOTE: the rows of inputData are modified in place; the returned
        array is built from those same (mutated) rows.
        """
        # Column-wise extremes over the feature columns only
        # (renamed from max/min so the builtins are not shadowed).
        colMax = inputData[0:, 0:-1].max(axis=0)
        colMin = inputData[0:, 0:-1].min(axis=0)

        denominator = colMax - colMin
        # A constant column would divide by zero; map its range to 1.
        denominator[denominator == 0] = 1

        normalizedRows = []
        for inputRow in inputData:
            featureRow = inputRow[0:-1]
            inputRow[0:-1] = np.divide(featureRow - colMin, denominator)
            normalizedRows.append(inputRow)

        return np.array(normalizedRows)
    
    
    
    def neuralNetHypothesis(self, weightArray, network):
        """Install weightArray as the network's hypothesis weights and
        return the network object."""
        network.setWeigthArray(weightArray)
        return network
    
    
    def test(self):
        """Scratch/demo routine.

        NOTE(review): this method is broken as written -- `recurrent`,
        `training` and `network` are never defined in this scope, so it
        raises NameError when called, and `perceptionLearning` is invoked
        with two arguments where four are required.  Kept for reference.
        """
        c = np.array([[1.0, 2.0, 3.0, 4.0],[4., 5., 6., 7.], [7., 8., 9., 10.]])
        dd = [1.0, 2.0, 3.0, 4.0]
        # NOTE(review): dd is a plain list, so `dd == 3.0` is False and this
        # assigns to dd[False] i.e. dd[0] -- likely not the intended masking.
        dd[dd==3.0]=0

        normalData = recurrent.normalize(training.training)


        network = recurrent.perceptionLearning(normalData,network)

        network.printData()

        #testing = TestingData(sys.argv[2])

        training.printData()
