import pylab
from numpy import *
from generic import GenericNetwork, GenericLayer, GenericLink, GenericTrainer

class RecurrentNetwork(GenericNetwork):
    """A recurrent neural network; all behaviour is inherited from
    GenericNetwork (the recurrence lives in the layer/link classes below)."""
    pass

class RecurrentLayer(GenericLayer):
    """A layer for recurrent topologies.

    Currently adds no state or behaviour of its own; it exists as an
    extension point and simply delegates construction to GenericLayer.
    """

    def __init__(self, rows):
        # Straight pass-through to the generic layer constructor.
        GenericLayer.__init__(self, rows)

class RecurrentLink(GenericLink):
    """A link that feeds the destination layer its own activation from the
    previous propagation step (one-step recurrence).

    Attributes (both (n, 1) column vectors sized to the source layer):
      memory -- destination activation captured on the previous step
                (starts at zero, so the first step contributes nothing).
      cache  -- the step-before-last activation; read externally by
                RecurrentTrainer (as ``RC.cache``) when updating weights.
    """

    def __init__(self, source, destination, spread, center):
        # spread/center are passed straight through to GenericLink;
        # their semantics are defined there.
        GenericLink.__init__(self, source, destination, spread, center)
        self.memory = mat(zeros(source.size)).transpose()
        self.cache = mat(empty(source.size)).transpose()

    def propogate(self):
        # (Spelling 'propogate' is the project-wide convention — see
        # propogate_one_step in the trainer; do not rename in isolation.)
        # Inject last step's activation through this link's data matrix
        # (in-place += mutates destination.data), then slide the memory
        # window forward: cache <- memory <- current destination activation.
        # NOTE(review): reads the destination's activation *after* this
        # step's contribution was added, so link evaluation order matters.
        self.destination.data += self.data * self.memory
        self.cache = self.memory.copy()
        self.memory = self.destination.activated_data()

    def __str__(self):
        return 'RecurrentLink'

class RecurrentTrainer(GenericTrainer):
    def __init__(self, network, dataset, derivative, derivative_simple,  eta = 0.5):
        GenericTrainer.__init__(self, network, dataset)
        self.derivative = derivative
        self.derivative_simple = derivative_simple
        self.eta = eta
        self.total_avg_error  = 0.0
        self.dispersion = 0.0
        self.dispersion2 = 0.0

        shape = self.network.links[0].data.shape
        shape = (shape[1],shape[0],shape[0])
        self.gradient1 = zeros(shape)
        self.gradient1b = zeros(shape)
        shape = self.network.links[1].data.shape
        shape = (shape[0],shape[1],shape[1])
        self.gradient2 = zeros(shape)
        self.gradient2b = zeros(shape)

        self.desired = []
        self.output = []

    def train(self, steps = 0,U=True):
        counter = 0
        pattern = self.dataset.fetch_pattern()
        while pattern and (not steps or counter<steps):
            counter+=1
            print 'training step #' + str(counter)
            self.train_one_step(pattern,U)
            pattern = self.dataset.fetch_pattern()
            print

    def train_one_step(self,pattern,U):
        output = self.network.propogate_one_step(pattern[0])
        desired = array(pattern[1], dtype=float32)
        error = mat(desired) - output
        #print 'output: ' + str(output.sum())
        #print 'desired output: ' + str(desired.sum())
        self.total_avg_error += abs(error/desired)
        print 'error: ' + str(error)

        self.dispersion += float(desired.sum())
        self.dispersion2 += float((desired.sum())**2)
        self.desired.append(float(desired.sum()))
        self.output.append(float(output.sum()))
        if abs(error.sum()) > 0.001:
            if U:
                self.update_weights(error)
            pass
        else:
            print 'optimal'

    def update_weights(self,error):
        net = self.network
        RI, RC, OR = net.links
        I, R, O = net.input, net.hidden[0], net.output
        kronecker = lambda x,y: int(x==y)
        #ajust OR weights
        dWOR = zeros(OR.data.shape)
        error_derivative = mat(array(error) * array(self.derivative(O.data)))
        dWOR =  self.eta * R.activated_data() * error_derivative
        OR.data = OR.data + dWOR.transpose()

        #ajust RI weights
        dWRI = zeros(RI.data.shape)
        cshape = (RI.data.shape[0],RI.data.shape[1],O.data.size)
        for j,i,k in ndindex(cshape):
                    hsum = 0
                    for h in ndindex(R.data.size):
                        gsum = self.gradient1[i][j] * RC.data[h].transpose()
                        gsum += I.data[i] * kronecker(h,j)
                        gsum *= self.derivative_simple(R.data[i])
                        self.gradient1b[i][j][h] = gsum
                        gsum *= OR.data[k,h]
                        hsum += gsum
                    dWRI[j][i] += error[k] * self.derivative_simple(O.data[k]) * hsum
        dWRI *= self.eta
        self.gradient1 = self.gradient1b.copy()

        RI.data = RI.data + dWRI
        #print dWRI

        #ajust RC weights
        dWRC = zeros(RC.data.shape)
        cshape = (RC.data.shape[0],RC.data.shape[1],O.data.size)
        for j,i,k in ndindex(cshape):
                    hsum = 0
                    for h, itemh in enumerate(R.data):
                        gsum = self.gradient2[i][j] * RC.data[h].transpose()
                        gsum += RC.cache[i] * kronecker(h,j)
                        gsum *= self.derivative_simple(R.data[i])
                        self.gradient2b[i][j][h] = gsum
                        gsum *= OR.data[k,h]
                        hsum += gsum
                    dWRC[j][i] += error[k] * self.derivative_simple(O.data[k]) * hsum
        dWRC *= self.eta
        self.gradient2 = self.gradient2b.copy()

        RC.data = RC.data + dWRC
        #print dWRC

    def draw_desired_output(self):
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
        d1 = xrange(len(self.desired))
        d2 = xrange(len(self.output))
        TE = self.total_avg_error/len(self.output)
        self.dispersion2/=len(self.desired)
        self.dispersion/=len(self.desired)
        print self.dispersion,self.dispersion2
        print
        SD = ( self.dispersion2 - (self.dispersion)**2) **0.5
        m = lambda x: x*1
        me1 = lambda x: x*1*(1-float(TE[0][0]))
        me2 = lambda x: x*1*(1+float(TE[0][0]))
        grid = []
        plt.plot(map(m,d1), self.desired,color = (0,0,0,0.7),linestyle='dotted')
        plt.plot(map(m,d2), self.output,color = (0,0,0,0.2))
        plt.xlabel('timescale 60 min')
        plt.ylabel('traffic volume')
        avgo = []
        #for n in xrange(3,len(self.output)):
        #    avgo.append((self.output[n]+self.output[n-1]+self.output[n-2]+self.output[n-3])/4.0)
        #plt.plot(map(m,d2)[3:], avgo,color = (0,1,0,0.2))


        #plt.plot(map(me1,d2), self.desired,color = (0,1,0,0.1))
        #plt.plot(map(me2,d2), self.output,color = (0,1,0,0.1))
#        for n,i in enumerate(self.output):
#            plt.bar(n-2,i,1,color = (1,0,0,0.7),edgecolor = (1,0,0,0))
#        for n,i in enumerate(self.desired):
#            plt.bar(n-1,i,1,color = (0,0,1,0.1),edgecolor = (0,1,0,0))
        print 'TOTAL_ERROR=',1-TE,'%'
        print 'STANDART_DEVIATION=',SD,'%'
        print min(self.output), max(self.output), min(self.output)/max(self.output)
        plt.show()

