'''
Created on 2009-10-19

@author: Adam Kalinski
@author: Rafal Gajdulewicz
'''
from random import uniform
from optparse import OptionParser, OptionGroup
from loader import parse_learning_set, parse_testing_set
import math
import logging
import pickle

# Root-logger setup: only ERROR and above by default; the verbose
# location-stamped format serves the many logging.debug() calls below
# when the level is lowered during development.
logging.basicConfig(level=logging.ERROR,
                    format='[%(asctime)s at %(filename)s>%(module)s>%(funcName)s>line:%(lineno)d]:<%(levelname)s>   %(message)s')

def tanh(x):
    '''Hyperbolic-tangent activation function (bipolar, range (-1, 1)).'''
    return math.tanh(x)

def dtanh(y):
    '''
    Derivative of tanh, expressed in terms of the ACTIVATED output.

    y - a value already produced by tanh(), i.e. y = tanh(net).
    Since d/dnet tanh(net) = 1 - tanh(net)**2 = 1 - y**2, the correct
    form is 1 - y**2.  The previous code computed 1 - tanh(y)**2,
    applying tanh a second time, which gave wrong gradients everywhere
    backpropagation calls self.df(layer_value).
    '''
    return 1.0 - y**2

def sigmoid(x):
    '''
    Logistic sigmoid 1/(1 + e**-x) (unipolar, range (0, 1)).

    Numerically stable: the previous form math.e**(-x) raised
    OverflowError for large negative x (around x < -709), so the
    exponential is only ever taken of a non-positive argument.
    '''
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    z = math.exp(x)
    return z / (1.0 + z)

def dsigmoid(y):
    '''
    Derivative of the logistic sigmoid in terms of the ACTIVATED output.

    y - a value already produced by sigmoid(), i.e. y = sigmoid(net).
    Since d/dnet sigmoid(net) = sigmoid(net)*(1 - sigmoid(net)) = y*(1-y),
    the correct form is y*(1-y).  The previous code computed
    sigmoid(y)*(1 - sigmoid(y)), applying the sigmoid a second time,
    which gave wrong gradients at every self.df(layer_value) call site.
    '''
    return y * (1.0 - y)

def matrix(n, k, fill=0.0):
    '''Return an n-by-k matrix as a list of n independent rows,
    each a list of k copies of *fill*.'''
    return [[fill] * k for _ in range(n)]

def print_matrix(m, delimiter=', '):
    for row in m:
        print delimiter.join(["%s" % f for f in row])

class NeuralNet(object):
    '''
    Object representing neural net 
    '''

    def __init__(self, layer_sizes, f=tanh, df=dtanh, init_range=(-0.2,0.2), bias=True):
        '''
        layer_sizes - list of sizes of layers
        f - activation function
        df - derivative of f
        init_range - range from witch weights will be chosen 
        '''
        self.f = f
        self.df = df
        self.layer_sizes = layer_sizes
        self.bias = bias
        self.learning_rate = 0.5
        self.iterations = 1000
                
        self.layer_count = len(layer_sizes)
        
#        if self.bias:
#            for i in xrange(self.layer_count-1):
#                self.layer_sizes[i] += 1
        
        self.__init_layers(self.layer_sizes)
        self.__init_weights(init_range)
        self.__init_changes()
        
        
    def __init_changes(self):
        logging.debug("Initialising weights")
        self.changes = []
        for i in xrange(self.layer_count-1):
            n = len(self.layers[i])
            k = len(self.layers[i+1])
            m = matrix(n, k)
            logging.debug("Created changes matrix for layers %d and %d: %s" % (i,i+1,m))
            self.changes.append(m)
            
    def __init_layers(self, layer_sizes):
        logging.debug("Creating neurons for %d layers" % len(layer_sizes))
        self.layers = []
        for i in layer_sizes:
            self.layers.append([0.0]*i)
        logging.debug("Layers: %s" % self.layers)
        
    def __init_weights(self, init_range):
        logging.debug("Initialising weights")
        self.weights = []
        for i in xrange(self.layer_count-1):
            n = len(self.layers[i])
            if self.bias: n += 1
            k = len(self.layers[i+1])
            m = matrix(n, k)
            logging.debug("Created martix for layers %d and %d: %s" % (i,i+1,m))
            self.weights.append(m)
        logging.debug("Filling matrices with random numbers")
        for w in self.weights:
            for i in xrange(len(w)):
                for j in xrange(len(w[0])):
                    w[i][j] = uniform(*init_range) 
            logging.debug("%s" % w)
        
    def compute_output(self, values):
        ''' 
        Computes the values of output neurons for given values.
        Size of values must be equal to the size of first layer
        '''

        if len(values) != self.layer_sizes[0]:
            raise ValueError, "Invalid input size. Got input of size %d and size of first layer is %d" \
                              % (len(values), len(self.layers[0]))
        self.layers[0] = values
        for i in xrange(1,self.layer_count):
            self.__compute_layer(i)
        return self.layers[-1]
            
    def __compute_layer(self,index):
        prev = self.layers[index-1]
        w = self.weights[index-1]
        if self.bias:
            from copy import copy 
            prev = copy(prev)
            prev.append(1.0)
        this = self.layers[index]
        for i in xrange(len(this)):
            this[i] = self.f(sum([w[j][i] * prev[j] for j in xrange(len(prev))]))
            logging.debug("Node %d of layer %d = %f" % (i, index, this[i]))
    
    def __backPropagate(self, targets, LR):
        """
        targets - what we want to achieve
        LR - learning rate
        """
        # array of errors
        deltas = []
        for i in xrange(self.layer_count):
            deltas.append([0.0]*self.layer_sizes[i])
        # number of outputs
        outn = self.layer_sizes[-1]
        # output layer
        outl = self.layers[-1]
        if len(targets) != outn:
            raise ValueError, 'wrong number of target values'
        # calculate error terms for output
        for k in range(outn):
            deltas[-1][k] = (targets[k]-outl[k])*self.df(outl[k])
            #print 'deltas[-1][%d]: %f = outl[%d]: %f targ[%d]: %f ' % (k,deltas[-1][k],k,outl[k],k,targets[k])
            #curr_deltas[k] = self.df(outl[k]) * error

        # process hidden layers, descending order
        for i in xrange(self.layer_count-2,0,-1):
            # current layer
            ncurr = self.layer_sizes[i]
            # previous layer
            nprev = self.layer_sizes[i+1]
            # calculate error terms for hidden
            for j in range(ncurr):
                # for each neuron from previous
                error = 0.0
                for k in range(nprev):
                    error = error + deltas[i+1][k]*self.weights[i][j][k]
                    deltas[i][j]=error*self.df(self.layers[i][j])
                    
        # update weights
        for i in xrange(self.layer_count-1):
            ncurr = self.layer_sizes[i]
            nnext = self.layer_sizes[i+1]
            for j in xrange(ncurr):
                for k in xrange(nnext):
                    #print 'WEIGHTS[%d][%d][%d]: %f += LR ERR: %f df(%f): %f ' %(i,j,k,self.weights[i][j][k],deltas[i+1][k],self.layers[i][j],self.df(self.layers[i][j]))
                    self.weights[i][j][k]= self.weights[i][j][k] + LR*deltas[i+1][k]*self.layers[i][j]
                    

        # calculate error
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5*(deltas[-1][k])**2
        return error        
    
    def test(self,patterns):
        print '\n>> Testing... \n'
        for pat in patterns:
            print pat, ' response: ', self.compute_output(pat)
            
        print '\n>> Testing completed '
            
    def train(self,patterns):
        '''
        patterns - to learn
        self.iterations - how many iterations on all patterns to perform
        self.learning_rate - learning rate
        '''        
        
        print ">> Training on set:"
        print_matrix(patterns)
        for i in xrange(self.iterations):
            try:
                error = 0.0
                for pat in patterns:
                    inputs = pat[0]
                    targets = pat[1]
                    self.compute_output(inputs)
                    
                    error = error + self.__backPropagate(targets, self.learning_rate)
                if i%100== 0 :
                    if self.print_error:
                        print "Error: %e" % error
            except KeyboardInterrupt, message:
                print "User aborted learning"
                return
    
    @classmethod
    def load_from_file(cls, file):
        '''Loads a NeuralNet from file'''
        net = None
        logging.info("Saving net to: %s " % file)
        with open(file, 'r') as f:
            net = pickle.load(f)
        return net

    def save_to_file(self, file):
        '''Saves NeuralNet to file'''
        logging.info("Loading net from: %s " % file)
        with open(file, 'w') as f:
            pickle.dump(self, f)
            
    def print_info(self):
        print "Bias: %s" % (self.bias and "On" or "Off")
        print "Layer sizes: %s" % self.layer_sizes
        print "Learning rate: %f" % self.learning_rate
        print "Learning cycles: %d" % self.iterations    
        
    def print_weights(self):
        for i in xrange(len(self.weights)):
            print "Weights between %d and %d layer" % (i,i+1)
            print "-" * 30
            print_matrix(self.weights[i])

def main():
    usage = "usage: %prog [options] [layers sizes]"
    parser = OptionParser(usage)
    
    creation_gr = OptionGroup(parser, "Network", "")
    creation_gr.add_option("-u", "--unipolar",
                           action="store_true", 
                           help="use the unipolar function")
    creation_gr.add_option("-b", "--bias",action="store_true", dest="bias",
                           help="use bias")
    creation_gr.add_option("-n", "--load_network", metavar="FILE", dest="load_network",
                           help="load network from FILE")
    creation_gr.add_option("-s", "--save_network", metavar="FILE", dest="save_network",
                           help="save network to FILE")
    parser.add_option_group(creation_gr)
    
    learning_gr = OptionGroup(parser, "Learning", "")
    learning_gr.add_option("-i","--iterations", type='int', metavar="INT", 
                           help="number of learning cycles")
    learning_gr.add_option("-r","--learning_rate", type='float', metavar="FLOAT", 
                           help="number of learning cycles")
    learning_gr.add_option("-l", "--learning_set", metavar="FILE", 
                           help="load learning set from FILE")
    parser.add_option_group(learning_gr)
    
    testing_gr = OptionGroup(parser, "Testing", "")
    testing_gr.add_option("-t", "--test_set", metavar="FILE", 
                           help="load test set from FILE")
    parser.add_option_group(testing_gr)
    
    parser.add_option("-e", "--print_error",
                      action="store_true",
                      help="print out error values")
    parser.add_option("-w", "--print_weights",
                      action="store_true",
                      help="print out weights of all layers")   
    
    (options, args) = parser.parse_args()
    
    #print options
    #print args
    
    lrn_dict = None
    if options.learning_set:
        lrn_dict = parse_learning_set(options.learning_set)
        
    nn = None;
    if options.load_network:    # load existing net
        nn = NeuralNet.load_from_file(options.load_network)
    else:   #creating new neural net
        if not options.learning_set:
            parser.error("Load network or provide a learning set")
        
        args = map(int,args)
        args.insert(0,len(lrn_dict['learning_set'][0]))
        args.append(len(lrn_dict['solutions'][0]))
        
        nn = NeuralNet(args, bias=bool(options.bias))
        if options.unipolar:
            nn.f = sigmoid
            nn.df = dsigmoid
    
    if options.learning_rate: nn.learning_rate = options.learning_rate
    if options.iterations: nn.iterations = options.iterations
    nn.print_error = bool(options.print_error)

    nn.print_info()    
        
    if lrn_dict:
        print "\n>> Starting training network..."
        nn.train([list(t) for t in zip(lrn_dict['learning_set'],
                                       lrn_dict['solutions'])])
        print ">> training done\n"
        
    if options.save_network:
        nn.save_to_file(options.save_network)
        
    if options.test_set:
        tests = parse_testing_set(options.test_set)
        nn.test(tests)
    
    if options.print_weights:
        print "\n"
        nn.print_weights()
        print "\n"

        
    

def train_all():
    
    # XOR
    XOR = [
        [[0,0], [-1]],
        [[0,1], [1]],
        [[1,0], [1]],
        [[1,1], [-1]]
    ]
    AND= [
        [[0,0], [-1]],
        [[0,1], [-1]],
        [[1,0], [-1]],
        [[1,1], [1]]
    ]
    OR= [
        [[0,0], [-1]],
        [[0,1], [1]],
        [[1,0], [1]],
        [[1,1], [1]]
    ]
    # sample from website
    SAM = [
           [[1, 1], [-1]],
           [[2, 1], [-1]],
           [[1, 2], [-1]],
           [[2, 3], [-1]],
           [[3, 3], [1]],
           [[3, 2], [1]]
    ]
    PAT = XOR 
    PATS = [AND,OR,XOR,SAM]
    NAMES = ['AND','OR','XOR','SAM']
    nn = NeuralNet([len(PAT[0]), 2, 1], bias=True)
    nn.train(PAT)
    nn.iterations = 5000
    nn.test([p[0] for p in PAT])
    print nn.weights
    
    for i in xrange(len(PATS)):
        pat = PATS[i]
        nn = NeuralNet([len(pat[0]), 2, 1], bias=True)
        nn.iterations = 5000
        nn.train(pat)
        print NAMES[i]
        nn.test([p[0] for p in pat])

if __name__ == "__main__":
    # Uncomment to run the built-in AND/OR/XOR demos instead of the CLI.
    #train_all()
    main()
