# BigData course project
# Serial version of training algorithm for SOM
# SOM class

import random
from math import log as logn
from trainset import TrainSet
from neuron import Neuron
from util import *

class SOM:
    # Self-Organizing Map trained with the classic serial Kohonen algorithm.
    #
    # ts           - TrainSet; provides `dim` (vector length), `size` (number
    #                of training vectors) and `c[i].get(j)` component access
    # size_x/size_y - width/height of the neuron mesh
    # min_w/max_w  - range used to randomly initialize neuron weight vectors
    #
    # Raises ValueError if the mesh is too small (see below).
    def __init__(self, ts, size_x, size_y, min_w, max_w):
        self.ts = ts
        self.size_x = size_x
        self.size_y = size_y
        self.min_w = min_w
        self.max_w = max_w
        random.seed()  # seed RNG from system time/entropy
        # name the map "mesh" to avoid confusion with python "map" functor
        self.mesh = [[Neuron(ts.dim, min_w, max_w, float(i), float(j))
                      for i in xrange(size_x)]
                     for j in xrange(size_y)]
        # initial neighbourhood radius: half of the mesh's larger side
        self.r = float(max(size_x, size_y)) / 2.0
        # max_r is the time constant of the radius decay, ts.size / ln(r).
        # A mesh of 2x2 or smaller gives r <= 1, so ln(r) would be zero
        # (ZeroDivisionError) or negative (silently wrong decay constant);
        # fail fast with a clear error instead.
        if self.r <= 1.0:
            raise ValueError("SOM mesh must be larger than 2x2")
        self.max_r = float(ts.size) / logn(self.r)
        # scratch buffer for the (input - winner) difference, reused on
        # every learning iteration to avoid reallocation
        self.dw = [0.0] * ts.dim

    # dump SOM state to `file`: pretty much one weight vector per line
    def dump(self, file):
        # flatten the mesh row-major into a list of weight vectors
        # (handles an empty mesh, unlike initializer-less reduce)
        vlist = []
        for row in self.mesh:
            for n in row:
                vlist.append(n.w)
        dump_vector_list("som", vlist, file)

    # find closest neuron in som to i-th training vector (one with min distance)
    # Neuron.dist() both returns and caches the distance in n.d; the scan is
    # row-major and ties keep the first neuron seen, as in the original
    # two-stage min over rows.
    def _find_min(self, i):
        winner = None
        for row in self.mesh:
            for n in row:
                n.dist(self.ts, i)  # computes distance, caches it in n.d
                if winner is None or n.d < winner.d:
                    winner = n
        deb("winner dist %0.6f\n", winner.d)
        winner.dump()
        return winner

    # i-th iteration of the learning algorithm: present the i-th input,
    # select the winner neuron and pull it and its neighbours toward the input
    def _learn_iter(self, i):
        v = self._find_min(i)

        # save the difference of input vs winner, reused by every neuron update
        for j in xrange(self.ts.dim):
            self.dw[j] = self.ts.c[i].get(j) - v.w[j]

        # update the weights of the winner and its neighbours
        for row in self.mesh:
            for n in row:
                n.update_weights(i, v, self.ts.size,
                                 self.r, self.max_r, self.dw)

    # learning algorithm: one pass over all training vectors.
    # show_progress - log timing every show_progress-th iteration; 0 disables
    #                 progress logging (previously raised ZeroDivisionError)
    def learn(self, show_progress):
        log("running learning algorithm ... \n")
        start_t = get_currtime()
        for i in xrange(self.ts.size):
            iter_start_t = get_currtime()
            self._learn_iter(i)
            iter_end_t = get_currtime()
            if show_progress and (i % show_progress) == 0:
                log("iter %-4d  (%d ms)...\n", i, iter_end_t - iter_start_t)
        end_t = get_currtime()
        log("total training time = %d secs \n", (end_t - start_t) / 1000)
