# BigData course project
# Serial version of training algorithm for SOM
# SOM class

import random
from trainset import TrainSet
from neuron import Neuron
from numpy import *
from util import *
# util's wildcard import shadows numpy's log, so re-import it under a distinct name
from numpy import log as logn

class SOM:
    """Serial Self-Organizing Map.

    The neuron mesh is a size_x * size_y grid of Neuron objects stored
    linearly (row-major) in self.lmap.  The map can either be trained
    from scratch (random weights) or loaded from a previously dumped
    weight file and used purely for classification.
    """

    # create an initialized SOM with given dimension for training
    # vectors (per training set), range of values to initialize those
    # weight vectors and sizes for the mapping mesh
    def __init__(self, ts, size_x, size_y, min_w, max_w,
                 to_learn=True, file=None):
        # ts       -- TrainSet; provides .dim, .size and indexed vectors
        # size_x/y -- dimensions of the neuron mesh
        # min_w/max_w -- range used to initialize neuron weight vectors
        # to_learn -- True: random init for training; False: parse
        #             previously dumped weights from 'file'
        self.ts = ts
        self.size_x = size_x
        self.size_y = size_y
        self.min_w = min_w
        self.max_w = max_w
        if to_learn:
            self._init_to_learn()
        else:
            self._parse_to_classify(file)

    # initializes som for learning purposes (random weights)
    def _init_to_learn(self):
        random.seed()  # initialize random number generator
        neu = lambda i, j: Neuron(self.ts.dim, self.min_w, self.max_w,
                                  float(i), float(j))
        coords = [(x, y) for x in xrange(self.size_x)
                         for y in xrange(self.size_y)]
        # note we use a single row, so the mesh is stored linearly
        self.lmap = [neu(i, j) for (i, j) in coords]
        # initial neighbourhood radius: half the largest mesh dimension
        self.r = float(max(self.size_x, self.size_y)) / 2.0
        # time constant governing the radius decay during training
        self.max_r = float(self.ts.size) / logn(self.r)
        # scratch buffer for the input-vs-winner weight difference
        # (idiom: zeros() instead of building a list of 0.0 by hand)
        self.dw = zeros(self.ts.dim, dtype=float64)

    # initializes the som for classification purposes (parses weights from file)
    def _parse_to_classify(self, file):
        neu = lambda i, j, l: Neuron(self.ts.dim, self.min_w, self.max_w,
                                     float(i), float(j), False, l)
        # one dumped weight line per neuron, in the same linear order
        # the mesh was written out
        lines = read_n_lines(file, self.size_x * self.size_y)
        coords = [(x, y) for x in xrange(self.size_x)
                         for y in xrange(self.size_y)]
        self.lmap = [neu(i, j, l) for ((i, j), l) in zip(coords, lines)]

    # dumps som state (pretty much, one weight vector per line)
    def dump(self, file):
        proj_w = lambda n: n.w
        vlist = map(proj_w, self.lmap)
        dump_vector_list("som", vlist, file)

    # find closest neuron in som to training vector iv (one with min distance)
    def _find_min(self, iv):
        # calculate all distances first (and cache them on each neuron)
        for n in self.lmap:
            n.dist(iv)
        # comparator that uses neuron's cached distance
        c_dist = lambda n: n.d
        winner = min(self.lmap, key=c_dist)
        deb("winner dist %0.6f\n", winner.d)
        winner.dump()
        return winner

    # i-th iteration of the learning algorithm
    def _learn_iter(self, i):
        # wrap around the trainset so iteration counts can exceed ts.size
        iv = self.ts[i % self.ts.size]

        # each time, present the i-th input and select the winner neuron
        v = self._find_min(iv)

        # save the difference of input vs winner, to reuse in calculation
        self.dw = v.minus(iv)

        # update the weights of the winner and its neighbours (whole map)
        for n in self.lmap:
            n.update_weights(float(i), v, float(self.ts.size),
                             self.r, self.max_r, self.dw)

    # learning algorithm: 'epochs' full passes over the trainset,
    # logging every 'show_progress' iterations
    def learn(self, epochs, show_progress):
        log("running learning algorithm ... \n")
        start_t = get_currtime()
        tsize = self.ts.size
        for e in xrange(epochs):
            for i in xrange(e * tsize, (e + 1) * tsize):
                iter_start_t = get_currtime()
                self._learn_iter(i)
                iter_end_t = get_currtime()
                if (i % show_progress) == 0:
                    log("epoch %3d, iter %4d  (%d ms)...\n",
                        e, i, iter_end_t - iter_start_t)
        # BUG FIX: end_t used to be taken only inside the progress branch,
        # so the reported total excluded everything after the last progress
        # report; measure once, after all epochs complete.
        end_t = get_currtime()
        log("total training time = %d secs \n", (end_t - start_t) / 1000)

    # run the som to classify the trainset (testset), writing one
    # winner-neuron id per input vector to 'file'
    def classify(self, file, show_progress):
        log("running classification algorithm ... \n")
        # 'with' guarantees the output file is closed even if an
        # iteration raises (the old open/close pair leaked on error)
        with open(file, "w") as f:
            start_t = get_currtime()
            for i in xrange(self.ts.size):
                iter_start_t = get_currtime()
                self._find_min(self.ts[i]).dump_id(self.size_x, f)
                iter_end_t = get_currtime()
                if (i % show_progress) == 0:
                    log("iter %-4d  (%d ms)...\n", i, iter_end_t - iter_start_t)
            end_t = get_currtime()
            log("total classification time = %d secs \n", (end_t - start_t) / 1000)
