# BigData course project
# Serial version of training algorithm for SOM
# SOM class

from numpy import random, log as logn
import util
from trainset import TrainSet
from neuron import Neuron
from util import *

class SOM:
    """Self-organizing map trained with the serial batch algorithm.

    The size_x * size_y mesh of neurons is stored linearly in self.lmap,
    with the y coordinate varying fastest: neuron (x, y) lives at index
    x * size_y + y.
    """

    # create an initialized SOM with given dimension for training
    # vectors (per training set), range of values to initialize those
    # weight vectors and sizes for the mapping mesh
    def __init__(self, ts, epochs, size_x, size_y, min_w, max_w,
                 to_learn=True, file=None):
        # ts          -- training set (provides .dim, .size and indexing)
        # epochs      -- number of passes over the training set
        # size_x/y    -- dimensions of the mapping mesh
        # min_w/max_w -- range used to initialize the weight vectors
        # to_learn    -- True: random init for training;
        #                False: parse dumped weights from `file` to classify
        self.ts = ts
        self.epochs = epochs
        self.size_x = size_x
        self.size_y = size_y
        self.min_w = min_w
        self.max_w = max_w
        if to_learn:
            self._init_to_learn()
        else:
            self._parse_to_classify(file)

    # initializes som for learning purposes (random weights)
    def _init_to_learn(self):
        random.seed(666)  # fixed seed so training runs are reproducible
        neu = lambda i, j: Neuron(self.ts.dim, self.min_w, self.max_w,
                                  float(i), float(j))
        coords = [(x, y) for x in range(self.size_x)
                         for y in range(self.size_y)]
        # the mesh is stored linearly, y varying fastest: (x, y) -> x*size_y + y
        self.lmap = [neu(i, j) for (i, j) in coords]
        # initial neighbourhood radius: half the largest mesh dimension
        self.r = float(max(self.size_x, self.size_y)) / 2.0
        max_t = float(self.ts.size * self.epochs)
        # time constant for the decay of the neighbourhood radius
        # NOTE(review): logn(self.r) is zero when max(size_x, size_y) == 2,
        # which would divide by zero -- meshes are assumed larger than that.
        self.max_r = max_t / logn(self.r)

    # initializes the som for classification purposes (parses weights from file)
    def _parse_to_classify(self, file):
        neu = lambda i, j, l: Neuron(self.ts.dim, self.min_w, self.max_w,
                                     float(i), float(j), False, l)
        # one dumped line per neuron, in the same linear order as lmap
        lines = read_n_lines(file, self.size_x * self.size_y)
        coords = [(x, y) for x in range(self.size_x)
                         for y in range(self.size_y)]
        self.lmap = [neu(i, j, l) for ((i, j), l) in zip(coords, lines)]

    # dumps som state (pretty much, one weight vector per line)
    def dump(self, file):
        vlist = [n.w for n in self.lmap]
        dump_vector_list("som", vlist, file)

    # calculate neurons that belong to neighbourhood of v (ratio sigma)
    def _neighbours(self, v, sigma):
        nvs = []
        # half the sigma (ratio), rounded one up (to avoid leaving anyone
        # out); '//' keeps the Python-2 integer-division semantics
        hsu = int(sigma) // 2 + int(sigma) % 2
        # simply return all neurons on the square containing the
        # neighbourhood; the real circle-neighbourhood is contained inside,
        # and the neuron's eta function eliminates those inside the
        # rectangle but out of the circle (returns a value close to zero)
        for x in range(int(v.x) - hsu, int(v.x) + hsu):
            if 0 <= x < self.size_x:
                for y in range(int(v.y) - hsu, int(v.y) + hsu):
                    if 0 <= y < self.size_y:
                        # BUGFIX: linear index of (x, y) is x*size_y + y
                        # (was x*size_x + y, which picks the wrong neuron
                        # -- or raises IndexError -- on non-square meshes)
                        nvs.append(self.lmap[x * self.size_y + y])
        return nvs

    # one iter of the learning algorithm (within an epoch), at time t
    def _learn_iter(self, e, t, i, show_progress):
        iter_start_t = get_currtime()
        # precalculate new sigma (neighbourhood ratio)
        sigma = Neuron.sigma(t, self.r, self.max_r)
        # present the i-th input and select the winner neuron
        iv = self.ts[i]
        _, v = find_min(self.lmap, iv)
        # calculate neurons to be affected (the neighbours)
        nvs = self._neighbours(v, sigma)
        # accumulate numerator and denominators for neighbours
        for n in nvs:
            n.accum_numdem(t, v, sigma, iv)
        iter_end_t = get_currtime()
        if (t % show_progress) == 0:
            log("epoch %3d, iter %4d  (%d ms)...\n",  
                e, t, iter_end_t - iter_start_t)

    # one epoch of the batch learning algorithm
    def _learn_epoch(self, e, init_t, show_progress):
        # init accumulators for all neurons
        for n in self.lmap:
            n.init_accums()
        # iterate over the whole trainset in random order
        # (list() so the shuffle works on Python 3 as well)
        idxs = list(range(self.ts.size))
        random.shuffle(idxs)
        for (t, i) in enumerate(idxs):
            self._learn_iter(e, init_t + float(t), i, show_progress)
        # update all neuron weights using accumulated num / den
        for n in self.lmap:
            n.update_weights()

    # batch learning algorithm
    def learn(self, show_progress):
        log("running batch learning algorithm ... \n")
        start_t = get_currtime()
        for e in range(self.epochs):
            epoch_start_t = get_currtime()
            self._learn_epoch(e, float(e * self.ts.size), show_progress)
            epoch_end_t = get_currtime()
            log("epoch %3d (%d ms) ...\n", e, epoch_end_t - epoch_start_t)
        end_t = get_currtime()
        log("total training time = %d secs \n", (end_t - start_t) / 1000)

    # run the som to classify the trainset (testset)
    def classify(self, file, show_progress):
        log("running classification algorithm ... \n")
        start_t = get_currtime()
        # 'with' guarantees the output file is closed even if an iteration
        # raises (the original leaked the handle on error)
        with open(file, "w") as f:
            for i in range(self.ts.size):
                iter_start_t = get_currtime()
                # NOTE(review): dump_id receives size_x; if the neuron's
                # linear id is computed as x*size + y this should arguably
                # be size_y -- verify against Neuron.dump_id.
                find_min(self.lmap, self.ts[i])[1].dump_id(self.size_x, f)
                iter_end_t = get_currtime()
                if (i % show_progress) == 0:
                    log("iter %-4d  (%d ms)...\n", i, iter_end_t - iter_start_t)
        end_t = get_currtime()
        log("total classification time = %d secs \n", (end_t - start_t) / 1000)
