# BigData course project
# Serial version of training algorithm for SOM
# Neuron class

from random import random
from math import *
from util import *

class Neuron:
    # One neuron of a self-organizing map (SOM): a position (x, y) in the
    # map mesh plus a weight vector w of length dim, and a cached distance
    # d to the most recent training vector (see dist()).

    # constructor for the neuron, supports two types of initialization
    # (controlled by the rand parameter): random weights uniform in
    # [min_w, max_w), or parsing weights from a space-separated line.
    # Raises ValueError when a parsed line has the wrong dimension.
    def __init__(self, dim, min_w, max_w, x, y, rand=True, line=None):
        self.x = x
        self.y = y
        if rand:
            gen_w = lambda: (max_w-min_w)*random() + min_w
            self.w = [gen_w() for _ in range(dim)]
        else:
            self.w = [float(s) for s in line.split()]
            if len(self.w) != dim:
                log("Parsed neuron got incorrect dimension: %d != %d\n",
                    len(self.w), dim)
                # BUG FIX: a bare `raise` here had no active exception to
                # re-raise and itself blew up with an unrelated error;
                # raise an explicit, catchable exception instead.
                raise ValueError("parsed neuron has dimension %d, expected %d"
                                 % (len(self.w), dim))
        # cache for the last distance computed by dist()
        self.d = 0.0

    # debug-print the neuron's mesh position and weights (via util.deb)
    def dump(self):
        deb("neuron at [%d,%d]\n", int(self.x), int(self.y))
        for wj in self.w:
            deb("%.6f ", wj)
        deb("\n")

    # linear index of this neuron in the mesh, given the mesh extent size_x
    def get_idx(self, size_x):
        return self.x*size_x + self.y

    # write "index x y" for this neuron to the (already open) file f
    def dump_id(self, size_x, f):
        idx = self.get_idx(size_x)
        f.write("%6d %6d %6d\n" % (idx, int(self.x), int(self.y)))

    # the learning rate (called alpha in formula); decreases
    # exponentially with iteration (t). it also needs to know the
    # total number of iterations (max_t)
    @staticmethod
    def _alpha(t, max_t):
        # BUG FIX: force float division -- under Python 2 integer
        # arguments made `-t / max_t` floor-divide, collapsing the decay
        # schedule to a step function (e.g. -5/100 == -1).
        return 0.5 * exp(-float(t) / max_t)

    # the neighbourhood ratio (called sigma in formula); which
    # decreases exponentially with iteration (t). it also needs to know
    # the r,max_r parameters of the som
    @staticmethod
    def _sigma(t, r, max_r):
        # BUG FIX: float division, same Python 2 truncation issue as _alpha.
        return r * exp(-float(t) / max_r)

    # euclidean distance in the map (mesh), between this neuron and
    # neuron v (thought as the winner of current iteration)
    def _dist_map(self, v):
        return sqrt((self.x - v.x)**2 + (self.y - v.y)**2)

    # calculates the influence (eta in formula) of a neuron (self)
    # respect to the other (v), where the second is thought as the center
    # of a neighbourhood. it requires to know also current iteration time
    # (t), as well as the som parameters (r and max_r)
    # NOTE(review): textbook SOM uses 2*sigma**2 in the denominator; this
    # code uses 2*sigma -- confirm whether that is intentional before
    # changing it (behavior preserved here).
    def _eta(self, t, v, r, max_r):
        return exp(-(self._dist_map(v)**2) / (2 * Neuron._sigma(t,r,max_r)))

    # given the winning neuron v, updates the weights of neuron (self)
    # in place: w[j] += alpha(t) * eta(t) * dw[j].
    # needs to know iteration (t), and maximum number of iters (max_t),
    # as well as the som params (r, max_r and dw)
    def update_weights(self, t, v, max_t, r, max_r, dw):
        alpha_eta = Neuron._alpha(t, max_t) * self._eta(t,v,r,max_r)
        for j in range(len(self.w)):
            self.w[j] += alpha_eta * dw[j]

    # calculates the euclidean distance between training vector iv and the
    # neuron's weight vector. also updates internal cache (self.d) for the
    # distance, and returns it
    def dist(self, iv):
        self.d = 0.0
        for j in range(len(self.w)):
            self.d += (iv[j] - self.w[j])**2
        self.d = sqrt(self.d)
        return self.d
