# BigData course project
# Serial version of training algorithm for SOM
# Neuron class

from numpy import *
from numpy import log as logn
from util import *

class Neuron:
    """A single SOM neuron: a fixed (x, y) position on the map mesh plus a
    weight vector living in the input space."""

    # constructor for the neuron, supports two types of initialization
    # (controlled by the rand parameter): random or parsing weights from
    # a space-separated line.
    def __init__(self, dim, min_w, max_w, x, y, rand=True, line=None):
        """Create a neuron at map position (x, y).

        dim    -- dimension of the input/weight space
        min_w  -- lower bound for random weight initialization
        max_w  -- upper bound for random weight initialization
        x, y   -- integer coordinates of the neuron on the map mesh
        rand   -- if True, draw weights uniformly from [min_w, max_w);
                  otherwise parse them from `line`
        line   -- space-separated weight values (used only when rand=False)

        Raises ValueError when a parsed line does not yield exactly `dim`
        weights.
        """
        self.x = x
        self.y = y
        if rand:
            # vectorized uniform draw in [min_w, max_w); consumes the same
            # sequence from the generator as the per-element loop it replaces
            self.w = (max_w - min_w) * random.random(dim) + min_w
        else:
            self.w = array([float(s) for s in line.split()])
            if len(self.w) != dim:
                log("Parsed neuron got incorrect dimension: %d != %d\n",
                    len(self.w), dim)
                # BUG FIX: the original used a bare `raise` here, which is
                # only legal inside an `except` block (there is no active
                # exception to re-raise); raise an explicit error instead.
                raise ValueError("parsed neuron got incorrect dimension: "
                                 "%d != %d" % (len(self.w), dim))
        # cached distance (in input space) to the current training vector;
        # refreshed by dist()
        self.d = float(0)
        # accumulators for numerator and denominator of the batch update
        # formula. den is kept as an array mirroring num's shape even though
        # every component holds the same scalar sum (see accum_numdem).
        self.num = zeros(dim)
        self.den = zeros(dim)

    def dump(self):
        # debug-print map position and weights via util.deb
        deb("neuron at [%d,%d]\n", int(self.x), int(self.y))
        for i in xrange(len(self.w)):
            deb("%.6f ", self.w[i])
        deb("\n")

    def get_idx(self, size_x):
        # linearized index of this neuron in a flattened map.
        # NOTE(review): x is multiplied by size_x (not y) — verify this
        # matches the map layout assumed by the callers.
        return self.x*size_x + self.y

    def dump_id(self, size_x, f):
        # write "<linear index> <x> <y>" for this neuron to file object f
        idx = self.get_idx(size_x)
        f.write("%6d %6d %6d\n" % (idx, int(self.x), int(self.y)))

    # calculates distance between i-th training vector and the
    # neuron's weight vector. also updates internal cache for the distance
    def dist(self, iv):
        # delegate to the input (sparse) vector, which knows how to
        # compute the distance fast; cache the result in self.d
        self.d = iv.dist(self.w)
        return self.d

    # the neighbourhood ratio (called sigma in formula); which
    # decreases exponentially with iteration (t). it also needs to know
    # the r,max_r parameters of the som
    @staticmethod
    def sigma(t, r, max_r):
        # NOTE(review): if t and max_r are both ints this is integer
        # division under Python 2 — confirm callers pass floats.
        return r * exp(-t / max_r)

    # euclidean distance in the map (mesh), between this neuron and
    # neuron v (thought as the winner of current iteration)
    def _dist_map(self, v):
        return sqrt((self.x - v.x)**2 + (self.y - v.y)**2)

    # calculates the influence (eta in formula) of a neuron (self)
    # respect to the other (v), where the second is thought as the center
    # of a neighbourhood. it requires to know also current iteration time
    # (t), as well as the som parameters (r and max_r)
    def _eta(self, t, v, sigma):
        # NOTE(review): denominator is 2*sigma, not the more common
        # Gaussian form 2*sigma**2 — confirm this is intended.
        return exp(-(self._dist_map(v)**2) / (2 * sigma))

    # initializes numerator and denominator accums
    def init_accums(self):
        self.num.fill(0)
        self.den.fill(0)

    # accumulates the numerator/denominator for weights update
    # t = time, v = winner, sigma = ratio and iv = input vector
    def accum_numdem(self, t, v, sigma, iv):
        eta = self._eta(t, v, sigma)
        self.num += eta * iv.arr
        # eta is a scalar; broadcasting adds it to every component of den
        self.den += eta

    # updates weights using accumulated num/den
    def update_weights(self):
        # NOTE(review): no guard against den == 0 (a neuron that never
        # accumulated anything would produce NaN weights) — confirm the
        # training loop guarantees every neuron receives some influence.
        self.w = self.num / self.den


