#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Roxane Levy

from numpy import sqrt,sqrt,array,unravel_index,nditer,linalg,random,subtract,power,exp,pi,zeros,arange,outer,meshgrid,apply_along_axis
from random import randrange


"""
   Classe "Som"
	Elle crée le réseau neuronal et gère l'apprentissage.

Dans cette classe, on trouve les fonctions suivantes : 
- input_sequence : charge les vecteurs d'apprentissage en séquence
- input_random : charge les vecteurs d'apprentissage dans le désordre
	-
	
"""

class Som:
    """Kohonen self-organizing map.

    Builds an x-by-y grid of neurons whose weight vectors are trained to
    approximate the distribution of the input data.

    Diffusion modes (neighborhood / decay strategy), selected by `diffusion`:
      "s" - simple neighborhood, linearly decaying learning rate and radius
      "g" - simple neighborhood, hyperbolic learning rate alpha = C/(C+t)
      "t" - Gaussian neighborhood, hyperbolic learning rate
      "m" - Gaussian neighborhood, rate and sigma decaying as 1/(1 + t/T)
    """

    def __init__(self, x, y, input_len, taux_apprentissage=1.0, diffusion="g"):
        """
        x, y               - dimensions of the map
        input_len          - length of the input vectors
        taux_apprentissage - initial learning rate
        diffusion          - neighborhood strategy: "s", "g", "t" or "m"
        """
        self.taux_apprentissage_initial = taux_apprentissage
        self.diffusion = diffusion
        self.x = x
        self.y = y

        self._init_neighborhood()

        # Random initialisation of the map weights, then normalisation of
        # each individual weight vector.  (The original divided each (y,
        # input_len) plane by its Frobenius norm, which left the individual
        # vectors un-normalised, inconsistent with update().)
        self.weights = random.rand(x, y, input_len)
        for a in range(x):
            for b in range(y):
                self.weights[a][b] = self.weights[a][b] / linalg.norm(self.weights[a][b])

        # Distance of every neuron to the current input, filled by
        # findbestmatchingnode().
        self.activation_map = zeros((x, y))

        # Coordinate axes used by the Gaussian neighborhood function.
        self.neigx = arange(x)
        self.neigy = arange(y)

    def _init_neighborhood(self):
        """Select the neighborhood function and the initial radius for the
        chosen diffusion mode.

        NOTE(review): "g" maps to the simple neighborhood and "t" to the
        Gaussian one — the letters suggest the opposite, but update() is
        written consistently with this mapping, so it is kept as-is.
        """
        if self.diffusion == "s":
            self.sigma_initial = max(self.x, self.y) / 2
            self.neighborhood = self.simple
        if self.diffusion == "g":
            self.sigma_initial = max(self.x, self.y) / 2
            self.neighborhood = self.simple
        if self.diffusion == "t":
            self.sigma_initial = max(self.x, self.y) / 2
            self.neighborhood = self.gaussian
        if self.diffusion == "m":
            self.neighborhood = self.gaussian
            self.sigma_initial = 1.0

    def _init_T(self, num_iteration):
        """Initialise the decay constants T (and C) from the total number of
        training iterations.  Divisions are done in float so the constants
        do not truncate to 0 under Python 2 integer division (with the
        original code, C == 0 whenever num_iteration < 100, freezing the
        learning rate of the "g" and "t" modes at 0)."""
        if self.diffusion == "m":
            self.T = num_iteration / 2.0
        if self.diffusion == "s":
            self.T = num_iteration
        if self.diffusion == "g":
            self.T = num_iteration
            self.C = self.T / 100.0
        if self.diffusion == "t":
            self.T = num_iteration
            self.C = self.T / 100.0

    def gaussian(self, c, sigma):
        """Return an (x, y) Gaussian bump of width sigma centred on the
        winning node c (a pair of grid coordinates)."""
        d = 2 * pi * sigma * sigma
        ax = exp(-power(self.neigx - c[0], 2) / d)
        ay = exp(-power(self.neigy - c[1], 2) / d)
        return outer(ax, ay)

    def simple(self, c, sigma):
        """Trivial neighborhood: returns the (x, y) activation map.  Only
        its shape is used by update(); the radius cutoff is applied there
        with the Manhattan distance to the winner."""
        return self.activation_map

    def euclidian(self, vectorMap, vectorData):
        """Return the Euclidean distance between two vectors."""
        diff = subtract(vectorData, vectorMap)
        return sqrt((diff * diff).sum())

    def findbestmatchingnode(self, vector):
        """Fill the activation map with the distance of every neuron's
        weight vector to `vector` and return the grid coordinates of the
        closest (winning) neuron."""
        for a in range(len(self.weights)):
            for b in range(len(self.weights[a])):
                # The original wrapped this scalar in linalg.norm(), a no-op
                # on a non-negative distance.
                self.activation_map[a][b] = self.euclidian(self.weights[a][b], vector)
        return unravel_index(self.activation_map.argmin(), self.activation_map.shape)

    def update(self, x, win, t):
        """Move the weights of the winning neuron and of its neighbours
        towards the input vector, then re-normalise every weight vector.

        x   - the current input vector
        win - coordinates of the winning neuron (a pair)
        t   - iteration index (1-based)
        """
        if self.diffusion == "s":
            # Linear decay of both the learning rate and the radius.
            alpha = self.taux_apprentissage_initial - float(t) / self.T
            sigma = self.sigma_initial * (1.0 - float(t) / self.T)
            h = self.neighborhood(win, sigma)
            it = nditer(h, flags=['multi_index'])
            while not it.finished:
                # Only nodes within the Manhattan radius sigma are moved.
                d = abs(win[0] - it.multi_index[0]) + abs(win[1] - it.multi_index[1])
                if d < sigma:
                    self.weights[it.multi_index] += alpha * (x - self.weights[it.multi_index])
                self.weights[it.multi_index] = self.weights[it.multi_index] / linalg.norm(self.weights[it.multi_index])
                it.iternext()
        if self.diffusion == "g":
            # Hyperbolic learning-rate decay, linear radius decay.
            alpha = float(self.C) / float(self.C + t)
            sigma = self.sigma_initial * (1.0 - float(t) / self.T)
            h = self.neighborhood(win, sigma)
            it = nditer(h, flags=['multi_index'])
            while not it.finished:
                d = abs(win[0] - it.multi_index[0]) + abs(win[1] - it.multi_index[1])
                if d < sigma:
                    self.weights[it.multi_index] += alpha * (x - self.weights[it.multi_index])
                self.weights[it.multi_index] = self.weights[it.multi_index] / linalg.norm(self.weights[it.multi_index])
                it.iternext()
        if self.diffusion == "t":
            # Gaussian neighborhood scaled by a hyperbolic learning rate;
            # the per-node factor h drives the update, still cut off at the
            # Manhattan radius sigma.
            alpha = float(self.C) / float(self.C + t)
            sigma = self.sigma_initial / (1 + float(t) / self.T)
            h = self.neighborhood(win, sigma) * alpha
            it = nditer(h, flags=['multi_index'])
            while not it.finished:
                d = abs(win[0] - it.multi_index[0]) + abs(win[1] - it.multi_index[1])
                if d < sigma:
                    self.weights[it.multi_index] += h[it.multi_index] * (x - self.weights[it.multi_index])
                self.weights[it.multi_index] = self.weights[it.multi_index] / linalg.norm(self.weights[it.multi_index])
                it.iternext()
        if self.diffusion == "m":
            # Gaussian neighborhood with no radius cutoff: every node is
            # moved proportionally to its Gaussian factor.
            alpha = self.taux_apprentissage_initial / (1 + float(t) / self.T)
            sigma = self.sigma_initial / (1 + float(t) / self.T)
            h = self.neighborhood(win, sigma) * alpha
            it = nditer(h, flags=['multi_index'])
            while not it.finished:
                self.weights[it.multi_index] += h[it.multi_index] * (x - self.weights[it.multi_index])
                self.weights[it.multi_index] = self.weights[it.multi_index] / linalg.norm(self.weights[it.multi_index])
                it.iternext()

    def distance_map(self):
        """Return the normalised U-matrix: for each neuron, the summed
        distance of its weight vector to those of its (up to 8) grid
        neighbours, scaled so that the maximum is 1."""
        dm = zeros((self.weights.shape[0], self.weights.shape[1]))
        it = nditer(dm, flags=['multi_index'])
        while not it.finished:
            for ii in range(it.multi_index[0] - 1, it.multi_index[0] + 2):
                for jj in range(it.multi_index[1] - 1, it.multi_index[1] + 2):
                    if 0 <= ii < self.weights.shape[0] and 0 <= jj < self.weights.shape[1]:
                        dm[it.multi_index] += linalg.norm(self.weights[ii, jj, :] - self.weights[it.multi_index])
            it.iternext()
        return dm / dm.max()

    def knuth_shuffle(self, vector):
        """In-place Fisher-Yates shuffle of `vector`."""
        for i in range(len(vector) - 1, 0, -1):
            j = randrange(i + 1)
            vector[i], vector[j] = vector[j], vector[i]

    def input_random(self, data, num_iteration):
        """Train the SOM, presenting the data vectors in random order.

        Bug fixes vs. the original: the permutation only covered
        len(data)-1 indices (the last vector was never presented) and the
        shuffled order was computed but never used (data was still read
        with the loop counter `rand_i` instead of shuffle_order[rand_i]).
        """
        self._init_T(num_iteration)
        shuffle_order = arange(len(data))
        for iteration in range(num_iteration):
            print("iteration : %d" % iteration)
            self.knuth_shuffle(shuffle_order)
            for rand_i in shuffle_order:
                self.update(data[rand_i], self.findbestmatchingnode(data[rand_i]), iteration + 1)

    def input_sequence(self, data, num_iteration):
        """Train the SOM, presenting the data vectors in sequence.  The
        original used range(len(data)-1), skipping the last vector."""
        self._init_T(num_iteration)
        for iteration in range(num_iteration):
            for idx in range(len(data)):
                self.update(data[idx], self.findbestmatchingnode(data[idx]), iteration + 1)
	   




if __name__ == '__main__':
    # Smoke demo: train a 5x5 map for one epoch on 100 random 3-D vectors.
    data = random.rand(100, 3)
    data = apply_along_axis(lambda x: x / linalg.norm(x), 1, data)  # normalise the data
    # Fixed vs. the original: the class defined in this file is `Som`
    # (`MiniSom` was a NameError) and its training method is
    # `input_random` (`train_random` does not exist).
    som = Som(5, 5, 3)
    som.input_random(data, 1)

	


