from playdoh03 import *
from base import *

import numpy
from numpy import inf,  zeros, tile, nonzero, maximum,minimum, amin,ceil,floor,argsort,array,argmax,sort,mod,cumsum,sum,arange,linspace,ones
from numpy.random import rand,randint,randn

class GA(Optimization):
    """Genetic-algorithm optimizer (fitness is MAXIMIZED) with optional
    island-model migration between worker machines.

    The population is stored column-wise: ``self.X`` has shape (D, N) with
    one individual per column, where D = number of parameters and
    N = population size.  Each generation is built from three groups laid
    out in ``Xnew``: crossover children (first ``nbr_xover`` columns),
    mutants (next ``nbr_mutation`` columns) and the copied elite (last
    ``nbr_elite`` columns).
    """

    @staticmethod
    def default_values(**opt_info):
        """Fill ``opt_info`` with default GA settings for every key the
        caller did not supply and return the completed dict.

        Notes (review):
          * ``scale_mutation``/``shrinkMutation`` used to be set only when
            ``func_mutation`` was absent, so explicitly passing
            ``func_mutation='gaussian'`` crashed later on the missing
            ``scale_mutation`` key -- they are now always defaulted.
          * a user-supplied ``ratio_xover`` used to be overwritten
            unconditionally -- it is now respected.
        """
        opt_info.setdefault('proportion_elite', 0.05)
        opt_info.setdefault('proportion_xover', 0.6)
        opt_info.setdefault('proportion_mutation', 0.3)

        opt_info.setdefault('func_scale', 'ranking')
        # NB: 'stoch_unfiform' (sic) is the historical spelling checked in
        # select_parents(); kept misspelled for backward compatibility.
        opt_info.setdefault('func_selection', 'stoch_unfiform')
        opt_info.setdefault('func_xover', 'intermediate')
        opt_info.setdefault('func_mutation', 'gaussian')
        opt_info.setdefault('scale_mutation', 1)
        opt_info.setdefault('shrinkMutation', 0.5)

        # generations between two migrations in island mode
        opt_info.setdefault('migration_time_interval', 10)
        # fraction of the population exchanged between islands; should be
        # smaller than 1 / number_of_workers
        opt_info.setdefault('proportion_migration', 0.1)
        opt_info.setdefault('proportion_parents', 1)

        # default mixing ratio for the crossover variants that need one
        if opt_info['func_xover'] == 'linear_combination':
            opt_info.setdefault('ratio_xover', 0.5)
        if opt_info['func_xover'] == 'heuristic':
            opt_info.setdefault('ratio_xover', 1.2)

        # search-space box; scalars are broadcast to vectors in initialize()
        opt_info.setdefault('Xmin', -inf)
        opt_info.setdefault('Xmax', inf)

        return opt_info

    def initialize(self):
        """Initialize the optimization algorithm.

        Reads ``self.opt_info`` (algorithm-specific parameters),
        ``self.params``, ``self.particles`` and ``self.all_machines``;
        sets up the group sizes, the bound matrices ``self.Xmin``/``Xmax``
        (shape (D, N)) and, for gaussian mutation, the per-dimension
        mutation scale ``self.sigmaMutation``.
        """
        # Island-model migration is active only when other machines exist.
        if self.all_machines is not None:
            self.island_flag = 1
            self.time_from_last_migration = 1
        else:
            self.island_flag = 0

        self.D = len(self.params)   # problem dimensionality
        self.N = self.particles     # population size
        self.nbr_elite = int(ceil(self.opt_info['proportion_elite'] * self.N))
        self.nbr_xover = int(floor(self.opt_info['proportion_xover'] * self.N))
        self.nbr_mutation = self.N - self.nbr_elite - self.nbr_xover
        self.nbr_offspring = self.nbr_xover + self.nbr_mutation
        self.nbr_migrants = int(ceil(self.N * self.opt_info['proportion_migration']))
        # strict generational scheme: mating pool as large as the population
        self.nbr_parents = self.N
        self.migration_time_interval = self.opt_info['migration_time_interval']

        Xmin = self.opt_info['Xmin']
        Xmax = self.opt_info['Xmax']
        # The defaults are the scalars -inf/+inf, but the reshape/tile below
        # needs length-D vectors: coerce None and scalars to vectors.
        # (Previously a scalar default crashed on .reshape.)
        if Xmin is None:
            Xmin = -inf * ones(self.D)
        if Xmax is None:
            Xmax = inf * ones(self.D)
        Xmin = numpy.asarray(Xmin, dtype=float)
        Xmax = numpy.asarray(Xmax, dtype=float)
        if Xmin.ndim == 0:
            Xmin = Xmin * ones(self.D)
        if Xmax.ndim == 0:
            Xmax = Xmax * ones(self.D)
        self.Xmin = tile(Xmin.reshape((-1, 1)), (1, self.N))
        self.Xmax = tile(Xmax.reshape((-1, 1)), (1, self.N))

        if self.opt_info['func_mutation'] == 'gaussian':
            if (Xmin != -inf * ones(self.D)).any() and (Xmax != inf * ones(self.D)).any():
                # finite box: mutation scale proportional to its width
                self.sigmaMutation = self.opt_info['scale_mutation'] * (Xmax - Xmin)
            else:
                # unbounded box: derive a scale from the initial population
                # spread. NOTE(review): this evaluates to 1000*(max + min);
                # 1000*(max - min) may have been intended -- kept as-is.
                self.sigmaMutation = self.opt_info['scale_mutation'] * (1000 * self.X.max(axis=1) - (-1000 * self.X.min(axis=1)))

        # per-iteration best fitness trajectory of this island
        self.results['best_fits'] = list()

    def migrate(self, best_population):
        """Called remotely by another island: replace this island's worst
        ``nbr_migrants`` individuals (last columns, population is sorted
        best-to-worst) with the sender's best, then signal readiness."""
        self.X[:, -self.nbr_migrants:] = best_population
        self.set_this_ready(True)

    def iterate(self):
        """Perform one generation of the algorithm.

        Available state: ``self.X`` (current population), ``self.fitness``
        (current fitness), ``self.iteration``.  Fills ``self.results`` with
        the local best fitness/position and the fitness history.
        """
        # sort the population from best to worst (fitness is maximized)
        self.indices_Population_sorted = argsort(self.fitness)[::-1]
        self.X = self.X[:, self.indices_Population_sorted]
        self.fitness = self.fitness[self.indices_Population_sorted]

        # migrate if island mode is on and the interval has elapsed:
        # send our best individuals to the next machine in the ring
        if self.island_flag == 1:
            if self.time_from_last_migration == self.migration_time_interval:
                log.debug("contacting...")
                self.contact(self.machines[(mod(self.this_machine_index + 1, len(self.all_machines)))], 'migrate', [self.X[:, :self.nbr_migrants]])
                log.debug("waiting...")
                self.wait()
                self.time_from_last_migration = 0
                print('migration happened')
            else:
                self.time_from_last_migration += 1

        # rescale fitness, then select the mating pool
        self.fitness_scaled = self.scale_fitness()
        self.parents_indices = self.select_parents()

        # remember the elite of the (sorted) current population
        self.fitness_lElite, self.X_lElite, self.X_bestIsland, self.fitness_bestIsland = self.find_elite()

        # reproduction: crossover + mutation + elite copy -> new population
        self.X = self.recombine()

        # clip to the search box
        self.X = maximum(self.X, self.Xmin)
        self.X = minimum(self.X, self.Xmax)

        # evaluate the new population
        self.get_fitness()

        ind_best = argmax(self.fitness)
        self.results['best_fit_local'] = self.fitness[ind_best]
        self.results['best_pos_local'] = self.X[:, ind_best]
        self.results['best_fits'].append(self.fitness[ind_best])

    def scale_fitness(self):
        """Return scaled fitness values for the whole (sorted) population.

        'ranking': best individual gets N, worst gets 1, independently of
        the raw fitness spread.
        """
        if self.opt_info['func_scale'] == 'ranking':
            return arange(self.N, 0, -1.)
        # previously fell through to a NameError; fail with a clear message
        raise ValueError("unknown func_scale: %r" % (self.opt_info['func_scale'],))

    def select_parents(self):
        """Select ``nbr_parents`` column indices via stochastic universal
        sampling over the scaled fitness, returning an int array.

        Equally spaced pointers (step 1/nbr_parents, random common offset)
        are walked along the cumulative fitness wheel; fitter individuals
        own larger wheel chunks and are picked more often.
        """
        if self.opt_info['func_selection'] == 'stoch_unfiform':
            # wheel has one cumulative chunk per individual
            wheel = cumsum(self.fitness_scaled / sum(self.fitness_scaled))
            parents_indices = zeros((self.nbr_parents), 'int')
            stepSize = 1. / self.nbr_parents
            position = rand(1) * stepSize   # random offset of the first pointer
            # start the scan at chunk 0 (was 1, which made the best-ranked
            # individual unselectable as a parent)
            lowest = 0

            for iparent in range((self.nbr_parents)):
                for ipos in arange(lowest, wheel.shape[0]):
                    if (position < wheel[ipos]):  # pointer falls in chunk ipos
                        parents_indices[iparent] = ipos
                        lowest = ipos  # wheel is monotone: resume from here
                        break
                position = position + stepSize

            return parents_indices
        raise ValueError("unknown func_selection: %r" % (self.opt_info['func_selection'],))

    def find_elite(self):
        """Return (elite fitness, elite columns, best individual, best
        fitness) of the current population, which is sorted best-first."""
        fitness_lElite = self.fitness[:self.nbr_elite]
        X_lElite = self.X[:, :self.nbr_elite]
        fitness_bestIsland = self.fitness[0]
        X_bestIsland = self.X[:, 0]
        return fitness_lElite, X_lElite, X_bestIsland, fitness_bestIsland

    def recombine(self):
        """Build and return the next generation (D, N): crossover children,
        then mutants, then the copied elite."""
        Xnew = zeros((self.D, self.N))
        Xnew = self.do_crossover(Xnew)
        Xnew = self.do_mutation(Xnew)
        Xnew = self.include_elite(Xnew)
        return Xnew

    def _pick_parents(self):
        """Draw two distinct parent column indices from the mating pool."""
        parent1_ind = self.parents_indices[randint(self.nbr_parents)]
        parent2_ind = parent1_ind
        while parent1_ind == parent2_ind:
            parent2_ind = self.parents_indices[randint(self.nbr_parents)]
        return parent1_ind, parent2_ind

    def do_crossover(self, Xnew):
        """Fill the first ``nbr_xover`` columns of ``Xnew`` with children
        produced by the configured crossover operator, and return it."""
        func = self.opt_info['func_xover']
        ratio = self.opt_info.get('ratio_xover')

        for ixover in range(self.nbr_xover):
            p1, p2 = self._pick_parents()

            if func == 'discrete_random':
                # each gene independently from one parent or the other
                vec = randint(0, 2, self.D)
                Xnew[nonzero(vec), ixover] = self.X[nonzero(vec), p1]
                vec = abs(vec - 1)
                Xnew[nonzero(vec), ixover] = self.X[nonzero(vec), p2]

            elif func == 'one_point':
                # genes [:split] from parent1, the rest from parent2
                split = randint(self.D)
                Xnew[:split, ixover] = self.X[:split, p1]
                Xnew[split:, ixover] = self.X[split:, p2]

            elif func == 'two_points':
                # outer segments from parent1, middle segment from parent2
                split_a = randint(self.D)
                split_b = split_a
                while split_b == split_a:
                    split_b = randint(self.D)
                lo, hi = min(split_a, split_b), max(split_a, split_b)
                Xnew[:lo, ixover] = self.X[:lo, p1]
                Xnew[lo:hi, ixover] = self.X[lo:hi, p2]
                Xnew[hi:, ixover] = self.X[hi:, p1]

            elif func == 'heuristic':
                # move from the worse parent toward (and past) the better one
                if self.fitness[p1] >= self.fitness[p2]:
                    Xnew[:, ixover] = self.X[:, p2] + ratio * (self.X[:, p1] - self.X[:, p2])
                else:
                    Xnew[:, ixover] = self.X[:, p1] + ratio * (self.X[:, p2] - self.X[:, p1])

            elif func == 'intermediate':
                # per-gene blend, slightly outside the parents' segment
                Xnew[:, ixover] = self.X[:, p1] + (-0.25 + 1.25 * rand(self.D)) * (self.X[:, p2] - self.X[:, p1])

            elif func == 'linear_combination':
                Xnew[:, ixover] = self.X[:, p1] + ratio * (self.X[:, p2] - self.X[:, p1])

            else:
                # previously an unknown operator silently left zero children
                raise ValueError("unknown func_xover: %r" % (func,))

        return Xnew

    def do_mutation(self, Xnew):
        """Fill columns ``nbr_xover .. nbr_xover+nbr_mutation`` of ``Xnew``
        with mutated copies of random parents, and return it."""
        func = self.opt_info['func_mutation']

        if func == 'gaussian':
            for imut in range(self.nbr_mutation):
                parent = self.parents_indices[randint(self.nbr_parents)]
                Xnew[:, self.nbr_xover + imut] = self.X[:, parent] + self.sigmaMutation * randn(self.D)
            # anneal the mutation scale over the run; float() avoids the
            # Python 2 integer division that kept sigma constant until the
            # very last iteration
            self.sigmaMutation = self.sigmaMutation * (1 - self.opt_info['shrinkMutation'] * float(self.iteration) / self.iterations)

        if func == 'uniform':
            for imut in range(self.nbr_mutation):
                parent = self.parents_indices[randint(self.nbr_parents)]
                Xnew[:, self.nbr_xover + imut] = self.X[:, parent] + (self.Xmax[:, 0] - self.Xmin[:, 0]) * rand(self.D)

        return Xnew

    def include_elite(self, Xnew):
        """Copy the current elite unchanged into the last columns of the
        next generation, and return it."""
        Xnew[:, self.nbr_xover + self.nbr_mutation:] = self.X_lElite
        return Xnew

    @staticmethod
    def finalize(results_machines):
        """Merge the per-machine results into the final result dict with
        keys 'best_fit', 'best_pos' and 'best_fits' (fitness history)."""
        results_final = dict()
        if len(results_machines) == 1:
            results_final['best_fit'] = results_machines[0]['best_fit_local']
            results_final['best_pos'] = results_machines[0]['best_pos_local']
            results_final['best_fits'] = array(results_machines[0]['best_fits'])
        else:
            # gather the per-island histories and local optima
            best_fits_matrix = zeros((len(results_machines), len(results_machines[0]['best_fits'])))
            best_pos_matrix = zeros((results_machines[0]['best_pos_local'].shape[0], len(results_machines)))
            best_fit_vector = zeros((len(results_machines)))
            for machine_idx in range(len(results_machines)):
                best_fits_matrix[machine_idx, :] = array(results_machines[machine_idx]['best_fits'])
                best_pos_matrix[:, machine_idx] = results_machines[machine_idx]['best_pos_local']
                best_fit_vector[machine_idx] = results_machines[machine_idx]['best_fit_local']

            indice_best = argmax(best_fit_vector)
            results_final['best_fit'] = best_fit_vector[indice_best]
            results_final['best_pos'] = best_pos_matrix[:, indice_best]
            # NOTE(review): amin (worst island per iteration) looks
            # inconsistent with the argmax above for a maximized fitness;
            # possibly amax was intended -- kept as-is. TODO confirm.
            results_final['best_fits'] = amin(best_fits_matrix, axis=0)

        return results_final

        
        
