"""This module contains the implementation of the main classes of the B Smart project

@author: Alessandro Stranieri, Nicolas Tagliani
"""
from ghmm import *
from math import log
from random import uniform
from copy import *

class BSmart:
    """This class implements all the functionalities required to work with HMMs.

    Models are stored in ``self.models`` as two-element lists
    ``[hmm, probability]``, where ``probability`` is the model's a-priori
    probability (or, while learning, its raw frequency).
    """

    # Initial frequency assigned to every model by initApriori (step 3).
    startFrequence = 1

    # Number of sequences processed so far by the a-priori learning methods.
    __learnCount = 0

    def __init__(self, alphabet):
        """Constructor.

        @param alphabet: The alphabet to be used in the project as a List
        """
        self.alphabet = Alphabet(alphabet)
        self.models = []
    
    def saveModels(self):
        """Save every model and its a-priori probability to the "models" directory.

        Each model is serialized to ``./models/model<idx>.xml``, and the
        index file ``./models/probabilities.txt`` maps each file name to
        the model's probability.
        """
        # try/finally guarantees the index file is closed even if writing
        # one of the models raises (the original leaked the handle), and
        # open() replaces the removed-in-Python-3 'file' builtin.
        probabilities = open("./models/probabilities.txt", 'w')
        try:
            for model_idx in range(len(self.models)):
                name = "model" + str(model_idx) + ".xml"
                self.models[model_idx][0].write("./models/" + name)
                probabilities.write('%s %s\n' % (name, str(self.models[model_idx][1])))
        finally:
            probabilities.close()
        
        
    def createModel(self, transition_matrix, emission_matrix, start_probability):
        """Create an HMM from its defining matrices.

        @param transition_matrix: The transition matrix as a List of Lists
        @param emission_matrix: The emission matrix for each state as a List of Lists
        @param start_probability: The start probability for each state as a List
        @raise ValueError: (module-local class) if the matrix dimensions disagree
        @raise Exception: if no alphabet has been set
        @return: the newly built HMM
        """
        if self.alphabet is None:
            raise Exception("You must provide an alphabet before generating a model")
        # BUG FIX: the original used the bitwise '&' between the equality
        # tests; operator precedence made it behave like a chained
        # comparison by accident, but the logical 'and' states the intent.
        if (len(transition_matrix) == len(emission_matrix) and
                len(emission_matrix) == len(start_probability)):
            return HMMFromMatrices(self.alphabet, DiscreteDistribution(self.alphabet),
                                   transition_matrix, emission_matrix, start_probability)
        raise ValueError()
        
    def addModel(self, model):
        """Append a model to the pool with a placeholder probability of -1.

        @param model: the model to add
        """
        entry = [model, -1]
        self.models.append(entry)
        
    def addModelProbability(self, model, probability):
        """Associate an a-priori probability with one of the stored models.

        WARNING: Be careful that the sum of all probability must be 1.0

        @param model: The chosen model as its integer index
        @param probability: The probability to associate to the chosen model
        """
        entry = self.models[model]
        entry[1] = probability
         
    def getModel(self, index):
        """Return the [model, probability] entry stored at the given position.

        @param index: The index of the model to return
        @return: The chosen entry
        """
        selected = self.models[index]
        return selected
    
    def setAlphabet(self, alphabet):
        """Replace the alphabet used by the project.

        @param alphabet: The alphabet to set as a List
        """
        # BUG FIX: the original assigned to 'self.alphabeth' (typo), so the
        # new alphabet was stored under a dead attribute and never used.
        self.alphabet = Alphabet(alphabet)


    def fancyProduce(self, produced):
        """Translate a model's raw output into its external (readable) symbols.

        @param produced: The previously produced output of a model.
        @return: A list with the corresponding external symbols
        """
        return [self.alphabet.external(symbol) for symbol in produced]
        
    def produce(self, lenght, modelIndex = -1):
        """Generate a sequence from one of the stored models.

        @param lenght: The length of the sequence to be generated
        @param modelIndex: index of the generating model; with the default
                           of -1 a model is drawn at random according to
                           the a-priori probabilities
        @return: The sequence generated by the chosen model
        """
        if modelIndex == -1:
            drawn = uniform(0, 1)
            modelIndex = self.__compute(self.models, drawn)
        return self.models[modelIndex][0].sampleSingle(lenght)
    
    def produceVerbose(self, length):
        """Generate a sequence and also report which model produced it.

        @param length: The length of the sequence to be generated
        @return: (list of external symbols, index of the chosen model)
        """
        chosen = self.__compute(self.models, uniform(0, 1))
        raw = self.models[chosen][0].sampleSingle(length)
        return map(self.alphabet.external, raw), chosen
    
    
    
    def mostLikelyModel(self, sequence):
        """Given the sequence chooses which model most likely generated the sequence
        
        @param sequence: the sequence to evaluate
        
        @return: Returns the id of the most likely model 
        """
        if (isinstance(sequence, list)):
            sequence = EmissionSequence(self.alphabet, sequence)
        max = None
        modelId = None
        for i in range(len(self.models)):
            try:
                if (max == None):
                    modelId = i
                    max = self.models[i][0].loglikelihood(sequence)    
                else:
                    newmax = self.models[i][0].loglikelihood(sequence)
                    if (newmax > max):
                        max = newmax
                        modelId = i
            except SequenceCannotBeBuild:
                print "La sequenza non puo' essere prodotta dal modello", i
                pass
        return modelId
    
    def mostLikelyModelStep2(self, sequence):
        """Return the index of the model most likely to have generated
        *sequence*, weighting every model by its a-priori probability.

        @param sequence: the sequence to evaluate (list or EmissionSequence)
        @return: the id of the most likely model
        """
        if isinstance(sequence, list):
            sequence = EmissionSequence(self.alphabet, sequence)
        # Score each model by log(prior) + log-likelihood of the sequence.
        scores = []
        for entry in self.models:
            scores.append(log(entry[1]) + entry[0].loglikelihood(sequence))
        return scores.index(max(scores))
    
    
    def initApriori(self, startFrequence):
        """Reset the learning state and give every model the same start frequency.

        @param startFrequence: The start frequence to set to each model
        """
        self.__learnCount = 0
        self.startFrequence = startFrequence
        for entry in self.models:
            entry[1] = startFrequence
    
    
    def blindLearnApriori(self, sequence):
        """Update the model frequencies from a sequence, ignoring the priors.

        The model with the highest plain log-likelihood for *sequence* gets
        its frequency incremented. Unlike mostLikelyModel, impossible
        sequences are not caught here (as in the original).

        @param sequence: the sequence to evaluate (list or EmissionSequence)
        """
        if isinstance(sequence, list):
            sequence = EmissionSequence(self.alphabet, sequence)
        # 'best' replaces the original local 'max', which shadowed the builtin.
        best = None
        modelId = 0
        for i in range(len(self.models)):
            likelihood = self.models[i][0].loglikelihood(sequence)
            if best is None or likelihood > best:
                best = likelihood
                modelId = i
        self.models[modelId][1] = self.models[modelId][1] + 1
        self.__learnCount = self.__learnCount + 1
        
    
    def learnApriori(self, sequence):
        """Update the model frequencies using the current a-priori estimates.

        Every model is scored by log(prior) + log-likelihood; the winner's
        frequency is incremented.

        @param sequence: the sequence used to update the priori probability
        """
        if isinstance(sequence, list):
            sequence = EmissionSequence(self.alphabet, sequence)
        # Denominator of the prior: start frequency for every model plus the
        # number of sequences seen so far.
        denominator = (len(self.models) * self.startFrequence) + self.__learnCount
        scores = []
        for entry in self.models:
            prior = float(entry[1]) / denominator
            scores.append(log(prior) + entry[0].loglikelihood(sequence))
        winner = scores.index(max(scores))
        self.models[winner][1] = self.models[winner][1] + 1
        self.__learnCount = self.__learnCount + 1
    
    def finalizeApriori(self):
        """Convert each model's accumulated frequency into a probability."""
        denominator = self.__learnCount + (len(self.models) * self.startFrequence)
        for entry in self.models:
            entry[1] = float(entry[1]) / denominator
            

    def __compute(self, density, rnd):
        """Pick a model index by inverse-transform sampling.

        @param density: list of [model, probability] pairs
        @param rnd: A random value from 0 to 1.0
        @return: The index of the chosen model
        """
        # 'accumulated' replaces the original 'sum', which shadowed the builtin.
        accumulated = 0.0
        now = 0
        while density[now][1] + accumulated < rnd:
            accumulated += density[now][1]
            now = now + 1
        return now

    def createInitialModelPool(self, observations):
        """Create one maximally specific HMM per observation sequence.

        BUG FIX: in the original this method was accidentally indented
        inside __compute (after its return statement), so it was never
        defined and every call to self.createInitialModelPool raised
        AttributeError. It is now a proper class-level method.

        For each sequence a left-to-right chain model is created such that
        it represents the sequence, i.e. P(X|M) is maximal.

        @param observations: A list of observations, from the current alphabet.
        @return: list of [model, a-priori probability] pairs
        """
        # Uniform initial prior over the pool.
        _aPriori = 1.0 / len(observations)

        tmp_models = []
        for index in range(len(observations)):
            curr_o = observations[index]  # current observation
            num_states = len(curr_o)      # one state for each symbol
            # Start distribution: the chain always begins in state 0.
            pi = [1.0] + [0] * (num_states - 1)
            # Transition matrix: state i moves to i+1 with probability 1,
            # the last state has no outgoing transition.
            temp_A = []
            for s in range(num_states):
                temp_list = [0] * num_states
                if s != num_states - 1:
                    temp_list[s + 1] = 1.0
                temp_A.append(temp_list)
            # Emission matrix: state i emits exactly the i-th symbol of the
            # observation, everything else has probability 0.
            temp_B = []
            for i in range(num_states):
                temp_list = [0] * len(self.alphabet)
                if self.alphabet.isAdmissable(curr_o[i]):
                    temp_list[self.alphabet.internal(curr_o[i])] = 1.0
                    temp_B.append(temp_list)
                else:
                    raise Exception("current symbol not in the alphabet")
            # Add the model and the start probability to the pool.
            m = self.createModel(temp_A, temp_B, pi)
            tmp_models.append([m, _aPriori])
        return tmp_models
                
       
    
    def MergeModels(self, model1, model2, state1, state2):
        """This method produces a new model by merging two models in the specified states.
        
        The merged state receives the combined (renormalized) start,
        transition and emission probabilities of the two original states.
        
        @param model1: The first model, as an HMM instance
        @param model2: The second model, as an HMM instance
        @param state1: The state of model1 to merge, as an index
        @param state2: The state of model2 to merge, as an index
        @return: The model resulting from the merging
        """
        
        assert(isinstance(model1, HMM))
        assert(isinstance(model2, HMM))
        assert(state1 < model1.N)
        assert(state2 < model2.N)
        
        """Number of states in the new model"""
        num_states = model1.N + model2.N - 1
        
        """New start probability matrix"""
        new_pi = []
        # Start probabilities of model1's states, except the merged one...
        for state_idx in range(model1.N):
            if (state_idx != state1):
                new_pi.append(model1.getInitial(state_idx))
                
        # ...then the merged state, as the sum of both originals...
        new_pi.append(model1.getInitial(state1) + model2.getInitial(state2))
            
        # ...then model2's remaining states.
        for state_idx in range(model2.N):
            if (state_idx != state2):
                new_pi.append(model2.getInitial(state_idx))
        
        new_pi=self.__normalizeList(new_pi)
        
        assert(len(new_pi) == num_states)
        
        """Create transition matrix"""
        new_A = []
        #extract first sub-matrix, with state1's row/column moved to the end
        model1_A = self.__extractTransitionMatrix(model1, state1, 'right') 
        #pad the sub-matrix with zero columns on the right
        self.__padMatrix(model1_A, 'right', num_states)
        
        #extract second sub-matrix, with state2's row/column moved to the front
        model2_A = self.__extractTransitionMatrix(model2, state2, 'left')
        #pad the sub-matrix with zero columns on the left
        self.__padMatrix(model2_A, 'left', num_states)
        
        for state_idx in range(num_states):
            if (state_idx < model1.N - 1):
                new_A.append(model1_A[state_idx])
            elif (state_idx == model1.N - 1):
                #merged state: combine the last row of the first matrix with
                #the first row of the second matrix
                new_A.append(self.__mergeDensities(model1_A[state_idx], model2_A[0]))
            else:
                #remaining rows come from the second matrix, shifted by one
                #because its first row was consumed by the merge above
                new_A.append(model2_A[state_idx - len(model1_A)+1])
                
        
        """Create emission matrix
        First rows are from first old matrix; 
        one row of weighted emission probabilities; 
        last rows from second old matrix.
        """
        #first model density, unchanged
        new_B = []
        for state_idx in range(model1.N):
            if(state_idx != state1):
                new_B.append(model1.getEmission(state_idx))
        #new state density: merge of both original emission distributions
        new_B.append(self.__mergeDensities(model1.getEmission(state1), model2.getEmission(state2)))
        #second model density, unchanged 
        for state_idx in range(model2.N):
            if(state_idx != state2):
                new_B.append(model2.getEmission(state_idx))
        
        model = self.createModel(new_A, new_B, new_pi)
                
        return model
    
    def MergeStates(self, model, state1, state2):
        """This method merges two states of the same model.
        The new single element, result of the merging, is positioned in the
        matrices as the first element.
        
        @param model: the model to reduce, as an HMM instance
        @param state1: the first state to merge, as an index
        @param state2: the second state to merge, as an index
        @return: The reduced model
        """

        assert(isinstance(model, HMM))
        assert(state1 != state2)
        assert(state1 < model.N)
        assert(state2 < model.N)
        
        """Number of states in the new model, one less than before"""
        num_states = model.N - 1
        
        """New start probability matrix"""
        new_pi = [] #sum density of the two states, then normalize everything
        for state_idx in range(model.N):
            new_pi.append(model.getInitial(state_idx))
        # Pop the higher index first so the lower index is still valid.
        newstate_pi = new_pi.pop(max(state1, state2)) + new_pi.pop(min(state1, state2))
        new_pi.insert(0, newstate_pi)
        new_pi=self.__normalizeList(new_pi)
        
        """Create transition matrix"""
        # 'inner' brings the rows/columns of state1 and state2 to the front.
        new_A = self.__extractTransitionMatrix(model, state1, 'inner', state2)
        #sum elements with same index of the first two rows (outgoing
        #transitions of the merged state), and prepend the combined row
        state1_list = new_A.pop(0)
        state2_list = new_A.pop(0)
        newstate_list = []
        for state_idx in range(len(state1_list)):
            newstate_list.append(state1_list[state_idx] + state2_list[state_idx])
        new_A.insert(0, newstate_list)
        #now sum the first two elements of every row (incoming transitions
        #into the merged state)
        for row in new_A:
            val1 = row.pop(0)
            val2 = row.pop(0)
            row.insert(0, val1 + val2)
        
        #necessary normalization
        new_A = self.__normalizeMatrix(new_A)
        
        """Create emission matrix"""
        new_B = []
        #copy not involved emission distribution
        for state_idx in range(model.N):
            if(state_idx != state1 and state_idx != state2):
                new_B.append(model.getEmission(state_idx))
        #insert at the beginning the merged emission of the two states
        mergedEmissionDensity = self.__mergeDensities(model.getEmission(state1), model.getEmission(state2))
        new_B.insert(0, mergedEmissionDensity)
        
        model = self.createModel(new_A, new_B, new_pi)
        
        return model
    
    def __mergeDensities(self, density1, density2):
        """Combine two discrete distributions of equal length.

        The element-wise sums are normalized so the result is a proper
        distribution again.

        @param density1: First density (list of probabilities)
        @param density2: Second density (list of probabilities)
        @return: A new, normalized distribution
        """
        # The two densities must cover the same number of symbols.
        assert(len(density1) == len(density2))

        combined = [density1[idx] + density2[idx] for idx in range(len(density1))]
        return self.__normalizeList(combined)
    
        
        
    def __extractTransitionMatrix(self, model, state, side, statex = None):
        """Copy a model's transition matrix, relocating the given state(s).

        Depending on *side*, the row/column of *state* is moved to the end
        ('right'), to the front ('left'), or the rows/columns of *state*
        and *statex* are both moved to the front ('inner').

        @param model: The model to extract the transition matrix from
        @param state: The state, that is row and column, to relocate
        @param side: 'right', 'left' or 'inner' — how to rearrange the matrix
        @param statex: second state, required when side is 'inner'
        @return: The rearranged transition matrix, as a list of lists
        """
        assert(isinstance(model, HMM))

        # Copy the full transition matrix into a list of lists.
        subA = []
        for row in range(model.N):
            current = []
            for col in range(model.N):
                current.append(model.getTransition(row, col))
            subA.append(current)

        if side == 'right':
            # Move the state's column to the last position, then its row.
            for row in subA:
                row.append(row.pop(state))
            subA.append(subA.pop(state))

        elif side == 'left':
            # Move the state's row to the front, then its column.
            subA.insert(0, subA.pop(state))
            for row in subA:
                row.insert(0, row.pop(state))

        elif side == 'inner':
            assert(statex != None)
            # Bring both states' columns to the front, lower index first.
            for row in subA:
                high = row.pop(max(state, statex))
                low = row.pop(min(state, statex))
                row.insert(0, low)
                row.insert(1, high)
            # Then bring both rows to the top, in the same order.
            high = subA.pop(max(state, statex))
            low = subA.pop(min(state, statex))
            subA.insert(0, low)
            subA.insert(1, high)

        return subA
     
        
    def __padMatrix(self, matrix, side, totNumStates):
        """Pad each row of *matrix* with zeros up to totNumStates columns.

        @param matrix: The matrix to pad in place, as a list of lists
        @param side: 'right' appends the zeros, 'left' prepends them
        @param totNumStates: the desired number of columns
        """
        if side == 'right':
            for row in matrix:
                while len(row) < totNumStates:
                    row.append(0.0)
        elif side == 'left':
            for row in matrix:
                while len(row) < totNumStates:
                    row.insert(0, 0.0)
            
    def __normalizeMatrix(self, matrix):
        """Normalize every row of the given matrix in place.

        @param matrix: The matrix to normalize, as a list of lists
        @return: the same matrix, with each row normalized
        """
        for idx, row in enumerate(matrix):
            matrix[idx] = self.__normalizeList(row)
        return matrix
   
    def __normalizeList(self, values):
        """Scale the elements of a list in place so that they sum to 1.

        A zero-sum list is returned unchanged, since normalizing it would
        divide by zero.

        @param values: the list to normalize (renamed from 'list', which
                       shadowed the builtin; all call sites are positional)
        @return: the same list, normalized
        """
        norm = sum(values)
        if norm != 0:
            for idx in range(len(values)):
                values[idx] = float(values[idx]) / norm
        return values


    def learn(self, observations, minModels = 6, epsilon=5, tolerance=4, forceFirstClicle=True):
        """This method generates a pool of models from the observations and merges them
        until minModels is reached or the error grows beyond epsilon.
        
        @param observations: list of lists containing all the observations
        @param minModels: lower bound for the number of models to generate
        @param epsilon: the maximum error tolerable
        @param tolerance: the maximum number of increments in the error
        @param forceFirstClicle: if True, the first excessive error is
            forgiven once (sic: name kept for backward compatibility)
        
        @return: a list containing all the models generated and merged
        """
        
        assert (self.alphabet is not None)
        #sort the list to work only with longer observations
        observations.sort(self.__cmp)
        #models = self.createInitialModelPool(observations[0:workingModels])
        models = self.createInitialModelPool(observations)
        total_states = 0
        for m in models:
            total_states += m[0].N
            
        # NOTE(review): cur_tolerance is never read again; the loop below
        # decrements the 'tolerance' parameter directly.
        cur_tolerance = tolerance
        observations = self.__ListToEmission(observations)
        last_error = float('inf')
        work = True
        first = True  # NOTE(review): never read afterwards
        while(work):
            errors = []
            # Try every pair of global state indexes and record the error of
            # the pool that would result from merging them.
            for state1 in range(total_states-1):
                for state2 in range (state1+1, total_states):
                    cur_models = copy(models)
                    ret1 = self.__getModelFromState(cur_models, state1)
                    ret2 = self.__getModelFromState(cur_models, state2)
                    #check if we are trying to merge 2 states from the same model or not
                    if (ret1[0] != ret2[0] and len(cur_models) > minModels ):
                        model = self.MergeModels(cur_models[ret1[0]][0], cur_models[ret2[0]][0], ret1[1], ret2[1])
                        #now remove the original models and evaluate the merge
                        apriori_m1 = cur_models.pop(max(ret1[0], ret2[0]))[1]
                        apriori_m2 = cur_models.pop(min(ret1[0], ret2[0]))[1]
                        cur_models.append([model, apriori_m1+apriori_m2])
                    #we are merging 2 states from the same model
                    elif (ret1[0] == ret2[0] ):
                        model = self.MergeStates(cur_models[ret1[0]][0], ret1[1], ret2[1])
                        #now remove the original models and evaluate the merge
                        apriori = cur_models.pop(max(ret1[0], ret2[0]))[1]
                        cur_models.append([model, apriori])
                        
                    #evaluation phase: record [error, state1, state2]
                    a=[]
                    a.append(self.__evaluate(cur_models, observations ))
                    a.append(state1)
                    a.append(state2)
                    #in errors we have all the errors found in the possible merge
                    errors.append(a)
            #now choose the best merge (min compares by error first), apply it and repeat
            ret = min(errors)
            #let's see if we should apply the best merge
            if (ret[0]== last_error):
                work = True
            elif (ret[0] > last_error or ret[0] > epsilon):
                if (ret[0]> epsilon):
                    # the first excessive error may be forgiven once
                    if (forceFirstClicle == True):
                        forceFirstClicle = False
                    else:
                        print "Quitting bacause error is growing or greater then", epsilon
                        print "Last:", last_error, "now:", ret[0]
                        work = False
                else: 
                    if (tolerance > 0):
                        tolerance = tolerance -1
                    else:
                        print "Quitting because we have reached the tolerance limit for error increments"
                        work = False
               
            if(work):
                # Apply the chosen merge to the real pool.
                last_error = ret[0]
                ret1 = self.__getModelFromState(models, ret[1])
                ret2 = self.__getModelFromState(models, ret[2])
                if (ret1[0] != ret2[0]):
                    model = self.MergeModels(models[ret1[0]][0], models[ret2[0]][0], ret1[1], ret2[1])
                    #now remove the original models and evaluate the merge
                    apriori_m1 = models.pop(max(ret1[0], ret2[0]))[1]
                    apriori_m2 = models.pop(min(ret1[0], ret2[0]))[1]
                    models.append([model, apriori_m1+apriori_m2])
                    #we are merging 2 states from the same model
                else:
                    model = self.MergeStates(models[ret1[0]][0], ret1[1], ret2[1])
                    #now remove the original models and evaluate the merge
                    apriori = models.pop(ret1[0])[1]
                    models.append([model, apriori])
                #every merge removes exactly one state from the pool
                total_states = total_states -1
                if (total_states < 2):
                    print "Quitting because reached a two states model"
                    work = False
            
        return models 

    def generalizeFromSequence(self, obsSequence, minModels, errorTolerance):
        """Creates a set of general models, provided a set of sequences.
        This method build a set of specific models from a set of observation sequences
        and start merging them in order to produce a smaller set of more general models.
        
        @param obsSequence : list of observation sequences
        @param minModels : minimum number of models
        @param errorTolerance : biggest error increase tolerated between two merges
        """
        
        #variables needed
        likelihoodDiff = 0.0
        currPerformance = float('inf')
        #list of errors management
        errors = []
        errorindexes = []
        
        #clear previously stored models
        self.__clearModels()
        
        #build first pool from sequences
        self.models = self.createInitialModelPool(obsSequence)
        assert(len(obsSequence) == self.__numModels())
        
        #keep merging until specified requirements are met
        while(self.__numModels() > minModels and likelihoodDiff < errorTolerance ):
            #evaluate current performance
            #currPerformance = self.__evaluate(self.models, obsSequence)
            print "Current performance is: ", currPerformance
            modelParameters = [0]*4
            
            #cycle through all the models
            for firstModel in range(self.__numModels()):
                for secondModel in range(self.__numModels()):
                    modelParameters[0] = firstModel
                    modelParameters[1] = secondModel      
                            
                    numStates1 = self.models[firstModel][0]
                    numStates2 = self.models[secondModel][0]
                    tempModel = None
                    
                    #create a temporal pool of models, without the currently merged ones
                    tempModelPool = []
                    tempModelPool = deepcopy(self.models)
                    if firstModel == secondModel:
                        tempModelPool.pop(firstModel)
                    else:
                        tempModelPool.pop(max(firstModel, secondModel))
                        tempModelPool.pop(min(firstModel, secondModel))
                        
                    
                    #cycle through the internal states permutation
                    for state1 in range(numStates1):
                        for state2 in range(numStates2):
                            modelParameters[2] = state1
                            modelParameters[3] = state2    
                            
                            if firstModel == secondModel and state1 != state2:
                                tempModel = self.MergeStates(firstModel, state1, state2)
                                tempModelPool.append(tempModel)
                            elif firstModel != secondModel:
                                tempModel = self.MergeModels(firstModel, secondModel, state1, state2)
                                tempModelPool.append(tempModel)
                            
                            #evaluate after-merge performance        
                            tempPerformance = self.__evaluate(tempModelPool, obsSequence)
                            
                            #store performance value for comparison, and model parameters
                            errors.append(tempPerformance)
                            errorindexes.append(modelParameters)
                            
                    #retrieve best performance index
                    bestMergeIndex = errors.index(min(errors)) 
                    
                    #perform merge and update model set
                    mergedModel = None
                    mergedModelaPriori = None
                    model1 = errorindexes[bestMergeIndex][0]
                    model2 = errorindexes[1]
                    state1 = errorindexes[2]
                    state2 = errorindexes[3]
                    if model1 == model2 :
                        mergedModel = self.MergeStates(model1, state1, state2)
                        mergedModelaPriori = self.models[model1][1]
                        self.models.pop(model1)
                    else:
                        mergedModel = self.MergeModels(model1, model2, state1, state2)
                        mergedModelaPriori = self.models[model1][1] + self.models[model2][1]
                        models.pop(max(model1, model2))
                        models.pop(min(model1, model2))
                    self.addModel(mergedModel)
                    self.addModelProbability(self.__numModels(), mergedModelaPriori)
        
    def __numModels(self):
        """Return how many models are currently stored by the class."""
        return len(self.models)

    def __clearModels(self):
        """Erase every previously stored model."""
        self.models = []

    def __randomSamples(self, sourceList, n):
        """Take n distinct random samples from sourceList.

        @param sourceList: the source list for taking samples
        @param n: number of samples to take
        @raise ValueError: (module-local class) if n exceeds len(sourceList)
        @return: a list with n random samples from sourceList
        """
        # BUG FIX: with n > len(sourceList) the rejection loop below can
        # never terminate, because fewer than n distinct indexes exist.
        if n > len(sourceList):
            raise ValueError()
        ret = []
        randoms = set()
        for i in range(n):
            # Re-draw until an unused index is found, so the same element
            # is never sampled twice.
            rnd_idx = int(uniform(0, len(sourceList)))
            while rnd_idx in randoms:
                rnd_idx = int(uniform(0, len(sourceList)))
            randoms.add(rnd_idx)
            ret.append(sourceList[rnd_idx])
        return ret
    
    
    def __cmp(self, l1, l2):
        """Comparator that orders observations from longest to shortest.

        @param l1: first list to compare
        @param l2: second list to compare
        @return: -1 if l1 is longer than l2, otherwise 1
        """
        return -1 if len(l1) > len(l2) else 1
    
    
    def __getModelFromState(self, models, state):
        """Map a global state index to (model index, local state index).

        States are numbered consecutively across the model pool; this walks
        the pool subtracting each model's size until the state fits.

        @param models: the list of models
        @param state: the global index of the state to search
        @raise ValueError: (module-local class) if state is out of range
        @return: the index of the owning model and the state's position inside it
        """
        remaining = state
        for model_index, entry in enumerate(models):
            size = entry[0].N
            if remaining < size:
                return model_index, remaining
            remaining -= size

        raise ValueError()
        
    def __evaluate(self, models, observations):
        """Compute the error of a model pool over a set of observations.

        The error is the sum, over all models, of -log L(s|m) for every
        sequence the model can actually produce, minus the log of the
        model's a-priori probability.

        @param models: The list of [model, probability] pairs to evaluate
        @param observations: the observations used to evaluate the models
        @return: the total error, or +inf if no model produced any sequence
        """
        evaluation = 0.0
        produced_any = False
        for model in models:
            # 'model_error' replaces the original 'eval', which shadowed
            # the builtin.
            model_error = 0.0
            for obs in observations:
                tmp = model[0].loglikelihood(obs)
                # -inf means the model cannot produce this sequence at all;
                # such sequences simply do not contribute to the error.
                if tmp != -float('inf'):
                    model_error += -tmp
                    produced_any = True
            evaluation += (model_error - log(model[1]))
        if produced_any:
            return evaluation
        return float('inf')
       
    def __ListToEmission(self, list_to_transform):
        """Wrap a list of symbol lists into a ghmm SequenceSet.

        @param list_to_transform: list of lists to be transformed
        @return: a SequenceSet built over the current alphabet
        """
        wrapped = SequenceSet(self.alphabet, list_to_transform)
        return wrapped

class ValueError(Exception):
    """Project-specific error raised for inconsistent model dimensions or
    out-of-range state indexes.

    NOTE(review): this class shadows the builtin ValueError for every user
    of this module; renaming it (e.g. ModelDimensionError) would be safer,
    but the name is kept for backward compatibility.
    """

    def __init__(self, *args):
        # Forward an optional message to Exception; calling with no
        # arguments stays compatible with the original zero-argument
        # constructor.
        Exception.__init__(self, *args)