



def compute_entropy_for_ngram(outs, all, total_counts_original, prior, verbose):
    '''
        Shannon entropy (natural log) of the smoothed transition
        distribution out of a single n-gram state.

        outs: dict {neighbor: counts} of observed transitions
        all: iterable of every possible neighbor symbol
             (parameter name shadows the builtin `all`; kept for
             backward compatibility with existing callers)
        total_counts_original: sum of the observed counts in outs
        prior: pseudo-count added to every possible neighbor
        verbose: print per-neighbor probabilities when True
        returns the entropy h (float, nats)
        '''
    # denominator so the smoothed probabilities sum to one:
    # every possible neighbor receives `prior` extra pseudo-counts
    total_counts = total_counts_original + prior * len(all)
    h = 0.
    total_pr = 0.

    if verbose:
        print('------ ngram')
    for neighbor in all:
        # default is the prior (unseen transition)
        pr = prior
        # if the transition is present, add its observed counts
        if neighbor in outs:
            pr += float(outs[neighbor])
        # normalization
        pr /= total_counts
        # total_pr should be 1 (checked below)
        total_pr += pr
        # entropy's definition; skip pr == 0 terms (lim p->0 of
        # p*log(p) is 0, and math.log(0) would raise)
        if pr > 0:
            h -= pr * math.log(pr)
        if verbose:
            print('%s %s neighbor, pr' % (neighbor, pr))

    if verbose:
        print('entropy: %s' % h)
    # check probability is normalized
    assert math.fabs(total_pr - 1) < 1e-7
    return h






def compute_entropy_for_transit_probs(ngram_net, ngram_counts, all, prior, verbose):
    '''
        Entropy of the whole transition network, computed as the
        count-weighted average of each n-gram state's entropy.

        ngram_net: dict {ngram: {neighbor: counts}}
        ngram_counts: dict {ngram: total outgoing counts of that state}
        all: every possible neighbor symbol
        prior: pseudo-count, forwarded to compute_entropy_for_ngram
        verbose: print progress when True
        returns (weighted average entropy, total counts)

        NOTE(review): raises ZeroDivisionError on an empty ngram_net --
        presumably callers always pass a non-empty network; confirm.
        '''
    # total entropy of the network (count-weighted sum, normalized below)
    total_h = 0.
    total_counts = 0.
    for ngram in ngram_net:
        # weight normalization
        total_counts += float(ngram_counts[ngram])
        # summing node's entropy, weighted by how often the state occurs
        total_h += ngram_counts[ngram] * compute_entropy_for_ngram(
            ngram_net[ngram], all, ngram_counts[ngram], prior, verbose)
        if verbose:
            print('%s %s counts, total_h' % (ngram_counts[ngram], total_h))

    if verbose:
        print('%s %s END' % (total_h, total_counts))
    return total_h / total_counts, total_counts
    
    
    
    
    


def compute_entropy_multiple_lists(lists, all, order, prior, verbose=True):
    '''
        lists is a list of lists, every inner list is a gene
        (a sequence of symbols). We compute the aggregate transition
        network of all the lists; the routine returns the entropy of
        this aggregate network and its weight (total counts).
        '''
    #### getting the transition probabilities
    # {ngram (codon unigram/bigram/trigram/...): counts}
    ngram_counts = {}
    # {ngram: {next symbol: count}} -- both filled in-place by split_path
    ngram_net = {}

    for gene in lists:
        split_path(gene, order, ngram_counts, ngram_net)

    if verbose:
        print(ngram_counts)
        print(ngram_net)
        # (dropped the original `print l`: it read the loop variable
        # after the loop, a NameError whenever `lists` is empty)

    #### getting the entropy
    return compute_entropy_for_transit_probs(ngram_net, ngram_counts, all, prior, verbose)



def check_zero_order(l, all, prior):
    '''
        Independent sanity check that the zero-order entropy is
        computed correctly.

        l: list of genes, each a sequence of symbols
        all: every possible symbol; each must occur at least once in l
             (unseen symbols get no prior here, hence the assert)
        prior: pseudo-count added once per distinct observed symbol
        returns the zero-order entropy (nats)
        '''
    prs = {}
    total_counts = 0.
    for gene in l:
        for n in gene:
            # first occurrence: seed the symbol with the prior
            if n not in prs:
                prs[n] = prior
                total_counts += prior
            prs[n] += 1
            total_counts += 1

    # every possible symbol must have been observed
    assert len(prs) == len(all)
    h = 0.
    # .values() instead of Python 2-only .iteritems(); the key was unused
    for p in prs.values():
        h -= p / total_counts * math.log(p / total_counts)
    return h






        '''
            # compute entropies for this gene
            ammino_h, ammino_weight=compute_entropy_multiple_lists([codons], all_codons, order, prior)
            codon_h, codon_weight=compute_entropy_multiple_lists([amminos], all_amminos, order, prior)
            # writing to a file
            outfile.write(str(math.exp(codon_h))+' '+str(math.exp(ammino_h))+\
            ' '+str(ammino_weight)+'\n')
            
            outfile.close()
            
            # now for the entire genome
            ammino_h, ammino_weight=compute_entropy_multiple_lists(all_codon_lists, all_codons, order, prior)
            codon_h, codon_weight=compute_entropy_multiple_lists(all_ammino_lists, all_amminos, order, prior)
            outfile=open('all_genome_entropy.txt', 'w')
            outfile.write(str(math.exp(codon_h))+' '+str(math.exp(ammino_h))+' '+\
            str(ammino_weight)+'\n')
            
            outfile.close()
            
            print 'checking'
            # checking the entropy 0th order in a different way
            print 'effective #amminos in the genome (0th order): ', math.exp(check_zero_order(all_ammino_lists, all_amminos, prior))
            print 'effective #codons in the genome (0th order): ', math.exp(check_zero_order(all_codon_lists, all_codons, prior))
            
            '''



# NOTE(review): commented-out legacy helper, kept for reference only.
# It enumerated all possible n-gram states up to order 2. Stored as a
# module-level string literal, so it has no runtime effect.
'''
    def compute_all_possible_states(all_codons, order):
    
    if order==0:
    return [()]
    if order==1:
    return [(v,) for v in all_codons]
    if order==2:
    all_codons2=list(all_codons)
    all_possible_states=[]
    for v in all_codons:
    for v2 in all_codons:
    all_possible_states.append((v,v2))
    return all_possible_states
    else:
    print 'order higher than 2nd is not done yet'
    exit()
    '''



def compute_entropy_given_state(transitions, all_aminos_net, prior, verbose=False):
    '''
        transitions is supposed to be a dict {(aa, codon): counts}
        describing the observed transitions out of one state.
        all_aminos_net: dict mapping each amino acid to its possible
        (synonymous) codons.
        prior: pseudo-count used for smoothing.
        Returns (total_counts + prior, H(aa), H(codon|aa)) given this
        state of the system.
        '''
    # if prior_on_codons is True, all codons get the same prior and
    # aminoacids get a larger prior if they have more synonymous codons
    prior_on_codons = False

    if verbose:
        print('compute_entropy_given_state::: prior: %s' % prior)
        print('transitions %s' % (transitions,))

    # for each amino, the observed counts of each of its codons
    amino_codon_net = {}

    total_counts = 0.
    for aa_codon_pair in transitions:
        aa = aa_codon_pair[0]
        codon = aa_codon_pair[1]
        if aa not in amino_codon_net:
            amino_codon_net[aa] = {}
        if codon not in amino_codon_net[aa]:
            amino_codon_net[aa][codon] = 0
        amino_codon_net[aa][codon] += transitions[aa_codon_pair]
        total_counts += transitions[aa_codon_pair]

    if verbose:
        print(amino_codon_net)
        print('***************************************')

    # aa entropy and conditional codon entropy
    H_aa = 0.
    H_codon = 0.
    # normalization for p(aa): every amino gets `prior` pseudo-counts
    aa_norm = total_counts + prior * len(all_aminos_net)
    if prior_on_codons:
        # one `prior` per codon instead of one per amino
        aa_norm = total_counts + prior * sum([len(all_aminos_net[aa]) for aa in all_aminos_net])
    p_aa_all = []

    for aa in all_aminos_net:
        # p(aa): start from the (possibly codon-weighted) prior
        if prior_on_codons:
            p_aa = prior * len(all_aminos_net[aa])
        else:
            p_aa = prior
        if aa in amino_codon_net:
            # summing all the values of the codon dictionary
            p_aa += sum(amino_codon_net[aa].values())
        p_aa /= aa_norm
        if verbose:
            print('%s %s' % (aa, p_aa))
        p_aa_all.append(p_aa)
        # compute the conditional codon distribution for this amino
        p_codon_all = []
        if aa in amino_codon_net:
            for codon in all_aminos_net[aa]:
                p_codon_all.append(amino_codon_net[aa].get(codon, 0) + prior)
                if verbose:
                    print('%s %s' % (codon, amino_codon_net[aa].get(codon, 0) + prior))
            if verbose:
                print(p_codon_all)
            # weighted conditional entropy (entropy_list is expected to
            # normalize the raw counts itself)
            H_codon += p_aa * entropy_list(p_codon_all)
        else:
            # unseen amino: uniform over its synonymous codons.
            # NOTE(review): this term uses log base 2 while
            # compute_entropy_for_ngram uses natural log -- presumably
            # entropy_list is base 2 as well; confirm for consistency.
            H_codon += p_aa * math.log(len(all_aminos_net[aa]), 2)

    H_aa = entropy_list(p_aa_all)
    if verbose:
        print('%s aa_norm_check' % sum(p_aa_all))
    # p(aa) must be a proper distribution
    assert math.fabs(sum(p_aa_all) - 1) < 1e-6, 'error in aa_norm'

    if verbose:
        print('entropies: %s %s' % (H_aa, H_codon))
    return total_counts + prior, H_aa, H_codon



