

from gc_maximum_entropy_multiAA import beta_given_mu
import math



def entropy_list(l):
    '''
        l: list of non-negative numbers.
        Normalizes l into a probability distribution and returns its
        Shannon entropy in bits; returns 0. when the total is not
        positive.
        '''
    total = float(sum(l))
    if total <= 0:
        return 0.
    probs = [float(value) / total for value in l]
    return -sum(p * math.log(p, 2) for p in probs if p > 0)


def check_data(data1, data2, prec=1e-7):
    '''
        Recursively checks that data1 and data2 agree.

        Supported types (dispatched on data1's type):
        - tuple: element-wise recursive comparison (raises IndexError
          if data2 is shorter, as before)
        - float: absolute difference must be below prec
        - int:   exact equality
        - dict:  recursive comparison on data1's keys (assumes data2
          has the same keys; raises KeyError otherwise)
        Anything else prints a warning and returns False.
        '''
    if isinstance(data1, tuple):
        # all elements must match (evaluated with short-circuiting)
        return all(check_data(data1[i], data2[i], prec=prec)
                   for i in range(len(data1)))
    elif isinstance(data1, float):
        return math.fabs(data1 - data2) < prec
    elif isinstance(data1, int):
        # NOTE(review): with isinstance, bools are now handled here
        # (bool is a subclass of int) instead of hitting the
        # unsupported-type branch — exact comparison is the sane choice
        return data1 == data2
    elif isinstance(data1, dict):
        # true only if every entry matches
        for key in data1:
            if not check_data(data1[key], data2[key], prec=prec):
                return False
        return True
    else:
        # single-argument print() works identically on Python 2 and 3
        print('data not supported ' + str(type(data1)))
        return False
    
    


def marginal_entropy(p_xy, index=0):
    '''
        p_xy: {tuple: probability} joint distribution keyed by tuples.
        Sums out everything except the coordinate at position `index`
        (0 -> first coordinate, 1 -> second, ...) and returns the
        Shannon entropy (bits) of that marginal via entropy_list.
        '''
    p_x = {}
    # .items() instead of the Python-2-only .iteritems()
    for xy, p in p_xy.items():
        key = xy[index]
        p_x[key] = p_x.get(key, 0) + p
    return entropy_list(list(p_x.values()))




def normalize_p_xy(p_xy):
    '''
        Rescales the values of p_xy in place so that they sum to 1,
        and returns the same dict.  Assumes the values have a nonzero
        sum (otherwise a ZeroDivisionError propagates).
        '''
    total = sum(p_xy.values())
    for key in p_xy:
        p_xy[key] = p_xy[key] / total
    return p_xy



def get_data_for_given_x(p_xy):
    '''
        p_xy: {(x, y): probability}
        Regroups the joint distribution by its first coordinate and
        returns {x: {y: probability}}.  Asserts that no (x, y) pair
        appears twice.
        '''
    x_y_pr = {}
    # .items() instead of the Python-2-only .iteritems()
    for xy, pr in p_xy.items():
        x = xy[0]
        y = xy[1]
        if x not in x_y_pr:
            x_y_pr[x] = {}
        # each (x, y) pair must be unique in the input
        assert y not in x_y_pr[x]
        x_y_pr[x][y] = pr

    return x_y_pr



def compute_gc_content(cod_weight):
    '''
        cod_weight: {codon string: weight}
        Returns the weighted count of G and C bases summed over all
        codons (weight * number of G/C characters in the codon).
        '''
    GC_content = 0
    # .items() instead of the Python-2-only .iteritems()
    for cod, weight in cod_weight.items():
        GC_content += weight * (cod.count('C') + cod.count('G'))
    return GC_content


def compute_degeneracies(AA_to_codons):
    '''
        AA_to_codons: {amino acid: [codon strings]}
        Returns {amino acid: {gc_count: number of its codons with that
        many G/C bases}} -- i.e. the degeneracy of each GC "energy
        level" per amino acid.
        '''
    energy_levels = {}
    # .items() instead of the Python-2-only .iteritems()
    # (also drops the stray trailing comma in the original loop target)
    for AA, codons in AA_to_codons.items():
        levels = {}
        for cod in codons:
            gc = cod.count('C') + cod.count('G')
            levels[gc] = levels.get(gc, 0) + 1
        energy_levels[AA] = levels
    return energy_levels
    
    
def gc_content_model(cod_weight, AA_to_codons, codon_to_AA, beta = '', verbose = False):

    '''
        compares the effective number of codons
        with what you would get in a maximum-entropy model with the
        same GC content

        cod_weight:   {codon: weight / occurrence count}
        AA_to_codons: {amino acid: [codons]}
        codon_to_AA:  {codon: amino acid}
        beta:         inverse-temperature parameter of the model; the
                      default '' is a sentinel meaning "fit beta to the
                      observed GC content via beta_given_mu"
        verbose:      if True, print per-amino-acid diagnostics

        returns a tuple (codon_gc_model_prob, beta), where
        codon_gc_model_prob is {codon: probability under the gc model}
        (only codons present in cod_weight get an entry)
    '''

    # {codon: probability}
    codon_gc_model_prob = {}
    # weighted total number of G/C bases over all codons
    GC_content = compute_gc_content(cod_weight)
    if verbose: print '\n\n\ntotal GC-content:', GC_content, 'GC-content fraction', GC_content/(3*sum(cod_weight.values()))

    # energy_levels[amino] = { E: g_E } -- for each amino acid, how many
    # of its codons carry E G/C bases (degeneracy of each energy level)
    energy_levels=compute_degeneracies(AA_to_codons)
    # amino_presence[amino] = total weight of the codons coding for it
    amino_presence={}

    for cod, occ in cod_weight.iteritems():
        aa=codon_to_AA[cod]
        amino_presence[aa] = amino_presence.get(aa, 0.) + occ


    if beta == '':
        # sentinel hit: fit beta so the model reproduces the observed
        # GC content (beta_given_mu imported from gc_maximum_entropy_multiAA)
        if verbose: print 'computing beta...', beta
        beta = beta_given_mu(GC_content, amino_presence, energy_levels)
        if verbose: print 'beta:: ', beta
        # the expected-GC consistency assert at the end is only valid
        # when beta was fitted to this very data set
        assert_flag = True
    else:
        # caller supplied beta: skip the consistency check below
        assert_flag = False

    # for each amino, the actual probabilities
    # and the model ones


    # { amino : {codon: counts} }
    aa_cod_counts = {}
    for cod, occ in cod_weight.iteritems():
        aa=codon_to_AA[cod]
        if aa not in aa_cod_counts:
            aa_cod_counts[aa]={}
        aa_cod_counts[aa][cod] = aa_cod_counts[aa].get(cod, 0.)+occ



    # this is just to check that the average is correct
    expected_gc_content=0.    

    # this is for the global measures (entropies conditioned on the
    # amino acid, averaged over amino-acid frequencies)
    random_conditional_entropy_overall=0.
    conditional_entropy_overall=0.
    gc_conditional_entropy_overall=0.
    all_aminos=sum(amino_presence.values())



    for aa, empirical_distr in aa_cod_counts.iteritems():
        if verbose: print '=== amino', aa, '===\ncodon,empirical distribution, gc-model, gc content ==='
        all_values= sum(empirical_distr.values())

        # the two bookkeeping passes over cod_weight must agree
        assert math.fabs(amino_presence[aa] - all_values) < 1e-6


        # entropy (bits) of the observed codon usage for this amino
        entropy_given_aa = entropy_list(empirical_distr.values())

        # computing normalization for theoretical distr
        # (partition function Z = sum_E g_E * exp(-beta*E))
        normalization=0.
        for e, g_E in energy_levels[aa].iteritems():
            normalization += g_E * math.exp(-beta * e)

        theoretical_values=[]
        for cod, counts in empirical_distr.iteritems():
            gc= (cod.count('C')+cod.count('G'))
            if verbose: 
                if all_values > 0: 
                    print cod, counts/all_values, math.exp(-beta * gc)/ normalization, gc
                else: 
                    print cod, 'no values', math.exp(-beta * gc)/ normalization, gc
            # model-expected GC contribution of this codon, weighted by
            # the amino acid's total occurrence
            expected_gc_content += gc * math.exp(-beta * gc)/ normalization * all_values
            theoretical_values.append(math.exp(-beta * gc)/ normalization)
            codon_gc_model_prob[cod] = math.exp(-beta * gc)/ normalization 

        if len(empirical_distr)>1 and verbose:
            # effective number of codons = 2**entropy
            print 'eff. no: ', 2**entropy_given_aa, 'random:', len(empirical_distr),
            print 'gc-model: ', 2**entropy_list(theoretical_values) 

        # global measures, weighted by the amino acid's frequency px
        px= float(all_values)/all_aminos
        random_conditional_entropy_overall += px * math.log(len(empirical_distr),2)
        conditional_entropy_overall += px * entropy_given_aa
        gc_conditional_entropy_overall += px * entropy_list(theoretical_values)


    # when beta was fitted above, the model must reproduce the observed
    # GC content up to numerical precision
    if assert_flag: assert math.fabs(expected_gc_content - GC_content)<1e-6

    # effective-number ratios: observed vs uniform and vs gc-model
    compressibility_overall_random= (2**conditional_entropy_overall-1.)/(2**random_conditional_entropy_overall-1.)    
    compressibility_overall_gc= (2**conditional_entropy_overall-1.)/(2**gc_conditional_entropy_overall-1.)    

    if verbose: 
        print '=== OVERALL ==='
        print 'overall_comp:', compressibility_overall_random, 'overall_comp_gc:',  compressibility_overall_gc
        print '\n\n\n'

    return codon_gc_model_prob, beta



def entropy_list(l):
    '''
        l: list of non-negative numbers; normalizes them and returns
        the Shannon entropy (base 2) of the resulting distribution,
        or 0. when the total is not positive.

        NOTE(review): this duplicates the entropy_list defined earlier
        in this module and shadows it at import time -- consider
        consolidating into a single definition.
        '''
    total = float(sum(l))
    if total <= 0:
        return 0.
    h = 0.
    for value in l:
        p = float(value) / total
        if p > 0:
            h += p * math.log(p, 2)
    return -h



def readfile(filename):
    '''
        Parses a paired codon / amino-acid file.

        The file alternates lines: even lines (0-based) hold a
        comma-separated list of codons, odd lines hold the matching
        comma-separated list of amino acids.  Each pair of lines
        describes one gene; the two lists on a pair must have equal
        length (asserted).

        returns (all_amino_lists, all_codon_lists): two parallel lists
        of lists, one entry per gene.

        NOTE(review): a blank line raises IndexError (l.split()[0]),
        and ''.split(',') yields [''], so the len(...)>0 guards are
        always true -- behavior kept as-is.
        '''
    # if line_counter is even, the line holds codons; otherwise aminos
    line_counter = 0
    all_amino_lists = []
    all_codon_lists = []
    codons = None  # set on each even line, checked on the following odd line

    # 'with' guarantees the file is closed even if an assert fires
    # (the original left the handle open)
    with open(filename) as handle:
        for l in handle:
            if line_counter % 2 == 0:
                # a list of codons (strings)
                codons = l.split()[0].split(',')
                # saves codons for analyzing the whole genome later
                if len(codons) > 0:
                    all_codon_lists.append(codons)
            else:
                # a list of AAs (strings)
                aminos = l.split()[0].split(',')
                # saves aminos for analyzing the whole genome later
                if len(aminos) > 0:
                    all_amino_lists.append(aminos)
                assert len(codons) == len(aminos), 'codon and aminos lengths dont match ' + str(codons) + ' ' + str(aminos)

            line_counter += 1
            if line_counter % 100 == 0:
                # progress indicator for large files
                # (single-argument print() works on Python 2 and 3)
                print('line ' + str(line_counter))

    assert len(all_amino_lists) == len(all_codon_lists)

    print('sample codon->>>> ' + str(all_codon_lists[0][0]))

    return all_amino_lists, all_codon_lists




