
from codonDictionaries import get_codon_to_AA_dict
from codonDictionaries import get_AA_dict
from codonDictionaries import get_dictionaries
from utilities import readfile
from utilities import entropy_list
from utilities import check_data
from utilities import marginal_entropy
from utilities import normalize_p_xy
from utilities import get_data_for_given_x
from utilities import gc_content_model

import math


def compressibility_eq(actual_value, expected_value):
    '''
        Compressibility score of an observed entropy against an expected
        (null-model) entropy, computed on the effective-number scale
        (2**entropy): returns 1 when actual == expected and 0 when the
        actual entropy is 0.
    '''
    # the difference is deliberately signed ("no abs value" in the original):
    # the score can exceed 1 when actual entropy is above the expected one
    expected_eff_no = 2 ** expected_value
    actual_eff_no = 2 ** actual_value
    return 1 - (expected_eff_no - actual_eff_no) / (expected_eff_no - 1)


def compute_entropies_two_models(p_xy, p_xy_null_model, verbose, uniform_null_model=False):
    
    '''
        given p_xy --a dictionary {(x,y):occs} --
        returns
        {
        overall_comp : overall compressibility of y,
        unw_eff_no: unweighted eff. no. of y,
        x_to_comp_effno: {x: (compressibility of y, eff. no. of y) },
        eff_x: 2**H(X),
        eff_y : 2**H(Y)
        }
        the second dictionary, p_xy_null_model,
        is used to compute the bias with respect to that null model.
        If uniform_null_model is True, p_xy_null_model is ignored and
        rebuilt as a uniform distribution over the keys of p_xy.
    '''
    
    # makes sure dict are normalized (occurrences -> probabilities)
    p_xy = normalize_p_xy(p_xy)
    
    if uniform_null_model == True:
        # each pair gets uniform probability
        # here we do not care about H(X) in the null model
        # therefore there is no need to normalize this dict
        # numbers will get the right normalization in entropy_list
        p_xy_null_model = {}
        for pair in p_xy:
            p_xy_null_model[pair] = 1.
    
    # joint entropy H(X,Y) of the (normalized) data; only used for verbose output
    H_XY = entropy_list(p_xy.values())
    
    if verbose:
        print 'H_XY', H_XY, 'H(X)', marginal_entropy(p_xy,0), 'H(Y)', marginal_entropy(p_xy,1)
    
    # same keys: data and null model must cover exactly the same (x,y) pairs
    assert sorted(p_xy.keys()) == sorted(p_xy_null_model.keys())

    
    # computing the conditional entropy
    # { x : {y: p} }
    x_y_pr = get_data_for_given_x(p_xy)

    x_y_pr_null_model = get_data_for_given_x(p_xy_null_model)
    
    
    # {x : (compressibility, eff_no) }
    x_compress_eff_no_dict={}
    
    # overall outputs
    # sum over x of 2**H(Y|X=x): the unweighted effective number of y
    unweighted_no=0.
    
    # just the average of all compressibilities
    # (accumulated as a p(x)-weighted sum, normalized by sum_px below)
    just_weighted_average = 0.
    
    
    sum_px = 0.
    for x, y_pr in x_y_pr.iteritems():
        
        
        # this will be used for both the null model and 
        # the actual data
        px=sum(y_pr.values())
        # entropy of H(Y | X = x)
        conditional_entropy_x = entropy_list(y_pr.values())
        # entropy of H(Y | X = x) in the null model
        conditional_entropy_x_null_model = entropy_list(x_y_pr_null_model[x].values())  
        # unweighted no.
        unweighted_no += 2**conditional_entropy_x
        
        
        # x values with a single y are skipped: both entropies are 0 there,
        # so the compressibility formula would degenerate (0/0)
        if len(y_pr)>1:
            compressibility_x = compressibility_eq(conditional_entropy_x, conditional_entropy_x_null_model)
            x_compress_eff_no_dict[x] = (compressibility_x, 2**conditional_entropy_x)
            # just weighted averages
            just_weighted_average += px * compressibility_eq(conditional_entropy_x, conditional_entropy_x_null_model)
            sum_px += px
    


    # here we take the weighted average
    # NOTE(review): if no x has more than one y, sum_px stays 0 and this
    # division raises ZeroDivisionError -- confirm inputs always avoid that
    compressibility_overall = just_weighted_average / sum_px
    if verbose:
        print '>>> compress, coditional_entr.: ',compressibility_overall
    
    return {'overall_comp' : compressibility_overall,
            'unw_eff_no' : unweighted_no,
            'x_to_comp_effno' : x_compress_eff_no_dict,
            'eff_x' : 2**marginal_entropy(p_xy,0),
            'eff_y' : 2**marginal_entropy(p_xy,1) }


def compute_codon_weight(list_cod, AA_to_codons, codon_to_AA, prior_aa, prior_cod, prior_info = None):
    
    '''
        given a list of codons the function computes
        the dict { codon : occurrences + prior } over ALL codons of
        codon_to_AA (also the unobserved ones).
        
        The prior is either
        - prior_aa / (number of synonymous codons) + prior_cod, per codon, or
        - the optional dict prior_info {codon: prior}, which, when given,
          must cover exactly the codons of codon_to_AA.
    '''
    
    # None (instead of a mutable {} default) avoids the shared-default-
    # argument pitfall; explicitly passing {} still behaves as before
    if prior_info is None:
        prior_info = {}
    
    assert len(prior_info) == 0 or sorted(prior_info.keys()) == sorted(codon_to_AA.keys())
    
    # inserting prior
    if len(prior_info) == 0:
        cod_weight = {}
        # .items() instead of the Python-2-only .iteritems(): identical
        # behavior, portable across Python versions
        for cod, aa in codon_to_AA.items():
            # the per-amino-acid prior is split evenly among its synonymous codons
            cod_weight[cod] = float(prior_aa)/len(AA_to_codons[aa]) + prior_cod
    else:
        # copy so the caller's prior dict is never mutated by the counting below
        cod_weight = prior_info.copy()
    
    # counting
    for cod in list_cod:
        cod_weight[cod] += 1.
    
    return cod_weight

def compute_cooccurrences(cod_weight, codon_to_something):
    
    '''
        Lift codon weights to co-occurrence counts with another variable.
        
        Given {codon: counts} and {codon: smt} -- 'smt' stands for
        'something', e.g. an amino acid or a tRNA -- returns
        {(smt, codon): counts}.
    '''
    
    p_smt_cod={}
    # .items() instead of the Python-2-only .iteritems(): identical
    # behavior, portable across Python versions
    for cod, occ in cod_weight.items():
        smt_cod=(codon_to_something[cod], cod)
        # each codon maps to exactly one pair, so keys can never collide
        assert smt_cod not in p_smt_cod
        p_smt_cod[smt_cod] = occ
    
    return p_smt_cod


def compute_cooccurrences2(cod_weight, codon_to_TRNA, codon_to_AA):
    
    '''
        Aggregate codon weights into (amino acid, tRNA) co-occurrence counts.
        
        Given {codon: counts}, returns {(aa, trna): counts}; several codons
        can map to the same (aa, trna) pair, so their counts are summed.
    '''
    
    p_aa_trna={}
    # .items() instead of the Python-2-only .iteritems(): identical
    # behavior, portable across Python versions
    for cod, occ in cod_weight.items():
        aa_trna=(codon_to_AA[cod], codon_to_TRNA[cod])
        p_aa_trna[aa_trna] = p_aa_trna.get(aa_trna, 0.) + occ
    
    return p_aa_trna



def compute_two_random_variable_entropy(gc_model, list_cod, 
                                        AA_to_codons,
                                        codon_to_AA,
                                        codon_to_TRNA,
                                        prior_aa, prior_cod,
                                        verbose, gc_verbose,
                                        beta_user, prior_from_gc_model):
    
    '''
        see: compute_compressibility_et_al
        
        Core routine: builds codon weights (counts + priors), fits the
        gc-content null model, assembles the three co-occurrence tables
        (AA-codon, tRNA-codon, AA-tRNA) and scores each of them against
        either the gc model (gc_model=True) or a uniform null model.
        Returns [ac_results, tc_results, at_results, beta].
    '''
    
    
    # list_cod is translated in 
    # {cod: weight}
    # where weight is occurrences + prior    
    cod_weight= compute_codon_weight(list_cod, AA_to_codons, codon_to_AA, prior_aa, prior_cod)
    
    
    # gc content for AA-codons; also returns the fitted (or user-fixed) beta
    codon_gc_model_prob, beta = gc_content_model(cod_weight, AA_to_codons, codon_to_AA, beta = beta_user, verbose= gc_verbose)
    if verbose: print '>>> beta for gc model ::', beta
    
    if prior_from_gc_model:
    
        # we are now recomputing the cod_weight
        # using self-consistent priors from the codon_gc_model_prob
        # NOTE(review): 'iter' shadows the builtin; it is only a loop counter
        # NOTE(review): no iteration cap -- presumably beta always converges
        # to within 1e-10; confirm for pathological inputs
        iter = 0
        while True:
            if verbose: print 'iter', iter
            if verbose: print beta, beta_user, 'beta, beta_user'
            # using the gc model for priors
            cod_weight = compute_codon_weight(list_cod, AA_to_codons, codon_to_AA, 0, 0, codon_gc_model_prob)
            codon_gc_model_prob, new_beta = gc_content_model(cod_weight, AA_to_codons, codon_to_AA, beta = beta_user, verbose= False)

            # fixed point reached: beta stable to within 1e-10
            if math.fabs(new_beta - beta) < 1e-10:
                break
            beta = new_beta
            iter += 1        
    
    # null-model co-occurrence tables built from the gc-model probabilities
    p_ac_gc_model = compute_cooccurrences(codon_gc_model_prob, codon_to_AA)
    p_tc_gc_model = compute_cooccurrences(codon_gc_model_prob, codon_to_TRNA)
    p_at_gc_model = compute_cooccurrences2(codon_gc_model_prob, codon_to_TRNA, codon_to_AA)
    
    # co-occurrences of the actual (prior-smoothed) data
    p_ac = compute_cooccurrences(cod_weight, codon_to_AA)    
    p_tc = compute_cooccurrences(cod_weight, codon_to_TRNA)    
    p_at = compute_cooccurrences2(cod_weight, codon_to_TRNA, codon_to_AA)    

    
    
    if gc_model:
        # gc model (AA - CODONS)
        ac_gc_model_results = compute_entropies_two_models(p_ac, p_ac_gc_model, verbose)
        #print ac_gc_model_results

        # gc model (tRNA - CODONS)
        tc_gc_model_results = compute_entropies_two_models(p_tc, p_tc_gc_model, verbose)
        #print tc_gc_model_results

        # gc model (AA - tRNA)
        at_gc_model_results = compute_entropies_two_models(p_at, p_at_gc_model, verbose)
        #print at_gc_model_results
        
        return [ac_gc_model_results, tc_gc_model_results, at_gc_model_results, beta]

    else:
        #============== UNIFORM MODEL'

        # entropies against the uniform null model (second argument ignored)
        if verbose: print '*** AA-CODONS ======================='
        ac_results = compute_entropies_two_models(p_ac, {}, verbose, uniform_null_model=True)
        
        if verbose: print '*** TRNA-CODONS ======================='
        tc_results = compute_entropies_two_models(p_tc, {}, verbose, uniform_null_model=True)

        if verbose: print '*** AA-TRNA ======================='
        at_results = compute_entropies_two_models(p_at, {}, verbose, uniform_null_model=True)

        
        return [ac_results, tc_results, at_results, beta]



def compute_compressibility_et_al(all_codon_lists, gc_model,
                                  verbose=False, gc_verbose = False,
                                  beta = '',
                                  prior_aa=0., prior_cod=0., prior_from_gc_model = False):
    
    '''
        Top-level entry point: concatenates all codon lists and delegates
        to compute_two_random_variable_entropy.
        
        INPUT:
            >>> gc_model: bool flag; True uses the gc-corrected null model,
                False a uniform null model
            >>> verbose, gc_verbose: verbosity flags
            >>> beta: fitted automatically by default (''), or a single
                user-supplied value shared by all genes
            >>> prior_aa, prior_cod: pseudo-count priors (per amino acid
                and per codon respectively)
            >>> prior_from_gc_model: bool flag; if True the priors are
                iterated to self-consistency with the gc-corrected model
        
        OUTPUT:
            a list [aa_codon_dict, trna_codon_dict, aa_trna_dict, beta];
            calling x the first variable and y the second, each dict is
            {
                'overall_comp' : overall compressibility of y,
                'unw_eff_no': unweighted eff. no. of y,
                'x_to_comp_effno': {x: (compressibility of y, eff. no. of y) },
                'eff_x': 2**H(X),
                'eff_y' : 2**H(Y)
            }
    '''
    
    assert isinstance(gc_model, bool)
    assert isinstance(prior_from_gc_model, bool)
    
    # flatten the per-gene codon lists into one genome-wide list
    all_codons = [cod for gene_codons in all_codon_lists for cod in gene_codons]
    
    aaToCodonDict, codonToAAdict, trnaToCodonDict, codonToTRNAdict, aaToTRNAdict = get_dictionaries()
    
    # [ac_results, tc_results, at_results, beta]
    return compute_two_random_variable_entropy(gc_model, all_codons,
                                               aaToCodonDict,
                                               codonToAAdict,
                                               codonToTRNAdict,
                                               prior_aa, prior_cod,
                                               verbose, gc_verbose, beta, prior_from_gc_model)
    


if __name__=='__main__':
    
    
    # NOTE(review): the first assignment is immediately overridden; the
    # commented/dead values are kept as a quick way to switch datasets
    filename='escherichia.bak'
    filename = 'pseudomonasData.csv'
    #filename = 'test.csv'
    all_amino_lists, all_codon_lists= readfile(filename)



    # whole-genome run: gc-corrected null model, self-consistent priors
    print 'all genome ========================================'
    results = compute_compressibility_et_al(all_codon_lists, True, gc_verbose = False,
                                            verbose = True, beta = '',
                                            prior_aa = 1., prior_cod = 0.,
                                            prior_from_gc_model = True)
    
    print 'AA-CODONS==========='
    print results[0]
    
    print 'TRNA-CODONS==========='
    print results[1]
    
    print 'AA-TRNA==========='
    print results[2]
    
    # genome-wide fitted beta of the gc model
    beta_genome = results[3]

    # NOTE(review): everything below this exit() is unreachable -- the
    # per-gene analysis is deliberately skipped (see comment below)
    exit()

    print 'beta_genome::', beta_genome
    

    
    # this is for each single gene (skipped here)
    # NOTE(review): outfile is never closed explicitly; it relies on
    # interpreter exit to flush -- acceptable for a one-shot script
    outfile=open('codGivAA_codGivT_TGivAA.txt', 'w')
    all_dicts=[]
    for counter, c in enumerate(all_codon_lists):
        print counter
        results= compute_compressibility_et_al([c], True, verbose = False, beta = '', 
                                               prior_aa=1., prior_cod=0.,
                                               prior_from_gc_model = True)
        # one line per gene: length and the three overall compressibilities
        outfile.write('%d %f %f %f\n'%( len(c),
                                        results[0]['overall_comp'], 
                                        results[1]['overall_comp'], 
                                        results[2]['overall_comp']))
        all_dicts.append(results[0]['x_to_comp_effno'])
        
    
    

    
    
