'''
Created on Feb 2, 2010

@author: oabalbin
'''

#import sys;
#import string;
#import StringIO;
import os
import sys
import glob
import shutil
import copy
import numpy as np
import scipy as sp
from scipy import stats
import pickle
import operator
from optparse import OptionParser
from datetime import datetime
from collections import deque, defaultdict



# My libraries
import signatures.parsers.read_rnaseq_data as pm
import signatures.parsers.read_microarray_data as pmd
import signatures.parsers.read_gene_lists as gpm
import signatures.db.tables as dt
import signatures.db.query as dq
import signatures.common.classes as cm #record, arrayMatrix
import signatures.preprocess.tools as tl #filter_constant_genes, get_variable_genes, 
import signatures.seedgenes.seedlist as sd #get_seed_genes, write_varin_file
import signatures.bfrm.paramfile as bp
import signatures.bfrm.bfrm_run as br
import signatures.bfrm.output_parser as bo
import signatures.plotting.plots as myplt
import signatures.db.gene_annotations as ga
from signatures.stats.Rstats import calc_matrix_correlation, calc_hypergeometric_test, plot_correlation_matrix2
from signatures.plotting.plots import plot_correlation_matrix_py, plot_heatmap_mod2


class pipeline():
    """Driver object for the RNA-seq / microarray signature pipeline.

    Wraps database table creation and loading (signatures.db), annotation
    file parsing, and bookkeeping around BFRM input/output files.
    """

    def __init__(self, dbhost, dbuser, dbpasswd, database):
        # Database helpers: table management (mt) and querying (mq).
        self.mt = dt.tables(dbhost, dbuser, dbpasswd, database)
        self.mq = dq.query(dbhost, dbuser, dbpasswd, database)
        # Table information
        self.tableDescription = 'probeID VARCHAR(100), geneSym VARCHAR(40), sample VARCHAR(40), log_fold FLOAT(20,14), INDEX probe (probeID), INDEX gene (geneSym), INDEX sample (sample)'
        self.tableName = 'rnaseq_prostate_AllTUsv5'

        ######## Parameters ############
        # Pre-processing defaults:
        # percentage of low-variance genes to be filtered out of the analysis
        self.lvar = 5
        # percentage of low-median genes to be filtered out of the analysis
        self.lmed = 5
        # maximum fraction of masked samples tolerated per gene
        self.lmasked = 0.30

    def parse_input_annotfiles(self, annotInputFile):
        """Parse the pipeline annotation file into per-section dicts.

        annotInputFile -- iterable of lines.  Lines starting with '#' are
        comments; lines starting with '@' switch the current section; all
        other lines are 'key=value' entries stored in the section's dict
        ('@Parameters' values are additionally split on ',').

        Returns (expr_file, seed_file, db_file, samples, bfrm, outdirs, param).
        """
        expr_file, seed_file, outdirs, db_file, samples, bfrm, param = {}, {}, {}, {}, {}, {}, {}
        typefile = 'nofile'

        # Section header -> dict receiving that section's key=value pairs.
        section_map = {'@Array expression': expr_file, '@Seed genes': seed_file,
                       '@Database file': db_file, '@Samples': samples,
                       '@Output Directories': outdirs, '@BFRM': bfrm,
                       '@Parameters': param}

        for line in annotInputFile:
            fields = line.strip('\n')

            # Skip blank lines and comments.  (The original indexed
            # fields[0][0], which raised IndexError on empty lines.)
            if not fields or fields[0] == '#':
                continue

            if fields[0] == '@':
                typefile = fields
                continue

            fields = fields.split('=')
            target = section_map[typefile]
            if typefile == '@Parameters':
                target[fields[0]] = fields[1].split(',')
            else:
                target[fields[0]] = fields[1]

        return expr_file, seed_file, db_file, samples, bfrm, outdirs, param

    def read_files_folder(self, folderpath, ext):
        """Return the paths of all files in *folderpath* ending in *ext*."""
        myfiles = []
        for infile in glob.glob(os.path.join(folderpath, '*' + ext)):
            myfiles.append(infile)
        return myfiles

    def process_microarray_files(self, exp_files, tp, protgenes):
        """Parse each expression file into a DB-loadable '<file>_dbfile'.

        tp        -- parser object exposing read_rnaseqexp_file().
        protgenes -- protein-coding gene filter passed through to the parser.
        Returns the list of created db-file paths.
        """
        dbf = []
        for f in exp_files:
            basename = f.split('/')[-1]
            of = f + '_dbfile'
            # The original's two branches called the parser identically;
            # only the log message differed.
            if basename == 'protein_coding_genes.exprmat.txt':
                print('reading protein coding genes samples ' + f)
            else:
                print('reading house samples' + f)
            tp.read_rnaseqexp_file(open(f), open(of, 'w'), protgenes)
            dbf.append(of)

        return dbf

    def create_database(self, dbfiles, doit=False):
        """
        Create self.tableName (if missing) and load each db file into it.
        No-op unless doit is True.
        """
        if doit:
            print('Making tables')
            for dbf in dbfiles:
                if not (self.mt.table_existence(self.tableName)):
                    self.mt.create_table(self.tableName, self.tableDescription)

                self.mt.load_data_table(self.tableName, dbf)

    def initialize_database(self, exp_files, tp, protgenes, doit=False):
        """Parse the expression files and (optionally) load them into the DB."""
        dbfiles = self.process_microarray_files(exp_files, tp, protgenes)
        self.create_database(dbfiles, doit)

    def check_create_dir(self, full_path_name):
        """
        Create a time-stamped output folder (name suffix is the creation
        date).  Returns (full path to the folder, the time-stamp string).
        """
        t = datetime.now()
        stamp = t.strftime("%Y_%m_%d_%H_%M")
        outfolder = full_path_name + stamp
        if not os.path.isdir(outfolder):
            os.mkdir(outfolder)

        return outfolder, stamp

    def move_bfrm_files(self, folderpath, tstampedfolder):
        """
        Move the .txt files created for BFRM into the time-stamped
        output directory.
        """
        outtxtfiles = self.read_files_folder(folderpath, '.txt')
        for f in outtxtfiles:
            print(f)
            shutil.move(f, tstampedfolder)

    def check_bfrm_output(self, bfrmfolder, newFFile=[]):
        """
        Return the paths of the expected BFRM output files (mA.txt,
        mPostPib.txt, mF.txt, mPsi.txt, mVariablesIn.txt, plus an
        optional extra file) that actually exist in *bfrmfolder*.
        """
        if newFFile:
            print(newFFile)
            bfrmof = deque(['mA.txt', 'mPostPib.txt', 'mF.txt', 'mPsi.txt', 'mVariablesIn.txt', newFFile])
        else:
            bfrmof = deque(['mA.txt', 'mPostPib.txt', 'mF.txt', 'mPsi.txt', 'mVariablesIn.txt'])

        files = []
        for f in bfrmof:
            fname = bfrmfolder + f
            print(fname)
            if os.path.isfile(fname):
                files.append(fname)

        return files

    def write_expmat_sdv(self, expMat, genelistind, outfile1, outfile2, spclass0, spclass1=[], spcltest=[]):
        """
        Write an expression matrix ordered by the sample classes
        spclass0, spclass1, spcltest.
        expMat = an expression Array object (common classes); genelistind =
        the indices in expMat of the wanted genes; outfile1 = output file for
        the gene list; outfile2 = output file for the expression matrix.
        """
        self.mq.write_variable_genes_sdv(expMat, genelistind, outfile1, outfile2, spclass0, spclass1, spcltest)

    def calc_corr_compfactor(self, pathactmat, compoundsmat, cmpsamples, samplelist, plotsfolder, corr_param, outfile20):
        """
        Correlate pathway activity against compound sensitivity.
        pathactmat = path activity matrix file name, compoundsmat =
        compounds dump file, cmpsamples = file with the compound data
        sample names.
        """
        # mat1 is the drug matrix, mat2 the activity matrix.
        # Sample names in the compound data set:
        mat1_hd = tl.get_header_mat(open(cmpsamples))
        # Load the compound matrix object and the path activity matrix.
        mat1 = tl.pickleload_matrix_data(open(compoundsmat))
        mat2 = np.loadtxt(pathactmat, dtype=float, delimiter='\t')

        # Rearrange the compound matrix to match the column order of the
        # activity matrix; required before computing correlations.
        odmat1 = tl.order_arrayMat_bysamples(mat1, samplelist, mat1_hd)

        outa = tl.calc_correlation_mats(odmat1.expVal, mat2, odmat1.geneSym, corr_param[0], float(corr_param[1]), float(corr_param[2]), outfile20)
        # Pickle the correlation matrix.
        print('Dumping the correlation array')
        outa.dump(plotsfolder + 'pathcorr_dump.txt')
        # Plot the correlation matrices.
        # FIXME(review): the original passed outdirs['outdir'] here, but no
        # `outdirs` exists in this scope (NameError).  plotsfolder — used on
        # the next line — looks like the intended destination; confirm.
        myplt.write_drugpath_mat(odmat1.expVal, mat2, outa, plotsfolder, odmat1.geneSym)
        myplt.plot_drugvsact(odmat1.expVal, mat2, outa, plotsfolder)



def write_array2file(outfile, H, rownames=None, select_names=None):
    """Write a 2-D numpy array to *outfile* as tab-separated rows.

    outfile      -- writable file-like object.
    H            -- 2-D numpy array.
    rownames     -- optional row labels, prepended to each row.
    select_names -- optional subset of rownames; when given, each row is
                    additionally prefixed with 'T' (name selected) or 'N'.

    (Defaults were mutable lists in the original; None is equivalent here
    since both branches only test truthiness.)
    """
    for i in range(H.shape[0]):
        # Original built ','-joined text then replaced ',' with '\t';
        # joining on '\t' directly is equivalent for numeric values.
        values = '\t'.join(str(v) for v in H[i, :])
        if rownames:
            prefix = str(rownames[i]) + '\t'
            if select_names:
                # Tag rows by membership in the selected-name list.
                flag = 'T' if rownames[i] in select_names else 'N'
                prefix = flag + '\t' + prefix
            outfile.write(prefix + values + '\n')
        else:
            outfile.write(values + '\n')

def write_dict2file(outfile, thisDict):
    """Write a dict to *outfile*, one 'key<TAB>values' line per entry.

    List values are written tab-separated; scalar values keep the
    original trailing tab for backward compatibility with existing
    consumers of these files.

    (Original used dict.iteritems(), which is Python-2-only; .items()
    behaves identically here.  An unused local was also removed.)
    """
    for mykey, myvals in thisDict.items():
        mykey = str(mykey)
        if isinstance(myvals, list):
            outfile.write(mykey + '\t' + '\t'.join(str(v) for v in myvals) + '\n')
        else:
            # NOTE: trailing tab kept to match the historical file format.
            outfile.write(mykey + '\t' + str(myvals) + '\t' + '\n')


def get_correlated_pairs(indicators, noveltus, correlation_matrix, target_genes, comp='both', corr_th=None):
    """Return a dict mapping each TU name to its correlated gene list.

    indicators         -- iterable of column indices (TUs) to inspect.
    noveltus           -- sequence of TU names, indexed by column.
    correlation_matrix -- genes x TUs correlation matrix.
    target_genes       -- numpy array of gene names (matrix rows).
    comp               -- 'greater', 'less', or 'both' (absolute value).
    corr_th            -- correlation threshold.  The original body read an
                          undefined module-level global of this name (a
                          guaranteed NameError); it is now an explicit,
                          backward-compatible keyword parameter.

    Raises ValueError when corr_th is not provided.
    """
    if corr_th is None:
        raise ValueError('corr_th must be provided')

    correlated_pairs = defaultdict()
    for j in indicators:
        thistu = noveltus[j]

        if comp == 'greater':
            mask = correlation_matrix[:, j] > corr_th
        elif comp == 'less':
            mask = correlation_matrix[:, j] < corr_th
        else:
            mask = abs(correlation_matrix[:, j]) > corr_th

        correlated_pairs[thistu] = list(target_genes[mask])

    return correlated_pairs






def get_specific_correlated_pairs(indicators, noveltus, target_genes):
    """
    Map each TU name to its correlated gene list, using the per-TU gene
    selectors stored in the *indicators* dictionary (column index ->
    row selector for target_genes).
    """
    tu_to_genes = defaultdict()
    for column, selector in indicators.items():
        tu_to_genes[noveltus[column]] = list(target_genes[selector])

    return tu_to_genes



def get_corrpairs_filtered_by_thisgene(indicators, noveltus, correlation_matrix, target_genes, gene, comp='both', corr_th=None):
    """Return a dict mapping each TU name to its correlated gene list.

    Identical in behavior to get_correlated_pairs except for the extra
    *gene* argument, which the original body never used; it is kept only
    for interface compatibility.  corr_th was an undefined module-level
    global in the original (a guaranteed NameError); it is now an
    explicit, backward-compatible keyword parameter.

    Raises ValueError when corr_th is not provided.
    """
    if corr_th is None:
        raise ValueError('corr_th must be provided')

    correlated_pairs = defaultdict()
    for j in indicators:
        thistu = noveltus[j]

        if comp == 'greater':
            corr_genes_ind = correlation_matrix[:, j] > corr_th
        elif comp == 'less':
            corr_genes_ind = correlation_matrix[:, j] < corr_th
        else:
            corr_genes_ind = abs(correlation_matrix[:, j]) > corr_th

        correlated_pairs[thistu] = list(target_genes[corr_genes_ind])

    return correlated_pairs


def samples_with_thisgene_expr(expMat, thisgene, threshold, comp):
    """
    Return the column indices of the samples whose expression of
    *thisgene* lies above (comp == "above") or below the given threshold.
    """
    sample_axis = np.array(range(expMat.shape[1]))
    row = expMat[thisgene, :]

    if comp == "above":
        keep = row > threshold
    else:
        keep = row < threshold

    return sample_axis[keep]


def get_translated_pairs(correlated_pairs, tga):
    """Translate each TU's gene list to HUGO nomenclature.

    correlated_pairs -- dict of TU -> list of refFlat gene names.
    tga              -- annotation object exposing translate_refflat2hugo().

    (Original used dict.iteritems(), which no longer exists in Python 3;
    .items() behaves identically here.)
    """
    hugo_correlated_pairs = defaultdict()
    for mykey, myvals in correlated_pairs.items():
        hugo_correlated_pairs[mykey] = tga.translate_refflat2hugo(myvals)

    return hugo_correlated_pairs

def set_gene_list(genelist):
    """Return the unique genes of *genelist* as a deque, preserving
    first-seen order.

    The original tested membership against the growing deque (O(n) per
    lookup, O(n^2) overall); a side set makes each lookup O(1) while the
    returned value is unchanged.
    """
    newgenelist = deque()
    seen = set()
    for g in genelist:
        if g not in seen:
            seen.add(g)
            newgenelist.append(g)

    return newgenelist
    

def create_expression_array_structure(pipe, samples, samplelist, extgenlist, lvarlmed=None, fold_change=0, filterout=True):
    """Build the normalized, variance/median-filtered expression matrix.

    pipe        -- pipeline instance (database access + filter defaults).
    samples     -- dict of sample-list file paths ('normals', 'tumors').
    samplelist  -- samples to load expression values for.
    extgenlist  -- gene list restricting the database query.
    lvarlmed    -- [low-variance %, low-median %, masked fraction]; falls
                   back to the pipe defaults when empty/None.  (Was a
                   mutable default argument, which Python shares across
                   calls — a classic pitfall — now None-guarded.)
    fold_change -- when non-zero and normals are used, tumor indices are
                   computed for the fold-change filter.
    filterout   -- apply the constant/masked-gene filter when True.

    NOTE(review): relies on module-level globals `options` and `tp`
    defined in the __main__ section — confirm before reusing elsewhere.
    """
    pipe.mq.create_connector()

    # Create the expression-matrix object and its sample index.
    expMat_bfrm = pipe.mq.get_exp_mat_2(samplelist, extgenlist, pipe.tableName)
    expMat_bfrm.create_sample_index()

    # log2 + oncomine scaling, or plain log2 transform.
    if options.norm:
        expMat_bfrm.normalization()
    else:
        expMat_bfrm.log2_norm()

    tumor_indices = []
    if options.normals:
        normal_samples = tp.list_of_names(open(samples['normals']))
        expMat_normals = pipe.mq.get_exp_mat_2(normal_samples, extgenlist, pipe.tableName)
        if options.norm:
            # BUGFIX: the original re-normalized expMat_bfrm here instead of
            # the normals matrix (copy-paste error; cf. the symmetric
            # log2_norm() branch below).
            expMat_normals.normalization()
        else:
            expMat_normals.log2_norm()

        median_expr_normals = expMat_normals.calc_median_expression(samplelist)
        # Normalize the expression values by the median of the normals.
        expMat_bfrm.normalize_by_thisexpval(median_expr_normals)

        if fold_change != 0:
            tumor_samples = tp.list_of_names(open(samples['tumors']))
            tumor_indices = expMat_normals.get_thissample_indeces(tumor_samples)

    ######### Pre-process raw data ###############
    # Fall back to the pipeline's default variability/median/masked cuts.
    if not lvarlmed:
        lvarlmed = [pipe.lvar, pipe.lmed, pipe.lmasked]

    # Indices of genes passing the variance/median/masked filters
    # (var >= lvar, med >= lmed, masked count below the cap).
    if filterout:
        genelist = tl.filter_out_constant_and_masked_genes(
            expMat_bfrm, lvarlmed[0], lvarlmed[1],
            int(lvarlmed[2] * expMat_bfrm.expVal.shape[1]), True,
            fold_change, tumor_indices)
    else:
        genelist = np.array(range(expMat_bfrm.expVal.shape[0]))

    # Index + name of the variable genes; then shrink the matrix to them.
    mgenelist, dictmgl = tl.get_variable_genes(expMat_bfrm, genelist)
    expMat_bfrm = tl.get_filtered_expMat(expMat_bfrm, mgenelist, len(samplelist))

    # Rebuild the gene index on the filtered matrix.
    expMat_bfrm.create_gen_index()

    return expMat_bfrm
        


def filterout_notDiffExpGenes(log_fold_change):
    """
    It assumes that the data was already normalized according to the expression of normals samples.
    """
    # NOTE(review): incomplete stub — `tp`, `samples` and `expMat_normals`
    # are not defined in this scope, so calling this raises NameError; the
    # computed tumor_indices is never used or returned, and the
    # log_fold_change parameter is ignored.  TODO: finish or remove.
    tumor_samples=tp.list_of_names(open(samples['tumors']))
    tumor_indices = expMat_normals.get_thissample_indeces(tumor_samples)
    
    


##########



def get_tus_corrsample_dict(correlation_matrix, corr_th, recurrency, comparison):
    """Return a dict of TU column -> [row-index array] for TUs passing
    the correlation threshold in more than *recurrency* rows.

    correlation_matrix -- rows x TUs matrix.
    corr_th            -- correlation threshold (positive).
    recurrency         -- minimum number of passing rows (strict >).
    comparison         -- "less" keeps entries < -corr_th; anything else
                          keeps entries > corr_th.
    """
    if comparison == "less":
        passes = correlation_matrix < -1 * corr_th
    else:
        passes = correlation_matrix > corr_th

    indicators = np.array(range(correlation_matrix.shape[0]))
    tu_inddict = defaultdict(list)

    for j in range(correlation_matrix.shape[1]):
        # Boolean mask used directly (the original compared `== True` and
        # counted via len() of a fancy-indexed array).
        mask = passes[:, j]
        num_corr = int(np.count_nonzero(mask))

        if num_corr > recurrency:
            tu_inddict[j].append(indicators[mask])
            print(num_corr)

    return tu_inddict


def get_gene_transcript_association(correlation_matrix, toptus, outfile, comparison):
    """For every gene (row), select its *toptus* most extreme TUs (columns).

    Sorts each row of the gene x TU correlation matrix and keeps the
    lowest (comparison == "less") or highest (comparison == "greater")
    toptus + 1 columns, writing the selected correlations to *outfile*.

    Returns (geneTU_indicator_matrix, tu_inddict, all_tus):
    geneTU_indicator_matrix -- genes x (toptus+1) matrix of TU indices.
    tu_inddict              -- dict gene row -> list of its selected TUs.
    all_tus                 -- flat list of every selected TU (with repeats).
    """
    if comparison not in ("less", "greater"):
        # The original silently left `thistus` unbound here and crashed
        # with a confusing NameError inside the loop.
        raise ValueError("comparison must be 'less' or 'greater'")

    toptus = toptus + 1  # keep the original inclusive off-by-one behavior
    indicators = np.array(range(correlation_matrix.shape[1]))
    tu_inddict = defaultdict(list)
    all_tus = []

    print('sorting the correlation matrix ')
    index_corr_sort_matrix = np.argsort(correlation_matrix, axis=1)

    geneTU_indicator_matrix = np.zeros((correlation_matrix.shape[0], toptus))
    toptus_correlation_matrix = np.zeros((correlation_matrix.shape[0], toptus))

    for i in range(correlation_matrix.shape[0]):
        if comparison == "less":
            thistus = indicators[index_corr_sort_matrix[i, 0:toptus]]
        else:
            thistus = indicators[index_corr_sort_matrix[i, -toptus:]]

        geneTU_indicator_matrix[i, :] = thistus

        for tu in thistus:
            tu_inddict[i].append(tu)
            all_tus.append(tu)

        # Correlations of the selected TUs with gene i.
        toptus_correlation_matrix[i, :] = correlation_matrix[i, thistus]

    write_array2file(outfile, toptus_correlation_matrix)

    return geneTU_indicator_matrix, tu_inddict, all_tus



def get_gene_transcript_association_threshold(correlation_matrix, threshold, tunames, genenames, target_genes, outfile1, outfile2, comparison):
    """Count, per TU and per gene, the correlations passing *threshold*.

    correlation_matrix -- genes x TUs matrix.
    comparison         -- "less" keeps entries < threshold, "greater"
                          keeps entries > threshold.  (BUGFIX: the
                          original computed `<` in BOTH branches —
                          copy-paste error at the "greater" branch.)
    tunames / genenames / target_genes / outfile1 / outfile2 are unused
    (the write-out was commented in the original) and kept only for
    interface compatibility.

    Returns (toptus, topgenes): per-TU column counts and per-gene row
    counts, each reshaped to an (n, 1) array.
    """
    if comparison == "less":
        passes = correlation_matrix < threshold
    elif comparison == "greater":
        passes = correlation_matrix > threshold
    else:
        raise ValueError("comparison must be 'less' or 'greater'")

    # frequency_matrix[i, j] == 1.0 exactly when gene i / TU j passes the
    # threshold — equivalent to the original's per-row += 1 loop, since
    # each cell was incremented at most once.
    frequency_matrix = passes.astype(float)

    toptus = np.sum(frequency_matrix, axis=0)
    topgenes = np.sum(frequency_matrix, axis=1)

    toptus = np.reshape(toptus, (len(toptus), -1))
    topgenes = np.reshape(topgenes, (len(topgenes), -1))

    return toptus, topgenes
    
    
def get_common_TUsbyIntersection(matrix_of_Tus):
    """Return the set of TUs present in every row of *matrix_of_Tus*.

    If any intermediate intersection is empty, the result is empty.
    (Removed leftover debug `print` + `sys.exit(0)` which aborted the
    whole process on the second row, so the intersection was never
    actually computed; also returns an empty set for an empty matrix
    instead of raising NameError.)
    """
    commonTUs = set()
    for i in range(matrix_of_Tus.shape[0]):
        if i == 0:
            commonTUs = set(matrix_of_Tus[i, :])
        else:
            commonTUs.intersection_update(set(matrix_of_Tus[i, :]))

    return commonTUs



def get_popular_TUsbyMode(All_Corr_TUs, tu_inddict, min_recurrency, outfile):
    """Find the most popular TUs among the per-gene top-correlated sets.

    All_Corr_TUs   -- flat list of TU ids, one entry per (gene, TU) hit.
    tu_inddict     -- dict gene index -> list of that gene's top TUs.
    min_recurrency -- proposed minimum count; overridden below by the
                      99.5th percentile of the observed counts (behavior
                      kept from the original).
    outfile        -- binary file object the count dict is pickled to.

    Returns (recurrent_TUs, counts sorted descending, TU -> gene-list
    association, set of all associated genes).
    """
    print("Get the frequency info")

    # Count occurrences of each TU.  (Replaces the original O(n^2) numpy
    # filter-and-shrink loop with a single counting pass.)
    allTudInd = defaultdict(int)
    for tu in All_Corr_TUs:
        allTudInd[tu] += 1

    pickle.dump(allTudInd, outfile)

    # If the minimum observed recurrency is greater than the proposed one,
    # the observed statistics win: the effective cutoff is the 99.5th
    # percentile of the counts.
    print("Minimum recurrency to use is the mean ")
    print(np.mean(list(allTudInd.values())))

    min_recurrency = sp.stats.scoreatpercentile(list(allTudInd.values()), 99.5)
    print(min_recurrency)

    allTudInd_sorted = sorted(allTudInd.items(), key=operator.itemgetter(1), reverse=True)

    print("The minimum recurrency evaluated is ")
    print(min_recurrency)

    recurrent_TUs = defaultdict()
    associationTUgen = defaultdict(list)
    setofGenes = set()

    # Walk counts from most to least frequent; stop at the cutoff.
    for tu, tu_recurr in allTudInd_sorted:
        if tu_recurr < min_recurrency:
            break
        recurrent_TUs[tu] = tu_recurr

        # Record which genes selected this TU in their top set.
        for gen, tuslist in tu_inddict.items():
            if tu in list(tuslist):
                associationTUgen[tu].append(gen)
                setofGenes.add(gen)

    return recurrent_TUs, allTudInd_sorted, associationTUgen, setofGenes

def get_common_targets(geneset1, geneset2):
    """Return the genes present in both gene sets.

    geneset1 must be a ``set``; geneset2 may be any iterable accepted by
    ``set.intersection``. The result is a new ``set`` of the shared genes.
    """
    shared = geneset1.intersection(geneset2)
    return shared



if __name__ == '__main__':
    # Command-line driver: builds (or reloads pickled) expression matrices
    # for coding genes and novel transcriptional units (TUs), computes a
    # gene-by-TU correlation matrix, then analyses TU recurrency and
    # EZH2-target enrichment, writing result tables and heatmap plots.
    
    optionparser = OptionParser("usage: %prog [options] ")
    optionparser.add_option("-f", "--annotFile", dest="annotFile",
                            help="annotation file for all files to use")
    optionparser.add_option("-d", action="store_true", dest="dbtrue",
                            help="create a database. Bool")
    optionparser.add_option("-u", action="store_true", dest="allgenes",
                            help="use genes in all uscs. Default use genes in universe file. Default universe. Bool")
    optionparser.add_option("-n", action="store_true", dest="norm",
                            help="normalize log2 and scale expression vals. Default only log2.  Bool")
    optionparser.add_option("-m", action="store_true", dest="normals",
                            help="normalize by the expression of normal samples. Default only log2.  Bool")
    optionparser.add_option("-s", action="store_true", dest="seed",
                            help="use all genes for bfrm. Default seed genes. Bool")
    
    optionparser.add_option("-R", action="store_true", dest="calccorr", 
                            help="Boolean to calc correlation. Default false")
    optionparser.add_option("-r", "--corr_th", dest="corr_th", type="float",
                            help="Defines the correlation threshold to use")
    optionparser.add_option("-c", "--comp", dest="comp", type="string",
                            help="Defines the type of comparison to make, two possible values less or greater")
    optionparser.add_option("-x", "--recurrency", dest="recurrency", type="float",
                            help="Defines the type of comparison to make, two possible values less or greater")

    optionparser.add_option("-g", "--genes", dest="usegene",
                            help="Values EZH2 or all to use EZH2 expression value, default do not use it")



    
    (options, args) = optionparser.parse_args()
       
    # start the pipeline. 
    # read input parameters
    # NOTE(review): database credentials are hard-coded here; consider
    # moving them to a config file or environment variables.
    pipe = pipeline("localhost", "oabalbin", "oscar", "rnaseq_signatures")
    # Set values for BFRM
    # Get the option from the input_param file
    expr_folder, seed_file, db_file, samples, bfrm, outdirs, run_param = pipe.parse_input_annotfiles(open(options.annotFile))
    
    # Create experiment folder tree and log file. 
    odir = cm.directoryTree()
    tstampedfolder, tstamp = odir.create_tstampdir(outdirs['outdir'])
    #' /bfrm, /drugsen, /drugsen/plots,/sdv' 
    
    
    dirs = odir.make_directory_tree()
    logfile = cm.logFile(tstampedfolder+'/'+tstamp+'_readme.txt') 
    
    ####### output files

    '''
    outfile1 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/expMat_bfrm_coding_allgenes_hotair.txt'#dirs[3]+'/'+tstamp+'_expMat_bfrm_coding.txt'   
    outfile2 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/expMat_bfrm_noveltu_allgenes_hotair.txt'#dirs[3]+'/'+tstamp+'_expMat_bfrm_noveltu.txt'   
    outfile3 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test_allgenes_hotair.txt'#dirs[3]+'/'+tstamp+'_correlation_test.txt'
    outfile4 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test2_allgenes_hotair.txt'#dirs[3]+'/'+tstamp+'_correlation_test2.txt'
    outfile5 = dirs[3]+'/'+tstamp+'_toptus_correlation_matrix_hotair.txt'
    outfile6 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/frequency_dump_test_hotair.txt' #dirs[3]+'/'+tstamp+'_frequency_toptus_correlation_matrix.txt'
    outfile7 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test3_allgenes_hotair.txt'
    
    
    outfile1 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/expMat_bfrm_coding_allgenes_hotair_tissue.txt'#dirs[3]+'/'+tstamp+'_expMat_bfrm_coding.txt'   
    outfile2 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/expMat_bfrm_noveltu_allgenes_hotair_tissue.txt'#dirs[3]+'/'+tstamp+'_expMat_bfrm_noveltu.txt'   
    outfile3 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test_allgenes_hotair_tissue.txt'#dirs[3]+'/'+tstamp+'_correlation_test.txt'
    outfile4 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test2_allgenes_hotair_tissue.txt'#dirs[3]+'/'+tstamp+'_correlation_test2.txt'
    outfile5 = dirs[3]+'/'+tstamp+'_toptus_correlation_matrix_hotair_tissue.txt'
    outfile6 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/frequency_dump_test_hotair_tissue.txt' #dirs[3]+'/'+tstamp+'_frequency_toptus_correlation_matrix.txt'
    outfile7 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test3_allgenes_hotair_tissue.txt' 
    '''
    '''
    outfile1 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/expMat_bfrm_coding_allgenes_notmets.txt' #dirs[3]+'/'+tstamp+'_expMat_bfrm_coding.txt'   
    outfile2 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/expMat_bfrm_noveltu_allgenes_notmets.txt' #dirs[3]+'/'+tstamp+'_expMat_bfrm_noveltu.txt'   
    outfile3 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test_allgenes_notmets.txt' #dirs[3]+'/'+tstamp+'_correlation_test.txt'
    outfile4 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test2_allgenes_notmets.txt' #dirs[3]+'/'+tstamp+'_correlation_test2.txt'
    outfile5 = dirs[3]+'/'+tstamp+'_toptus_correlation_matrix.txt'
    outfile5b = dirs[3]+'/'+tstamp+'_toptus_frequency.txt'
    outfile5c = dirs[3]+'/'+tstamp+'_topgenes_frequency.txt'
    outfile6 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/frequency_dump_test_notmets.txt' #dirs[3]+'/'+tstamp+'_frequency_toptus_correlation_matrix.txt'
    outfile7 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test3_allgenes_notmets.txt'
    '''
    '''
    outfile1 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/expMat_bfrm_coding_allgenes2.txt' #dirs[3]+'/'+tstamp+'_expMat_bfrm_coding_onlygenes.txt'   
    outfile2 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/expMat_bfrm_noveltu_allgenes_onlygenes.txt' #dirs[3]+'/'+tstamp+'_expMat_bfrm_noveltu_onlygenes.txt'   
    outfile3 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test_allgenes_onlygenes.txt' #dirs[3]+'/'+tstamp+'_correlation_test_onlygenes.txt'
    outfile4 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test2_allgenes_onlygenes.txt' #dirs[3]+'/'+tstamp+'_correlation_test2_onlygenes.txt'
    outfile5 = dirs[3]+'/'+tstamp+'_toptus_correlation_matrix.txt'
    outfile5b = dirs[3]+'/'+tstamp+'_toptus_frequency.txt'
    outfile5c = dirs[3]+'/'+tstamp+'_topgenes_frequency.txt'
    outfile6 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/frequency_dump_test_onlygenes.txt' #dirs[3]+'/'+tstamp+'_frequency_toptus_correlation_matrix.txt'
    outfile7 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test3_allgenes_onlygenes.txt'
    outfile20 = dirs[1]+tstamp+'permutationsFile.txt'          # It tells were is the permutation file used for the drug permutation analysis
    '''

    # Active output-file set (the triple-quoted blocks above are disabled
    # alternatives kept from earlier experiment configurations).
    outfile1 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/allgenes_v18/expMat_bfrm_coding_allgenes.txt' #dirs[3]+'/'+tstamp+'_expMat_bfrm_coding.txt'   
    outfile2 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/allgenes_v18/expMat_bfrm_noveltu_allgenes.txt' #dirs[3]+'/'+tstamp+'_expMat_bfrm_noveltu.txt'   
    outfile3 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/allgenes_v18/correlation_test_allgenes.txt' #dirs[3]+'/'+tstamp+'_correlation_test.txt'
    outfile4 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/allgenes_v18/correlation_test2_allgenes.txt' #dirs[3]+'/'+tstamp+'_correlation_test2.txt'
    outfile5 = dirs[3]+'/'+tstamp+'_toptus_correlation_matrix.txt'
    outfile5b = dirs[3]+'/'+tstamp+'_toptus_frequency.txt'
    outfile5c = dirs[3]+'/'+tstamp+'_topgenes_frequency.txt'
    outfile6 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/frequency_dump_test.txt' #dirs[3]+'/'+tstamp+'_frequency_toptus_correlation_matrix.txt'
    outfile7 = '/exds/users/oabalbin/projects/rnaseq_data/noveltu_expression_data/test/correlation_test3_allgenes.txt'



    
    ######### Get raw data  ############
    # Get the database files
    #read folder for expression profiles
    exp_files = pipe.read_files_folder(expr_folder['expdir'],'.txt')

    tm = pm.parser_rnaseq()
    tp = pmd.parser()
    pipe.mt.create_connector()
    gtp = gpm.geneparser()
    tga = ga.gene_annotation("localhost", "oabalbin", "oscar", "gene_annotation")
    protgenes=False
    ######## Create database
    if options.dbtrue:
        pipe.initialize_database(exp_files,tm, protgenes, options.dbtrue)
    # NOTE(review): this sys.exit(0) is NOT inside the `if options.dbtrue:`
    # block above, so the script always terminates here and everything
    # below is unreachable. Confirm whether it should be indented one
    # level (exit only after creating the database) or removed.
    sys.exit(0)
        
    # list of samples and all potential genes to be included in the study based 
    # on a pre-compiled gene list. 
    #Note: if an unbiased initial analysis wanted, then you need to run using the ucsc
    # list of all ucsc potential genes to include in study
    #samplelist = tp.list_of_names_in_line(open(samples['samples']))
    samplelist = tp.list_of_names(open(samples['samples']))
    
        
    ######### Preprocess to the expMatrix regarding the samples to be included to run BFRM
    if options.allgenes:
        seedgenes = set_gene_list(tp.list_of_names(open(seed_file['allgene'])))  
        # Translate from Hugo to entrex geneNames 
        #seedgenes = set_gene_list(tga.translate_hugo2refflat(seedgenes))
    else:
        # Get seed genes    
        seedgenes = set_gene_list(tp.list_of_names(open(seed_file['seedfile'])))
        # Translate from Hugo to entrex geneNames 
        seedgenes = set_gene_list(tga.translate_hugo2refflat(seedgenes))
    
    # EZH2 targets
    EZH2_targets = set_gene_list(tp.list_of_names(open(seed_file['targets'])))
    EZH2_targets = set_gene_list(tga.translate_hugo2refflat(EZH2_targets))
    
    # Novel Transcriptional units names
    extgenlist = tp.list_of_names(open(seed_file['univfile']))
    

    print len(extgenlist)
    print len(seedgenes)
    
    
    if options.calccorr:
        # Build the expression matrices from scratch and compute the
        # correlation matrix; otherwise the pickled results are reloaded
        # in the else-branch below.
        lvarlmed=[25,25,pipe.lmasked]
        lvarlmed_hot=[0,0,pipe.lmasked]
        fold_change=0.0
        emptyparam=[]
        expMat_bfrm_coding = create_expression_array_structure(pipe,samples,samplelist, seedgenes, lvarlmed, fold_change,True)        
        expMat_bfrm_noveltu = create_expression_array_structure(pipe,samples,samplelist, extgenlist, lvarlmed, fold_change,True)
        
        # Dump the Arrays
        pickle.dump(expMat_bfrm_coding,open(outfile1,'w'))
        pickle.dump(expMat_bfrm_noveltu, open(outfile2,'w'))
        
        #expMat_bfrm_coding = pickle.load(open(outfile1))
        #expMat_bfrm_noveltu = pickle.load(open(outfile2))
        
        print expMat_bfrm_coding.expVal.shape
        print expMat_bfrm_noveltu.expVal.shape
        
        #sys.exit(0)
        
        if options.usegene=="EZH2":
            # NM_152998 is presumably EZH2's RefSeq accession -- TODO confirm.
            # Correlate only over samples where EZH2 expression exceeds the
            # threshold.
            thisgene = expMat_bfrm_coding.genInd["NM_152998"]
            threshold = 0.0
            comp="above"
            exh2_indicators = samples_with_thisgene_expr(expMat_bfrm_coding.expVal, thisgene, threshold, comp)
                        
            corrtype = 'Sper'
            correlation_matrix = calc_matrix_correlation(corrtype, np.transpose(expMat_bfrm_coding.expVal[:,exh2_indicators].filled(fill_value=np.nan)), True, \
                                                         np.transpose(expMat_bfrm_noveltu.expVal[:,exh2_indicators].filled(fill_value=np.nan)) )
        
        elif options.usegene=="all":
            corrtype = 'Sper'
            print 'Correlation among the coding genes using python function'
            correlation_matrix = tl.calc_autocorrelation_matrix( expMat_bfrm_coding.expVal.filled(fill_value=np.nan), \
                                                            expMat_bfrm_coding.expVal.filled(fill_value=np.nan), corrtype)

          
        else:          
            corrtype = 'Sper'
            print 'Correlation among the coding genes and TUs using python function '
            correlation_matrix = tl.calc_correlation_matrix( expMat_bfrm_coding.expVal.filled(fill_value=np.nan), \
                                                            expMat_bfrm_noveltu.expVal.filled(fill_value=np.nan), corrtype)
            '''
            correlation_matrix = calc_matrix_correlation(corrtype, np.transpose(expMat_bfrm_coding.expVal.filled(fill_value=np.nan)), True, \
                                                         np.transpose(expMat_bfrm_noveltu.expVal.filled(fill_value=np.nan)) )
            '''
        
        print 'Done with the correlation'
        print correlation_matrix.shape
        correlation_matrix.dump(outfile4)
        pickle.dump(correlation_matrix,open(outfile3,'w'))
        
    else:
        # Reload the previously computed matrices from disk.
        expMat_bfrm_coding = pickle.load(open(outfile1))
        expMat_bfrm_noveltu = pickle.load(open(outfile2))
        correlation_matrix = np.load(outfile4)


    #sys.exit(0)
    
    
    print 'Starting Analysis of correlation '
            
    corr_th = options.corr_th
    target_genes = np.array(expMat_bfrm_coding.geneSym)
    noveltus = np.array(expMat_bfrm_noveltu.geneSym)
    # comparison can be one of three values: less, greater or both
    comparison = options.comp
    hugo_names = tga.translate_refflat2hugo(target_genes)
    hugo_names_arr = np.array(hugo_names)
    
    # `median` selects which analysis branch below runs ("median", "group",
    # or anything else for the targets-enrichment analysis). It is
    # hard-coded here rather than taken from the command line.
    median="other" #"other"
    toptus=25
    #outfile7 = open(outfile7,'w')
    #outfile7.write("geneName1"+'\t'+"geneName"+'\t'+",".join(expMat_bfrm_noveltu.geneSym).replace(',','\t')+'\n')
    #write_array2file(outfile7, correlation_matrix, expMat_bfrm_coding.geneSym, EZH2_targets)
    
    #sys.exit(0)

    if median=="median":
        # runs an analysis using the median as the statistics
        print "Making an analysis using the median correlation "
        median_correlation = np.median(correlation_matrix,axis=0)
        ind_of_median = abs(median_correlation) > corr_th
        indicators = np.array(range(len(ind_of_median)))
        indicators = indicators[ind_of_median]
                
        # Find the Genes-tus pairs with correlation > corr_th
        correlated_pairs = get_correlated_pairs(indicators, noveltus, correlation_matrix, hugo_names_arr, comparison)
        #hugo_correlated_pairs = get_translated_pairs(correlated_pairs, tga)
        # First: what are the TUs that have a median correlation > than corr_th
        noveltus_upcorrth = noveltus[ind_of_median]
        new_correlation_matrix = correlation_matrix[:,ind_of_median]
    
    elif median=="group":
        # run an analysis using the group of TUs with correlation greater than threshold on a number of genes
        print "Making an analysis using a group correlation "
        recurrency = np.ceil(options.recurrency*correlation_matrix.shape[0])
        
        tus_indicators_dict = get_tus_corrsample_dict(correlation_matrix, corr_th, recurrency, comparison)        
        correlated_pairs = get_specific_correlated_pairs(tus_indicators_dict, noveltus, hugo_names_arr)
        
        noveltus_upcorrth = noveltus[tus_indicators_dict.keys()]
        new_correlation_matrix = correlation_matrix[:,tus_indicators_dict.keys()]

    else:
        # Run an analysis usig the target enrichment analysis 
        print "Making an analysis using the targets enrichment analysis "
        recurrency = np.ceil(options.recurrency*correlation_matrix.shape[0])
        print "Propossed recurrency "
        print recurrency
        # get toptus. sorting TUs for genes and picking the ones with mas negative correlation
        # geneTU_indicator_matrix= matrix of toptus by gene
        # tu_inddict= dictionary gen - toptus
        print 'sort tus '
        # NOTE(review): this overrides the -c/--comp value from the command
        # line for this branch -- confirm that is intended.
        comparison="less"
        geneTU_indicator_matrix, tu_inddict, All_Corr_TUs = get_gene_transcript_association(correlation_matrix, toptus, open(outfile5,'w'), 
                                                                                            comparison)
        
        print 'TUs Frequency based on threshold '
        threshold= -0.7
        
        EZH2_targets_in_experiment = get_common_targets(set(EZH2_targets),set(target_genes))
        
        # Obtaining list of TUs negatively correlated with genes   
        # Frequent_tusL, frequent_genesL = It is a column array of the TUs and Genes 
        frequent_tusL, frequent_genesL = get_gene_transcript_association_threshold(correlation_matrix, threshold, noveltus, target_genes, \
                                                                                 EZH2_targets_in_experiment, open(outfile5b,'w'),\
                                                                                 open(outfile5c,'w'), comparison)
        
        # Obtaining list of TUs positively correlated with genes
        threshold= 0.7
        comparison="greater"
        frequent_tusG, frequent_genesG = get_gene_transcript_association_threshold(correlation_matrix, threshold, noveltus, target_genes, \
                                                                                 EZH2_targets_in_experiment, open(outfile5b,'w'),\
                                                                                 open(outfile5c,'w'), comparison)
        
        # A matrix of TUs indicators for each gene, and dictionari of TU[ind]=TUs        
        geneTU_indicator_matrixG, tu_inddictG, All_Corr_TUsG = get_gene_transcript_association(correlation_matrix, toptus, open(outfile5,'w'), 
                                                                                            comparison)
        
        print frequent_tusL
        print frequent_tusG

        # NOTE(review): this unconditional sys.exit(0) cuts the enrichment
        # branch short -- every statement after it in this else-branch
        # (recurrency analysis, hypergeometric test setup) is unreachable.
        # Looks like a debugging checkpoint; confirm before removing.
        sys.exit(0)

        
        
        #intersection_tusLG = set(list(frequent_tusL)).intersection(set(list(frequent_tusG))) 
        #print intersection_tusLG
        
        #write_array2file(outfile, H, rownames=[], select_names=[])
        
        #sys.exit(0)
        
        #get_common_TUsbyIntersection(geneTU_indicator_matrix)
        # Get recurrent TUs
        # recurrent_TUs=dictionary tu-recurrecny for all tus with recurrency > min, allTudInd_sorted=list all pairs tu-recurr
        # associationTUgen_dict = dictionary tu- gen in which tus is found, setofGenes= genes that commonto recurrent TUs.
        print 'get most popular TUs '
        recurrent_TUs, allTudInd_sorted, associationTUgen_dict, setofGenes = get_popular_TUsbyMode(All_Corr_TUs,tu_inddict, \
                                                                                                   recurrency, open(outfile6,'w'))
        
        print 'get specific correlated pairs tus '
        correlated_pairs = get_specific_correlated_pairs(associationTUgen_dict, noveltus, target_genes)
        
        noveltus_upcorrth = noveltus[recurrent_TUs.keys()]
        
        print ' Make enrichement analysis '
        glist = np.array(list(setofGenes))
        print len(setofGenes),len(recurrent_TUs.keys())
        
        hugo_names = list(target_genes[glist])
        
        # Create the new correlation matrix to export
        new_correlation_matrix_draft = np.copy(correlation_matrix)
                
        # Blank out (NaN) the correlations of each recurrent TU against the
        # genes it was NOT associated with, so only supported pairs remain.
        for tu, genes in associationTUgen_dict.iteritems():
            thistu_genes = set(genes)
            nottu_genes = list(setofGenes.difference(thistu_genes))
            new_correlation_matrix_draft[nottu_genes,tu]=np.nan
        
        new_correlation_matrix_a = new_correlation_matrix_draft[:,recurrent_TUs.keys()]
        print new_correlation_matrix_a.shape
        new_correlation_matrix = new_correlation_matrix_a[glist,:]
        print new_correlation_matrix.shape
        
        
        '''        
        new_correlation_matrix_a = correlation_matrix[:,recurrent_TUs.keys()]
        print new_correlation_matrix_a.shape
        new_correlation_matrix = new_correlation_matrix_a[glist,:]
        print new_correlation_matrix.shape
        '''

        ###### Making a hypergeometric test
        # Make an hypergeometric test        
        
        EZH2_targets_in_experiment = get_common_targets(set(EZH2_targets),set(target_genes))
        # parameters for hypergeometric test
        print len(setofGenes), len(target_genes), len(EZH2_targets)
        
        drawn_EZH2_targets= len(get_common_targets( set(list(target_genes[list(setofGenes)])), set(EZH2_targets_in_experiment) ))
        total_EZH2_targets= len(EZH2_targets_in_experiment)
        total_non_targets= len(target_genes) - drawn_EZH2_targets
        total_genes_correlated = len(setofGenes)
        # perform the test
        print drawn_EZH2_targets, total_EZH2_targets, total_non_targets, total_genes_correlated
        FDR = 0.05
        
        '''
        pvalue_hypertest, qvalue_hypertest = calc_hypergeometric_test(drawn_EZH2_targets, total_EZH2_targets, total_genes_correlated,total_non_targets, FDR)
        print "The probability of enrichment and the qvalue with and FDR of 0.05 is "
        print pvalue_hypertest, qvalue_hypertest
        '''


    #sys.exit(0)
    ################ Writing the files:
    outfile8 = dirs[3]+'/'+tstamp+'_correlated_pairs.txt'
    write_dict2file(open(outfile8,'w'),correlated_pairs)
    #outfile9 = dirs[3]+'/'+tstamp+'_hugo_correlated_pairs.txt'
    #write_dict2file(open(outfile9,'w'),hugo_correlated_pairs)
    
    outfile10 = open(dirs[3]+'/'+tstamp+'_correlation_matrix.txt','w')
    outfile10.write('geneName'+'\t'+",".join(list(noveltus_upcorrth)).replace(',','\t')+'\n')
    write_array2file(outfile10,new_correlation_matrix,hugo_names,list(EZH2_targets_in_experiment))
            
    outfile12 = dirs[3]+'/'+tstamp+'_plot_correlation2_best_'+str(comparison)+'.png'
    plot_heatmap_mod2(outfile12, new_correlation_matrix, hugo_names_arr, noveltus_upcorrth)
    
    #srt = np.argsort(median_correlation[ind_of_median])
    #plot_correlation_matrix_py(np.transpose(new_correlation_matrix[:,srt]))
    
    print 'Plotting correlation matrices 5'
    outfile11 = dirs[3]+'/'+tstamp+'_plot_correlation_best_'+str(comparison)+'.png'
    #plot_correlation_matrix(new_correlation_matrix, outfile11, dirs[3]+'/'+tstamp+'_correlation_matrix.txt')
    #print new_correlation_matrix.shape
    #plot_correlation_matrix2(new_correlation_matrix, outfile11, dirs[3]+'/'+tstamp+'_correlation_matrix.txt')
   
    sys.exit(0)
    
    
  
    
