'''
Created on Jun 25, 2011

@author: oabalbin

This script takes a list of VCFs and annotated positions, and
outputs a list of consensus SNVs.
The consensus SNVs are obtained with a consensus function:
democratic majority (as of 06-25-11).

'''

import sys
import numpy as np
import array
from optparse import OptionParser
from collections import defaultdict,deque #, OrderedDict
from bx.intervals.intersection import Interval, IntervalTree

from exome.vcftools.vcf import *
import exome.vcftools.SNP_positions as snppos 

class Error(Exception):
    """Base class for all exceptions raised by this module."""

class SnvError(Error):
    """Module error carrying the offending value that triggered it."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # '%r' formatting is equivalent to repr(self.value).
        return '%r' % (self.value,)

class Gene:
    """Lightweight interval record: a [start, end] span plus a payload value."""

    def __init__(self, start, end, value):
        self.start, self.end, self.value = start, end, value


class SequencesTree:
    """Per-chromosome container of SNV positions.

    genome maps each chromosome name ('chr1'..'chr22', 'chrX', 'chrY') to a
    compact array.array('L') of unsigned positions.

    Bug fixes: the original passed an array *instance* to defaultdict (the
    default_factory must be callable, so construction raised TypeError) and
    listed chr6-chr12 twice in the chromosome list.
    """

    # Deduplicated chromosome list (the original repeated chr6-chr12).
    CHROMOSOMES = ['chr%d' % n for n in range(1, 23)] + ['chrX', 'chrY']

    def __init__(self):
        self.genome = {}
        for chrom in self.CHROMOSOMES:
            # One unsigned-long array of positions per chromosome.
            self.genome[chrom] = array.array('L')

    def __getitem__(self, chrom):
        """Allow tree[chrom] as shorthand for tree.genome[chrom]."""
        return self.genome[chrom]


def actionable_genes(gene_list):
    '''
    Build an interval tree over the given Gene records so positions can be
    queried quickly.

    gene_list -- iterable of Gene objects (start, end, value payload).
    Returns a bx IntervalTree of Interval(start, end, value) entries.
    '''
    tree = IntervalTree()
    for record in gene_list:
        interval = Interval(record.start, record.end, value=record.value)
        tree.insert_interval(interval)
    return tree
        
        
def read_gene_list(ifile):
    '''
    Read a bed file with fields: chr, start, end, name, mark, strand.

    ifile -- path to the bed file.
    Returns a deque of Gene objects; each Gene carries the start/end span
    plus a value dict with 'chr', 'annot' (the name column) and 'strand'.
    '''
    genes = deque()
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(ifile) as fh:
        for line in fh:
            fields = line.strip('\n').split('\t')
            start, end = int(fields[1]), int(fields[2])
            # NOTE(review): the docstring says strand is column 6
            # (fields[5]); the code reads fields[4] ('mark') -- confirm.
            value = {'chr': fields[0], 'annot': fields[3], 'strand': fields[4]}
            genes.append(Gene(start, end, value))

    return genes
    
def mark_actionable_genes(ifile,snp_list):
    '''
    '''
    gene_list = read_gene_list(ifile)
    act_genes = actionable_genes(gene_list)
    act_snps = defaultdict()
    
    for snp in snp_list:
        loc = snp.split('@')
        #print snp, loc
        chr,pos=loc[0],int(loc[1].split('|')[0])
        chr,pos=loc[0],int(loc[1])
        genes = act_genes.find(pos,pos)
        
        for g in genes:
            if chr == g.value['chr']:
                act_snps[snp] = g.value['annot']
                print snp,act_snps[snp], g.value['annot']
    
    return act_snps
    
    
def read_file(ivcf,pfname):
    """Parse the VCF at ivcf, pickle it to pfname, and return its snv keys."""
    parsed = VCF()
    parsed.readFromFile(ivcf)
    snv_keys = parsed.keys()
    parsed.pickle(pfname)
    return snv_keys

def load_file(idump):
    """Restore and return a VCF object previously pickled to idump."""
    restored = VCF()
    restored.readFromPickle(idump)
    return restored
    

def read_file_vcf(ivcf):
    '''
    Read a VCF file and return a dict mapping "chrom@pos" location keys to
    the list of [chrom, ref, alt, qual] records seen at that location.

    ivcf -- path of the VCF file to read.
    '''
    snps_dict = defaultdict(list)
    # 'with' guarantees the handle is closed (the original leaked it);
    # the unused line counter is gone too.
    with open(ivcf) as fh:
        for line in fh:
            # Skip header/meta lines.
            if line.startswith('#'):
                continue
            fields = line.strip('\n').split('\t')
            loc = fields[0] + '@' + fields[1]
            # VCF columns: 0=CHROM, 3=REF, 4=ALT, 5=QUAL
            snps_dict[loc].append([fields[0], fields[3], fields[4], fields[5]])

    # Portable replacement for the Python-2-only "print >> sys.stderr".
    sys.stderr.write("Reading vcf file is done\n")

    return snps_dict
    
def read_vcf_file_db(ivcf):
    '''
    Load SNV positions from a VCF file into a per-chromosome SequencesTree.

    ivcf -- path of the VCF file.
    Returns a SequencesTree whose genome[chrom] sequence collects every
    position (VCF column 2) seen for that chromosome.
    '''
    snps_tree = SequencesTree()
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(ivcf) as fh:
        for line in fh:
            if line.startswith('#'):
                continue
            fields = line.strip('\n').split('\t')
            chrom, pos = fields[0], int(fields[1])
            # Index the underlying genome dict explicitly; the original
            # subscripted the tree object itself, which is not supported.
            snps_tree.genome[chrom].append(pos)

    # Portable replacement for the Python-2-only "print >> sys.stderr".
    sys.stderr.write("Reading vcf file is done\n")

    return snps_tree

    

def intersection_snvs(union_snvs, list_snvs_sets):
    '''
    Build the membership ("consensus") matrix for a group of snv call sets.

    union_snvs     -- iterable of snv keys (the union over all call sets)
    list_snvs_sets -- list of per-method snv collections
    Returns (pm, cm): pm maps snv key -> row index, and cm[i, j] is 1.0
    when snv i appears in call set j, otherwise 0.0.
    '''
    pm = defaultdict()
    cm = np.zeros((len(union_snvs), len(list_snvs_sets)))

    for row, snv in enumerate(union_snvs):
        pm[snv] = row
        for col, call_set in enumerate(list_snvs_sets):
            if snv in call_set:
                cm[row, col] = 1
    return pm, cm


def union_snvs_dict(union_snvs):
    '''
    Assign a stable integer index to each snv key.

    Returns a dict-like mapping snv -> row index, used later to address
    rows of the various matrices.
    '''
    pm = defaultdict()
    pm.update((snv, idx) for idx, snv in enumerate(union_snvs))
    return pm

def isec_snvs_annot(dict_snv_annot, dict_snvs_query):
    '''
    Intersect each query file's snv list with each annotation database.

    dict_snv_annot  -- dict: annotation db name -> snv collection
    dict_snvs_query -- dict: query file name -> snv collection
    Returns pm where pm[file][db] is the set of snvs common to both.
    '''
    pm = defaultdict(dict)

    # .items() works on both Python 2 and 3 (iteritems() is 2-only).
    for ifile, snv_query in dict_snvs_query.items():
        query_set = set(snv_query)  # hoisted: invariant over the inner loop
        for annot, snv_annot in dict_snv_annot.items():
            pm[ifile][annot] = query_set.intersection(set(snv_annot))

    return pm

def isec_snvs_annot_union(snvs_union, snvs_annot_dbs):
    '''
    For each annotation database, collect the query snvs present in it.

    snvs_union     -- union of all query snvs
    snvs_annot_dbs -- dict: db name (e.g. 'dbSNP') -> snv collection
    Returns pm where pm[db] is the set of query snvs also found in db.
    '''
    pm = defaultdict()
    union_set = set(snvs_union)  # built once instead of per database

    # .items() works on both Python 2 and 3 (iteritems() is 2-only).
    for db, snv_db in snvs_annot_dbs.items():
        pm[db] = union_set.intersection(set(snv_db))

    return pm


def base_type(base):
    '''
    Classify a nucleotide: 'pu' (purine) for A/G, 'pi' (pyrimidine) for
    C/T, and None for any non-standard base.
    '''
    classification = {'A': 'pu', 'G': 'pu', 'C': 'pi', 'T': 'pi'}
    # dict.get returns None on a missing key, matching the original
    # try/except KeyError behavior.
    return classification.get(base)


def titv_mutation(ref_base, alt_base):
    '''
    Classify a substitution: returns 1 for a transition, 0 for a
    transversion, and None when either base is non-standard.

    Transitions stay within a base class (purine<->purine A/G, or
    pyrimidine<->pyrimidine C/T); transversions cross classes.  The base
    classification is inlined here (same mapping base_type uses).
    '''
    classes = {'A': 'pu', 'G': 'pu', 'C': 'pi', 'T': 'pi'}
    ref_class = classes.get(ref_base)
    alt_class = classes.get(alt_base)

    if ref_class is None or alt_class is None:
        return None
    return 1 if ref_class == alt_class else 0
 

def fields_table():
    '''
    Central definition of the per-method matrix layout.

    Returns (fields, format, names, gene_annot):
      fields     -- dict: VCF property name -> column index in the matrix
      format     -- printf-style format string per column (matches names order)
      names      -- column names, in matrix column order
      gene_annot -- dict: effect string (as emitted by the upstream
                    annotator) -> severity rank; lower rank = more severe,
                    used downstream to keep the worst effect per snv
    '''
    '''
    format={"QUAL":'%10.2f',"DP":'%10.0f', "MQ":'%10.0f', "DP2":'%10.0f','DPREF':'%10.0f',
            'DPALT':'%10.0f',"AF":'%10.4f',"SB":'%10.4f'}
    '''
    fields={"QUAL":0,"DP":1, "MQ":2, 'DPREF':3,'DPALT':4,
            "AF":5,"SB":6,'VQSLOD':7,'HRun':8}
    
    format=['%10.2f','%10.0f','%10.0f','%10.0f',
            '%10.0f','%10.4f','%10.4f','%10.4f','%10.0f']

    names=["QUAL","DP", "MQ", 'DPREF','DPALT',
            "AF","SB", 'VQSLOD','HRun']
    
    # NOTE(review): 'DOWSTREAM' looks like a typo for 'DOWNSTREAM', but the
    # key must match whatever string the annotator emits -- confirm against
    # real annotated VCFs before changing it.
    gene_annot={'NON_SYNONYMOUS_CODING':1,'STOP_GAINED':2,'STOP_LOST':3,
                     'NON_SYNONYMOUS_CODING(SPLICE_SITE)':4,'STOP_GAINED(SPLICE_SITE)':5,
                     'ESSENTIAL_SPLICE_SITE':6, 'SPLICE_SITE':7,
                     'SYNONYMOUS_CODING':8,'3\'UTR':9,'5\'UTR':10,'INTRONIC':11,'INTERGENIC':12,
                     'DOWSTREAM':13,'UPSTREAM':14}

    return fields, format, names, gene_annot
    

def annotation_array(annotated_snvs, this_annot_snvs, nfiles, actionable):
    '''
    Build a binary annotation matrix over the query snvs.

    annotated_snvs  -- dict: snv key -> row index
    this_annot_snvs -- dict: annotation db name -> snvs present in that db
    nfiles          -- unused here; kept for interface compatibility
    actionable      -- optional dict snv -> actionable-gene label; when
                      given, rows hit by the 'Actionable' db are recorded
    Returns (amat, adict): amat[i, j] is 1 when snv i is in db j (column
    order follows the dict's iteration order), and adict maps row index ->
    actionable-gene label.
    '''
    nrow = len(annotated_snvs)
    ncol = len(this_annot_snvs)

    amat = np.zeros((nrow, ncol))
    adict = defaultdict()

    # .items() + enumerate replaces the py2-only iteritems() and the
    # manually maintained column counter.
    for j, (f, snv_list) in enumerate(this_annot_snvs.items()):
        for snv in snv_list:
            i = annotated_snvs[snv]
            amat[i, j] = 1
            if actionable and f == 'Actionable':
                adict[i] = actionable[snv]

    return amat, adict
        

def make_consensus(Cmat, methods_cols):
    '''
    Keep only the rows of Cmat called by *every* method (majority rule).

    Cmat         -- 2-D matrix; methods_cols are 0/1 "called" indicator columns
    methods_cols -- column indices, one per calling method
    Returns (sub-matrix of consensus rows, array of their row indices).

    Bug fix: the original summed the whole sub-matrix to one scalar
    (np.sum without axis), so the per-row consensus comparison never
    worked; the vote count must be taken per row (axis=1).
    '''
    per_row_votes = np.sum(Cmat[:, methods_cols], axis=1)
    indicator = np.arange(Cmat.shape[0])
    consensus_snps = indicator[per_row_votes == len(methods_cols)]
    return Cmat[consensus_snps, :], consensus_snps

def majority(Cmat, methods_cols):
    '''
    Decide whether an snv row passes the (deliberately loose) majority rule.

    Cmat         -- a single snv row (1-D) or a 2-D matrix slice
    methods_cols -- indicator-column indices, one per calling method

    Bug fix: the original indexed with Cmat[:, methods_cols], which raises
    IndexError on the 1-D rows this is actually called with (mat[i,:]);
    an ellipsis index works for both 1-D and 2-D inputs.
    NOTE(review): with 0/1 indicators the <= test is always True (sum of
    n columns never exceeds n); the author's comment says this was
    loosened on purpose to keep single-caller snvs -- confirm before
    tightening to >=.
    '''
    # Changed to include all snvs predict for at least n-1 methods. sam or gatk
    votes = np.sum(np.asarray(Cmat)[..., methods_cols])
    if votes <= len(methods_cols):
        return True
    else:
        return False
    
    
def snp_filters(Cmat, filters_dict):
    '''
    Apply threshold filters to one snv row.

    Cmat         -- a single snv row of matrix values
    filters_dict -- dict: column index -> threshold; a positive threshold
                    requires Cmat[col] >= threshold, a negative one
                    requires Cmat[col] <= threshold (a threshold of 0 can
                    never pass, as in the original)
    Returns True only when every filter is satisfied.
    '''
    # .items() works on both Python 2 and 3 (iteritems() is 2-only); the
    # count-then-compare of the original is replaced by an equivalent
    # early-exit "all filters must pass" loop.
    for col, threshold in filters_dict.items():
        if threshold > 0 and Cmat[col] >= threshold:
            continue
        if threshold < 0 and Cmat[col] <= threshold:
            continue
        return False
    return True

def annot_filters(Cmat, annot_dict):
    '''
    Decide whether an snv survives the annotation-based filter.

    annot_dict maps database name -> column index, e.g.
    {"HMGD": 4, "dbSNP": 6, "COSMIC": 7}.  An snv already in dbSNP is
    rejected unless it is also in COSMIC or HMGD; snvs absent from dbSNP
    always pass.  (Exact ==0 / ==1 tests are kept, so a non-binary dbSNP
    value falls through to the implicit None, as in the original.)
    '''
    hmgd_hit = Cmat[annot_dict["HMGD"]] == 1
    cosmic_hit = Cmat[annot_dict["COSMIC"]] == 1

    if hmgd_hit or cosmic_hit or Cmat[annot_dict["dbSNP"]] == 0:
        return True
    elif Cmat[annot_dict["dbSNP"]] == 1:
        return False
    
    
    

#### Main Function
def build_concesus_table(query_snvs, dumped_files,ofile, mmapfile, 
                         complement, snvs_annot_dbs,actionable=None):
    '''
    Assemble the per-snv consensus matrix from every method's pickled VCF,
    add annotation and transition/transversion columns, pick the filter
    configuration matching the number of input methods, and delegate the
    writing to write_consensus_tables.

    query_snvs     -- dict: snv coordinate key -> matrix row index
    dumped_files   -- dict: method name ('GATK', 'SAM', ...) -> pickled VCF path
    ofile          -- output table path
    mmapfile       -- path for the float32 memmap copy of the final matrix
    complement     -- unused within this function
    snvs_annot_dbs -- dict: annotation db name -> snvs found in that db
    actionable     -- optional dict: snv -> actionable-gene label
    Returns the final list of column names.
    '''
    fields, format, names, gene_annot=fields_table()
    ncol = len(fields) # The one is for the gene_annotation column
    nrow = len(query_snvs)
    nfiles = len(dumped_files)
    # One indicator column per method, plus one column for the row sums.
    mcol = len(dumped_files)+1
    ga = -1
    titvcol=2 # Size of the matrix for transversions and transitions
    
    
    # Functional Annotation: Majority Vote and Present or not in HapMap
    
    print >> sys.stderr, "Building concensus table ...."
    # Only keep the annotation dbs that were actually supplied.
    full_annot = ['dbSNP','HapMap', 'Actionable', 'HMGD','COSMIC','Validated']
    this_annot = set(full_annot).intersection(snvs_annot_dbs.keys())
    print this_annot
    
    # This function annotates each snps with respect to the annotation dbs
    # Only the location is used. Not matching of the ref and alt bases is verified yet.
    amat,adict = annotation_array(query_snvs, snvs_annot_dbs, nfiles,actionable)
    
    # Define Matrix for transition - transversion snvs
    titvmat = np.zeros( (nrow,titvcol) )
    
    ############## Function
    #newpm=defaultdict()
    # imat: 0/1 per-method call indicators; its last column is filled with
    # the per-row sums (number of methods that called each snv) below.
    imat = np.zeros( (nrow,mcol) ) 
    gen_names=defaultdict()
    gen_annot_array=np.empty((nrow))
    
    # Define an array to store the nucleotide change
    base_change=defaultdict() 
    k=0
    # Cretate the matrix to store all data for a given method.
     
    for ft, df in dumped_files.iteritems():
        ovcf=VCF()
        ovcf.readFromPickle(df)
        # create a matrix for this file
        cmat = np.empty( (nrow,ncol) )
        
        for coord, i in query_snvs.iteritems():
            #coord = coord.split('|')[0]
            snv_list = ovcf[[coord]] # list of 1 snv 
            if not snv_list:
                # This method did not call the snv: leave its row as NaN.
                cmat[i,:] = np.nan
                #print >> sys.stderr, "Not snps for this coordinate %s"%(coord)
            else:
                # Intersection matrix
                imat[i,k]=1
                
                for snv in snv_list:
                    # Filling annotation matrix
                    # print snv
                    loc = snv['COORD']
                    
                    if len(snv_list) > 1: 
                        print >> sys.stderr, "There is more than one snp with the same location"
                    
                    # Column 0 marks transitions, column 1 transversions.
                    transition = titv_mutation(snv['REF'], snv['ALT'])
                    #print snv['REF'], snv['ALT'], transition
                    if transition:
                        titvmat[i,0]=1
                    else:
                        titvmat[i,1]=1
                       
                    # Properties of the snp
                    base_change[i]='%s>%s'%(snv['REF'], snv['ALT'])
                    variant_frac=0
                    for prop,j in fields.iteritems():    
                        try:
                            cmat[i,j] = snv[prop]
                                                        
                        except KeyError:
                            #print >> sys.stderr, "%s, Error with this snv %s, it is missing field %s"%(df,coord,prop)
                            if prop=="SB":
                                # This number means the field was not present in the vcf
                                cmat[i,j]=999
                            else:
                                cmat[i,j] = np.nan
                    
                    # calculate the variant fraction
                    # NOTE(review): raises ZeroDivisionError when both
                    # DPREF and DPALT are 0 -- confirm inputs exclude that.
                    l,m,h = fields['DPREF'],fields['DPALT'],fields['AF']
                    cmat[i,h] = float(cmat[i,m])/(cmat[i,l]+cmat[i,m])  
                    
                    # The part below handles the snp annotation and the gene name associated to it. 
                    snv_annotation = set(gene_annot.keys()).intersection(snv.keys())
                    print gene_annot, snv.keys()
                    print snv_annotation
                    if snv_annotation:
                        # Keep the most severe effect (lowest rank wins).
                        tmpid=100                        
                        for gt in snv_annotation:
                            id = gene_annot[gt]
                            if id < tmpid:
                                tmp,tmpid=gt,id
                        gen_annot_array[i]=tmpid #id
                        gen_names[i]=snv[tmp]
                        
                    else:
                        # 999 marks "no recognized effect annotation".
                        gen_annot_array[i]=999
                        gen_names[i]='nan'
                    print  gen_names[i], gen_annot_array[i]
     
        # concatenate matrix for this file to gral matrix
        if k==0:
            Cmat=np.copy(cmat)
        else:
            Cmat = np.hstack( (Cmat,cmat) )
        k+=1
        
    
    # Get the majority vote for the snv calls
    # Build per-method column names/formats (M0_QUAL, M1_QUAL, ...).
    fformat=[]
    fnames=[]
    fmethods=[]
    for i in range(len(dumped_files)):
        tn = ['M%d_%s'%(i,n) for n in names]
        fformat+=format
        fnames+=tn
        fmethods.append('M%d'%(i)) 
    
    this_annot = list(this_annot)
    print fmethods
    this_annot.insert(0,'Majority')
    print this_annot
    fmethods=fmethods+this_annot
    fmethods.reverse()
    # Prepend one '%d' format / one name per indicator+annotation column.
    [fformat.insert(0,'%d') for i in range(imat.shape[1])]
    [fnames.insert(0,f) for f in fmethods]
    
    fnames.extend(['Ti','Tv'])
    # Concatenate the intersection and property matrices
    print imat.shape, mcol
    # Fill the last imat column with the per-row call counts (that column
    # itself is still zero at this point, so including it is harmless).
    imat[:,nfiles]=np.sum(imat[:,:mcol], axis=1)
    # Concatenate imat and annotation
    iamat = np.hstack( (imat,amat) )
    # Concatenate the annotation matrix
    Cmat = np.hstack( (iamat,Cmat) )
    # Concatenate the matrix and the titv matrix
    Cmat = np.hstack( (Cmat,titvmat) )
    '''
    fields={"QUAL":0,"DP":1, "MQ":2, 'DPREF':3,'DPALT':4,
            "AF":5,"SB":6,'VQSLOD':7,'HRun':8}
    '''
    # The column indices below address the final stacked Cmat; they depend
    # on the number of methods and annotation databases present.
    if nfiles == 2:
        methods_cols=[0,1]
        #annot_dict={"HMGD":4,"dbSNP":6,"COSMIC":7}
        # When HMGD and COSMIC are included (otherwise 29,31)
        #filters_dict={31:8,33:-0.1}
        annot_dict={"HMGD":3,"dbSNP":5,"COSMIC":6}
        # When HMGD and COSMIC are included (otherwise 29,31)
        filters_dict={21:8,23:-0.1}

    elif nfiles ==1 and ("GATK" in dumped_files):
        # When you have only one method: 
        print "Analyzing GATK results only"
        methods_cols=[0]
        annot_dict={"HMGD":2,"dbSNP":4,"COSMIC":5}
        filters_dict={11:8,13:-0.0001} # Note 11-27-11 before I was using -0.1. I lowered to pick the TP53 mutatio in MO1014
    elif nfiles ==1 and ("SAM" in dumped_files):
        print "Analyzing SAM results only"
        # When you have only one method: 
        methods_cols=[0]
        annot_dict={"HMGD":2,"dbSNP":4,"COSMIC":5}
        filters_dict={11:8}

    else:
        print 'The are more than 2 files and we do not know how to process those cases'
        sys.exit(0)
    
    write_consensus_tables(Cmat,ofile,",".join(fformat),mmapfile, query_snvs, fnames, base_change, gen_annot_array, gen_names,
                           methods_cols, filters_dict, annot_dict,adict)
    
    return fnames


def write_table(mat,fname, format, mmapfile, snps, header, gen_annot=None, gene_names=None):
    '''
    Write an snv-by-property matrix as a tab-separated table and also dump
    it to a float32 memmap file.

    mat        -- 2-D matrix, one row per snv
    fname      -- output text table path
    format     -- unused here; kept for interface compatibility
    mmapfile   -- path for the float32 np.memmap copy of mat
    snps       -- dict: snv id -> row index in mat
    header     -- column names for the table header
    gen_annot  -- optional row index -> mutation-type code
    gene_names -- optional row index -> gene name strings
    When gen_annot/gene_names are missing, the two trailing columns are
    written as quoted 'nan' placeholders.
    '''
    # Persist the raw matrix for later fast reloading.
    cmat = np.memmap(mmapfile,dtype='float32',mode='w+',shape=(mat.shape[0],mat.shape[1]) )
    cmat[:] = mat[:]

    ofile = open(fname,'w')

    if gene_names is not None and gen_annot is not None:
        ofile.write("snpID\t"+",".join(header).replace(',','\t')+"\tmut_type"+"\tgene_name"+'\n')
        # .items() and the comprehension (instead of py2 map/iteritems)
        # work identically on Python 2 and 3.
        for s,i in snps.items():
            # Gene names are joined with '&' and quoted to stay one column.
            l = [s] + [str(v) for v in mat[i,:]] + [str(gen_annot[i])] + ['\''+",".join(gene_names[i]).replace(',','&')+'\'']
            ofile.write(",".join(l).replace(',','\t')+'\n')
        ofile.close()
    else:
        ofile.write("snpID\t"+",".join(header).replace(',','\t')+'\n')
        for s,i in snps.items():
            l = [s] + [str(v) for v in mat[i,:]] + ['\''+'nan'+'\''] + ['\''+'nan'+'\'']
            ofile.write(",".join(l).replace(',','\t')+'\n')
        ofile.close()



def write_consensus_tables(mat,fname, format, mmapfile, snps, header, base_change, gen_annot=None, gene_names=None, 
                           methods_cols=None, filters_dict=None, annot_dict=None,adict=None):
    '''
    Write the consensus matrix out as two tab-separated tables:
      fname+'_filtered' -- snps passing the majority vote, value filters,
                           annotation filters, and a damaging-enough
                           mutation type
      fname             -- every other snp
    The matrix is also persisted as a float32 memmap at mmapfile.

    mat          -- final stacked snv matrix, one row per snv
    fname        -- base output path
    format       -- unused here; kept for interface compatibility
    snps         -- dict: snv id -> row index
    header       -- column names
    base_change  -- dict: row index -> 'REF>ALT' string
    gen_annot    -- optional row index -> mutation-type rank
    gene_names   -- optional row index -> iterable of 'gene:codon' strings
    methods_cols / filters_dict / annot_dict -- filter configuration
                 (see majority, snp_filters, annot_filters)
    adict        -- optional dict: row index -> actionable-gene label
    '''
    
    cmat = np.memmap(mmapfile,dtype='float32',mode='w+',shape=(mat.shape[0],mat.shape[1]) )
    cmat[:] = mat[:]
    
    
    ofile = open(fname,'w')
    rfile = open(fname+'_filtered','w')
    # see muation types table < 7 include all non-synonynous cases
    mut_type= 7 # splice site
    # print snvs list
    if gene_names is not None and gen_annot is not None:
        if methods_cols is not None and filters_dict is not None and annot_dict is not None and adict is not None:
            rfile.write("snpID\t"+"mut\t"+",".join(header).replace(',','\t')+"\tmut_type"+"\tAA_change"+"\tgene_name"+"\taction_name"+'\n')
            ofile.write("snpID\t"+"mut\t"+",".join(header).replace(',','\t')+"\tmut_type"+"\tAA_change"+"\tgene_name"+"\taction_name"+'\n')
            for s,i in snps.iteritems():
                # Print if satisfies majority and filter values
                # And it correspond to a potentially affecting mutation
                # Reformat the annotation: split each 'gene:codon' entry,
                # de-duplicating codons while keeping gene order.
                codons,genes=deque(),deque()
                for g in gene_names[i]:
                    if len(g.split(':')) == 2:
                        cod,gen=g.split(':')[1],g.split(':')[0]
                        if cod not in codons:
                            codons.append(cod)
                            genes.append(gen)
                    else:
                        codons.append("None")
                        genes.append(g)
                
                #l = [s] + map(str,list(mat[i,:])) + [str(gen_annot[i])] + ['\''+",".join(gene_names[i]).replace(',','&')+'\'']
                # Rows without an actionable-gene hit get 'None'.
                try: 
                    snp_istate=adict[i] 
                except KeyError: 
                    snp_istate='None'
                    
                l = [s] + [base_change[i]] +map(str,list(mat[i,:])) + [str(gen_annot[i])] + \
                ['\''+",".join(codons).replace(',','&')+'\'']+['\''+",".join(genes).replace(',','&')+'\''] +\
                [ snp_istate ]
                if majority(mat[i,:],methods_cols) and snp_filters(mat[i,:], filters_dict) and \
                   annot_filters(mat[i,:],annot_dict) and gen_annot[i] <= mut_type:                
                    rfile.write(",".join(l).replace(',','\t')+'\n')
                else:
                    ofile.write(",".join(l).replace(',','\t')+'\n')
            rfile.close()
            ofile.close()
        else:
            ##### 06-27-11 Outdated need to incorporate the base change and the change in the headers.                
            ofile.write("snpID\t"+",".join(header).replace(',','\t')+"\tmut_type"+"\tgene_name"+'\n')
            for s,i in snps.iteritems():
                l = [s] + map(str,list(mat[i,:])) + [str(gen_annot[i])] + ['\''+",".join(gene_names[i]).replace(',','&')+'\'']
                ofile.write(",".join(l).replace(',','\t')+'\n')
            ofile.close()
    else:
        if methods_cols is not None and filters_dict is not None:
            ofile.write("snpID\t"+",".join(header).replace(',','\t')+'\n')
            rfile.write("snpID\t"+",".join(header).replace(',','\t')+'\n')
            for s,i in snps.iteritems():
                l = [s] + map(str,list(mat[i,:])) + ['\''+'nan'+'\''] + ['\''+'nan'+'\'']
                if majority(mat[i,:],methods_cols) and snp_filters(mat[i,:], filters_dict):
                    rfile.write(",".join(l).replace(',','\t')+'\n')
                else:
                    ofile.write(",".join(l).replace(',','\t')+'\n')
            # NOTE(review): this branch never closes ofile/rfile -- confirm
            # whether interpreter-exit flushing is being relied upon.
        else:    
            ofile.write("snpID\t"+",".join(header).replace(',','\t')+'\n')
            for s,i in snps.iteritems():
                l = [s] + map(str,list(mat[i,:])) + ['\''+'nan'+'\''] + ['\''+'nan'+'\'']
                ofile.write(",".join(l).replace(',','\t')+'\n')
            ofile.close()




        
def generate_position_file(vcf_file, calling_tool, pfiles):
    '''
    Create the positions (.pos.base) and pickle (.dump) side files for a VCF.

    vcf_file     -- input VCF path
    calling_tool -- method label ('GATK', 'SAM', ...) used as the pfiles key
    pfiles       -- dict updated in place: calling_tool -> pickle path
    Returns the positions/bases parsed back from the .pos.base file.

    Cleanup: the original built an unused snvs_dict and read the positions
    file twice; the dead work is removed, behavior is unchanged.
    '''
    fpos = vcf_file.replace('.vcf','.pos.base')
    # Write the positions file for this VCF.
    snppos.read_file(vcf_file,fpos)
    pf = vcf_file+'.dump'
    pfiles[calling_tool]=pf
    # Portable replacement for the Python-2-only "print >> sys.stderr".
    sys.stderr.write("Reading vcf file %s\n"%(vcf_file))
    # Parse the VCF and pickle it to pf -- the pickle side effect is needed
    # downstream; the returned key list is not used here.
    read_file(vcf_file,pf)
    # Get the positions and bases.
    tsnvs_pos_base = snppos.read_positions(fpos)

    return tsnvs_pos_base


def main_concesus(vcf_files, list_annot_files, ofile, mmapfile='',complement=False):
    '''
    Top-level pipeline: build the position/pickle side files for every
    input VCF, intersect the snv union with each annotation database,
    build and write the consensus table, and dump the snv -> index map to
    a .snvs file next to the output.

    vcf_files        -- dict: method name -> VCF path
    list_annot_files -- dict: annotation db name -> positions/bed file path
    ofile            -- output table path (expected to end in '.txt')
    mmapfile         -- path for the memmap copy of the matrix
    complement       -- forwarded to build_concesus_table
    '''
    snvs_union = set()
    pfiles=defaultdict()
    
    snvs_annot_dbs = defaultdict()
    
    
    for ft, f in vcf_files.iteritems():
        # Generate a positions files for query vcf.
        print ft, f
        thsnvs = generate_position_file(f, ft, pfiles)
        snvs_union.update(set(thsnvs))
        
    # Give every snv in the union a stable row index.
    snvs_query = union_snvs_dict(snvs_union)
    # Annotation files
    for fname, afile in list_annot_files.iteritems():
        print fname, afile
        if fname=="Actionable":
            # annotate snps that land in actionable genes
            snvs_annot_dbs[fname]=mark_actionable_genes(afile, snvs_union)
        else:
            # read position of vcf file
            snvs_annot_dbs[fname] = snppos.read_positions(afile)
    #snvs_annot_dbs['Validated'] = read_file_vcf(list_annot_files[3])    
    
    # Intersection of snvs and annotated snvs
    annotated_snvs = isec_snvs_annot_union(snvs_union, snvs_annot_dbs)
    
    print >> sys.stderr, "Building concensus"
    
    # NOTE(review): snvs_annot_dbs['Actionable'] raises KeyError when no
    # actionable file was supplied -- confirm '-a' is effectively required.
    header = build_concesus_table(snvs_query, pfiles, ofile, mmapfile,
                                  complement, annotated_snvs, snvs_annot_dbs['Actionable'])
    
    # print snvs list
    snvs = ofile.replace('.txt','.snvs')
    sfile = open(snvs,'w')
    for s,i in snvs_query.iteritems():
        sfile.write(s+'\t'+str(i)+'\n')
    sfile.close()
    print >> sys.stderr,"DONE!"
        
        
if __name__ == '__main__':
    optionparser = OptionParser("usage: %prog [options] ")
    
    optionparser.add_option("--local", dest="local", action="store_true", default=False)
    optionparser.add_option("--cluster", dest="cluster", action="store_true", default=False)
    # Specific options
    # Data files
    optionparser.add_option("-g", "--gatk", dest="gatk",
                            help="gatk vcf file")
    optionparser.add_option("-s", "--sam", dest="sam",
                            help="sam vcf file")
    optionparser.add_option("-v", "--vars", dest="vars",
                            help="vars vcf file")
        
    # Annotation files
    optionparser.add_option("-a", "--actionable", dest="actionable",
                            help="actionable genes bed file")    
    optionparser.add_option("-d", "--dbsnp", dest="dbsnp",
                            help="dbSNP positions file")    
    optionparser.add_option("-p", "--hapmap", dest="hapmap",
                            help="hapmap positions file")
    optionparser.add_option("-m", "--hmgd", dest="hmgd",
                            help="hmgd positions file")
    optionparser.add_option("-i", "--cosmic", dest="cosmic",
                            help="cosmic positions file")


    # Output files
    optionparser.add_option("-o", "--output_file", dest="output_file",
                            help="output_file")
    optionparser.add_option("-c", "--complement", action='store_true', default=False,
                            help="output_file")
    
    


    (options, args) = optionparser.parse_args()
    outfile =options.output_file+'.txt'
    mmapfile =options.output_file+'.memmap'
    print options
    
    # Input files:
    vcf_files, annot_files =defaultdict(),defaultdict()
    
    #if not (options.gatk ^ options.sam ^ options.vars):
    #    print >> sys.stderr, "You need a minimum of 1 vcf file to do the analysis"
    #    sys.exit(0)
    if options.gatk:
        vcf_files['GATK']=options.gatk
    if options.sam:
        vcf_files['SAM']=options.sam
    if options.vars:
        vcf_files['VARS']=options.vars
    if options.vars=="" and options.gatk=="" and options.sam=="":
        print >> sys.stderr, "You need a minimum of 1 vcf file to do the analysis"
        sys.exit(0)

        
    if options.dbsnp:
        annot_files["dbSNP"]=options.dbsnp
    if options.hapmap:
        annot_files["HapMap"]=options.hapmap
    if options.hmgd:
        annot_files["HMGD"]=options.hmgd
    if options.cosmic:
        annot_files["COSMIC"]=options.cosmic
    if options.actionable:
        annot_files["Actionable"]=options.actionable
    if options.dbsnp=="" and options.hapmap=="" and options.hmgd=="" and options.actionable=="":
        print >> sys.stderr, "You need a minimum of 1 annotation file to do the analysis"
    

    
    main_concesus(vcf_files, annot_files, outfile, mmapfile, options.complement)