'''
Created on Mar 29, 2011

@author: oabalbin
'''
import sys
import numpy as np
import array
from optparse import OptionParser
from collections import defaultdict,deque #, OrderedDict
from bx.intervals.intersection import Interval, IntervalTree

from exome.vcftools.vcf import *
from exome.vcftools.dbSNP_positions import read_positions

class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass

class SnvError(Error):
    """SNV-specific error that carries the offending value."""

    def __init__(self, value):
        # Keep the raw value so callers can inspect it after catching.
        self.value = value

    def __str__(self):
        # Render the stored value with repr so strings keep their quotes.
        return '%r' % (self.value,)

class Gene:
    """Lightweight interval record: [start, end] plus an arbitrary payload.

    value is typically a dict with 'chr', 'annot' and 'strand' keys
    (see read_gene_list), but the class does not depend on that shape.
    """

    def __init__(self, start, end, value):
        # Genomic coordinates of the feature and its associated metadata.
        self.start, self.end, self.value = start, end, value


class SequencesTree:
    """Per-chromosome container of SNP positions.

    genome maps each chromosome name ('chr1'..'chr22', 'chrX', 'chrY')
    to an array.array('L') of positions (unsigned longs -- far more
    compact than a list of Python ints for millions of positions).
    """

    # chr1..chr22 plus the sex chromosomes. The original literal repeated
    # chr6-chr12, which was harmless only because dict keys de-duplicate.
    CHROMOSOMES = ['chr%d' % i for i in range(1, 23)] + ['chrX', 'chrY']

    def __init__(self):
        self.genome = {}
        for chrom in self.CHROMOSOMES:
            # BUG FIX: the original did defaultdict(array.array('L')),
            # passing an array *instance* where defaultdict requires a
            # callable factory -- that raises TypeError on construction.
            # Store the position array for the chromosome directly.
            self.genome[chrom] = array.array('L')

    def __getitem__(self, chrom):
        # Allow tree[chrom] indexing as used by read_vcf_file_db().
        return self.genome[chrom]


def actionable_genes(gene_list):
    '''
    Build an IntervalTree over the given Gene records for fast positional
    lookup of actionable genes.

    gene_list -- iterable of Gene objects (start, end, value payload).
    Returns the populated bx IntervalTree.
    '''
    tree = IntervalTree()
    for gene in gene_list:
        # Each interval carries the gene's metadata dict as its value.
        tree.insert_interval(Interval(gene.start, gene.end, value=gene.value))
    return tree
        
        
def read_gene_list(ifile):
    '''
    Read a BED file with fields: chr, start, end, name, mark, strand.

    ifile -- path to the tab-delimited BED file.

    Returns a deque of Gene objects; each Gene.value is a dict with keys
    'chr', 'annot' (the BED name column) and 'strand'.
    '''
    genes = deque()
    # BUG FIX: the original rebound the parameter to the open handle and
    # never closed it; 'with' guarantees the file is closed.
    with open(ifile) as fh:
        for line in fh:
            fields = line.strip('\n').split('\t')
            start, end = int(fields[1]), int(fields[2])
            value = {'chr': fields[0], 'annot': fields[3], 'strand': fields[5]}
            genes.append(Gene(start, end, value))
    return genes
    
def mark_actionable_genes(ifile, snp_list):
    '''
    Annotate SNPs that fall inside actionable genes.

    ifile    -- BED file of actionable genes (see read_gene_list).
    snp_list -- iterable of "chr@pos" location strings.

    Returns a dict mapping each actionable SNP location string to the
    gene annotation it overlaps (chromosomes must match, since the
    interval tree is position-only).
    '''
    tree = actionable_genes(read_gene_list(ifile))
    hits = defaultdict()
    for location in snp_list:
        chrom, _, position = location.partition('@')
        pos = int(position)
        for hit in tree.find(pos, pos):
            # The tree mixes all chromosomes; keep same-chromosome hits only.
            if hit.value['chr'] == chrom:
                hits[location] = hit.value['annot']
    return hits
    
    
def read_file(ivcf, pfname):
    '''
    Parse a VCF file, pickle the parsed VCF object to pfname, and return
    the SNV location keys of the parsed file.

    ivcf   -- path of the VCF file to parse.
    pfname -- path where the pickled VCF object is dumped.
    '''
    vcf_obj = VCF()
    vcf_obj.readFromFile(ivcf)
    snv_keys = vcf_obj.keys()
    # Persist the parsed object so later passes can reload it cheaply.
    vcf_obj.pickle(pfname)
    return snv_keys

def load_file(idump):
    '''
    Restore a previously pickled VCF object from the dump file idump.
    '''
    vcf_obj = VCF()
    vcf_obj.readFromPickle(idump)
    return vcf_obj
    

def read_file_vcf(ivcf):
    '''
    Read a VCF file and index its records by genomic location.

    ivcf -- path of the VCF file.

    Returns a defaultdict(list) mapping "chrom@pos" location keys to a
    list of [CHROM, REF, ALT, QUAL] records (several records can share a
    location).
    '''
    snps_dict = defaultdict(list)
    # BUG FIX: use 'with' so the file handle is closed (the original
    # rebound the parameter to the open handle and leaked it).
    with open(ivcf) as fh:
        for line in fh:
            # Skip meta-information and header lines.
            if line.startswith('#'):
                continue
            fields = line.strip('\n').split('\t')
            loc = fields[0] + '@' + fields[1]
            # VCF columns: 0=CHROM, 3=REF, 4=ALT, 5=QUAL.
            snps_dict[loc].append([fields[0], fields[3], fields[4], fields[5]])

    sys.stderr.write("Reading vcf file is done\n")
    return snps_dict
    
def read_vcf_file_db(ivcf):
    '''
    Load SNP positions from a VCF file into a per-chromosome
    SequencesTree (compact array.array storage per chromosome).

    ivcf -- path of the VCF file.

    Returns the populated SequencesTree.
    '''
    snps_tree = SequencesTree()
    # BUG FIX: use 'with' so the file handle is closed (the original
    # rebound the parameter to the open handle and leaked it).
    with open(ivcf) as fh:
        for line in fh:
            if line.startswith('#'):
                continue
            fields = line.strip('\n').split('\t')
            chrom, pos = fields[0], int(fields[1])
            # BUG FIX: the original indexed the tree object itself
            # (snpsTree[chr]) which SequencesTree does not have to
            # support -- append to the per-chromosome array via .genome.
            snps_tree.genome[chrom].append(pos)

    sys.stderr.write("Reading vcf file is done\n")
    return snps_tree

    

def intersection_snvs(union_snvs, list_snvs_sets):
    '''
    Build the caller-presence (consensus) matrix for a set of SNVs.

    union_snvs     -- iterable of all snv location keys (union of callers).
    list_snvs_sets -- one collection of snv keys per caller.

    Returns (pm, cm):
      pm -- dict mapping snv key -> row index into cm,
      cm -- numpy array of shape (n_snvs, n_callers); cm[i, j] == 1 when
            snv i was reported by caller j, else 0.
    '''
    # PERF FIX: convert every caller's collection to a set once, so each
    # membership probe below is O(1) instead of a linear list scan.
    caller_sets = [set(s) for s in list_snvs_sets]

    cm = np.zeros((len(union_snvs), len(caller_sets)))
    pm = {}

    for i, snv in enumerate(union_snvs):
        pm[snv] = i
        for j, caller in enumerate(caller_sets):
            if snv in caller:
                cm[i, j] = 1
    return pm, cm

def fields_table():
    '''
    Describe the per-caller SNV property columns of the consensus table.

    Returns a 3-tuple (fields, format, names):
      fields -- dict mapping column name -> column index,
      format -- printf-style format string for each column, in order,
      names  -- column names in index order.
    '''
    names = ["QUAL", "DP", "MQ", "DPREF", "DPALT",
             "AF", "SB"]
    # Derive the index lookup from the ordered name list so the two can
    # never drift apart.
    fields = dict((label, idx) for idx, label in enumerate(names))
    format = ['%10.2f', '%10.0f', '%10.0f', '%10.0f',
              '%10.0f', '%10.4f', '%10.4f']

    return fields, format, names
    

def build_concesus_table(pm, cm, dumped_files,ofile, mmapfile, 
                         complement, act_snps=None, dbsnp=None, hapmap=None, validated=None):
    '''
    Build the consensus annotation table across all variant callers and
    write it out through write_table().

    pm           -- dict mapping snv coordinate key -> row index.
    cm           -- caller intersection matrix (not used in this function).
    dumped_files -- pickled VCF objects, one per caller, in column order.
    ofile        -- output text-table path passed to write_table().
    mmapfile     -- path for the float32 memmap copy of the matrix.
    complement   -- unused flag, kept for interface compatibility.
    act_snps     -- optional dict: actionable snv coordinate -> gene annot.
    dbsnp, hapmap, validated -- optional collections of snv coordinates
                    used to flag membership per row.

    Returns the list of output column names.

    NOTE(review): the elif chain below does not cover every combination
    of optional annotation sources (e.g. dbsnp+validated without hapmap);
    an uncovered combination falls through to the 'Majority'-only layout
    while the per-source writes further down would then reference an
    undefined column variable -- confirm callers only use supported
    combinations.
    '''
    fields, format, names=fields_table()
    ncol = len(fields)
    nrow = len(pm)
    # Functional Annotation: Majority Vote and Present or not in HapMap
    
    print >> sys.stderr, "Building concensus table ...."
    
    # Decide which annotation columns are appended after the per-caller
    # presence columns.  The *col variables are negative column indices
    # into imat, counted from the right-hand edge.
    if act_snps is not None and dbsnp is not None and hapmap is not None and validated is not None:
        annot = ['Majority','dbSNP','HapMap', 'Actionable', 'Validated']
        mcol, dcol, hcol, acol, vcol = -5, -4,-3,-2,-1
            
    elif act_snps is not None and dbsnp is not None and hapmap is not None:
        annot = ['Majority','dbSNP','HapMap', 'Actionable']
        mcol, dcol, hcol, acol = -4,-3,-2,-1
        
    elif act_snps is not None and validated is not None and hapmap is not None:
        annot = ['Majority','HapMap', 'Actionable', 'Validated']
        mcol, hcol, acol, vcol = -4,-3,-2,-1
        
    elif dbsnp is not None and hapmap is not None:
        annot = ['Majority','dbSNP','HapMap']
        mcol, dcol, hcol = -3,-2,-1
        
    elif dbsnp is not None and act_snps is not None:
        annot = ['Majority','dbSNP','Actionable']
        mcol, dcol, acol = -3,-2,-1

    elif hapmap is not None and act_snps is not None:
        annot = ['Majority','HapMap','Actionable']
        mcol, hcol, acol = -3,-2,-1
        
    elif act_snps is not None and validated is not None:
        annot = ['Majority','Actionable', 'Validated']
        mcol, acol, vcol = -3,-2,-1

    else:
        annot = ['Majority']
        mcol=-1

        
    ############## Function
    #newpm=defaultdict()
    # imat: one 0/1 presence column per caller, then the annotation columns.
    imat = np.zeros( (nrow,len(dumped_files)+len(annot) ) ) 
    #act_gen_names = np.empty( nrow, dtype='str')
    # Row index -> actionable gene annotation (or 'NotAct_<gene>' fallback).
    act_gen_names=defaultdict()
    #[act_gen_names.append('nan') for i in range(nrow)]
    
    # file1, file2, file3, file n.  
    
    
    for k, df in enumerate(dumped_files):
        # Restore the pickled VCF object for caller k.
        ovcf=VCF()
        ovcf.readFromPickle(df)
        # create a matrix for this file
        cmat = np.empty( (nrow,ncol) )
        
        for coord, i in pm.iteritems():
            snv_list = ovcf[[coord]] # list of 1 snv 
            if not snv_list:
                # Caller k did not report this coordinate: leave the whole
                # property row as NaN.
                cmat[i,:] = np.nan
                #print >> sys.stderr, "Not snps for this coordinate %s"%(coord)
            else:
                # Intersection matrix
                imat[i,k]=1

                for snv in snv_list:
                    # Filling annotation matrix
                    #print snv
                    loc = snv['COORD']
                    #newpm[loc]=i
                    if len(snv_list) > 1: 
                        print >> sys.stderr, "There is more than one snp with the same location"
                    
                    if act_snps is not None:
                        if loc in act_snps.keys():
                            
                            imat[i,acol] = 1
                            #act_gen_names[i]=act_snps[loc]
                            #act_gen_names.insert(i,act_snps[loc])
                            act_gen_names[i]=act_snps[loc]
                            
                        else:
                            imat[i,acol] = 0
                            # Not actionable: recover a gene name from the
                            # effect-annotation keys when available.
                            if 'NON_SYNONYMOUS_CODING(SPLICE_SITE)' in snv.keys():
                                gen_name=snv['NON_SYNONYMOUS_CODING(SPLICE_SITE)']
                            elif 'NON_SYNONYMOUS_CODING' in snv.keys():
                                gen_name=snv['NON_SYNONYMOUS_CODING']
                            else:
                                gen_name='NAN'
                                
                            act_gen_names[i]='NotAct'+'_'+gen_name
                            
                                                
                    if  dbsnp is not None:
                        imat[i,dcol] = 1 if loc in dbsnp else 0
                    if hapmap is not None:
                        imat[i,hcol] = 1 if loc in hapmap else 0
                    if validated is not None:
                        imat[i,vcol] = 1 if loc in validated else 0
                    
                    # Properties of the snp
                    for prop,j in fields.iteritems():
                        
                        try:
                            cmat[i,j] = snv[prop]
                        except KeyError:
                            print >> sys.stderr, "%s, Error with this snv %s, it is missing field %s"%(df,coord,prop)
                            if prop=="SB":
                                # This number means the field was not present in the vcf
                                cmat[i,j]=999
                            else:
                                cmat[i,j] = np.nan
                            
     
        # concatenate matrix for this file to gral matrix
        if k==0:
            Cmat=np.copy(cmat)
        else:
            Cmat = np.hstack( (Cmat,cmat) )
            
    # Get the majority vote for the snv calls
    # Build the per-column output names and formats: M<k>_<field> for each
    # caller's property columns.
    fformat=[]
    fnames=[]
    fmethods=[]
    for i in range(len(dumped_files)):
        tn = ['M%d_%s'%(i,n) for n in names]
        fformat+=format
        fnames+=tn
        fmethods.append('M%d'%(i)) 
    
    fmethods=fmethods+annot
    fmethods.reverse()
    # Integer format for the presence/annotation columns, prepended so the
    # imat columns come first in the final table.
    [fformat.insert(0,'%d') for i in range(imat.shape[1])]
    [fnames.insert(0,f) for f in fmethods]
    
    # Majority column = number of callers that reported each snv.
    imat[:,mcol]=np.sum(imat[:,:mcol], axis=1)
    Cmat = np.hstack( (imat,Cmat) )
    write_table(Cmat,ofile,",".join(fformat),mmapfile, pm, fnames, act_gen_names)
    
    return fnames


def write_table(mat, fname, format, mmapfile, snps, header, act_names):
    '''
    Persist the consensus matrix as a float32 memmap plus a tab-separated
    text table.

    mat      -- 2-D numpy array; rows follow the indices stored in snps.
    fname    -- output text-table path.
    format   -- unused; kept for backward compatibility with callers.
    mmapfile -- path of the binary float32 memmap copy of mat.
    snps     -- dict mapping snv id string -> row index into mat.
    header   -- column names for the text table.
    act_names-- dict mapping row index -> actionable-gene annotation.
    '''
    # Binary copy of the matrix; float32 halves the on-disk footprint
    # relative to float64.
    cmat = np.memmap(mmapfile, dtype='float32', mode='w+',
                     shape=(mat.shape[0], mat.shape[1]))
    cmat[:] = mat[:]
    cmat.flush()  # BUG FIX: ensure the memmap contents hit the disk

    ofile = open(fname, 'w')
    try:
        ofile.write("snpID\t" + ",".join(header).replace(',', '\t') + "\tActionable" + '\n')

        for s, i in snps.items():
            # BUG FIX: .get() avoids a KeyError for rows that never received
            # an actionable annotation (e.g. act_snps disabled upstream).
            l = [s] + [str(v) for v in mat[i, :]] + [act_names.get(i, 'NAN')]
            # NOTE: join-then-replace also converts commas *inside* values
            # to tabs; kept byte-compatible with the original output.
            ofile.write(",".join(l).replace(',', '\t') + '\n')
    finally:
        ofile.close()
        

def main_concesus(list_vcf_files, list_annot_files, ofile, mmapfile='', complement=False):
    '''
    Driver: parse every caller's VCF, compute the caller intersection,
    annotate with actionable genes / dbSNP / HapMap, and write the
    consensus table plus .header and .snvs side files.

    list_vcf_files   -- one VCF path per variant caller.
    list_annot_files -- [actionable-gene BED, dbSNP positions, HapMap positions].
    ofile            -- output table path, expected to end in '.txt'.
    mmapfile         -- path for the float32 memmap copy of the matrix.
    complement       -- passed through to build_concesus_table (unused there).
    '''
    snvs_union = set()
    snvs_sets = deque()
    pfiles = deque()

    for f in list_vcf_files:
        # pf = file where the parsed VCF object is pickled for re-use.
        pf = f + '.dump'
        pfiles.append(pf)
        sys.stderr.write("Reading vcf file %s\n" % (f))
        thsnvs = read_file(f, pf)
        snvs_union.update(set(thsnvs))
        snvs_sets.append(thsnvs)

    pm, cm = intersection_snvs(snvs_union, snvs_sets)

    # Annotation sources: actionable-gene BED, dbSNP and HapMap positions.
    act_snps = mark_actionable_genes(list_annot_files[0], snvs_union)
    dbSNP = read_positions(list_annot_files[1])
    hapmap = read_positions(list_annot_files[2])

    sys.stderr.write("Building concensus\n")
    # validated is passed as None for now: the validated-positions file is
    # too large to load by default.
    header = build_concesus_table(pm, cm, pfiles, ofile, mmapfile,
                                  complement, act_snps, dbSNP, hapmap, None)

    # BUG FIX: the original referenced the module-level 'outfile' (only
    # defined in the __main__ block) -- use the 'ofile' parameter instead.
    hname = ofile.replace('.txt', '.header')
    hfile = open(hname, 'w')
    hfile.write(",".join(header).replace(',', ',\t') + '\n')
    hfile.close()

    # Side file mapping each snv id to its row index in the table.
    sname = ofile.replace('.txt', '.snvs')
    sfile = open(sname, 'w')
    for s, i in pm.items():
        sfile.write(s + '\t' + str(i) + '\n')
    sfile.close()
    sys.stderr.write("DONE!\n")
        
        
if __name__ == '__main__':
    # Command-line entry point: collect caller VCFs and annotation files,
    # then build the consensus table.
    optionparser = OptionParser("usage: %prog [options] ")

    optionparser.add_option("--local", dest="local", action="store_true", default=False)
    optionparser.add_option("--cluster", dest="cluster", action="store_true", default=False)
    # Tool-specific options
    optionparser.add_option("-f", "--vcf_files", action="append", type="str",
                            dest="vcf_files",
                            help="others_vcf_files. Pass vcf files without .gz extention even if the file .gz is there")
    optionparser.add_option("-a", "--annot_files", action="append", type="str",
                            dest="annot_files",
                            help="other annotation files. ")
    optionparser.add_option("-o", "--output_file", dest="output_file",
                            help="output_file")
    # BUG FIX: help text used to be a copy/paste of "output_file"; dest is
    # now explicit instead of relying on optparse's derivation from the
    # long option name.
    optionparser.add_option("-c", "--complement", action='store_true',
                            dest="complement", default=False,
                            help="complement the snv calls")

    (options, args) = optionparser.parse_args()
    outfile = options.output_file + '.txt'
    mmapfile = options.output_file + '.memmap'
    print(options)  # parenthesized: valid under both Python 2 and 3
    main_concesus(options.vcf_files, options.annot_files, outfile, mmapfile, options.complement)