
import glob
import os
import sys
import pysam
from time import time
from math import fsum
from copy import deepcopy
import cPickle
sys.path.append(os.path.join(os.getcwd(), "modules"))
from allfunctions import gethms, mean, stdev, metric_system, synopsis
from modefunctions import find_exon

# ANSI terminal escape codes used to emphasise console output
# (the progress prints that used them are currently commented out below)
bold = "\033[1m"
reset = "\033[0;0m"

def output_alldata(data, outputdir, filename, designmode):
    """Write the merged design results to disk and gather pair statistics.

    Creates four files inside *outputdir*:
      <filename>_display.txt  - display blocks, written in sorted key order
      <filename>_order.txt    - oligo ordering sheet
      <filename>_track.bed    - BED-style track of the picked pairs
      <filename>_picked.txt   - full tab-separated table of picked pairs/MIPs

    data       -- {"display": {key: lines}, "picked": {target#: [pair, ...]}}
                  NOTE: "display" is popped from *data* (the caller's dict is
                  mutated) and each picked line is reformatted to strings in
                  place.
    outputdir  -- directory receiving the output files
    filename   -- base name prefixed to every output file
    designmode -- 'PCR' or 'MIP'; selects the column layout and GC thresholds

    Returns (Tm5, Tm3, gc5, gc3, gc_count_40, gc_count_40_60, avgS, avgSize).
    For MIP mode the gc_count_* counters actually apply the 35/55 thresholds;
    the variable names are kept for interface compatibility with the caller.
    """
    # performance statistics accumulated while writing
    Tm5= []   # Tm of 5' primer (ligation arm for minus-strand MIPs)
    Tm3= []   # Tm of 3' primer (extension arm for minus-strand MIPs)
    gc5= []   # GC% of 5' primer
    gc3= []   # GC% of 3' primer
    gc_count_40= 0     # pairs where both GC% are below the low threshold
    gc_count_40_60= 0  # pairs where both GC% fall inside the preferred window
    avgS= []     # score of every pair
    avgSize= []  # amplicon size of every pair

    # Display blocks cannot be sorted globally, so they stay keyed by the
    # file they came from and are written out in key order.
    temp= data.pop("display")
    with open(os.path.join(outputdir, "%s_display.txt" %filename), "w") as output:
        for key in sorted(temp.keys()):
            output.writelines(temp.pop(key))

    # flatten the picked pairs, ordered by target number
    picked= [i for j in sorted(data["picked"].keys()) for i in data["picked"][j]]

    # Use context managers for all three files so they are closed even if a
    # write raises (the previous manual open/close leaked handles on error).
    with open(os.path.join(outputdir, "%s_order.txt" %filename), "w") as order, \
         open(os.path.join(outputdir, "%s_track.bed" %filename), "w") as track, \
         open(os.path.join(outputdir, "%s_picked.txt" %filename), "w") as output:
        if designmode == 'PCR':
            output.write('Score\tName\tSequence\tTm\tGC\tLength\tTarget_Location\tF_Arm_Chr\tF_Arm_Start\tF_Arm_Stop\tRelLoc\tName\tSequence\tTm\tGC\tLength\tTarget_Location\tR_Arm_Chr\tR_Arm_Start\tR_Arm_Stop\tRelLoc\tSize\tPairCode\tTubeGroup\tGene.Exon:#\n')

            for line in picked:
                avgS.append(line[0])
                avgSize.append(line[21])
                Tm5.append(line[3])
                Tm3.append(line[13])
                gc5.append(line[4])
                gc3.append(line[14])
                # PCR thresholds: <40 is low, 40-60 is the preferred window
                if line[4] < 40 and line[14] < 40:
                    gc_count_40+= 1
                elif 40 <= line[4] <= 60 and 40 <= line[14] <= 60:
                    gc_count_40_60+= 1

                # format the numeric fields in place, then stringify the rest
                line[0]= "%.3f" %line[0]
                line[3]= "%.1f" %line[3]
                line[4]= "%.1f" %line[4]
                line[13]= "%.1f" %line[13]
                line[14]= "%.1f" %line[14]
                line= [str(p) for p in line]

                # order sheet: paircode + last char of tube group, F/R seqs, gene
                order.write('\t'.join([line[22]+line[23][-1], line[2], line[12], line[24]])+'\n')
                # BED track: chrom, F start, R stop, gene label
                track.write('\t'.join([line[7], line[8], line[19], line[24]])+'\n')
                output.write("\t".join(line)+"\n")

        elif designmode == 'MIP':
            output.write('Score\tName\tSequence\tTm\tGC\tLength\tTarget_Location\tF_Arm_Chr\tF_Arm_Start\tF_Arm_Stop\tRelLoc\tName\tSequence\tTm\tGC\tLength\tTarget_Location\tR_Arm_Chr\tR_Arm_Start\tR_Arm_Stop\tRelLoc\tSize\tPairCode\tTarget\tStrand\tGene.Exon:#\n')

            for line in picked:
                avgS.append(line[0])
                avgSize.append(line[21])
                # for MIPs the ligation/extension arm assignment depends on
                # the strand (line[24]), so swap the 5'/3' stats accordingly
                if line[24]=='plus':
                    Tm5.append(line[3])
                    Tm3.append(line[13])
                    gc5.append(line[4])
                    gc3.append(line[14])
                elif line[24]=='minus':
                    Tm5.append(line[13])
                    Tm3.append(line[3])
                    gc5.append(line[14])
                    gc3.append(line[4])
                # MIP thresholds: <35 is low, 35-55 is the preferred window
                if line[4] < 35 and line[14] < 35:
                    gc_count_40+= 1
                elif 35 <= line[4] <= 55 and 35 <= line[14] <= 55:
                    gc_count_40_60+= 1

                line[0]= "%.3f" %line[0]
                line[3]= "%.1f" %line[3]
                line[4]= "%.1f" %line[4]
                line[13]= "%.1f" %line[13]
                line[14]= "%.1f" %line[14]
                line= [str(p) for p in line]
                # order sheet: paircode, target, strand, gene label
                order.write('\t'.join([line[22], line[23], line[24], line[25]])+'\n')
                track.write('\t'.join([line[7], line[8], line[19], line[25]])+'\n')
                output.write("\t".join(line)+"\n")

    return Tm5, Tm3, gc5, gc3, gc_count_40, gc_count_40_60, avgS, avgSize


def merger(output_data, filename, START, runDB, silence):
    """Merge per-target design results and write the final report files.

    output_data -- iterable of per-target result dicts, each holding a
                   "display" dict and a "picked" list produced by the workers
    filename    -- base name used for every merged output file
    START       -- time() stamp taken when the run started (for the rate report)
    runDB       -- run configuration dict: outputdir, designmode, Epos, POI,
                   dbSNP_version, gbuild, thresholds, pathDB, tiling, ...
    silence     -- accepted for interface compatibility; unused here

    Side effects: writes the merged display/order/track/picked files,
    '<filename>_summary.txt', '<filename>_failed_regions.txt' (when regions
    failed), optionally a coverage figure (macOS only), and removes
    'Headers.pkl' from the output directory.

    Returns (Bases_cov, cum_tile): overall percent base coverage and the
    cumulative percent coverage at the tiling frequency (0 when tiling
    statistics are not computed).
    """
    cum_tile= 0 # reset below when tiling statistics are computed

    outputdir= runDB["outputdir"]

    Epos= runDB['Epos']
    designmode= runDB['designmode']
    POI= runDB['POI']
    dbSNP_version= runDB['dbSNP_version']

    # dump the whole runDB next to the results for provenance
    with open(os.path.join(outputdir, '__%s__.txt' %designmode), "a") as outfile:
        for each in runDB.items():
            outfile.write("\t".join([str(s) for s in each])+"\n")

    # Each input file created a separate output folder; collect everything
    # here keyed by target number so the merged output keeps submission order.
    alldata= {"display":{}, "picked":{}}
    len_pickedlist= 0
    for data in output_data:
        temp= data["display"].popitem()
        alldata["display"].update({temp[0]: temp[1]})
        try:
            # the target number is encoded in the first pair's name, before
            # the "F" marker -- presumably "<target#>F..."; TODO confirm
            alldata["picked"].update({int(data["picked"][0][1][:data["picked"][0][1].find("F")]):data["picked"]})
            len_pickedlist+= len(data["picked"])
        except IndexError:
            # no picks for this target
            pass

    # merge and write all data; returns the per-pair statistics
    Tm5, Tm3, gc5, gc3, gc_count_40, gc_count_40_60, avgS, avgSize= output_alldata(alldata, outputdir, filename, designmode)

    with open(os.path.join(outputdir, "Headers.pkl")) as pkl_obj:
        Headers= cPickle.load(pkl_obj)

    TRegion= len(Headers) # total number of targeted regions

    # create the summary file
    with open(os.path.join(outputdir, '%s_summary.txt' %filename), "w") as summary:
        summary.write('Filename is %s\n\n' %outputdir)
        summary.write('Genome targetted is %s \n\n' % runDB["gbuild"])
        summary.write('SNP database used is %s \n\n' % dbSNP_version)
        summary.write('Base threshols for dimers is %.1f \n\n' % runDB["pntcutoff"])
        summary.write('GC range is %i to %i \n\n' % (runDB["MinGC"], runDB["MaxGC"]))
        summary.write('Tm range is %i to %i \n\n' % (runDB["MinTm"], runDB["MaxTm"]))
        summary.write('Primer length range is %i to %i \n\n' % (runDB["MinLen"], runDB["MaxLen"]))
        summary.write('Amplicon size range is %i to %i \n\n' % (runDB["Smin"], runDB["Smax"]))
        summary.write('Exon position was set at %i \n\n' % (runDB["Epos"]))
        summary.write('Tiling overlap allowed is %i \n\n' % (runDB["Olap"]))
        summary.write('\nNumber of total regions is %s\n\n' % (TRegion))
        if designmode == 'PCR':
            summary.write('%s PCR primer pairs are picked\n\n' % (len_pickedlist))
        elif designmode == 'MIP':
            summary.write('%s MIPs are picked\n' % (len_pickedlist))

        # calculate per-base tiling and how many regions are 100% covered
        Tiling= {}       # "chrom-pos" -> coverage depth
        short_fail= []   # individual uncovered (chrom, pos) bases
        Ecov= 0          # regions fully covered
        Tbasecovg= 0     # total bases to be covered
        basecovg= 0      # bases actually covered
        if not POI:
            failed= open(os.path.join(outputdir, '%s_failed_regions.txt' %filename),'w')
            for tcount, target in enumerate(Headers, 1):
                Chr= target[0]
                Tstart= target[1] # target start
                Tstop= target[2]  # target end
                Epos=  runDB['Epos'] + target[3] # apply the per-target offset

                Estart= Tstart + Epos # exon start
                Estop= Tstop - Epos   # exon stop
                Tbasecovg+= (Estop-Estart+1) # total exonic bases targeted

                # per-base coverage counter for this target
                temp= dict((i,0) for i in xrange(Estart, Estop+1, 1))

                if tcount in alldata["picked"]:
                    TTemp2= alldata["picked"].pop(tcount)
                    for pair in TTemp2:
                        # bases strictly between Fstop and Rstart are covered;
                        # clamp the range to the exon boundaries
                        for i in xrange(max(pair[9]+1, Estart), min(pair[18], Estop+1), 1):
                            temp[i]+= 1

                    # transfer the base coverage into the global Tiling dict,
                    # leaving only the uncovered bases behind in temp
                    # (safe in Python 2: items() returns a list snapshot)
                    for p, j in temp.items():
                        Tiling["%s-%s" %(Chr, p)]= j
                        if j > 0:
                            temp.pop(p)

                    if len(temp) == 0:
                        Ecov+= 1 # fully covered region
                    else:
                        short_fail.extend([(Chr, p) for p in temp])
                    basecovg+= (Estop-Estart+1 - len(temp))

                else:
                    # no picks at all for this region: report it as failed
                    exonDB= pysam.Tabixfile(os.path.join(runDB["pathDB"]['exonDB'], "%s.tabix.gz" %Chr))
                    failed.write('%s:%s-%s\t%s\n' %(Chr, Tstart, Tstop, find_exon(exonDB, Chr, Tstart, Tstop)))

                    for p, j in temp.items():
                        Tiling["%s-%s" %(Chr, p)]= j

            if len(short_fail):
                # condense the single uncovered bases into contiguous runs
                # and report each run in the failed-regions file
                short_fail= sorted(set(short_fail), reverse=True)
                temp= [short_fail.pop()]
                short_fail.reverse()
                for Chr, pos in short_fail:
                    if Chr == temp[-1][0] and pos == temp[-1][1] + 1:
                        temp.append([Chr, pos])
                    else:
                        # Flush the finished run. BUGFIX: look up the run's
                        # own chromosome (temp[0][0]); the old code used the
                        # current loop Chr, which is wrong whenever a run
                        # ends at a chromosome boundary.
                        exonDB= pysam.Tabixfile(os.path.join(runDB["pathDB"]['exonDB'], "%s.tabix.gz" %temp[0][0]))
                        failed.write("%s:%s-%s\t%s\t%s bases\n" %(temp[0][0], temp[0][1], temp[-1][1], find_exon(exonDB, temp[0][0], temp[0][1], temp[-1][1]), temp[-1][1]-temp[0][1]+1))
                        temp= [[Chr, pos]]
                # flush whatever run is left
                try:
                    exonDB= pysam.Tabixfile(os.path.join(runDB["pathDB"]['exonDB'], "%s.tabix.gz" %temp[0][0]))
                    failed.write("%s:%s-%s\t%s\t%s bases\n" %(temp[0][0], temp[0][1], temp[-1][1], find_exon(exonDB, temp[0][0], temp[0][1], temp[-1][1]), temp[-1][1]-temp[0][1]+1))
                except IndexError:
                    pass

            failed.close()

        else:
            # POI mode: all that matters is how many regions got a pick
            basecovg= Ecov= len_pickedlist
            Tbasecovg= TRegion
            if basecovg != Tbasecovg:
                with open(os.path.join(outputdir, '%s_failed_regions.txt' %filename),'w') as failed:
                    for tcount, target in enumerate(Headers, 1):
                        TTemp2= alldata["picked"].get(tcount, list())
                        if not len(TTemp2):
                            failed.write("\t".join([str(p) for p in target]))
                            # BUGFIX: Chr was never assigned in POI mode and
                            # raised NameError; the chromosome is target[0]
                            exonDB= pysam.Tabixfile(os.path.join(runDB["pathDB"]['exonDB'], "%s.tabix.gz" %target[0]))
                            failed.write("\t%s\n" %find_exon(exonDB, target[0], target[1], target[2]))

        Exons_cov= 100.0 * Ecov / TRegion
        Bases_cov= 100.0 * basecovg / Tbasecovg
        summary.write('\nPercent of exons fully tiled is %.1f\n\n' % Exons_cov)
        summary.write('\tOverall coverage is %.1f%%\n\n' % (Bases_cov))
        if basecovg > 0 and not POI:
            summary.write("Length of region targeted is %sbases\n\n" %(metric_system(Tbasecovg)))
            summary.write("Length of region covered is %sbases\n\n" %(metric_system(basecovg)))

        # tiling statistics: histogram of per-base coverage depth
        if runDB["tiling"]:
            tiling_freq= 2 # depth reported as "covered at Nx or more"
            covg= {}
            for tile_count in Tiling.values():
                covg[tile_count]= covg.get(tile_count, 0) + 1

            Tiling= {} # release the (potentially huge) per-base dict
            covg= covg.items()
            covg.sort()
            X= [p[0] for p in covg]                # coverage depths
            Y= [p[1] for p in covg]                # raw base count per depth
            Y2= [100.0*p[1]/sum(Y) for p in covg]  # percent per depth

            covg.reverse()
            Ycum= [] # cumulative percent covered at >= each depth
            summary.write("\nBase tiling results at %sX\n" %tiling_freq)
            for Count, Cum, Covg in zip(X, Y2, Y):
                # covg shrinks by one entry per iteration, so this fsum is
                # the percentage of bases at the current depth or deeper
                Ycum.append(fsum([100.0*p[1]/sum(Y) for p in covg]))
                summary.write("\t%.1f%% of bases are covered at %s (cumulative %.1f%%)\n" %(Cum, Count, Ycum[-1]))
                if Count == tiling_freq:
                    cum_tile= Ycum[-1]
                covg.pop()

            summary.write("%.1f%% of bases are covered at %sX or more\n" %(cum_tile, tiling_freq))

            # coverage histogram figure -- only produced on macOS
            if sys.platform == "darwin":
                import matplotlib
                matplotlib.use("Agg") # headless backend, no display needed
                import matplotlib.pyplot as plt
                import numpy as np

                fig= plt.figure()
                ax1= fig.add_subplot(111)
                ind= np.arange(len(Y))+0.1
                rects1= ax1.bar(ind, Y, color= 'b')

                def autolabel(rects):
                    # NOTE: 'rect' is deliberately global so the last bar's
                    # width can be reused for axis placement below
                    global rect
                    # attach percentage labels above each bar
                    getlim= ax1.get_ylim()
                    ylimit= getlim[1]
                    for rect, percent in zip(rects, Y2):
                        height = rect.get_height()
                        ax1.text(rect.get_x()+rect.get_width()/2., height+ylimit/100, "%.1f%%" %percent, ha='center', va='bottom', fontsize= 10)

                autolabel(rects1)

                ax2= ax1.twinx()
                ax2.plot(ind+rect.get_width()/2, Ycum, 'rs')
                ax2.axhline(y=10, color='k')
                ax2.axhline(y=50, color='k')
                ax2.axhline(y=80, color='k')
                ax2.set_ylabel("% coverage")
                ax1.set_ylabel("raw count")

                plt.xticks(ind+rect.get_width()/2, X, fontsize= 12)
                plt.savefig(os.path.join(outputdir,'%s_figure.pdf' %(filename)), format='pdf')
                plt.close()

        if len_pickedlist:
            if designmode=='PCR':
                summary.write('\naverage score for picked is %.1f\n\n' % (mean(avgS)))
                summary.write('average amplicon size is %.1f +- %.1f\n\n' % (mean(avgSize), stdev(avgSize)))
                summary.write('average Tm of F_primers is %.1f +- %.1f\n\n' % (mean(Tm5), stdev(Tm5)))
                summary.write('average Tm of R_primers is %.1f +- %.1f\n\n' % (mean(Tm3), stdev(Tm3)))
                summary.write('average GC%% of F_primers is %.1f +- %.1f\n\n' % (mean(gc5), stdev(gc5)))
                summary.write('average GC%% of R_primers is %.1f +- %.1f\n\n' % (mean(gc3), stdev(gc3)))
                # BUGFIX: 100.0 forces float division; Python 2 integer
                # division was silently truncating these percentages
                summary.write('\tin %.1f%% of pairs both primers have GC%% <40\n\n' %(100.0*gc_count_40/len_pickedlist))
                summary.write('\tin %.1f%% of pairs both primers have GC%% in 40-60\n\n' %(100.0*gc_count_40_60/len_pickedlist))

            elif designmode == 'MIP':
                summary.write('average score for picked is %.1f\n\n' % (mean(avgS)))
                summary.write('average amplicon size is %.1f +- %.1f\n\n' % (mean(avgSize), stdev(avgSize)))
                summary.write('average Tm of Ligation Arm is %.1f +- %.1f\n\n' % (mean(Tm5), stdev(Tm5)))
                summary.write('average Tm of Extension Arm is %.1f +- %.1f\n\n' % (mean(Tm3), stdev(Tm3)))
                summary.write('average GC%% of 5-Arm is %.1f +- %.1f\n\n' % (mean(gc5), stdev(gc5)))
                summary.write('average GC%% of 3-Arm is %.1f +- %.1f\n\n' % (mean(gc3), stdev(gc3)))
                # BUGFIX: float division, as above
                summary.write('\tin %.1f%% of pairs both primers have GC%% <35\n\n' %(100.0*gc_count_40/len_pickedlist))
                summary.write('\tin %.1f%% of pairs both primers have GC%% in 35-55\n\n' %(100.0*gc_count_40_60/len_pickedlist))

        # report the analysis rate in whichever unit reads best
        if (TRegion/(time()-START))<1.0:
            summary.write('\nRate of analysis is %.1f regions/min\n' % ((TRegion*60)/(time()-START)))
        elif (TRegion/(time()-START))>=1.0:
            summary.write('\nRate of analysis is %.1f regions/sec\n' % ((TRegion)/(time()-START)))

        summary.write('\nOverall run completed in %s\n\n' % (gethms(time()-START)))

    # final clean-up: the pickled headers are no longer needed
    os.remove(os.path.join(outputdir, 'Headers.pkl'))

    return Bases_cov, cum_tile
