#!/usr/bin/env python
# a batch process script

#Imports
import os
import sys

import cPickle
import glob
import subprocess
import zipfile

from itertools import product
from optparse import OptionParser
from time import time, sleep, strftime
from multiprocessing import Process, Queue, Pool, cpu_count

# make sure you include numpy as a dependency!!!
from numpy import mean
from numpy import std as stdev

### GO INTO OPTIONS ###
# Command-line interface for the batch primer-design driver (optparse;
# this is a Python 2 script).
parser= OptionParser(usage="%prog [-f]", version="%prog 0.0.1")

# MUST ARGUMENTS
parser.add_option("", "--email", dest="Email", help="users email address to send the data files", default="")
parser.add_option("","--obed", dest="obed", help="output file name")
parser.add_option("","--ozip", dest="ozip", help="output file name")
parser.add_option("-f", "--file", dest="inputFile", help="the input file for primer design required as a fullpath could should be 2 column (chr and position) or 3+ column (chr, start, stop, and optional stuff) bed track")
parser.add_option("-c", "--cores", dest="cores", help="number of cores to be used for this process (defaults to number of cores on your machine)", type="int", default=cpu_count())
parser.add_option("-m", "--mode", dest="designmode", help="the designmode default is PCR [DEFAULT=%default]", default="PCR")
###

# additional options
# the usual ones
parser.add_option("--smin", dest="Smin", help="the minimum amplicon length", type="int")
parser.add_option("--smax", dest="Smax", help="the maximum amplicon length", type="int")
parser.add_option("--epos", dest="Epos", help="length of additional sequence flanking your target region eg: length of flanking sequences at either end of region-of-interest [DEFAULT=%default]", type="int", default=50)
parser.add_option("--minlen", dest="MinLen", help="the minimum length for the primers", type="int")
parser.add_option("--maxlen", dest="MaxLen", help="the maximum length for the primers", type="int")
parser.add_option("--olap", dest="Olap", help="length of overlapping allowed between tiling primers", type="int")
# BUGFIX: the upper-case choice list previously contained "STORE", a typo
# for "SCORE" -- users typing --repeats SCORE were rejected even though
# the lower-case "score" was accepted.
parser.add_option("--repeats", dest="Repeats", help="how to handle repeats [do nothing = ignore, score them as well = score, remove them if they fail = remove]", type="choice", choices=("ignore", "score", "remove", "IGNORE", "SCORE", "REMOVE"))
parser.add_option("--genome", dest="gbuild", help="the genome and build  [%default]", default="hg19") # I guess in galaxy this one is already handled
parser.add_option("--tiling", dest="tiling", help="if you require 2X base tiling, otherwise no base tiling only region tiling [DEFAULT=%default]", action="store_true", default=False)

parser.add_option("--poi", dest="poi", action="store_true", help="if you want to design primers around a point-of-interest [DEFAULT=%default]", default=False)

# the advanced ones
parser.add_option("--mingc", dest="MinGC", help="the minimum GC% for the primers", type="int")
parser.add_option("--maxgc", dest="MaxGC", help="the maximum GC% for the primers", type="int")
parser.add_option("--mintm", dest="MinTm", help="the minimum Tm for the primers", type="int")
parser.add_option("--maxtm", dest="MaxTm", help="the maximum Tm for the primers", type="int")
###

# these are the variables required for Tm calculation. 
parser.add_option("--dna", dest="dna", help="concentration of DNA (nM) in final reaction", type="float")
parser.add_option("--Na_ion", dest="Na", help="concentration of Na+ ion (mM) in final reaction", type="float")
parser.add_option("--K_ion", dest="K", help="concentration of K+ ion (mM) in final reaction", type="float")
parser.add_option("--Tris_ion", dest="Tris", help="concentration of Tris+ ion (mM) in final reaction", type="float")
parser.add_option("--Mg_ion", dest="Mg", help="concentration of Mg+2 ion (mM) in final reaction", type="float")
parser.add_option("--dNTP", dest="dNTP", help="concentration of dNTP (mM) in final reaction", type="float")
parser.add_option("--DMSO", dest="DMSO", help="concentration of DMSO (%) in final reaction", type="float")

###
# MIP specific options
parser.add_option("--redict", dest="REdict", help="only in MIP mode, the list of restriction enzymes to be avoided enter as 'name1:seq1:how_many_allowed,name2:seq2:how_many_allowed,...'")
parser.add_option("--priF", dest="priF", help="only in MIP mode, the sequence of the 5' universal priming site")
parser.add_option("--priR", dest="priR", help="only in MIP mode, the sequence of the 3' universal priming site")
parser.add_option("--middle", dest="Middle", help="only in MIP mode, the sequence of the backbone")
parser.add_option("--arm_max", dest="Arm_max", help="only in MIP mode, the total length of the forward and reverse capture arms", type="int")
parser.add_option("--oligo_len", dest="Oligo_len", help="only in MIP mode, the total length of the preMIP oligos", type="int")

###
# ADVANCED OPTIONS
# maybe a user may want to have more stringent setting
parser.add_option("--dimer", dest="pntcutoff", help="cut off value for dimer checking (0-5, 0:will not allow any complementation, 5:will allow 100% complementation, default is 3.5 (less than 70% allowed", type="float")

# when run locally you can pass the location for the DBs if it is not default
parser.add_option("--dbdir", dest="DBdir", help="only required for the first run of this tool and only if you want to custamize the location of your final database folders", default= os.path.join(os.path.dirname(sys.argv[0]), "DBs"))

# if it is not a standard setup, define the path to the files
parser.add_option("--mainpath", dest="mainpath", help="only required if you have custom locations for the script files. It is however, advised to keep pckme.py (main script) together with the modules directory", default= os.path.join("./", os.path.dirname(sys.argv[0])))

# do you want to silence outputs for web-based usage
parser.add_option("--silence", dest="silence", help="if need silence 0 or else default 1 (no silence)", type="int", default=1)

# do you want to speed it up and just pick a random subset of F/R primers for design
parser.add_option("--random", dest="random", help="do you want random (1) or the default is not (0)", type="int", default=0)

parser.add_option("--update", dest="update", action="store_true")

parser.add_option("--gfserver", dest="gfserver", help="gfServer post 2bit file location string for in-silico PCR pass 'false' to disable else provide as 'host port(default=17779) local_path_to_2bit' [%default]", default="camus.nci.nih.gov 17779 /home/public/sequences/ucsc/hg19/")

parser.add_option("--gfPcr", dest="gfPcr", help="path to gfPcr [%default]", default="~/bin/isPcr/gfPcr")

(option, args)= parser.parse_args()

# also import custom functions from the modules directory
# (mainpath defaults to the directory holding this script, so a standard
# checkout finds "modules/" next to pckme.py without extra flags)
sys.path.append(os.path.join(option.mainpath, "modules"))

from allfunctions import gethms, path_dict, progress_info, DBgenome, synopsis
from defaults import default_values
from pckme_merge import merger
from pckme_lg import pck_large
from pckme_sm import pck_small

# LETS GET STARTED !!!

# ANSI escape codes for emphasised terminal output
bold = "\033[1m"
reset = "\033[0;0m"

# establish that a proper filename is given
# (an interactive raw_input fallback used to live here but is disabled)
filename= option.inputFile

Epos= option.Epos
##########################################
# ENTER RUN SPECIFIC CRITERIA
# STORE AS A DICT AND SHARE IT WITH ALL THE SUBPROCESSES
# every runDB key maps straight onto an option attribute; the table below
# pairs each dictionary key with the attribute it is copied from
runDB= dict((key, getattr(option, attr)) for key, attr in (
        ('MinGC', 'MinGC'), ('MaxGC', 'MaxGC'),
        ('MinTm', 'MinTm'), ('MaxTm', 'MaxTm'),
        ('MinLen', 'MinLen'), ('MaxLen', 'MaxLen'),
        ('Epos', 'Epos'), ('Smin', 'Smin'), ('Smax', 'Smax'),
        ('Olap', 'Olap'), ('pntcutoff', 'pntcutoff'),
        ('gbuild', 'gbuild'),
        ('dna', 'dna'), ('Na', 'Na'), ('K', 'K'),
        ('Tris', 'Tris'), ('Mg', 'Mg'),
        ('dNTP', 'dNTP'), ('DMSO', 'DMSO'),
        ('cores', 'cores'), ('repeats', 'Repeats'),
        ('tiling', 'tiling'), ('random', 'random')))
# design mode is normalised to upper case so later comparisons are simple
runDB['designmode']= option.designmode.upper()
############################################
############################################

# ESTABLISH DEFAULT VALUES
if runDB["designmode"] == "MIP":
    runDB['Arm_max']= option.Arm_max
    runDB['PriF']= option.priF
    runDB['Middle']= option.Middle
    runDB['PriR']= option.priR
    runDB['Oligo_len']= option.Oligo_len
    runDB["REdict"]= option.REdict
    # if you entered a redict parse it, or use defaults
    if option.REdict != None:
        runDB["REdict"]= {}
        # generate the REdict from options
        for each in option.REdict.split(","):
            key, value, allowed= each.split(":")
            try:
                runDB["REdict"].update({key: [value, int(allowed)]})
            except ValueError:
                print "Please enter your restriction site number of allowed values as int"
                sys.exit()

# load default values from defaults.py
defaultDB= default_values(runDB["designmode"])
# for each option whose value is None, load the default value
for each in runDB:
    if runDB[each] is None:
        runDB[each] = defaultDB[each]

cores= runDB["cores"]

# also correct for olap
# if olap is > smin/2 there are design issues
if runDB["Olap"] >= runDB["Smin"] / 2:
    runDB["Olap"] = runDB["Smin"] / 2 - 5

# DICTIONARY of runDB keys:
# Epos= relative position of the exon in the target (from each end)
# Smin and Smax= Amplicon size minimum and maximum respectively
# Olap= Maximum tiling overlap allowed
# Arm_max= [ FOR MIP ] total length of forward and reverse arms allowed 
# PriF= [ FOR MIP ] Forward MIPamp primer site
# Middle= [ FOR MIP ] The backbone
# PriR=  [ FOR MIP ] Reverse MIPamp primer site
# Oligo_len=  [ FOR MIP ] Length of the final product for ordering
# REdict= [ FOR MIP ] the list of restriction enzyme sites and their expected number of occurrences
# pntcutoff= value when you call a seq self-complementary, the smaller the number the more stringent the dimer setting is. It is a percentile representation. Default is 2.5 which means the deltaG of the dimer should be less than 50% of the full-length, so 3.5 means that it should be less than 70% of full-length
###

# check to see if there is an update to the exonDB
# then you can create your DBs in multiprocessor mode
# NOTE(review): shell=True with option-derived paths is fine for trusted
# local use, but breaks on paths with spaces and is injection-prone --
# consider passing an argv list with shell=False
subprocess.call("python %s -g %s -D %s -c %s --silence=%s" %(os.path.join(option.mainpath, "modules", "pckme_dbs.py"), option.gbuild, option.DBdir, option.cores, option.silence), shell=True)
# keep it running it only takes a few seconds even to update
# if you are running it for the first time it will generate the DBs
# this may take a while; with --update the script stops after the DB refresh
if option.update:
    sys.exit()

# also check to see if the gfServer is running by firing a probe
# in-silico PCR (two fixed primers) through gfPcr and inspecting the output
if option.gfserver != "false":
    runDB['gfserver'] = " ".join([option.gfPcr, option.gfserver])
    proc = subprocess.Popen("%s %s %s stdout -out=bed" %(runDB["gfserver"],
                                                         "aaaacaagggaaagcttgag", "tcgggtttttgtgttaagtt"), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # anything on stderr, or stdout that is not a single 6-column bed
    # line, means the server is unreachable/misconfigured -- abort
    proc_stdout, proc_stderr = proc.communicate()
    if len(proc_stderr):
        print "gfServer stderr, exiting:", proc_stderr
        # try turning it on
        #subprocess.call("%s start %s/%s.2bit" %(os.path.join(os.path.dirname, "gfPcr"), option.gfserver, option.gbuild))
        sys.exit()
    elif not len(proc_stdout.strip().split("\t")) == 6:
        print "gfServer stdout, exiting:", proc_stdout
        sys.exit()
    else:
        print "gfServer is running"
else:
    runDB['gfserver'] = "false"
        
##### IMPORTANT FILE PATHS ###########
# establish paths to the per-genome database directories under DBdir/gbuild/
pathDB = dict((p, os.path.join(option.DBdir, option.gbuild, p)) for p in ('genomeDB', 'exonDB', 'repeatDB', 'snpDB'))
runDB["pathDB"]= pathDB

#######################################
# run timestamp (names the output dir) and wall-clock start
timestamp= strftime('%y.%m.%d_%H.%M.%S')
START= time()
########## END OF OPTIONS #############


# start processing
# first create an output directory
synopsis("Preparing the worker list...", option.silence)
with open(filename) as bedfile:
    dirname, filename= os.path.split(filename)
    filename= os.path.splitext(filename)[0]
    if option.ozip == None:
        # running locally or no zip file required
        outputdir= os.path.join(dirname, filename, "%s-%s" %(filename, timestamp))
        os.makedirs(outputdir)    
    else:
        outputdir= ""

    #print "\noutput to %s" %outputdir, filename
    runDB["outputdir"] = outputdir

    # convert the input file to a workable seq file
    cline= []
    inputfile= [] # the master input file
    strip= str.strip
    split= str.split

    Headers= [] # keep the original headers
    line= bedfile.readline()
    # what if the first line is the title line
    # the very first thing in line should be "chr"
    if line.find("chr") != 0:
        # the first thing not -1 not >0
        # it is probably the title line
        # skip it read another line
        # you can update the genome from the header
        line= bedfile.readline()
    # carry-on
    line= line.strip().split()
    # test to see if there is a gene_exon column
    if len(line) > 1 and not line[-1].isdigit():
        line= line[:-1]
    # sample one line to see the structure
    if len(line) > 2:
        if option.poi:
            runDB['Epos'] = runDB["Smax"] / 2
            
        # this is a 3+ column bed file
        def process_line(line):
            lstart= int(line[1]) - runDB["Epos"]
            lstop= int(line[2]) + runDB["Epos"]
            # make sure the target region is long enough fo a proper design
            offset= 0
            #min_seq_len= min(runDB["Smin"] * 1.2, runDB["Smax"])
            while lstop - lstart < runDB["Smax"]:
                offset+= 10         
                lstart-= offset
                lstop+= offset
            seq= DBgenome(pathDB["genomeDB"], line[0], lstart, lstop)
            # try to make the bed file look like the galaxy output file
            cline.append(len(seq))
            header= [runDB["gbuild"], line[0], lstart, lstop, offset]
            Headers.append([line[0], lstart, lstop, offset])
            return header, seq

    elif len(line) > 0:
        option.poi = True
        
        # simplfy the design process. you don't need anything longer that your Smax
        runDB['Epos'] = runDB["Smax"] / 2

        # now you can either enter two columns or
        # just one column of chr:pos
        if len(line) == 2:
            def process_line(line):
                lstart= int(line[1]) - runDB['Epos']
                lstop= int(line[1]) + runDB['Epos']
                seq= DBgenome(pathDB["genomeDB"], line[0], lstart, lstop)
                # try to make the bed file look like the galaxy output file
                cline.append(len(seq))
                header= [runDB["gbuild"], line[0], lstart, lstop, 0]
                Headers.append([line[0], lstart, lstop, "0"])
                return header, seq
            
        elif len(line) == 1:
            if line[0].find(":") > 0:
                # your are good
                def process_line(line):
                    line= line[0]
                    line= line.split(":")
                    lstart= int(line[1]) - runDB['Epos']
                    lstop= int(line[1]) + runDB['Epos']
                    seq= c
                    # try to make the bed file look like the galaxy output file
                    cline.append(len(seq))
                    header= [runDB["gbuild"], line[0], lstart, lstop, 0]
                    Headers.append([line[0], lstart, lstop, "0"])
                    return header, seq
            else:
                print "unrecognized .bed file : please provide you input as 'chr:pos'"
                sys.exit()
            
    else:
        # in case there is something wrong with the file
        print "unrecognized .bed file column size: required at least two, one chr# other position of interest or three column chr, start, stop"
        sys.exit()
    
    # from now on everthing is the same
    jobs_list= []
    #counter= '%'+'0%s' %len("%s" %len(inputfile))+'d'
    #hcount= 1 # header count from the last file, goes with the file 

    fcounter= 1
    header, seq= process_line(line)
    jobs_list.append((outputdir, filename, fcounter, header, seq))
    
    # now work the rest of the file
    for fcounter, line in enumerate(bedfile, 2):
        line= strip(line)
        line= split(line)
        header, seq= process_line(line)

        if header[3] - header[2] + 1 != len(seq):
            print header, len(seq), "size does not match"

        jobs_list.append((outputdir, filename, fcounter, header, seq))

runDB["POI"] = option.poi

# fcounter ends at the last record number, i.e. the target count
len_Headers= fcounter
synopsis('There are %s targets in { %s }' %(len_Headers, filename), option.silence)

# find the latest dbSNP and record the version, if there is one
try:
    snp_stamp= open(os.path.join(pathDB['snpDB'], 'updated.txt'))
except IOError:
    dbSNP_version= "None"
else:
    dbSNP_version= snp_stamp.readline().strip()
    snp_stamp.close()
runDB["dbSNP_version"]= dbSNP_version


# save the run variables and headers so the subprocesses can reload them
for basename, payload in (('runDB.pkl', runDB), ('Headers.pkl', Headers)):
    with open(os.path.join(outputdir, basename), 'w') as pkl_obj:
        cPickle.dump(payload, pkl_obj)


MEAN= mean(cline)
SD= stdev(cline)
synopsis('The average seq length is %.0f +- %0.f bp ' % (MEAN, SD), option.silence)
# any sequence longer than five times Smax is considered "giant"
MSD= 5*runDB["Smax"]

# get ready to load exonDB and snpDB
exonDB= {}
snpDB= {}

#print 'Batch process queue initiated for %s design\twith POI=%s' %(runDB["designmode"], bool(runDB['POI']))

def big_or_small(arg):
    """Dispatch one job tuple to the right designer.

    Non-POI targets whose sequence is at least MSD bases long go to
    pck_large; everything else goes to pck_small.  Returns whatever the
    chosen designer returns.
    """
    outputdir, filename, fcounter, header, seq = arg
    is_giant= len(seq) >= MSD and not option.poi
    worker= pck_large if is_giant else pck_small
    # now go go go, fire up the design for this target
    return worker(outputdir, filename, fcounter, header, seq, runDB, pathDB, option.random)

synopsis("Starting design processes, please wait...", option.silence)

# fan the jobs out over a worker pool; never spawn more workers than jobs
pool= Pool(min(len(jobs_list), cores))
async_result= pool.map_async(big_or_small, jobs_list)
if option.silence == 1:
    # poll once a second with an elapsed-time heartbeat until all done
    while not async_result.ready():
        synopsis("Elapsed %s" %gethms(time()-START))
        sleep(1)
output_data= async_result.get()
pool.close()
pool.join()

#print output_data


########################
# the clean-up and merge process
synopsis("Starting file clean-up process", option.silence)
# work as a subprocess so if anything fails you can run as a separate script
Coverage, tiling= merger(output_data, filename, START, runDB, option.silence)
synopsis('File/data merge complete', option.silence)
print "Overall coverage is %.1f%%" % Coverage
if option.tiling:
    print "Base coverage at 2X or more is %.1f%%" %(tiling)
        

# create a zip file for everything
if option.ozip != None:
    zfile= zipfile.ZipFile(option.ozip, "w")
    for each in glob.glob("*"):
        zfile.write(each)
    zfile.close()
    os.rename("%s_track.bed" %filename, option.obed)


# send user an email saying the zip file is ready to download
if len(option.Email):
    import smtplib

    FROM = "admin@meltzerlab.galaxy"
    TO = [option.Email.replace("__at__", "@")] # must be a list
    SUBJECT = 'Your MeltzerLab Galaxy job completed'
    TEXT = "Your primer design for your file is complete. Please logon to Meltzer Lab galaxy ( http://pressa.nci.nih.gov:8080 ) to download the results\n\n\nMeltzer Lab Galaxy Admin\nDO NOT REPLY TO THIS EMAIL\n\n"
    # Prepare actual message
    message = """\
    From: %s
    To: %s
    Subject: %s

    %s
    """ % (FROM, ", ".join(TO), SUBJECT, TEXT)

    # Send the message via our own SMTP server
    s = smtplib.SMTP("mailfwd.nih.gov", '25')
    s.sendmail(FROM, TO, message)
    s.quit()
    synopsis("Email is send to user(s) %s" %", ".join(TO), option.silence)

print 'Run completed in %s\n' %(gethms(time()-START))
