# create all the DBs

import time
START= time.time()

import os
import urllib
import sys
import gzip
import tarfile
import glob
import cPickle
import datetime
import zipfile
import subprocess
import pysam
from ftplib import FTP
from optparse import OptionParser
from itertools import product
from multiprocessing import Process, Pool, cpu_count

from allfunctions import gethms, synopsis, complement, revcomp

xmer= 12 # for repeatDB

def make_tabix(ftabix, pC=1, pS=2, pE=3):
    """Sort, bgzip-compress and tabix-index a tab-delimited text file.

    ftabix : path of a plain-text tabular file; it is replaced on disk by
             <ftabix>.gz plus the accompanying tabix index.
    pC/pS/pE : 1-based column numbers of chromosome, start and end.
    """
    sort_cmd = 'sort -S 2G -k%s,%s -k%s,%sn -k%s,%sn %s | bgzip > %s.gz' % (pC, pC, pS, pS, pE, pE, ftabix, ftabix)
    subprocess.call(sort_cmd, shell=True)
    index_cmd = 'tabix -p bed -s %s -b %s -e %s -0 -f %s.gz' % (pC, pS, pE, ftabix)
    subprocess.call(index_cmd, shell=True)
    # the uncompressed original is redundant once the .gz exists
    os.remove(ftabix)
    return


def dummy_proc(s):
    """No-op callback for Pool.apply_async; accepts and ignores one value."""
    return None


def check_local_files(outputdir, new_file):
    """Decide whether *new_file* still needs to be downloaded.

    Returns False when a plausible local copy already exists -- either the
    downloaded archive itself or its extracted counterpart, each larger
    than 10 kB -- otherwise returns *new_file* unchanged so the caller
    knows to fetch it.
    """
    fname = os.path.basename(new_file)

    # 1) the archive itself, e.g. refGene.txt.gz
    local_file = os.path.join(outputdir, fname)
    if os.path.isfile(local_file) and os.path.getsize(local_file) > 10000:
        return False

    # 2) the extracted file, e.g. chromFa.tar.gz -> chromFa
    # BUGFIX: the old code used str.rstrip(file_ext), which strips a
    # *character set* rather than a suffix ("chromFa.tar.gz".rstrip(".targz")
    # yields "chromF"), so slice the extension off at the first dot instead.
    dot = fname.find(".")
    if dot != -1:
        local_file = os.path.join(outputdir, fname[:dot])
        if os.path.isfile(local_file) and os.path.getsize(local_file) > 10000:
            return False

    return new_file


def check_for_db_update(ftp, outputdir, filename, source_path, no_update, silence):
    """Return the name of a newer remote file, or False when up to date.

    ftp       : open, logged-in ftplib.FTP whose cwd is already source_path
    outputdir : local database directory; the mtime of its "updated.txt"
                marker records when the db was last built
    filename  : remote file name, may contain FTP wildcards (e.g. snp*)
    no_update : when True an already-built db is never refreshed
                (used for genome/snp dbs that are not updated)

    Returns the remote file name to download, or False if nothing to do.
    """
    # no local db directory, or a directory without the completion marker:
    # (re)build from the newest matching remote file
    if not os.path.isdir(outputdir):
        return sorted(ftp.nlst(filename))[-1]
    if not os.path.isfile(os.path.join(outputdir, "updated.txt")):
        return sorted(ftp.nlst(filename))[-1]

    if no_update:
        return False

    # compare the marker's (UTC) date against the remote file's MDTM date
    old_date = time.gmtime(os.path.getmtime(os.path.join(outputdir, "updated.txt")))
    old_date = datetime.date(old_date[0], old_date[1], old_date[2])  # year, month, day
    if datetime.date.today() <= old_date:
        # the database is less than a day old -- skip the remote check
        return False

    synopsis('Checking for %s updates' % filename, silence)
    # the filename may be ambiguous (wildcards); take the newest match
    new_file = sorted(ftp.nlst(filename))[-1]

    # MDTM reply is "<code> YYYYMMDDhhmmss"
    junk, Ndate = ftp.sendcmd("MDTM %s" % new_file).split()
    new_date = datetime.date(int(Ndate[:4]), int(Ndate[4:6]), int(Ndate[6:8]))
    # BUGFIX: the old trailing "return need_file" was unreachable and
    # referenced an undefined name; removed.
    if new_date <= old_date:
        return False
    return new_file
        
    
def create_exondb(source_path, assembly, outdir, filename, silence, server, no_update, dbname):
    """Download the UCSC refGene table and build one tabix file per
    chromosome in <outdir>/<dbname>, plus an "updated.txt" marker whose
    mtime drives future update checks.
    """
    outputdir = os.path.join(outdir, dbname)

    ftp = FTP(server)
    ftp.login()
    ftp.cwd(source_path)

    new_file = check_for_db_update(ftp, outputdir, filename,
                                   source_path, no_update, silence)

    if new_file == False:
        ftp.close()
        return

    local_file = os.path.join(outputdir, os.path.basename(new_file))
    if check_local_files(outputdir, new_file):
        synopsis('Downloading new %s from server...' % dbname, silence)
        # close the handle when the transfer finishes (was left open before)
        with open(local_file, 'wb') as outf:
            ftp.retrbinary('RETR %s' % new_file, outf.write)

    ftp.close()
    if os.path.isfile(local_file):
        subprocess.call(["gunzip -f %s" % local_file], shell=True)
        # BUGFIX: slice off ".gz"; rstrip(".gz") strips a character *set*
        # and could eat trailing g/z/. characters of the real file name
        if local_file.endswith(".gz"):
            local_file = local_file[:-3]

    synopsis('Genereating new %s files...' % dbname, silence)
    # bucket every line by chromosome, then write one tabix per chromosome
    tempDB = dict()
    with open(local_file) as inf:
        for line in inf:
            # chromosome is the first field that starts with "chr"
            Chr = line[line.find("chr"):line.find("\t", line.find("chr") + 1)]
            try:
                tempDB[Chr].append(line)
            except KeyError:
                tempDB[Chr] = [line]

    for K, V in tempDB.items():
        outfile = os.path.join(outputdir, "%s.tabix" % K)
        with open(outfile, "w") as outf:
            outf.writelines(V)
        # index in the background (refGene columns: chrom=3, start=5, end=6)
        jobP.apply_async(make_tabix, (outfile, 3, 5, 6), callback=dummy_proc)

    # completion marker; its mtime is read by check_for_db_update
    with open(os.path.join(outputdir, "updated.txt"), "w") as outfile:
        outfile.write("%s\n" % time.strftime('%Y-%m-%d'))

    os.remove(local_file)
    return


# variant of create_exondb that removes some of the redundant tabix queries used to find exons
def create_exondb2(source_path, assembly, outdir, filename, silence, server, no_update, dbname):
    """Variant of create_exondb that writes one BED-like line per exon
    (chrom, exon start, exon end, "gene:exon_number"), numbering exons
    5'->3' on both strands.  Only 'cmpl' status transcripts are used.
    """
    outputdir = os.path.join(outdir, dbname)

    ftp = FTP(server)
    ftp.login()
    ftp.cwd(source_path)

    new_file = check_for_db_update(ftp, outputdir, filename,
                                   source_path, no_update, silence)

    if new_file == False:
        ftp.close()
        return

    local_file = os.path.join(outputdir, os.path.basename(new_file))
    if check_local_files(outputdir, new_file):
        synopsis('Downloading new %s from server...' % dbname, silence)
        # close the handle when the transfer finishes (was left open before)
        with open(local_file, 'wb') as outf:
            ftp.retrbinary('RETR %s' % new_file, outf.write)

    ftp.close()
    if os.path.isfile(local_file):
        subprocess.call(["gunzip -f %s" % local_file], shell=True)
        # slice off ".gz"; rstrip(".gz") strips a character *set*
        if local_file.endswith(".gz"):
            local_file = local_file[:-3]

    synopsis('Genereating new %s files...' % dbname, silence)
    tempDB = dict()
    with open(local_file) as inf:
        for line in inf:
            (junk, Acc, Chr, Strand, Gstart, Gstop, Cstart, Cstop, Exoncnt,
             Estarts, Estops, junk, Gname, Status, junk, Frames) = line.strip().split("\t")

            if Chr not in tempDB:
                # flush the previous chromosome's bucket as soon as a new
                # chromosome appears
                # NOTE(review): assumes the dump is grouped by chromosome
                # -- confirm; unsorted input would produce partial files
                while tempDB:
                    K, V = tempDB.popitem()
                    outfile = os.path.join(outputdir, "%s.tabix" % K)
                    with open(outfile, "w") as outf:
                        outf.writelines(V)
                    jobP.apply_async(make_tabix,
                                     (outfile, 0, 1, 2),
                                     callback=dummy_proc)
                tempDB[Chr] = list()

            if Status == 'cmpl':
                # BUGFIX: exonStarts/exonEnds are comma-terminated strings
                # (e.g. "100,200,"); the old code indexed the raw string
                # (yielding single characters) and called .reverse() on a
                # str, which raises AttributeError.  Split them first.
                starts = Estarts.rstrip(",").split(",")
                stops = Estops.rstrip(",").split(",")
                if Strand != '+':
                    # number exons 5'->3' on the minus strand as well
                    starts.reverse()
                    stops.reverse()
                for i in xrange(int(Exoncnt)):
                    tempDB[Chr].append("\t".join([Chr,
                                                  starts[i],
                                                  stops[i],
                                                  "%s:%s" % (Gname, i + 1)]) + "\n")

    # the last chromosome is still buffered
    for K, V in tempDB.items():
        outfile = os.path.join(outputdir, "%s.tabix" % K)
        with open(outfile, "w") as outf:
            outf.writelines(V)
        jobP.apply_async(make_tabix, (outfile, 0, 1, 2), callback=dummy_proc)

    # completion marker; its mtime is read by check_for_db_update
    with open(os.path.join(outputdir, "updated.txt"), "w") as outfile:
        outfile.write("%s\n" % time.strftime('%Y-%m-%d'))

    os.remove(local_file)
    return



def create_genomedb(source_path, assembly, outdir, filename, silence, server, no_update, dbname):
    """Download the genome sequence archive (chromFa.*) and unpack it into
    <outdir>/<dbname>, then write the "updated.txt" completion marker.
    Supports .zip, .tar.gz and plain .gz archives.
    """
    outputdir = os.path.join(outdir, dbname)

    ftp = FTP(server)
    ftp.login()
    ftp.cwd(source_path)

    new_file = check_for_db_update(ftp, outputdir, filename,
                                   source_path, no_update, silence)

    if new_file == False:
        ftp.close()
        return

    local_file = os.path.join(outputdir, os.path.basename(new_file))
    if check_local_files(outputdir, new_file):
        synopsis('Downloading new %s from server...' % dbname, silence)
        # close the handle when the transfer finishes (was left open before)
        with open(local_file, 'wb') as outf:
            ftp.retrbinary('RETR %s' % new_file, outf.write)

    ftp.close()
    synopsis('Genereating new %s files...' % dbname, silence)
    if os.path.isfile(local_file):
        if ".zip" in os.path.basename(local_file):
            # extract every member of the zip archive
            with zipfile.ZipFile(local_file) as infile:
                infile.extractall(outputdir)
        elif ".tar.gz" in os.path.basename(local_file):
            subprocess.call("tar -xzf %s -C %s" % (local_file, os.path.dirname(local_file)), shell=True)
        elif ".gz" in os.path.basename(local_file):
            # NOTE: gunzip deletes the .gz archive itself on success
            subprocess.call(["gunzip -f %s" % local_file], shell=True)
        else:
            print("unknown Chrom.* file extension")
            return

    # completion marker; its mtime is read by check_for_db_update
    with open(os.path.join(outputdir, "updated.txt"), "w") as outfile:
        outfile.write("%s\n" % time.strftime('%Y-%m-%d'))

    # remove the downloaded archive.  BUGFIX: after a plain gunzip the .gz
    # no longer exists, so the unconditional os.remove() used to raise.
    if os.path.isfile(local_file):
        os.remove(local_file)
    return


def create_snpdb(source_path, assembly, outdir, filename, silence, server, no_update, dbname):
    """Download UCSC's common-SNP table (snp*Common.txt.gz) and build one
    tabix file per chromosome.  "updated.txt" records the dbSNP version
    string on its first line and the build date on the second.
    """
    outputdir = os.path.join(outdir, dbname)

    ftp = FTP(server)
    ftp.login()
    ftp.cwd(source_path)

    new_file = check_for_db_update(ftp, outputdir, filename,
                                   source_path, no_update, silence)

    if new_file == False:
        ftp.close()
        return

    local_file = os.path.join(outputdir, os.path.basename(new_file))
    if check_local_files(outputdir, new_file):
        synopsis('Downloading new %s from server...' % dbname, silence)
        # close the handle when the transfer finishes (was left open before)
        with open(local_file, 'wb') as outf:
            ftp.retrbinary('RETR %s' % new_file, outf.write)

    ftp.close()
    if os.path.isfile(local_file):
        subprocess.call(["gunzip -f %s" % local_file], shell=True)
        # BUGFIX: slice off ".gz"; rstrip(".gz") strips a character *set*
        if local_file.endswith(".gz"):
            local_file = local_file[:-3]

    synopsis('Genereating new %s files...' % dbname, silence)
    # bucket lines per chromosome; flush a chromosome's bucket as soon as
    # the next one starts
    # NOTE(review): assumes the dump is grouped by chromosome -- confirm
    tempDB = dict()
    with open(local_file) as inf:
        for line in inf:
            # chromosome is the first field that starts with "chr"
            Chr = line[line.find("chr"):line.find("\t", line.find("chr") + 1)]
            if Chr not in tempDB:
                while tempDB:
                    K, V = tempDB.popitem()
                    outfile = os.path.join(outputdir, "%s.tabix" % K)
                    with open(outfile, "w") as outf:
                        outf.writelines(V)
                    jobP.apply_async(make_tabix, (outfile, 2, 3, 4), callback=dummy_proc)
                tempDB[Chr] = []
            tempDB[Chr].append(line)

    # the last chromosome is still buffered (loop instead of popitem so an
    # empty input file no longer crashes)
    for K, V in tempDB.items():
        outfile = os.path.join(outputdir, "%s.tabix" % K)
        with open(outfile, "w") as outf:
            outf.writelines(V)
        jobP.apply_async(make_tabix, (outfile, 2, 3, 4), callback=dummy_proc)

    # dbSNP version from the file name, e.g. "snp138Common.txt" -> "138Common"
    fname = os.path.basename(local_file)
    db_version = fname[3:fname.find(".")]
    with open(os.path.join(outputdir, "updated.txt"), "w") as outfile:
        outfile.write(db_version + "\n")
        outfile.write("%s\n" % time.strftime('%Y-%m-%d'))

    os.remove(local_file)
    return


def permutations(items, s):
    """Yield every length-*s* string over the alphabet *items*.

    Despite the name this is the Cartesian product with repetition
    (len(items)**s strings, lexicographic order) -- exactly
    itertools.product, which this module already imports, so delegate
    to it instead of the hand-rolled xrange recursion.
    """
    for combo in product(items, repeat=s):
        yield ''.join(combo)


def count_repeat(fpath, outputdir, silence=1):
    """Count every xmer-long window across all *.fa files in *fpath* and
    write the counts table <outputdir>/<xmer>mer_repeats.txt.

    The output starts with "<xmer>\t<count width>" and then one line per
    possible ACGT xmer with a zero-padded count.  Windows containing 'N'
    are skipped.
    silence : verbosity flag for synopsis() (was an undefined global
              before, which made the final synopsis() call raise).
    """
    repeatDB = dict()
    for each in glob.glob(os.path.join(fpath, "*.fa")):
        with open(each) as infile:
            infile.readline()  # skip the fasta header
            seq = ""
            for line in infile:
                seq += line.strip().upper()
                for i in xrange(len(seq) - xmer + 1):
                    S = seq[i:i + xmer]
                    if "N" not in S:
                        try:
                            repeatDB[S] += 1
                        except KeyError:
                            repeatDB[S] = 1
                # keep only the last xmer-1 bases so windows spanning a
                # line break are counted exactly once.  BUGFIX: the old
                # `seq = seq[i:]` kept xmer bases (double-counting the
                # boundary window) and raised NameError when the first
                # line was shorter than xmer.
                seq = seq[max(len(seq) - xmer + 1, 0):]

    if not repeatDB:
        # no fasta data found -- avoid max() on an empty sequence
        return

    # zero-pad counts to the width of the largest count
    len_max = len(str(max(repeatDB.values())))
    len_format = "%0" + str(len_max) + "d"

    with open(os.path.join(outputdir, '%smer_repeats.txt' % xmer), 'w') as outfile:
        outfile.write('%s\t%s\n' % (xmer, len_max))
        for p in permutations('ACGT', xmer):
            outfile.write('%s\t%s\n' % (p, len_format % repeatDB.get(p, 0)))

    synopsis('repeatDB created', silence)
    return


def create_repeatdb(source_path, assembly, outdir, filename, silence, server, no_update, dbname):
    """Kick off background xmer counting over genomeDB unless a repeat
    table already exists.

    The FTP-related parameters are accepted only so the signature matches
    the other create_*db builders; they are unused here.
    """
    outputdir = os.path.join(outdir, dbname)
    genomedb_path = os.path.join(outdir, "genomeDB")

    # already built?  accept either the text or the pickled table.
    # (use the module-level xmer constant instead of a hard-coded "12")
    for ext in (".txt", ".pkl"):
        done = os.path.join(outputdir, "%smer_repeats%s" % (xmer, ext))
        if os.path.isfile(done) and os.path.getsize(done) > 1000:
            return

    synopsis("starting repeat counting...", silence)
    # counting takes a long time, so run it in the worker pool
    jobP.apply_async(count_repeat, (genomedb_path, outputdir,), callback=dummy_proc)
    return

        
def get_ucscfile(assembly, outputdir, silence):
    """Build or refresh all UCSC-derived databases for *assembly*.

    outputdir : top-level db directory (dbdir); the per-assembly directory
                <outputdir>/<assembly> is created beneath it.
    silence   : verbosity flag passed through to synopsis().
                NOTE(review): the truth test below prints *when* silence is
                set, and the CLI default is 1 -- the flag's polarity looks
                inverted; confirm against allfunctions.synopsis.

    Relies on the module-level `option` (parsed in __main__) for the FTP
    server name, and on the module-level pool `jobP` used by the builders.
    """
    # outputdir is dbdir
    outdir= os.path.join(outputdir, assembly)

    if not os.path.isdir(outdir):
        os.makedirs(outdir)
        if silence:
            print "generating the necessary databases for %s" %assembly
        synopsis("this may take a few hours please be patient ...", silence)
    else:
        synopsis("using local databases for %s, checking for updates first" %assembly, silence)

    # skip everything when exonDB was touched within the last 24 hours
    if os.path.isdir(os.path.join(outdir, "exonDB")):
        # check to see if it is more than a day old
        if time.time() - os.path.getmtime(os.path.join(outdir, "exonDB")) < 24*60*60:
            synopsis("DBs recently updated", silence)
            return
        
    # first check if there is an update to any of the files
    # check exonDB, genomeDB, snpDB
    # one entry per database: remote source path (with %s for the
    # assembly), remote file pattern, builder function and whether an
    # existing db should ever be refreshed.
    # NOTE(review): the exonDB entry uses create_exondb, not the newer
    # create_exondb2 defined above -- confirm which one is intended.
    dbs= {"genomeDB":{"source_path":"goldenPath/%s/bigZips",
                      "filename": "chromFa.*",
                      "func":create_genomedb,
                      "no_update": True},
          
          "exonDB":{"source_path": "goldenPath/%s/database",
                    "filename": "refGene.txt.gz",
                    "func":create_exondb,
                    "no_update": False},
        
          "snpDB":{"source_path": "goldenPath/%s/database",
                   "filename": "snp*Common.txt.gz",
                   "func":create_snpdb,
                   "no_update": False},

          "repeatDB":{"source_path": "%s/genomeDB",
                      "filename": None,
                      "func":create_repeatdb,
                      "no_update": True}}

    # genomeDB first (repeatDB and the others depend on local genome files)
    for database in ("genomeDB", "repeatDB", "snpDB", "exonDB"):
        filename = dbs[database]["filename"]
        outputdir = os.path.join(outdir, database)

        # if you have been this far then you need a new db
        if not os.path.isdir(outputdir):
            os.mkdir(outputdir)

        # call the function that does the magic of creating a DB
        # (option.server comes from the CLI parse at module level)
        dbs[database]["func"](dbs[database]["source_path"] % assembly,
                              assembly,
                              outdir,
                              dbs[database]["filename"],
                              silence,
                              option.server,
                              dbs[database]["no_update"],
                              database)
    # NOTE(review): unlike every other call site this synopsis() call
    # passes no silence argument -- confirm synopsis has a default.
    synopsis("All databases are up-to-date (took %s)" %(gethms(time.time()-START)))

    return


if __name__ == "__main__":
    # Command-line entry point: parse options, create the shared worker
    # pool and build/refresh all databases for the requested genome.
    parser= OptionParser(usage="%prog [-f]", version="%prog 0.0.1")

    # MUST ARGUMENTS
    parser.add_option("-c", "--cores", dest="cores", help="number of cores to be used [DEFAULT=%default]", type="int", default=cpu_count())

    # genome assembly to build databases for, e.g. hg19
    parser.add_option("-g", dest="genome", default="hg19")
    
    # top-level directory that will hold the per-assembly databases
    parser.add_option("-D", dest="dbdir")

    # NOTE(review): the default is the int 1 (truthy) while a value passed
    # on the command line is stored as a string -- both test true downstream
    parser.add_option("--silence", dest="silence", default=1)

    parser.add_option("--server", dest="server", help="DEFAULT=%default", default='hgdownload.cse.ucsc.edu')

    (option, args)= parser.parse_args()

    # pool shared by all create_*db builders for background tabix jobs
    jobP = Pool(option.cores)

    # now create DBs
    print "Databases are in %s" %os.path.join(option.dbdir, option.genome)
    get_ucscfile(option.genome, option.dbdir, option.silence)

else:
    # imported as a module: still create a default-sized pool.
    # NOTE(review): spawning a Pool as an import side effect is unusual --
    # every import of this module forks cpu_count() worker processes.
    jobP = Pool()

# stop accepting work and wait for all queued background jobs to finish
jobP.close()
jobP.join()

# now look at all the dbs and see if they are present or updated
# YOU CAN GENERATE THE DBs FOR OTHER ORGANISMS
# ftp://ftp.ncbi.nih.gov/snp/organisms/
# has the genome files 
