#!/usr/bin/env python
"""

    map_fastaq_to_reference.py
    [--log_file PATH]
    [--verbose]

"""

################################################################################
#
#   map_fastaq_to_reference
#
#
#   Copyright (c) 27/July/2011 Leo Goodstadt
#
#   Permission is hereby granted, free of charge, to any person obtaining a copy
#   of this software and associated documentation files (the "Software"), to deal
#   in the Software without restriction, including without limitation the rights
#   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#   copies of the Software, and to permit persons to whom the Software is
#   furnished to do so, subject to the following conditions:
#
#   The above copyright notice and this permission notice shall be included in
#   all copies or substantial portions of the Software.
#
#   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#   THE SOFTWARE.
#################################################################################

import sys, os

# add self to search path for testing
if __name__ == '__main__':
    # Run as a script: derive the module name from the executable path
    # so log messages carry a sensible name.
    exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
    module_name = os.path.split(sys.argv[0])[1]
    # strip the ".py" extension
    module_name = os.path.splitext(module_name)[0];
else:
    # Imported as a module: use the normal module name.
    module_name = __name__

# Use import path from <<../python_modules>>
# NOTE(review): hard-coded, user-specific path — the helper modules imported
# below (common_parser, nested_dict, commify, ...) are expected to live here.
if __name__ == '__main__':
    sys.path.append("/home/lg/python_modules")



#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#   options


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888


if __name__ == '__main__':
    import argparse
    # get_common_pipeline_parser() supplies the shared pipeline options
    # referenced later (--log_file, --verbose, --just_print, --flowchart,
    # --target_tasks, --forced_tasks, --jobs, --skip_parameter_logging, ...)
    from common_parser import get_common_pipeline_parser

    parser = argparse.ArgumentParser(description='Map fastq files to reference genome using stampy',
                                    #formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                    parents=[get_common_pipeline_parser()],
                                     )
    #required_options = argparse._ArgumentGroup(parser, 'mandatory arguments')
    #parser._action_groups.insert(0, required_options)

    #
    #   Multiple input files
    #
    #   action="append" lets -i be given several times; each value is kept
    #   as a separate entry (defaults below use a list of glob patterns).
    parser.add_argument("-i", "--input_file", dest="input_files",
                                    metavar="FILE",
                                    type= str,
                                    action="append",
                                    help="Name and path of input file.")
    # number of leading bases (the tag) to trim from each read
    parser.add_argument('-t', '--trim',             type=int)
    parser.add_argument("--species_name",           type=str)
    parser.add_argument("--assembly",               type=str)
    parser.add_argument("--name",                   type=str)
    parser.add_argument("--reference_genome",       type=str)
    parser.add_argument("--sample_count",           type=int, metavar="COUNT")
    parser.add_argument("--sample",                 type=str, help ="A/B = map A samples out of each B reads. Otherwise, set 'true' to use the first COUNT reads.")
    parser.add_argument("--summary_directory",      type=str)
    parser.add_argument("--working_directory",      type=str)
    parser.add_argument("--extra_stampy_options",   type=str)
    # maps to stampy --fast / (nothing) / --sensitive in map_with_stampy()
    parser.add_argument("--stampy_sensitivity",     type=str, choices=["low", "normal", "high"])
    parser.set_defaults(input_files     = [[
                                        "/data/mus/lg/projects/de_novo_assembly/data_qc/islay.qb3.berkeley.edu/VCGSL_FTP/110622_Illumina/B0011DACXX/Sample_MK_02_*/*R1_???.fastq.gz",
                                        #"/data/mus/lg/projects/de_novo_assembly/data_qc/islay.qb3.berkeley.edu/VCGSL_FTP/110622_Illumina/B0011DACXX/Sample_MK_02_ACAGTG/*R1_???.fastq.gz",
                                        ]],
                        species_name            = "mouse",
                        assembly                = "mm9",
                        name                    = "mm9",
                        trim                    = 10,
                        sample                  = "",
                        sample_count            = 50000,
                        stampy_sensitivity      = "low",
                        extra_stampy_options    = "--insertsize2=-40000 --insertsd2=15000"
                        )

    options = parser.parse_args()


    # Fill in path defaults that were not given on the command line.
    # Sampling runs get their own working / summary directories so that
    # full runs and sample runs never share intermediate files.
    default_base_dir = "/data/mus/lg/projects/de_novo_assembly/"
    if not options.reference_genome:
        options.reference_genome    = default_base_dir + "mm9.fa"
    if not options.log_file:
        options.log_file            = default_base_dir + "map_fastaq_to_reference.log"
    if not options.summary_directory:
        if options.sample:
            options.summary_directory = default_base_dir + "sample_summary"
        else:
            options.summary_directory = default_base_dir + "summary"
    if not options.working_directory:
        if options.sample:
            options.working_directory = default_base_dir + "sample_working"
        else:
            options.working_directory = default_base_dir + "working"
    # By default run the pipeline through to the histogram summary tasks.
    if not options.target_tasks and not options.forced_tasks:
            options.target_tasks = ["per_library_histogram_insert_sizes", "per_file_histogram_insert_sizes"]





#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#   imports


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

from ruffus import *
from ruffus.proxy_logger import make_shared_logger_and_proxy
from ruffus.ruffus_exceptions import JobSignalledBreak

#from json import dumps
from collections import defaultdict
from nested_dict import nested_dict
from commify import commify
import time
from split_file_name import split_file_name
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#   Functions


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#   Logger


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

from lg_program_logging import MESSAGE, getLogger

#   Allow logging across Ruffus pipeline
#
#   make_shared_logger_and_proxy gives each (possibly multiprocess) job a
#   proxy plus a mutex; all proxies forward to the single global_logger.
global_logger  = getLogger(module_name, options.log_file, options.verbose)
def logger_factory_for_proxy (logger_name, args):
    # factory required by make_shared_logger_and_proxy; always hands back
    # the one shared logger regardless of the requested name
    return global_logger
logger, logging_mutex =  make_shared_logger_and_proxy (logger_factory_for_proxy, module_name, {})


if __name__ == '__main__':

    #
    #   set up log
    #



    #
    #   log programme parameters
    #
    #   record the full command line so each run can be reproduced from
    #   the log file (can be suppressed with --skip_parameter_logging)
    if not options.skip_parameter_logging:
        logger.info(" ".join(sys.argv))



#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#   Pipeline


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#_________________________________________________________________________________________

#   build genome (.stidx) file

#_________________________________________________________________________________________
@transform(options.reference_genome, suffix(".fa"), [".stidx", ".stampy_genome_file_built"])
def build_genome_file (input_file, output_files):
    """
    Build the stampy genome index (.stidx) for the reference genome.

    input_file   : reference genome fasta (options.reference_genome)
    output_files : [index_file, flag_file] — an empty flag file is created
                   on success so ruffus can see that the step completed.
    """
    output_file, flag_file = output_files
    cmd = "stampy.py --species=%(species_name)s --assembly=%(assembly)s -G %(name)s  %(reference_genome)s"
    cmd = cmd % {   "species_name"      : options.species_name      ,
                    "assembly"          : options.assembly          ,
                    "name"              : options.name              ,
                    "reference_genome"  : options.reference_genome  }
    # os.system() returns the exit status: zero means stampy succeeded
    if not os.system( cmd ):
        # touch the flag file; close the handle explicitly rather than
        # relying on garbage collection
        open(flag_file, "w").close()

#_________________________________________________________________________________________

#   Building a hash (.sthash) file:

#_________________________________________________________________________________________
@follows(build_genome_file)
@transform(options.reference_genome, suffix(".fa"), [".sthash", ".stampy_genome_hash_built"])
def build_genome_hash_file (input_file, output_files):
    """
    Build the stampy hash (.sthash) from the genome index built by
    build_genome_file.

    output_files : [hash_file, flag_file] — an empty flag file is created
                   on success so ruffus can see that the step completed.
    """
    output_file, flag_file = output_files
    cmd = "stampy.py -g  %(name)s -H  %(name)s"
    cmd = cmd % {   "name"              : options.name}
    # zero exit status from stampy means success
    if not os.system( cmd ):
        # touch the flag file; close the handle explicitly
        open(flag_file, "w").close()

#_________________________________________________________________________________________

#   set up input files

#_________________________________________________________________________________________
# Expand every glob pattern from the (nested) --input_file options into a
# de-duplicated, sorted list of fastq paths for the pipeline to consume.
import glob
matched_files = set()
for pattern_group in options.input_files:
    for pattern in pattern_group:
        matched_files.update(glob.glob(pattern))
input_files = sorted(matched_files)

# short-hands used throughout the pipeline task decorators below
WDIR = options.working_directory
SDIR = options.summary_directory



#_________________________________________________________________________________________

#   create_working_subdirectories

#_________________________________________________________________________________________
# NOTE: regex is a raw string — "\d" inside a plain string literal is an
# invalid escape sequence (a warning, and eventually an error, on
# modern Pythons)
@collate(input_files,
           regex(r"(.+)(/[^/ ]+)/[^/ ]+R1_\d+.fastq.gz"),
           WDIR + r"\2")                                      # working sub directory
def create_working_subdirectories (ignore, output_subdir):
    """
    Create one working sub-directory (under WDIR) per sample directory of
    input fastq files.  Ruffus only runs this when the directory is missing.
    """
    os.makedirs(output_subdir)

##_________________________________________________________________________________________
#
##   create_summary_subdirectories
#
##_________________________________________________________________________________________
#@collate(input_files,
#           regex("(.+)(/[^/ ]+)/[^/ ]+R1_\d+.fastq.gz"),
#           SDIR + r"\2")                                      # summary sub directory
#def create_summary_subdirectories (ignore, output_subdir):
#    os.makedirs(output_subdir)



#_________________________________________________________________________________________

#   strip_10bp_tag_from_fastq

#_________________________________________________________________________________________
@jobs_limit(10)
@follows(build_genome_file, build_genome_hash_file)
@follows(create_working_subdirectories)
# raw string for the regex: "\d" in a plain string is an invalid escape
@transform(input_files, regex(r"(.+)(/[^/ ]+)(/[^/ ]+)R1(_\d+).fastq.gz"),
           [WDIR + r"\2\3" + "RX"+ r"\4.stripped_finished",    # flag_file
            WDIR + r"\2\3" + "R1"+ r"\4.fastq.stripped.gz",    # output_file1
            WDIR + r"\2\3" + "R2"+ r"\4.fastq.stripped.gz"],   # output_file2
            r"\1\2\3" + "R2"+ r"\4.fastq.gz")                  # input_file2
def strip_10bp_tag_from_fastq (input_file1, output_files, input_file2):
    """
    Trim the first options.trim bases (the tag) from both reads of a pair.

    fastx_trimmer -f N keeps bases from position N onwards, hence trim + 1.
    An empty flag file marks successful completion of both trims.
    """
    flag_file, output_file1, output_file2 = output_files
    cmd1 = "gunzip %s -c | fastx_trimmer -f %d -Q33 -z -o %s" % (input_file1,
                                                                options.trim +1,
                                                                output_file1)
    cmd2 = "gunzip %s -c | fastx_trimmer -f %d -Q33 -z -o %s" % (input_file2,
                                                                options.trim +1,
                                                                output_file2)
    # only flag success when both commands exited with status 0
    if not os.system( cmd1 ) and not os.system( cmd2 ):
        open(flag_file, "w").close()

#_________________________________________________________________________________________

#   Map full or sample using stampy

#_________________________________________________________________________________________
@follows(create_working_subdirectories)
@transform(strip_10bp_tag_from_fastq,
           suffix(".stripped_finished"),
           [    ".sam",
                ".summary",
                ".stampy_finished"])
def map_with_stampy (input_files, output_files):
    """
    Map a stripped fastq read pair to the reference genome with stampy.

    input_files  : [flag_file, fastq1, fastq2] from strip_10bp_tag_from_fastq
    output_files : [sam_file, summary_file, flag_file]

    stampy's stdout (the sam) goes to mapped_file and its stderr (mapping
    summary) to summary_file.  The flag file is touched on success.
    """
    ignore_flag_file, input_file1, input_file2  = input_files
    mapped_file, summary_file, flag_file        = output_files

    # optionally map only a sample of the reads
    sample_cmd = ""
    if options.sample:
        if options.sample == "true":
            # take the first sample_count records
            sample_cmd = "--numrecords=%d " % options.sample_count
        else:
            # "A/B" = map A samples out of each B reads
            sample_cmd = "--processpart=%s " % options.sample


    # stampy sensitivity: low => --fast, high => --sensitive, normal => default
    sensitivity_cmd = ""
    if options.stampy_sensitivity == "low":
        sensitivity_cmd = "--fast "
    elif options.stampy_sensitivity == "high":
        sensitivity_cmd = "--sensitive "


    cmd = ("stampy.py "
                + sample_cmd +
                "--inputformat=fastq "
                + options.extra_stampy_options + " "
                "-g %(name)s "
                "-h %(name)s "
                "-M %(input_file1)s %(input_file2)s "
                    "> %(mapped_file)s 2> %(summary_file)s")
    cmd = cmd % {   "name"       :  options.name,
                    "input_file1":  input_file1,
                    "input_file2":  input_file2,
                    "mapped_file":  mapped_file,
                    "summary_file": summary_file,}
    # zero exit status means stampy succeeded
    if not os.system( cmd ):
        open(flag_file, "w").close()

#_________________________________________________________________________________________

#   sam to bam

#_________________________________________________________________________________________
@transform(map_with_stampy, suffix(".sam"), [".unsorted_bam", ".bam_conversion_finished"] )
def sam_to_bam (input_files, output_files):
    """
    Convert the stampy sam output to (unsorted) bam with samtools view,
    using the reference .fai index.  Touches the flag file on success.
    """
    input_file          = input_files[0]
    bam_file, flag_file = output_files
    # ">|" clobbers any existing output even under noclobber shells
    cmd = ("samtools view -bSt %(reference_genome)s.fai  %(input_file)s >| %(bam_file)s ")
    cmd = cmd % {
                    "input_file":       input_file,
                    "bam_file":         bam_file,
                    "reference_genome": options.reference_genome,
                }
    # zero exit status means samtools succeeded
    if not os.system( cmd ):
        open(flag_file, "w").close()

#_________________________________________________________________________________________

#   sort and index bam

#_________________________________________________________________________________________
@transform(sam_to_bam, suffix(".unsorted_bam"), [".bam", ".bam_sort_finished"] )
def sort_and_index_bam (input_files, output_files):
    """
    Sort the bam with samtools sort and index the result.

    note samtools sort adds a .bam suffix to its output prefix, so the
    prefix passed is bam_file with the trailing ".bam" stripped.
    """
    input_file          = input_files[0]
    bam_file, flag_file = output_files
    cmd = ("samtools sort %(input_file)s %(bam_file)s; samtools index  %(bam_file)s.bam")
    cmd = cmd % {
                    "input_file":   input_file,
                    # strip ".bam": samtools sort appends it itself
                    "bam_file":     bam_file[:-4],
                }
    # zero exit status means both sort and index succeeded
    if not os.system( cmd ):
        open(flag_file, "w").close()


#_________________________________________________________________________________________

#   calculate_insert_sizes

#_________________________________________________________________________________________
@transform(sort_and_index_bam, regex("(.+)(/[^/ ]+/[^/ ]+).bam"),
                            [r"\1\2.insert_sizes_txt",
                             r"\1\2.picard_hist.pdf",
                             r"\1\2.insert_sizes_finished",
                             r"\1\2.insert_sizes_errors"],
                            logger, logging_mutex )
def calculate_insert_sizes(input_files, output_files, logger, logging_mutex):
    """
    Run Picard CollectInsertSizeMetrics on a sorted bam to produce an
    insert size table and histogram pdf.

    If Picard fails to produce its outputs, the error file is echoed to the
    log and empty placeholder outputs are created so downstream merge steps
    still have files to read.
    """
    input_file                                          = input_files[0]
    output_file, hist_pdf_file, flag_file, error_file   = output_files
    with logging_mutex:
        logger.debug("calculate_insert_sizes for " + os.path.join(*split_file_name(input_file, 2)[1:]))
    cmd = ("java -Xmx2g "
                "-jar /data/mus/lg/bin/picard-tools-1.48/CollectInsertSizeMetrics.jar "
                "REFERENCE_SEQUENCE=%(reference_genome)s "
                "INPUT=%(input_file)s "
                "OUTPUT=%(output_file)s "
                "HISTOGRAM_FILE=%(hist_pdf_file)s "
                "HISTOGRAM_WIDTH=100000 "
                # trailing space is essential: without it the redirection
                # fused into the option as "M=0.000012> errfile", silently
                # changing M to 0.000012 and leaving stderr unredirected
                "MAX_RECORDS_IN_RAM=10000000 M=0.00001 "
                "2> %(error_file)s")
    cmd = cmd % {
                    "input_file":           input_file,
                    "output_file":          output_file,
                    "hist_pdf_file":        hist_pdf_file,
                    "reference_genome":     options.reference_genome,
                    "error_file":           error_file,
                }
    with logging_mutex:
        logger.log(MESSAGE, cmd)
    # zero exit status from java means Picard succeeded
    if not os.system( cmd ):
        open(flag_file, "w").close()
    if not os.path.exists(output_file) or not os.path.exists(hist_pdf_file):
        # Picard failed: copy its stderr into the log ...
        with logging_mutex:
            for l in open(error_file):
                logger.log(MESSAGE, l.rstrip())
        # ... and create empty placeholder outputs so the pipeline can go on
        open(output_file,  "w").close()
        open(hist_pdf_file,  "w").close()

    with logging_mutex:
        logger.debug("calculate_insert_sizes finished for " + os.path.join(*split_file_name(input_file, 2)[1:]))


#_________________________________________________________________________________________

#   do_histogram_insert_sizes

#_________________________________________________________________________________________
def do_histogram_insert_sizes(input_files, output_files, logger, logging_mutex):
    """
    Merge one or more insert size tables into a single histogram pdf by
    shelling out to merge_insert_sizes.py.

    input_files  : list of calculate_insert_sizes output lists; only the
                   first entry of each (the .insert_sizes_txt file) is used
    output_files : [hist_pdf_file, flag_file]
    """
    # build one "--input_file PATH" option per table; use a new name rather
    # than rebinding the input_files parameter to an unrelated string
    input_file_options             = " ".join(("--input_file " + f[0]) for f in input_files)
    hist_pdf_file, flag_file       = output_files
    with logging_mutex:
        logger.debug("Summarising to pdf " + os.path.join(*split_file_name(hist_pdf_file, 2)[1:]))
    cmd = ("python merge_insert_sizes.py "
                "%(input_files)s "
                "--output_pdf_file %(hist_pdf_file)s ")
    cmd = cmd % {
                    "input_files":           input_file_options,
                    "hist_pdf_file":         hist_pdf_file,
                }
    with logging_mutex:
        logger.log(MESSAGE, cmd)
    # zero exit status means the merge script succeeded
    if not os.system( cmd ):
        open(flag_file, "w").close()

#_________________________________________________________________________________________

#   per_file_histogram_insert_sizes

#_________________________________________________________________________________________
#@follows(create_summary_subdirectories)
@follows(mkdir(SDIR))
@transform(calculate_insert_sizes,
            regex("(.+)(/[^/ ]+)(/[^/ ]+).insert_sizes_txt"),
            [SDIR + r"\3.hist.pdf",
             r"\1\2\3.histogram_pdf_finished"],
            logger, logging_mutex )
def per_file_histogram_insert_sizes(input_files, output_files, logger, logging_mutex):
    # One histogram pdf per input file: wrap the single file list in an
    # outer list, as do_histogram_insert_sizes expects a list of file lists.
    do_histogram_insert_sizes([input_files], output_files, logger, logging_mutex)

#_________________________________________________________________________________________

#   per_library_histogram_insert_sizes

#_________________________________________________________________________________________
#@follows(create_summary_subdirectories)
@follows(mkdir(SDIR))
@collate(calculate_insert_sizes,
            regex("(.+)(/[^/ ]+)(/[^/ ]+).insert_sizes_txt"),
            [SDIR + r"\2.hist.pdf", r"\1\2.histogram_pdf_finished"],
            logger, logging_mutex )
def per_library_histogram_insert_sizes(input_files, output_files, logger, logging_mutex):
    do_histogram_insert_sizes(input_files, output_files, logger, logging_mutex)






#REFERENCE_SEQUENCE=/data/mus/lg/projects/de_novo_assembly/mm9.fa  INPUT=sample_summary/MK_02_ACAGTG_ACAGTG_L008_RX_001.bam OUTPUT=output.tmp HISTOGRAM_FILE=hist.pdf

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#   Main logic


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
if __name__ == '__main__':

    # --just_print: show what would be run without running anything
    if options.just_print:
        pipeline_printout(sys.stdout, options.target_tasks, options.forced_tasks,
                            verbose=options.verbose)

    # --flowchart: write a diagram of the pipeline to the given file
    elif options.flowchart:
        pipeline_printout_graph (   open(options.flowchart, "w"),
                                    options.flowchart_format,
                                    options.target_tasks,
                                    options.forced_tasks,
                                    draw_vertically = not options.draw_horizontally,
                                    no_key_legend   = not options.key_legend_in_graph)
    # default: actually run the pipeline, with --jobs parallel processes
    else:
        pipeline_run(   options.target_tasks, options.forced_tasks,
                        multiprocess    = options.jobs,
                        logger          = global_logger,
                        verbose         = options.verbose)







