'''
Created on Dec 2, 2010

@author: oabalbin
'''
import os
import exome.gatk_cluster.snps_calling_cluster as snp
#import exome.variantEval.snps_annotator as ann
from collections import defaultdict, deque
from optparse import OptionParser

from exome.jobs.base import JOB_SUCCESS, JOB_ERROR
from exome.jobs.job_runner import qsub, run_local
from exome.jobs.config import ExomePipelineConfig, ExomeAnalysisConfig


# Global variables: cluster node hardware profile and queue walltimes.
NODE_MEM = 45000.0   # total usable memory per node (MB)
NODE_CORES = 12      # cores per node
SINGLE_CORE = 1      # core count for serial jobs
# Per-core memory budget derived from the node profile.
MEM_PER_CORE = int(NODE_MEM / NODE_CORES)
# wt = walltime limits for short- and long-running queue jobs.
WT_SHORT = "24:00:00"
WT_LONG = "60:00:00"  # previously "100:00:00"


def check_create_dir(root_path, dir_name=None):
    '''
    Ensure that root_path exists and, optionally, that a subfolder
    named dir_name exists inside it.

    Parameters:
        root_path -- directory to create if missing (intermediate
                     directories are created as needed)
        dir_name  -- optional subfolder name to create inside root_path

    Returns:
        The path of the deepest directory checked/created: the joined
        root_path/dir_name when dir_name is given, otherwise root_path.
    '''
    # makedirs (instead of mkdir) also creates missing parent
    # directories, generalizing the original behavior.
    if not os.path.isdir(root_path):
        os.makedirs(root_path)
    if dir_name is not None:
        subfolder = os.path.join(root_path, dir_name)
        if not os.path.isdir(subfolder):
            os.makedirs(subfolder)
    else:
        subfolder = root_path

    return subfolder


def snps_calling_pairedSamples(analysis, configrun, num_processors, jobrunfunc, deps):
    '''
    Call SNPs with the GATK UnifiedGenotyper and filter the calls by
    basic quality parameters and other criteria such as neighboring
    indels or clustered SNPs.

    Calls are made separately for each sample category (cohort),
    e.g. tumor and benign/normal samples.

    Parameters:
        analysis       -- ExomeAnalysisConfig with per-sample/cohort file paths
        configrun      -- ExomePipelineConfig with tool paths and settings
        num_processors -- requested processor count (currently unused;
                          core counts come from configrun.gatk_num_cores)
        jobrunfunc     -- job submission callable (qsub or run_local)
        deps           -- job id(s) the first step of each cohort waits on

    Returns:
        List of job ids of the final filtering steps (hard filter and
        tranche cut) for every cohort, usable as downstream deps.
    '''
    # Tool / resource configuration
    extra_mem, num_cores = configrun.gatk_use_mem, configrun.gatk_num_cores
    path_to_gatk, path_to_picard, path_to_sam = configrun.gatk_path, configrun.picard_path, \
                                                configrun.samtools_path
    path_to_Rscript = configrun.rscript_bin
    resources_folder = configrun.gatk_resource_path
    temp_dir = configrun.gatk_temp_dir
    fdr_filter_level = configrun.gatk_snps_fdr_threshold
    target_exons = configrun.gatk_target_exons
    call_parameters = configrun.gatk_unifiedGenotyper_params
    genomes = configrun.genomes['human']
    ref_genome, snpdb_vcf, indeldb_file = genomes.gatk_ref_genome, genomes.snpdb, \
                                         genomes.indeldb
    hapmap_vcf, tgk_vcf = genomes.hapmap, genomes.OneKgenomes
    my_email = configrun.email_addresses

    # Run parameters: bases of padding added around indels when masking
    mask_indel_padding = 10

    # A cohort corresponds to a biologically meaningful group,
    # e.g. breast tumors vs. breast normals.
    cohort_samples = defaultdict(deque)
    # Split the samples into benign and tumor cohorts.
    for sp in analysis.samples:
        if sp.category == 'benign':
            cohort_samples['benign'].append(sp.sorted_mmarkdup_bam)
        else:
            cohort_samples['tumor'].append(sp.sorted_mmarkdup_bam)

    check_create_dir(analysis.gatk_calls_dir)

    list_bam_files_normal = list(cohort_samples['benign'])
    list_bam_files_tumor = list(cohort_samples['tumor'])
    indels_vcf = analysis.indels_vcf
    indels_bed = analysis.indels_bed
    indels_masked_bed = analysis.indels_masked_bed

    # NOTE(review): the indel-genotyping and indel-mask-generation steps
    # (snp.indelGenotyper_pairedSample / snp.generate_indel_maskfile) have
    # been disabled here, but indels_masked_bed is still consumed by the
    # basic filtering step below -- confirm the mask file is produced
    # upstream before running this pipeline.

    deps_list = []
    for sptype, list_bam_samples in cohort_samples.iteritems():
        jobn = 'gtk_' + sptype
        # Select the per-cohort input BAMs and output file paths;
        # benign and tumor cohorts are called independently.
        if sptype == 'benign':
            list_bam_all_samples = list_bam_files_normal
            snps_raw_vcf = analysis.benign_snps_raw_vcf
            snps_basic_vcf = analysis.benign_snps_basic_vcf
            snps_hard_vcf = analysis.benign_snps_hard_vcf
            snps_hard_annotated_vcf = analysis.benign_snps_hard_annotated_vcf
            snps_hard_annotated_log = analysis.benign_snps_hard_annotated_log
            cluster_ouput_file = analysis.benign_cluster_ouput_file
            snps_variantRecal_vcf = analysis.benign_snps_variantRecal_vcf
            tranches_file = analysis.benign_tranches_file
            snps_model_vcf = analysis.benign_snps_model_vcf
            snps_model_annotated_vcf = analysis.benign_snps_model_annotated_vcf
        elif sptype == 'tumor':
            list_bam_all_samples = list_bam_files_tumor
            snps_raw_vcf = analysis.tumor_snps_raw_vcf
            snps_basic_vcf = analysis.tumor_snps_basic_vcf
            snps_hard_vcf = analysis.tumor_snps_hard_vcf
            snps_hard_annotated_vcf = analysis.tumor_snps_hard_annotated_vcf
            snps_hard_annotated_log = analysis.tumor_snps_hard_annotated_log
            cluster_ouput_file = analysis.tumor_cluster_ouput_file
            snps_variantRecal_vcf = analysis.tumor_snps_variantRecal_vcf
            tranches_file = analysis.tumor_tranches_file
            snps_model_vcf = analysis.tumor_snps_model_vcf
            snps_model_annotated_vcf = analysis.tumor_snps_model_annotated_vcf

        # Call raw SNPs over all BAMs of the cohort with the
        # UnifiedGenotyper.
        command = snp.unified_genotyper_multiSample(ref_genome,list_bam_all_samples,
                                   snps_raw_vcf, extra_mem, num_cores, path_to_gatk,
                                   target_exons, call_parameters)

        jobidrc = jobrunfunc(jobn+'_ug', command, num_cores, cwd=None, walltime=WT_LONG, pmem=None,
                                  deps=deps, stdout=None, email_addresses=my_email)

        # Basic SNP filtering: apply the indel mask and reject
        # SNP clusters within a 10 bp window.
        command = snp.snps_basicFiltering(ref_genome, snps_raw_vcf, indels_masked_bed,
                       snps_basic_vcf, extra_mem, SINGLE_CORE, path_to_gatk,
                       snp_cluster_window=10)

        jobidbf = jobrunfunc(jobn+'_bf', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                                  deps=[jobidrc], stdout=None, email_addresses=my_email)

        # Final SNP filtering is done with two methods; each one
        # constitutes a vote in the majority SNP calling:
        #   I)  hard (threshold) filtering
        #   II) model-based recalibration

        # Method I: hard filtering -> snps_hard_vcf
        command = snp.snps_hardFiltering(ref_genome, snps_basic_vcf,
                       snps_hard_vcf, extra_mem, SINGLE_CORE, path_to_gatk)

        jobidhf = jobrunfunc(jobn+'_hf', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                                  deps=jobidbf, stdout=None, email_addresses=my_email)

        # Method II: model-based recalibration, in three steps:
        # a) variant cluster generation
        command = snp.snps_generateVariantClusters(ref_genome, snps_basic_vcf,
                                                   cluster_ouput_file,
                                 hapmap_vcf, tgk_vcf, snpdb_vcf, path_to_gatk, extra_mem)

        jobidvc = jobrunfunc(jobn+'_vc', command, SINGLE_CORE, cwd=None, walltime=WT_LONG, pmem=extra_mem,
                                  deps=jobidbf, stdout=None, email_addresses=my_email)

        # b) calculation of the variant posterior probability
        command = snp.snps_variantRecalibration(ref_genome, snps_basic_vcf, cluster_ouput_file,
                              snps_variantRecal_vcf, tranches_file, hapmap_vcf,
                              tgk_vcf, snpdb_vcf, path_to_gatk, path_to_Rscript,
                              resources_folder,extra_mem)

        jobidvr = jobrunfunc(jobn+'_vr', command, SINGLE_CORE, cwd=None, walltime=WT_LONG, pmem=extra_mem,
                                  deps=jobidvc, stdout=None, email_addresses=my_email)

        # c) select variants at the configured FDR tranche; other
        # tranches can be obtained by re-running with other fdr
        # thresholds.
        command = snp.snps_applyVariantCuts(ref_genome, snps_variantRecal_vcf, snps_model_vcf,
                          tranches_file, fdr_filter_level, snpdb_vcf, path_to_gatk, extra_mem)

        jobidtc = jobrunfunc(jobn+'_tc', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                                  deps=jobidvr, stdout=None, email_addresses=my_email)

        deps_list.extend([jobidhf,jobidtc])

    return deps_list


def snps_calling_multipleSamples(analysis, configrun, num_processors, jobrunfunc, deps=None):
    '''
    Call SNPs jointly over all samples (benign + tumor) with the GATK
    UnifiedGenotyper, then filter the calls by basic quality parameters
    and other criteria such as neighboring indels or clustered SNPs.

    Parameters:
        analysis       -- ExomeAnalysisConfig with the run's file paths
        configrun      -- ExomePipelineConfig with tool paths and settings
        num_processors -- requested processor count (currently unused;
                          core counts come from configrun.gatk_num_cores)
        jobrunfunc     -- job submission callable (qsub or run_local)
        deps           -- optional job id(s) the first step waits on
                          (new, backward-compatible parameter; the
                          __main__ driver already passed this argument)

    Returns:
        [jobidhf, jobidtc] -- job ids of the hard-filter and tranche-cut
        steps, usable as downstream deps.
    '''
    # Tool / resource configuration
    extra_mem, num_cores = configrun.gatk_use_mem, configrun.gatk_num_cores
    path_to_gatk, path_to_picard, path_to_sam = configrun.gatk_path, configrun.picard_path, \
                                                configrun.samtools_path
    path_to_Rscript = configrun.rscript_bin
    resources_folder = configrun.gatk_resource_path
    temp_dir = configrun.gatk_temp_dir
    fdr_filter_level = configrun.gatk_snps_fdr_threshold
    target_exons = configrun.gatk_target_exons
    genomes = configrun.genomes['human']
    ref_genome, snpdb_vcf, indeldb_file = genomes.gatk_ref_genome, genomes.snpdb, \
                                         genomes.indeldb
    hapmap_vcf, tgk_vcf = genomes.hapmap, genomes.OneKgenomes
    my_email = configrun.email_addresses
    # Run parameters: bases of padding added around indels when masking
    mask_indel_padding = 10

    # A cohort corresponds to a biologically meaningful group,
    # e.g. breast tumors vs. breast normals.
    jobn = 'gtk'

    cohort_samples = defaultdict(deque)

    # Split the samples into benign and tumor cohorts.
    for sp in analysis.samples:
        if sp.category == 'benign':
            cohort_samples['benign'].append(sp.sorted_mmarkdup_bam)
        else:
            cohort_samples['tumor'].append(sp.sorted_mmarkdup_bam)

    # NOTE: samples must be tagged with one of GATK's recognized platform
    # keywords (e.g. ILLUMINA, not illumina_ga2).
    list_bam_files_normal = list(cohort_samples['benign'])
    list_bam_files_tumor = list(cohort_samples['tumor'])
    list_bam_all_samples = list_bam_files_normal+list_bam_files_tumor

    # NOTE(review): 'sp' is the last sample left over from the loop above,
    # so the job name depends on iteration order and this raises NameError
    # on an empty sample list -- confirm this is intended.
    jobn = jobn+'_'+sp.name
    check_create_dir(analysis.gatk_calls_dir)

    # Call raw SNPs over all BAMs with the UnifiedGenotyper.
    command = snp.unified_genotyper_multiSample(ref_genome,list_bam_all_samples,
                               analysis.snps_raw_vcf, extra_mem, num_cores, path_to_gatk,
                               interva_list=target_exons, call_parameters=None)

    jobidrc = jobrunfunc(jobn+'_ug', command, num_cores, cwd=None, walltime=WT_LONG, pmem=None,
                              deps=deps, stdout=None, email_addresses=my_email)

    # Call raw indels (paired normal/tumor) for the indel mask.
    command = snp.indelGenotyper_pairedSample(ref_genome, list_bam_files_normal, list_bam_files_tumor,
                                 analysis.indels_vcf, analysis.indels_bed, extra_mem, SINGLE_CORE, path_to_gatk,
                                 interva_list=target_exons, call_parameters=None)

    # TODO: indel genotyping does not use the UnifiedGenotyper output, so
    # it could run in parallel (deps=deps instead of deps=jobidrc).
    jobidig = jobrunfunc(jobn+'_ig', command, SINGLE_CORE, cwd=None, walltime=WT_LONG, pmem=extra_mem,
                              deps=jobidrc, stdout=None, email_addresses=my_email)

    # Build the padded indel mask used to reject SNPs near indels.
    command = snp.generate_indel_maskfile(analysis.indels_bed, resources_folder, mask_indel_padding,
                                          analysis.indels_masked_bed)

    jobidim = jobrunfunc(jobn+'_im', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                              deps=jobidig, stdout=None, email_addresses=my_email)

    # Basic SNP filtering: apply the indel mask and reject SNP clusters
    # within a 10 bp window.
    command = snp.snps_basicFiltering(ref_genome, analysis.snps_raw_vcf, analysis.indels_masked_bed,
                   analysis.snps_basic_vcf, extra_mem, SINGLE_CORE, path_to_gatk,
                   snp_cluster_window=10)

    jobidbf = jobrunfunc(jobn+'_bf', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                              deps=jobidim, stdout=None, email_addresses=my_email)

    # Final SNP filtering is done with two methods; each one constitutes
    # a vote in the majority SNP calling:
    #   I)  hard (threshold) filtering
    #   II) model-based recalibration

    # Method I: hard filtering -> snps_hard_vcf
    command = snp.snps_hardFiltering(ref_genome, analysis.snps_basic_vcf,
                   analysis.snps_hard_vcf, extra_mem, SINGLE_CORE, path_to_gatk)

    jobidhf = jobrunfunc(jobn+'_hf', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                              deps=jobidbf, stdout=None, email_addresses=my_email)

    # Method II: model-based recalibration, in three steps:
    # a) variant cluster generation
    command = snp.snps_generateVariantClusters(ref_genome,analysis.snps_basic_vcf,
                                               analysis.cluster_ouput_file,
                             hapmap_vcf, tgk_vcf, snpdb_vcf, path_to_gatk, extra_mem)

    jobidvc = jobrunfunc(jobn+'_vc', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                              deps=jobidbf, stdout=None, email_addresses=my_email)

    # b) calculation of the variant posterior probability
    command = snp.snps_variantRecalibration(ref_genome,analysis.snps_basic_vcf, analysis.cluster_ouput_file,
                          analysis.snps_variantRecal_vcf, analysis.tranches_file, hapmap_vcf,
                          tgk_vcf, snpdb_vcf, path_to_gatk, path_to_Rscript,
                          resources_folder,extra_mem)

    jobidvr = jobrunfunc(jobn+'_vr', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                              deps=jobidvc, stdout=None, email_addresses=my_email)

    # c) select variants at the configured FDR tranche; other tranches
    # can be obtained by re-running with other fdr thresholds.
    command = snp.snps_applyVariantCuts(ref_genome, analysis.snps_variantRecal_vcf, analysis.snps_model_vcf,
                      analysis.tranches_file, fdr_filter_level, snpdb_vcf, path_to_gatk, extra_mem)

    jobidtc = jobrunfunc(jobn+'_tc', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                              deps=jobidvr, stdout=None, email_addresses=my_email)

    # TODO: SNP annotation of both the hard-filtered and model-filtered
    # calls (ann.create_snps_annotation) is currently disabled; re-enable
    # once the vcfCodSNPs resources are configured.

    return [jobidhf,jobidtc]
        

# TODO: verify that this pipeline runs end to end and completes the job.



if __name__ == '__main__':

    # Command-line driver: parse options, load the run/analysis
    # configuration, and dispatch to the selected calling mode.
    optionparser = OptionParser("usage: %prog [options] ")
    optionparser.add_option("-r", "--config_file", dest="config_file",
                            help="file with run configuration")
    optionparser.add_option("-a", "--analysis_file", dest="analysis_file",
                            help="file with experiment configuration")
    optionparser.add_option("--paired_samples", dest="paired_samples", action="store_true", default=False,
                            help="paired samples snv calling")
    optionparser.add_option("--multi_samples", dest="multi_samples", action="store_true", default=False,
                            help="multi-sample snv calling")
    optionparser.add_option("--local", dest="local", action="store_true", default=False)
    optionparser.add_option("--cluster", dest="cluster", action="store_true", default=False)
    optionparser.add_option("-p", "--processes", type=int, dest="num_processors", default=1)

    (options, args) = optionparser.parse_args()

    config = ExomePipelineConfig()
    config.from_xml(options.config_file)
    analysis = ExomeAnalysisConfig()
    analysis.from_xml(options.analysis_file, config.output_dir)
    # Default when called from the command line: no upstream job deps.
    depends = None

    # Exactly one of --local / --cluster must be set.
    if not (options.local ^ options.cluster):
        optionparser.error("Must set either --local or --cluster to run job")
    if options.local:
        jobrunfunc = run_local
    elif options.cluster:
        jobrunfunc = qsub

    if options.multi_samples:
        # BUGFIX: the original call passed a fifth argument (depends) that
        # snps_calling_multipleSamples's 4-parameter signature rejects,
        # raising TypeError at runtime.
        snps_calling_multipleSamples(analysis, config, options.num_processors, jobrunfunc)
    elif options.paired_samples:
        snps_calling_pairedSamples(analysis, config, options.num_processors, jobrunfunc, depends)