'''
Created on May 18, 2011

@author: oabalbin
'''

'''
Created on Dec 2, 2010

@author: oabalbin
'''
import os
import exome.gatk_cluster.snps_calling_cluster as snp
#import exome.variantEval.snps_annotator as ann
from collections import defaultdict, deque
from optparse import OptionParser

from exome.jobs.base import JOB_SUCCESS, JOB_ERROR
from exome.jobs.job_runner import qsub_cac, qsub_loc, run_local
from exome.jobs.config import ExomePipelineConfig, ExomeAnalysisConfig
from exome.variantEval.variant_intersector import SNPs_isec
from exome.variantEval.snps_annotator import create_snps_annotation


# Global variables: cluster node geometry used to size qsub requests.
NODE_MEM = 45000.0   # total memory per node, in MB
NODE_CORES = 12      # cores per node
SINGLE_CORE = 1      # core count for serial jobs
# Per-core memory budget (MB); NODE_MEM is already a float.
MEM_PER_CORE = int(NODE_MEM / NODE_CORES)
# Walltime limits (wt) for short and long qsub jobs.
WT_SHORT = "24:00:00"
WT_LONG = "60:00:00"  # previously "100:00:00"


def check_create_dir(root_path, dir_name=None):
    '''
    Ensure that root_path exists and, when dir_name is given, that the
    subdirectory root_path/dir_name exists too, creating either as needed.

    Parameters:
        root_path: directory that must exist.
        dir_name: optional subdirectory name to create under root_path.

    Returns:
        The deepest guaranteed-existing path: root_path/dir_name when
        dir_name is given, otherwise root_path.

    The original checked isdir() and then called mkdir(), which raises if
    another process creates the directory in between; here the mkdir is
    retried-tolerant (EAFP) so a concurrent creator is not an error.
    '''
    if dir_name is None:
        subfolder = root_path
    else:
        subfolder = os.path.join(root_path, dir_name)
    # When dir_name is None both entries are root_path; the second pass
    # is then a no-op because the directory already exists.
    for path in (root_path, subfolder):
        if not os.path.isdir(path):
            try:
                os.mkdir(path)
            except OSError:
                # Tolerate a concurrent creation; re-raise real failures.
                if not os.path.isdir(path):
                    raise
    return subfolder



def myddict():
    '''Factory: a defaultdict whose missing keys map to fresh empty deques.'''
    per_key_queues = defaultdict(deque)
    return per_key_queues
  
def nested_ddict():
    '''Factory: two-level defaultdict; each missing outer key yields a
    defaultdict of deques (outer: patient id, inner: sample category).'''
    return defaultdict(lambda: defaultdict(deque))

def define_patients_cohort(analysis):
    '''
    Define a patient and samples that correspond to that patient.
    Divide patient samples into benign and tumor samples
    '''
    cohort_patients = nested_ddict()
    for sp in analysis.samples:
        #cohort_samples = defaultdict(deque)
        if sp.category=='benign':
            cohort_patients[sp.patient_id]['benign'].append(sp.sorted_mmarkdup_bam)
        else:
            cohort_patients[sp.patient_id]['tumor'].append(sp.sorted_mmarkdup_bam)
        print sp.patient_id
        
        #cohort_patients[sp.patient_id] = cohort_samples

    return cohort_patients

def snps_calling_pairedSamples(analysis, configrun, num_processors, jobrunfunc, deps, First=True):
    '''
    Call SNVs on paired benign/tumor exome samples with the GATK
    UnifiedGenotyper, then filter the calls by basic quality parameters
    and proximity to indels or SNP clusters.

    For each patient in the cohort:
      1. (only while First is True) call indels jointly on the paired
         BAMs and build a padded indel-mask BED file;
      2. call raw SNVs independently on the benign and tumor sample sets;
      3. apply basic filtering, then two final filters — a hard-threshold
         filter and a model-based variant recalibration (cluster
         generation, recalibration, tranche cut at the configured FDR);
      4. intersect tumor vs. benign calls (complement) to obtain somatic
         SNVs and submit annotation jobs for the resulting VCFs.

    Parameters:
        analysis: ExomeAnalysisConfig; supplies samples and output paths.
        configrun: ExomePipelineConfig; tool paths and GATK settings.
        num_processors: unused here; per-step core counts come from configrun.
        jobrunfunc: job-submission callable (run_local / qsub_cac style),
            returning a job id usable as a dependency.
        deps: upstream job id(s) the first jobs of each patient wait on.
        First: when True, run the indel calling/masking steps once.

    Returns:
        deps_list — job ids of the tumor-vs-benign intersection jobs.
        NOTE(review): deps_list is re-initialized inside the patient loop,
        so only the LAST patient's ids are returned; confirm intended.
    '''
    # Parameters pulled from the run configuration.
    extra_mem, num_cores = configrun.gatk_use_mem, configrun.gatk_num_cores
    path_to_gatk, path_to_picard, path_to_sam = configrun.gatk_path, configrun.picard_path, \
                                                configrun.samtools_path
    #path_to_vcfCodSNPs = configrun.vcfCodSNPs_path
    # for gatk
    path_to_Rscript = configrun.rscript_bin
    resources_folder = configrun.gatk_resource_path
    temp_dir = configrun.gatk_temp_dir
    fdr_filter_level = configrun.gatk_snps_fdr_threshold
    target_exons = configrun.gatk_target_exons
    call_parameters = configrun.gatk_unifiedGenotyper_params
    genomes = configrun.genomes['human']
    ref_genome, snpdb_vcf, indeldb_file = genomes.gatk_ref_genome, genomes.snpdb, \
                                         genomes.indeldb
    hapmap_vcf, tgk_vcf = genomes.hapmap, genomes.OneKgenomes
    #vcfCodings parameters
    vcfCodSNP_genome = genomes.vcf_ref_genome
    annot_genelist = configrun.annot_genelist
    path_to_vcfCodSNPs = configrun.vcf_annot_path
    # email addresses notified by the job runner.
    my_email=configrun.email_addresses


    # Run parameters
    mask_indel_padding = 10  # bp of padding added around indels in the mask BED
    # Define cohort of patients (patient_id -> {'benign'/'tumor' -> BAMs})
    cohort_patients = define_patients_cohort(analysis)
    # Calling raw indels and creating a mask indel file

    for pid, cohort_samples in cohort_patients.iteritems():

        print cohort_samples

        list_bam_files_normal = list(cohort_samples['benign'])
        list_bam_files_tumor = list(cohort_samples['tumor'])
        print list_bam_files_normal, list_bam_files_tumor

        jobn='gtk_paired'
        check_create_dir(analysis.gatk_calls_dir)

        # NOTE(review): these paths come from `analysis`, not per-patient,
        # so every patient iteration reuses the same indel files — confirm.
        indels_vcf = analysis.indels_vcf
        indels_bed = analysis.indels_bed
        indels_masked_bed=analysis.indels_masked_bed


        if First:
            # Joint indel calling on all normal + tumor BAMs (single job).
            # NOTE(review): keyword 'interva_list' — presumably a typo
            # mirrored in snp.indelGenotyper_pairedSample; confirm there.
            command = snp.indelGenotyper_pairedSample(ref_genome, list_bam_files_normal, list_bam_files_tumor,
                                         indels_vcf, indels_bed, extra_mem, SINGLE_CORE, path_to_gatk,
                                         interva_list=target_exons, call_parameters=None)

            jobidig = jobrunfunc(jobn+'_ig', command, SINGLE_CORE, cwd=None, walltime=WT_LONG, pmem=extra_mem,
                                      deps=deps, stdout=None, email_addresses=my_email)

            #indels_masked_bed = out_dir+cohort+'.gatk_indels.mask.bed'
            # Pad the raw indel BED to produce the mask used by basic filtering.
            command = snp.generate_indel_maskfile(indels_bed, resources_folder, mask_indel_padding,
                                                  indels_masked_bed)

            jobidim = jobrunfunc(jobn+'_im', command, SINGLE_CORE, cwd=None, walltime=WT_LONG, pmem=extra_mem,
                                      deps=jobidig, stdout=None, email_addresses=my_email) #NOTE::::: jobidig
            # NOTE(review): First is cleared here, within the SAME patient
            # iteration, so the `if First:` test below (building thdeps)
            # never sees True — jobidim is never waited on. Likely a bug.
            First=False

        deps_list=[]
        # thpatient_snvs['benign'|'tumor']['hard'|'model'] -> final VCF path
        thpatient_snvs = defaultdict(dict)

        for sptype, list_bam_samples in cohort_samples.iteritems():
            print list_bam_samples
            jobn='gtk'
            jobn=jobn+'_'+sptype#+str(i)
            # Do the snps calls on benign and tumors independently:
            # select the per-category output paths from `analysis`.
            if sptype == 'benign':
                list_bam_all_samples = list_bam_files_normal
                snps_raw_vcf = analysis.benign_snps_raw_vcf
                snps_basic_vcf = analysis.benign_snps_basic_vcf
                snps_hard_vcf = analysis.benign_snps_hard_vcf
                snps_hard_annotated_vcf = analysis.benign_snps_hard_annotated_vcf
                snps_hard_annotated_log = analysis.benign_snps_hard_annotated_log

                cluster_ouput_file = analysis.benign_cluster_ouput_file
                snps_variantRecal_vcf = analysis.benign_snps_variantRecal_vcf
                tranches_file = analysis.benign_tranches_file
                snps_model_vcf = analysis.benign_snps_model_vcf
                #snps_model_annotated_vcf = analysis.benign_snps_model_annotated_vcf
                #snps_model_annotated_vcf = analysis.benign_snps_model_annotated_log

                # Record the final VCFs for the somatic intersection below.
                thpatient_snvs['benign']['hard']=snps_hard_vcf
                thpatient_snvs['benign']['model']=snps_model_vcf


            elif sptype == 'tumor':
                list_bam_all_samples = list_bam_files_tumor
                snps_raw_vcf = analysis.tumor_snps_raw_vcf
                snps_basic_vcf = analysis.tumor_snps_basic_vcf
                snps_hard_vcf = analysis.tumor_snps_hard_vcf
                snps_hard_annotated_vcf = analysis.tumor_snps_hard_annotated_vcf
                snps_hard_annotated_log = analysis.tumor_snps_hard_annotated_log

                cluster_ouput_file = analysis.tumor_cluster_ouput_file
                snps_variantRecal_vcf = analysis.tumor_snps_variantRecal_vcf
                tranches_file = analysis.tumor_tranches_file
                snps_model_vcf = analysis.tumor_snps_model_vcf

                #snps_model_annotated_vcf = analysis.tumor_snps_model_annotated_vcf
                #snps_model_annotated_vcf = analysis.tumor_snps_model_annotated_log

                thpatient_snvs['tumor']['hard']=snps_hard_vcf
                thpatient_snvs['tumor']['model']=snps_model_vcf




            # Calling raw snps
            #snps_raw_vcf=out_dir+cohort+'.gatk_snps.raw.vcf'

            command = snp.unified_genotyper_multiSample(ref_genome,list_bam_all_samples,
                                       snps_raw_vcf, extra_mem, num_cores, path_to_gatk,
                                       target_exons, call_parameters)

            jobidrc = jobrunfunc(jobn+'_ug', command, num_cores, cwd=None, walltime=WT_LONG, pmem=None,
                                      deps=deps, stdout=None, email_addresses=my_email)

            # Basic snp filtering: remove calls near masked indels and
            # within dense SNP clusters (window of 10 bp).
            #jobidrc=None # To eliminate
            command = snp.snps_basicFiltering(ref_genome, snps_raw_vcf, indels_masked_bed,
                           snps_basic_vcf, extra_mem, SINGLE_CORE, path_to_gatk,
                           snp_cluster_window=10)
            # NOTE(review): First is always False here (cleared above), so
            # the jobidim dependency is never included — see note above.
            if First:
                thdeps=[jobidrc,jobidim]
            else:
                thdeps=[jobidrc]

            jobidbf = jobrunfunc(jobn+'_bf', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                                      deps=thdeps, stdout=None, email_addresses=my_email) #deps=[jobidrc,jobidim]


            # Final snp filtering: two methods a) hard filtering
            # b) model based filtering. Each of them constitutes a
            # vote in the majority snp calling.

            # Method I)  Hard Filtering
            # Generates snps_hard_vcf file
            #snps_hard_vcf = snps_basic_vcf.replace('filtered.basic.vcf','filtered.hard.vcf')
            command = snp.snps_hardFiltering(ref_genome, snps_basic_vcf,
                           snps_hard_vcf, extra_mem, SINGLE_CORE, path_to_gatk)

            jobidhf = jobrunfunc(jobn+'_hf', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                                      deps=jobidbf, stdout=None, email_addresses=my_email)

            # Method II) Model based recalibration
            # Three steps: a) cluster generation
            # b) calculation of variant posterior probability
            # c) select variants according to specific fdr
            # Generates snps_model_vcf file
            #cluster_ouput_file = snps_basic_vcf.replace('filtered.basic.vcf','.cluster')
            #snps_variantRecal_vcf = snps_basic_vcf.replace('filtered.basic.vcf','filtered.variantRecal.vcf')
            #snps_model_vcf = snps_basic_vcf.replace('filtered.basic.vcf','filtered.model.vcf')
            #tranches_file = snps_basic_vcf.replace('filtered.basic.vcf','filtered.model.tranches')

            # a) cluster generation
            command = snp.snps_generateVariantClusters(ref_genome, snps_basic_vcf,
                                                       cluster_ouput_file,
                                     hapmap_vcf, tgk_vcf, snpdb_vcf, path_to_gatk, extra_mem)

            jobidvc = jobrunfunc(jobn+'_vc', command, SINGLE_CORE, cwd=None, walltime=WT_LONG, pmem=extra_mem,
                                      deps=jobidbf, stdout=None, email_addresses=my_email)

            # b) calculation of variant posterior probability
            command = snp.snps_variantRecalibration(ref_genome, snps_basic_vcf, cluster_ouput_file,
                                  snps_variantRecal_vcf, tranches_file, hapmap_vcf,
                                  tgk_vcf, snpdb_vcf, path_to_gatk, path_to_Rscript,
                                  resources_folder,extra_mem)

            jobidvr = jobrunfunc(jobn+'_vr', command, SINGLE_CORE, cwd=None, walltime=WT_LONG, pmem=extra_mem,
                                      deps=jobidvc, stdout=None, email_addresses=my_email)

            # c) select variants according to specific fdr.
            # Other tranches can be also selected by calling
            # this function with other fdr thresholds
            command =  snp.snps_applyVariantCuts(ref_genome, snps_variantRecal_vcf, snps_model_vcf,
                              tranches_file, fdr_filter_level, snpdb_vcf, path_to_gatk, extra_mem)

            jobidtc = jobrunfunc(jobn+'_tc', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                                      deps=jobidvr, stdout=None, email_addresses=my_email)

            #deps_list.extend([jobidhf,jobidtc])


        # Intersect the tumor and the benign snvs calls in order
        # to call the somatic snvs.
        # NOTE(review): jobn, jobidhf and jobidtc below carry their values
        # from the LAST inner-loop iteration only; if a patient lacks a
        # category (empty loop) they would be undefined. Confirm intended.
        # Change this hard coding of the names ...
        # TODO: derive a per-patient file name instead of string-replacing;
        # may imply changing the hierarchy of analysis-level file names.
        somatic_snps_hard_vcf = thpatient_snvs['tumor']['hard'].replace('.vcf','.somatic_'+pid+'.vcf')
        somatic_snps_model_vcf = thpatient_snvs['tumor']['model'].replace('.vcf','.somatic_'+pid+'.vcf')

        somatic_snps_hard_annotated_vcf = somatic_snps_hard_vcf.replace('.vcf','.annot.vcf')
        somatic_snps_model_annotated_vcf = somatic_snps_model_vcf.replace('.vcf','.annot.vcf')

        # Somatic calls = tumor calls minus (complement of) benign calls,
        # done separately for the hard-filtered and model-filtered sets.
        do_complement=True
        jobidvcfh = SNPs_isec(thpatient_snvs['tumor']['hard'], [thpatient_snvs['benign']['hard']], configrun,
                              jobrunfunc, do_complement,somatic_snps_hard_vcf, jobidhf)
        jobidvcfm = SNPs_isec(thpatient_snvs['tumor']['model'], [thpatient_snvs['benign']['model']], configrun,
                              jobrunfunc, do_complement,somatic_snps_model_vcf, jobidtc)


        ###### SNPs Annotation
        # Annotate both hard-threshold and model-based called snps.
        # Hard Filtering
        commandA = create_snps_annotation(somatic_snps_hard_vcf, somatic_snps_hard_annotated_vcf,
                                somatic_snps_hard_annotated_vcf, vcfCodSNP_genome,
                                annot_genelist, path_to_vcfCodSNPs)

        commandB = create_snps_annotation(somatic_snps_model_vcf, somatic_snps_model_annotated_vcf,
                                somatic_snps_model_annotated_vcf, vcfCodSNP_genome,
                                annot_genelist, path_to_vcfCodSNPs)

        jobidannA = jobrunfunc(jobn+'_ann', commandA, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                                  deps=jobidvcfh, stdout=None, email_addresses=my_email)

        jobidannB = jobrunfunc(jobn+'_ann', commandB, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem,
                                  deps=jobidvcfm, stdout=None, email_addresses=my_email)


        deps_list.extend([jobidvcfh,jobidvcfm])


    return deps_list

def snps_calling_multipleSamples(analysis, configrun, num_processors, jobrunfunc, deps=None):
    '''
    Placeholder for multi-sample SNV calling (not implemented yet).

    The command-line entry point invokes this function with FIVE positional
    arguments (including the job-dependency list), but the original
    signature only accepted four, so every --multi_samples run raised
    TypeError before doing anything. The new ``deps`` parameter (default
    None, matching snps_calling_pairedSamples) absorbs that argument and
    keeps four-argument callers working.
    '''
    pass


if __name__ == '__main__':

    # Command-line interface: run configuration, analysis description,
    # calling mode (paired/multi), and execution target (local/cluster).
    parser = OptionParser("usage: %prog [options] ")
    parser.add_option("-r", "--config_file", dest="config_file",
                      help="file with run configuration")
    parser.add_option("-a", "--analysis_file", dest="analysis_file",
                      help="file with experiment configuration")
    parser.add_option("--paired_samples", dest="paired_samples", action="store_true", default=False,
                      help="paired samples snv calling")
    parser.add_option("--multi_samples", dest="multi_samples", action="store_true", default=False,
                      help="multi-sample snv calling")
    # NOTE(review): help text below duplicates --multi_samples; kept as-is.
    parser.add_option("--do_indel", dest="do_indel", action="store_true", default=False,
                      help="multi-sample snv calling")
    parser.add_option("--local", dest="local", action="store_true", default=False)
    parser.add_option("--cluster", dest="cluster", action="store_true", default=False)
    parser.add_option("-p", "--processes", type=int, dest="num_processors", default=1)

    options, args = parser.parse_args()

    # Load the pipeline and analysis configuration from their XML files.
    config = ExomePipelineConfig()
    config.from_xml(options.config_file)
    analysis = ExomeAnalysisConfig()
    analysis.from_xml(options.analysis_file, config.output_dir)
    # No upstream job dependencies when launched from the command line.
    depends = None

    # Exactly one of --local / --cluster must be selected (xor).
    if not (options.local ^ options.cluster):
        parser.error("Must set either --local or --cluster to run job")
    jobrunfunc = run_local if options.local else qsub_cac

    if options.multi_samples:
        snps_calling_multipleSamples(analysis, config, options.num_processors, jobrunfunc, depends)
    elif options.paired_samples:
        snps_calling_pairedSamples(analysis, config, options.num_processors,
                                   jobrunfunc, depends, options.do_indel)
