'''
Created on Dec 2, 2010

@author: oabalbin
'''

import exome.gatk_cluster.cluster_jobs_header as jh
import exome.gatk_cluster.snps_calling_cluster as snp
from collections import defaultdict, deque



def snps_calling_pairedSamples(cohort_dict, gatk_run_dict):
    '''
    Submit the GATK SNP-calling pipeline for cohorts of paired
    normal/tumor samples.

    For every cohort a chain of dependent cluster jobs is queued with
    jh.qsub, each step depending on the previous one:
      1. UnifiedGenotyper raw SNP calls over all normal+tumor BAMs
      2. IndelGenotyper raw indel calls
      3. indel mask BED (each indel padded by mask_indel_padding bp)
      4. basic SNP filtering (quality, indel mask, SNP clusters)
      5a. hard filtering                  -> *.filtered.hard.vcf
      5b. model-based recalibration      -> *.filtered.model.vcf
          (variant clusters, recalibration, tranche cut at the
          configured FDR threshold)
    Both 5a and 5b are run so each method contributes a vote to a
    downstream majority SNP call set.

    cohort_dict -- {cohort: {'normals': [bam, ...], 'tumors': [bam, ...]}}
    gatk_run_dict -- tool paths, reference files and run parameters
        (see the __main__ block for the expected keys).

    Returns None; its only effect is cluster job submission.
    '''
    use_mem, num_cores = gatk_run_dict['use_mem'], gatk_run_dict['num_cores']
    path_to_gatk = gatk_run_dict['path_to_gatk']
    ref_genome = gatk_run_dict['ref_genome']
    dbsnp_vcf = gatk_run_dict['snpdb_file']
    hapmap_vcf, tgk_vcf = gatk_run_dict['hapmap_vcf'], gatk_run_dict['tgk_vcf']
    resources_folder = gatk_run_dict['resources_folder']
    path_to_Rscript = gatk_run_dict['rscipt_path']  # key is spelled this way in the config
    out_dir = gatk_run_dict['out_dir']
    fdr_filter_level = gatk_run_dict['snps_fdr_threshold']
    target_exons = gatk_run_dict['target_exons']
    my_email = ['alebalbin@gmail.com']

    # cluster parameters
    node_processors = 12      # cores requested by the multi-threaded UG job
    single_processor = 1
    extra_mem = 8000          # per-job memory (MB) passed to qsub as pmem
    short_wtime = "24:00:00"  # walltime for every job in this pipeline
    # run parameters
    mask_indel_padding = 10   # bp of padding around each indel in the mask

    # A cohort is a biologically meaningful group of samples,
    # e.g. breast tumors and their matched normals.
    for cohort, cohort_samples in cohort_dict.iteritems():
        list_bam_files_normal = cohort_samples['normals']
        list_bam_files_tumor = cohort_samples['tumors']

        # 1) Raw SNP calling with the UnifiedGenotyper.
        snps_raw_vcf = out_dir + cohort + '.snps.raw.vcf'
        command = snp.unified_genotyper_pairedSample(ref_genome, list_bam_files_normal, list_bam_files_tumor,
                                   snps_raw_vcf, extra_mem, node_processors, path_to_gatk,
                                   interva_list=target_exons, call_parameters=None)

        jobidrc = jh.qsub(cohort+'_ug', command, node_processors, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=None, stdout=None, email_addresses=my_email)

        # 2) Raw indel calling; also emits a BED used to build the mask.
        indels_vcf = out_dir + cohort + '.indels.raw.vcf'
        indels_bed = out_dir + cohort + '.indels.raw.bed'

        command = snp.indelGenotyper_pairedSample(ref_genome, list_bam_files_normal, list_bam_files_tumor,
                                     indels_vcf, indels_bed, extra_mem, single_processor, path_to_gatk,
                                     interva_list=target_exons, call_parameters=None)

        jobidig = jh.qsub(cohort+'_ig', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidrc, stdout=None, email_addresses=my_email)

        # 3) Indel mask file.  (Fixed: the name used to contain a stray
        #    '_' that produced files like "Prost_.indels.mask.bed".)
        indels_masked_bed = out_dir + cohort + '.indels.mask.bed'
        command = snp.generate_indel_maskfile(indels_bed, resources_folder, mask_indel_padding, indels_masked_bed)

        jobidim = jh.qsub(cohort+'_im', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidig, stdout=None, email_addresses=my_email)

        # 4) Basic SNP filtering against quality, the indel mask and
        #    clusters of nearby SNPs.
        snps_basic_vcf = snps_raw_vcf.replace('raw.vcf', 'filtered.basic.vcf')
        command = snp.snps_basicFiltering(ref_genome, snps_raw_vcf, indels_masked_bed,
                       snps_basic_vcf, extra_mem, single_processor, path_to_gatk,
                       snp_cluster_window=10)

        jobidbf = jh.qsub(cohort+'_bf', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidim, stdout=None, email_addresses=my_email)

        # 5a) Method I: hard filtering.
        snps_hard_vcf = snps_basic_vcf.replace('filtered.basic.vcf', 'filtered.hard.vcf')
        command = snp.snps_hardFiltering(ref_genome, snps_basic_vcf,
                       snps_hard_vcf, use_mem, num_cores, path_to_gatk)

        jobidhf = jh.qsub(cohort+'_hf', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidbf, stdout=None, email_addresses=my_email)

        # 5b) Method II: model-based recalibration, three steps.
        cluster_output_file = snps_basic_vcf.replace('.filtered.basic.vcf', '.cluster')
        snps_variantRecal_vcf = snps_basic_vcf.replace('filtered.basic.vcf', 'filtered.variantRecal.vcf')
        snps_model_vcf = snps_basic_vcf.replace('filtered.basic.vcf', 'filtered.model.vcf')
        tranches_file = snps_basic_vcf.replace('filtered.basic.vcf', 'filtered.model.tranches')

        # 5b-i) variant cluster generation
        command = snp.snps_generateVariantClusters(ref_genome, snps_basic_vcf, cluster_output_file,
                                 hapmap_vcf, tgk_vcf, dbsnp_vcf, path_to_gatk, use_mem)

        jobidvc = jh.qsub(cohort+'_vc', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidbf, stdout=None, email_addresses=my_email)

        # 5b-ii) variant posterior probability calculation
        command = snp.snps_variantRecalibration(ref_genome, snps_basic_vcf, cluster_output_file,
                              snps_variantRecal_vcf, tranches_file, hapmap_vcf,
                              tgk_vcf, dbsnp_vcf, path_to_gatk, path_to_Rscript,
                              resources_folder, use_mem)

        jobidvr = jh.qsub(cohort+'_vr', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidvc, stdout=None, email_addresses=my_email)

        # 5b-iii) apply the tranche cut at the requested FDR level.
        # Other tranches can be selected by re-running with other
        # fdr thresholds.
        command = snp.snps_applyVariantCuts(ref_genome, snps_variantRecal_vcf, snps_model_vcf,
                          tranches_file, fdr_filter_level, dbsnp_vcf, path_to_gatk, use_mem)

        jobidtc = jh.qsub(cohort+'_tc', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidvr, stdout=None, email_addresses=my_email)


def snps_calling_multipleSamples(cohort_dict, gatk_run_dict):
    '''
    Submit the GATK SNP-calling pipeline, calling all samples of a
    cohort (normals and tumors pooled) jointly.

    For every cohort a chain of dependent cluster jobs is queued with
    jh.qsub:
      1. UnifiedGenotyper raw SNP calls over the pooled BAM list
      2. IndelGenotyper raw indel calls (normal/tumor lists kept apart)
      3. indel mask BED (each indel padded by mask_indel_padding bp)
      4. basic SNP filtering (quality, indel mask, SNP clusters)
      5a. hard filtering                  -> *.filtered.hard.vcf
      5b. model-based recalibration      -> *.filtered.model.vcf
          (variant clusters, recalibration, tranche cut at the
          configured FDR threshold)
    Both 5a and 5b are run so each method contributes a vote to a
    downstream majority SNP call set.

    cohort_dict -- {cohort: {'normals': [bam, ...], 'tumors': [bam, ...]}}
    gatk_run_dict -- tool paths, reference files and run parameters
        (see the __main__ block for the expected keys).

    Returns None; its only effect is cluster job submission.
    '''
    use_mem, num_cores = gatk_run_dict['use_mem'], gatk_run_dict['num_cores']
    path_to_gatk = gatk_run_dict['path_to_gatk']
    ref_genome = gatk_run_dict['ref_genome']
    dbsnp_vcf = gatk_run_dict['snpdb_file']
    hapmap_vcf, tgk_vcf = gatk_run_dict['hapmap_vcf'], gatk_run_dict['tgk_vcf']
    resources_folder = gatk_run_dict['resources_folder']
    path_to_Rscript = gatk_run_dict['rscipt_path']  # key is spelled this way in the config
    out_dir = gatk_run_dict['out_dir']
    fdr_filter_level = gatk_run_dict['snps_fdr_threshold']
    target_exons = gatk_run_dict['target_exons']
    my_email = ['alebalbin@gmail.com']

    # cluster parameters
    node_processors = 6       # cores requested by the multi-threaded UG job
    single_processor = 1
    extra_mem = 8000          # per-job memory (MB) passed to qsub as pmem
    # wt = walltime
    short_wtime = "24:00:00"
    long_wtime = "60:00:00"   # the joint UG/IG calls over all samples run long
    # run parameters
    mask_indel_padding = 10   # bp of padding around each indel in the mask

    # A cohort is a biologically meaningful group of samples,
    # e.g. breast tumors and their matched normals.
    for i, (cohort, cohort_samples) in enumerate(cohort_dict.iteritems()):
        list_bam_files_normal = cohort_samples['normals']
        list_bam_files_tumor = cohort_samples['tumors']
        list_bam_all_samples = list_bam_files_normal + list_bam_files_tumor
        # Per-cohort job-name prefix.  (Fixed: the old jobn=jobn+str(i)
        # accumulated across iterations, yielding gtk0, gtk01, gtk012, ...)
        jobn = 'gtk%d' % i

        # 1) Raw joint SNP calling with the UnifiedGenotyper.
        snps_raw_vcf = out_dir + cohort + '.gatk_snps.raw.vcf'
        command = snp.unified_genotyper_multiSample(ref_genome, list_bam_all_samples,
                                   snps_raw_vcf, extra_mem, node_processors, path_to_gatk,
                                   interva_list=target_exons, call_parameters=None)

        jobidrc = jh.qsub(jobn+'_ug', command, node_processors, cwd=None, walltime=long_wtime, pmem=None,
                                  deps=None, stdout=None, email_addresses=my_email)

        # 2) Raw indel calling; also emits a BED used to build the mask.
        indels_vcf = out_dir + cohort + '.gatk_indels.raw.vcf'
        indels_bed = out_dir + cohort + '.gatk_indels.raw.bed'

        command = snp.indelGenotyper_pairedSample(ref_genome, list_bam_files_normal, list_bam_files_tumor,
                                     indels_vcf, indels_bed, extra_mem, single_processor, path_to_gatk,
                                     interva_list=target_exons, call_parameters=None)

        jobidig = jh.qsub(jobn+'_ig', command, single_processor, cwd=None, walltime=long_wtime, pmem=extra_mem,
                                  deps=jobidrc, stdout=None, email_addresses=my_email)

        # 3) Indel mask file.
        indels_masked_bed = out_dir + cohort + '.gatk_indels.mask.bed'
        command = snp.generate_indel_maskfile(indels_bed, resources_folder, mask_indel_padding, indels_masked_bed)

        jobidim = jh.qsub(jobn+'_im', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidig, stdout=None, email_addresses=my_email)

        # 4) Basic SNP filtering against quality, the indel mask and
        #    clusters of nearby SNPs.
        snps_basic_vcf = snps_raw_vcf.replace('raw.vcf', 'filtered.basic.vcf')
        command = snp.snps_basicFiltering(ref_genome, snps_raw_vcf, indels_masked_bed,
                       snps_basic_vcf, extra_mem, single_processor, path_to_gatk,
                       snp_cluster_window=10)

        jobidbf = jh.qsub(jobn+'_bf', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidim, stdout=None, email_addresses=my_email)

        # 5a) Method I: hard filtering.
        snps_hard_vcf = snps_basic_vcf.replace('filtered.basic.vcf', 'filtered.hard.vcf')
        command = snp.snps_hardFiltering(ref_genome, snps_basic_vcf,
                       snps_hard_vcf, use_mem, num_cores, path_to_gatk)

        jobidhf = jh.qsub(jobn+'_hf', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidbf, stdout=None, email_addresses=my_email)

        # 5b) Method II: model-based recalibration, three steps.
        # (Fixed: replacing 'filtered.basic.vcf' with '.cluster' left a
        # double dot, e.g. "X.gatk_snps..cluster"; now matches the
        # paired-sample pipeline's naming.)
        cluster_output_file = snps_basic_vcf.replace('.filtered.basic.vcf', '.cluster')
        snps_variantRecal_vcf = snps_basic_vcf.replace('filtered.basic.vcf', 'filtered.variantRecal.vcf')
        snps_model_vcf = snps_basic_vcf.replace('filtered.basic.vcf', 'filtered.model.vcf')
        tranches_file = snps_basic_vcf.replace('filtered.basic.vcf', 'filtered.model.tranches')

        # 5b-i) variant cluster generation
        command = snp.snps_generateVariantClusters(ref_genome, snps_basic_vcf, cluster_output_file,
                                 hapmap_vcf, tgk_vcf, dbsnp_vcf, path_to_gatk, use_mem)

        jobidvc = jh.qsub(jobn+'_vc', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidbf, stdout=None, email_addresses=my_email)

        # 5b-ii) variant posterior probability calculation
        command = snp.snps_variantRecalibration(ref_genome, snps_basic_vcf, cluster_output_file,
                              snps_variantRecal_vcf, tranches_file, hapmap_vcf,
                              tgk_vcf, dbsnp_vcf, path_to_gatk, path_to_Rscript,
                              resources_folder, use_mem)

        jobidvr = jh.qsub(jobn+'_vr', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidvc, stdout=None, email_addresses=my_email)

        # 5b-iii) apply the tranche cut at the requested FDR level.
        # Other tranches can be selected by re-running with other
        # fdr thresholds.
        command = snp.snps_applyVariantCuts(ref_genome, snps_variantRecal_vcf, snps_model_vcf,
                          tranches_file, fdr_filter_level, dbsnp_vcf, path_to_gatk, use_mem)

        jobidtc = jh.qsub(jobn+'_tc', command, single_processor, cwd=None, walltime=short_wtime, pmem=extra_mem,
                                  deps=jobidvr, stdout=None, email_addresses=my_email)

# This pipeline needs to be checked to see if it runs and complete the job
if __name__ == '__main__':

    # Smoke-test configuration: a single cohort ('Prost') with one
    # matched normal/tumor BAM pair.
    cohort_dict = {'Prost':{'normals':['/nobackup/med-mctp/oabalbin/test/s_4_12_sequence.txt.psorted.realigned.fixMate.markdup.bam'],
                            'tumors':['/nobackup/med-mctp/oabalbin/test/s_3_12_sequence.txt.psorted.realigned.fixMate.markdup.bam']}}

    # Tool locations, reference/annotation files and run parameters
    # read by the snps_calling_* drivers above.
    # NOTE(review): the key 'rscipt_path' is misspelled but is the name
    # the drivers look up — do not "fix" one side without the other.
    gatk_run_dict = {'path_to_gatk':'/nobackup/med-mctp/sw/bioinfo/gatk/GenomeAnalysisTK-1.0.4705/',
                     'path_to_picard':'/nobackup/med-mctp/sw/bioinfo/picard/picard-tools-1.35/', 
                     'path_to_sam':'/nobackup/med-mctp/sw/bioinfo/samtools/samtools-0.1.10/',
                     'resources_folder':'/nobackup/med-mctp/sw/bioinfo/gatk/GenomeAnalysisTK-1.0.4705/resources/', 
                     'rscipt_path':'/home/software/rhel5/R/2.10.1-gcc/bin/Rscript',
                     'use_mem':8000, 'num_cores':1,
                     'ref_genome':'/nobackup/med-mctp/sw/alignment_indexes/gatk/hg19/hg19.fa', 
                     'snpdb_file':'/nobackup/med-mctp/sw/alignment_indexes/gatk/hg19/dbsnp132_00-All_processed.vcf',
                     'indeldb_file':'/nobackup/med-mctp/sw/alignment_indexes/gatk/hg19/dbsnp132_00-All_processed.vcf',
                     'hapmap_vcf':'/nobackup/med-mctp/sw/alignment_indexes/gatk/hg19/genotypes_r27_nr.hg19_fwd.processed.vcf', 
                     'tgk_vcf':'/nobackup/med-mctp/sw/alignment_indexes/gatk/hg19/OneKGenomes_2of4intersection.20100804.sites.vcf',
                     'target_exons':'/nobackup/med-mctp/sw/alignment_indexes/agilent/hg19/exome/Agilent_SureSelect_All_Exon_G3362.v2.hg19.bed',
                     'path_to_intervals':'/nobackup/med-mctp/oabalbin/test/',
                     'recal_analysis_outputdir':'/nobackup/med-mctp/oabalbin/test/recal_analysis/',
                     'temp_dir':'/nobackup/med-mctp/oabalbin/test/temp/',
                     'qsubfile':'/nobackup/med-mctp/oabalbin/test/',
                     'out_dir':'/nobackup/med-mctp/oabalbin/test/',
                     'snps_fdr_threshold':0.1   # FDR tranche cut for model-based filtering
                     }

    # Joint multi-sample calling is the active driver; the paired-sample
    # variant is kept for reference.
    #snps_calling_pairedSamples(cohort_dict,gatk_run_dict)
    snps_calling_multipleSamples(cohort_dict,gatk_run_dict)
    