'''
Created on Nov 30, 2010

@author: oabalbin
'''

import logging
import os
import sys
import subprocess
#import argparse
import tempfile

import exome.gatk_cluster.gatk_realignment_cluster as gra
import exome.gatk_cluster.picard_commands_cluster as mypicard
import exome.gatk_cluster.samtools_commands_cluster as mysam
import exome.gatk_cluster.base_quality_calibration_cluster_2 as gqc
import exome.gatk_cluster.cluster_jobs_header as jh

from collections import defaultdict, deque
from optparse import OptionParser

from exome.jobs.base import JOB_SUCCESS, JOB_ERROR
from exome.jobs.job_runner import qsub, run_local
from exome.jobs.config import ExomePipelineConfig, ExomeJobConfig


# Global variables: cluster node resources and PBS walltime limits.
NODE_MEM = 45000.0          # total usable memory per cluster node (MB)
NODE_CORES = 12             # physical cores per node
SINGLE_CORE = 1             # core count for single-threaded tool steps
# Memory (MB) available to each core when a node is fully subscribed.
# NODE_MEM is already a float, so no extra float() conversion is needed.
MEM_PER_CORE = int(NODE_MEM / NODE_CORES)
# wt = walltime, PBS/Torque "HH:MM:SS" format.
# The long-form aliases are kept for backward compatibility with any
# external code that imports them.
WT_SHORT = wt_create_realing_intv = "24:00:00"
WT_LONG = wt_realign = "100:00:00"



def fast_lane_level_processing(jobconfig, configrun):
    '''
    Lane-level GATK processing pipeline submitted as chained qsub jobs.

    Intended workflow per sample lane: realign at known indel sites,
    fix mate information, mark duplicates, recalibrate base qualities;
    then per sample: merge recalibrated lanes, mark duplicates, sort,
    and index the merged BAM.

    Input: a sample lane dictionary with key:sample, values: lanes that belong to that sample
    By default this realignment method uses only known indel Sites to realign.
    The known indel sites are provided in the indeldb_file

    Returns: (last_submitted_job_id, dict mapping sample -> merged sorted BAM path)

    NOTE(review): this function is currently broken/disabled as written:
      * the `jobconfig` parameter is never used, while the main loop reads
        `sample_lanes_dict`, `path_to_intervals` and `gatk_run_dict`, none
        of which are defined in this scope (NameError if ever reached);
      * the unconditional sys.exit(0) below terminates the whole process
        before any jobs are submitted.
    '''

    #### Header Run parameters
    # NOTE(review): dead code kept as a string literal — the old
    # gatk_run_dict-based parameter extraction, superseded by `configrun`.
    '''
    extra_mem, num_cores = gatk_run_dict['use_mem'], gatk_run_dict['num_cores']
    path_to_gatk, path_to_picard, path_to_sam = gatk_run_dict['path_to_gatk'], gatk_run_dict['path_to_picard'], gatk_run_dict['path_to_sam']
    ref_genome, snpdb_file, indeldb_file = gatk_run_dict['ref_genome'], gatk_run_dict['snpdb_file'], gatk_run_dict['indeldb_file'] 
    path_to_intervals = gatk_run_dict['path_to_intervals']
    recal_analysis_outputdir=gatk_run_dict['recal_analysis_outputdir']
    temp_dir, out_dir = gatk_run_dict['temp_dir'],gatk_run_dict['out_dir']
    my_email=['alebalbin@gmail.com']
    '''

    # Pull tool paths, reference databases and resource settings from the
    # run configuration object (attributes defined by ExomePipelineConfig).
    extra_mem, num_cores = configrun.gatk_use_mem, configrun.gatk_num_cores
    path_to_gatk, path_to_picard, path_to_sam = configrun.gatk_path, configrun.picard_path, \
                                                configrun.samtools_path
    ref_genome, snpdb_file, indeldb_file = configrun.genomes.gatk_ref_genome, configrun.genomes.snpdb, \
                                         configrun.indeldb        
    recal_analysis_outputdir=configrun.gatk_recal_anal_dir
    temp_dir =  configrun.gatk_temp_dir
    my_email=configrun.email_addresses

    # cluster parameters
    # Lists
    new_sample_lanes_dict=defaultdict(deque)   # sample -> deque of recalibrated lane BAM paths
    merged_sample_files_dict=defaultdict()     # sample -> merged sorted BAM path

    # NOTE(review): unconditional exit — everything below is unreachable.
    sys.exit(0)

    First=True
    # NOTE(review): `sample_lanes_dict` is not defined in this scope
    # (presumably `jobconfig` was meant); .iteritems() is Python 2 only.
    for sp, thlanes in sample_lanes_dict.iteritems():    
        # Cluster parameters
        jname=str(sp)

        if First:
            # Create realignment intervals at known indel positions\n
            # gatk does not support multi-core here yet
            # NOTE(review): `path_to_intervals` is undefined here (only set
            # in the dead gatk_run_dict block above) — TODO confirm source.
            command, intervals_to_realign = gra.intvTorealign_only_knownSites(ref_genome, extra_mem, SINGLE_CORE, 
                                                                 path_to_intervals, path_to_gatk, 
                                                                 indeldb_file)
            
            jobidks = jh.qsub(jname+'ks', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem, 
                              deps=None, stdout=None, email_addresses=my_email)            
            First=False
            
        for i,indexed_bam_file in enumerate(thlanes):
            
            # NOTE(review): jname accumulates every lane index across the
            # loop ("sp_0_1_2..."); presumably intended to reset per lane.
            jname=jname+'_'+str(i)
            # Realign Bam files at indel sites. Usually using indel knownSites db
            # gatk does not support multi-core here yet
            # This step could include full realignment but it would time intensive 
            command, realigned_bam_file = gra.realign_bam_only_knownSites(ref_genome, indexed_bam_file, 
                                                           intervals_to_realign, indeldb_file, 
                                                           extra_mem*2, SINGLE_CORE, temp_dir,path_to_gatk)
            
            jobidra = jh.qsub(jname+'ra', command, SINGLE_CORE, cwd=None, walltime=WT_LONG, pmem=extra_mem*2, 
                              deps=jobidks, stdout=None, email_addresses=my_email)            
             
            
            # Fix Mate information in the Realigned Bam file. It syncronizes the
            # information of mate pairs
            command, matefixed_bam = mypicard.fixmateInformation(realigned_bam_file, extra_mem, path_to_picard, temp_dir)
            
            jobidfm = jh.qsub(jname+'fm', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem, 
                              deps=jobidra, stdout=None, email_addresses=my_email)            
           
            # Mark duplicates in the Realigned Bam file. It only mark the duplicates
            command, dedup_bam = mypicard.markDuplicates(matefixed_bam, extra_mem, path_to_picard)
            jobidmd = jh.qsub(jname+'md', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem, 
                              deps=jobidfm, stdout=None, email_addresses=my_email)            
            
            ##########            
            # Create a new pbs script for running this job
            # This function launch several qsub jobs for doing the quality recalibration
            # of the bases.
            # NOTE(review): `gatk_run_dict` is undefined in this scope —
            # likely should be `configrun`; verify against gqc's signature.
            jobidqc, recal_bam_file = gqc.main_baseQ_recalibration(ref_genome, snpdb_file, 
                                                               dedup_bam, NODE_MEM, NODE_CORES,
                                                               gatk_run_dict, recal_analysis_outputdir, jobidmd,jname+'qc', my_email)
            # new dictionary with the realigned and 
            # recalibrated bam files paths for each lane that belongs to sample
            new_sample_lanes_dict[sp].append(recal_bam_file)
        
        
        recalibrated_lanes = new_sample_lanes_dict[sp]
        #out_dir outdir for the analysis
        # NOTE(review): `sp` is the dictionary key (a sample-name string);
        # strings have no `output_dir` attribute, so this raises
        # AttributeError if reached — TODO confirm intended output path.
        sample_recal_merged_bam = sp.output_dir+sp+'_merged_recal.bam'        
        # Merge realigned, recalibrated lanes that belong to sample sp
        command = mysam.merge_bam_files(list(recalibrated_lanes), sample_recal_merged_bam, path_to_sam)        
        
        jobimd = jh.qsub(jname+'mb', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=None, 
                              deps=jobidqc, stdout=None, email_addresses=my_email)            

        # Mark duplicates in the merged Bam file\n
        command, merged_dedup_bam = mypicard.markDuplicates(sample_recal_merged_bam, MEM_PER_CORE, path_to_picard)
        
        jobidmmd = jh.qsub(jname+'mmd', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=None, 
                              deps=jobimd, stdout=None, email_addresses=my_email)            
        
        # Finally sort the merged file usin picard
        command, sorted_mdedup_bam = mypicard.sortSam(merged_dedup_bam,MEM_PER_CORE, path_to_picard)
        jobidps = jh.qsub(jname+'ps', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=None, 
                              deps=jobidmmd, stdout=None, email_addresses=my_email)
        
        # Index the merged sorted file for use with gatk
        command, brs_indexed = mypicard.buildIndex(sorted_mdedup_bam, MEM_PER_CORE, path_to_picard)
        jobidsi = jh.qsub(jname+'si', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=None, 
                              deps=jobidps, stdout=None, email_addresses=my_email)



        merged_sample_files_dict[sp]=sorted_mdedup_bam
        
    
    # Return the last submitted job id (index step of the final sample)
    # and the sample -> merged BAM mapping.
    return  jobidsi, merged_sample_files_dict



def fast_sample_realignment(sample_lanes_dict, gatk_run_dict):
    '''
    Planned lane-then-sample realignment strategy (NOT IMPLEMENTED).

    Intended workflow, per the author's pseudocode:

    for each lane.bam
        realigned.bam <- realign(lane.bam) [at only known sites]
        dedup.bam <- MarkDuplicate(realigned.bam)
        recal.bam <- recal(dedup.bam)

    for each sample
        recals.bam <- merged lane-level recal.bam's for sample
        dedup.bam <- MarkDuplicates(recals.bam)
        realigned.bam <- realign(dedup.bam) [with known sites if possible]

    NOTE(review): stub — the body is empty, so calling this returns None.
    '''


def sample_level_processing(sample_lanes_dict, gatk_run_dict):
    '''
    Planned sample-level processing strategy (NOT IMPLEMENTED).

    Intended workflow, per the author's pseudocode:

    for each sample
        lanes.bam <- merged lane.bam's for sample
        dedup.bam <- MarkDuplicates(lanes.bam)
        realigned.bam <- realign(dedup.bam) [with known sites if possible]
        recal.bam <- recal(realigned.bam)

    NOTE(review): stub — the body is empty, so calling this returns None.
    '''




# NOTE(review): dead code retained as a module-level string literal — an
# earlier argparse-based driver (`realignment_job`) that is never executed.
# Kept verbatim; consider deleting it and relying on version-control history.
'''
def realignment_job(job_file, config_file, num_processors, runfunc, deps=None):
    #TODO: Remember to leave three options for the realignment.
       
    config = ExomePipelineConfig()
    config.from_xml(config_file)
    job  = ExomeJobConfig()
    job.from_cfg(job_file, config.output_dir)
    
    if not os.path.exists(job.output_dir):
        logging.error("%s: output directory %s does not exist" % (job.name, job.output_dir))
        return JOB_ERROR
    # create alignment directory
    if not os.path.exists(job.align_dir):
        logging.info("%s: created alignment directory %s" % (job.name, job.align_dir))
        os.makedirs(job.align_dir)
        
    # run job
    #realignment_method = 
    # run job
    job_id, bam_file = main_bwa_alignment(job, config, num_processors, runfunc, deps)
    return job_id, bam_file
    
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    parser = argparse.ArgumentParser()
    parser.add_argument("--local", dest="local", action="store_true", default=False)
    parser.add_argument("--cluster", dest="cluster", action="store_true", default=False)
    parser.add_argument("-p", "--processes", type=int, dest="num_processors", default=1)
    parser.add_argument("job_file")
    parser.add_argument("config_file")
    options = parser.parse_args()
    if not (options.local ^ options.cluster):
        parser.error("Must set either --local or --cluster to run job")
    if options.local:
        jobrunfunc = run_local
        runmode = "--cluster"
    elif options.cluster:
        jobrunfunc = qsub
        runmode = "--local"
        
    run_job(options.job_file, options.config_file, options.num_processors, jobrunfunc, runmode)
    
    fast_lane_level_processing(options.job_file, options.config_file, sample_lanes_dict, gatk_run_dict)
'''
    
if __name__ == '__main__':
    # Command-line driver: parse the run/experiment configuration files and
    # launch the lane-level processing pipeline.
    optionparser = OptionParser("usage: %prog [options] ")

    optionparser.add_option("-r", "--configrun", dest="configrun",
                            help="file with run configuration")
    # BUG FIX: dest was "--configana", which stores the value under an
    # attribute name that cannot be accessed with dot notation
    # (options.--configana is invalid Python). Use a plain identifier.
    optionparser.add_option("-e", "--configana", dest="configana",
                            help="file with experiment configuration")
    (options, args) = optionparser.parse_args()

    # Load the pipeline run configuration from the XML file given with -r.
    config = ExomePipelineConfig()
    config.from_xml(options.configrun)

    # NOTE(review): dead debug code retained as a string literal.
    '''
    config_file = sys.argv[1]
    config = ExomePipelineConfig()
    config.from_xml(config_file)
    
    print config.gatk_snps_fdr_threshold
    print config.recal_anal_dir
    print config.target_exons

    '''

    # Hard-coded sample -> lane-config mapping used while testing.
    mydict = {'aM18': ['lane1.cfg', 'lane2.cfg']}
    samplejobs = {}

    # BUG FIX: the original call passed `sample_lanes_dict` and
    # `gatk_run_dict`, neither of which is defined here (NameError).
    # fast_lane_level_processing(jobconfig, configrun) takes the
    # sample->lanes mapping and the parsed run configuration.
    fast_lane_level_processing(mydict, config)

    # NOTE(review): dead code and example data retained as a string literal.
    '''
    for sample_name, lane_files in mydict.iteritems():
        jobs = []
        for job_file in lane_files:
            job  = ExomeJobConfig()
            job.from_cfg(job_file, config.output_dir)

            jobs.append(job)
            
            print job
            
        samplejobs[sample_name] = jobs
    
    for sample_name, sample_lane_jobs in samplejobs.iteritems():
        for job in jobs:
            print sample_name, job.name, job.align_bam_file
            
    
    fast_lane_level_processing(sample_lanes_dict, gatk_run_dict)
    
    aM18 mctp_615YDAAXX_3 /nobackup/med-mctp/projects/exome/mctp_615YDAAXX_3/align/aligned_reads.bam
    aM18 mctp_615YDAAXX_4 /nobackup/med-mctp/projects/exome/mctp_615YDAAXX_4/align/aligned_reads.bam
    
    sys.exit(0)
    
    sample_lanes_dict={'aM18':['/nobackup/med-mctp/oabalbin/test/s_3_12_sequence.txt.psorted.bam',
                               '/nobackup/med-mctp/oabalbin/test/s_4_12_sequence.txt.psorted.bam' ]}
    
    sample_lanes_dict2={'aM18':['/nobackup/med-mctp/oabalbin/test/s_4_12_sequence.txt.psorted.realigned.bam',
                               '/nobackup/med-mctp/oabalbin/test/s_3_12_sequence.txt.psorted.realigned.bam' ]}

    
    gatk_run_dict = {'path_to_gatk':'/nobackup/med-mctp/sw/bioinfo/gatk/GenomeAnalysisTK-1.0.4705/',
                     'path_to_picard':'/nobackup/med-mctp/sw/bioinfo/picard/picard-tools-1.35/', 
                     'path_to_sam':'/nobackup/med-mctp/sw/bioinfo/samtools/samtools-0.1.10/',
                     'resources_folder':'/nobackup/med-mctp/sw/bioinfo/gatk/GenomeAnalysisTK-1.0.4705/resources/', 
                     'rscipt_path':'/home/software/rhel5/R/2.10.1-gcc/bin/Rscript',
                     'use_mem':8000, 'num_cores':1,
                     'ref_genome':'/nobackup/med-mctp/sw/alignment_indexes/gatk/hg19/hg19.fa', 
                     'snpdb_file':'/nobackup/med-mctp/sw/alignment_indexes/gatk/hg19/dbsnp132_00-All_processed.vcf',
                     'indeldb_file':'/nobackup/med-mctp/sw/alignment_indexes/gatk/hg19/dbsnp132_00-All_processed.vcf',
                     'path_to_intervals':'/nobackup/med-mctp/oabalbin/test/',
                     'recal_analysis_outputdir':'/nobackup/med-mctp/oabalbin/test/recal_analysis/',
                     'temp_dir':'/nobackup/med-mctp/oabalbin/test/temp/',
                     'qsubfile':'/nobackup/med-mctp/oabalbin/test/',
                     'out_dir':'/nobackup/med-mctp/oabalbin/test/'
                     }
    
    '''