'''
Created on Nov 30, 2010

@author: oabalbin
'''

import logging
import os
import sys
import subprocess
#import argparse
import tempfile
import lxml.etree as etree

import exome.gatk_cluster.gatk_realignment_cluster as gra
import exome.gatk_cluster.picard_commands_cluster as mypicard
import exome.gatk_cluster.samtools_commands_cluster as mysam
import exome.gatk_cluster.base_quality_calibration_job as gqc
#import exome.gatk_cluster.cluster_jobs_header as jh

from collections import defaultdict, deque
from optparse import OptionParser

from exome.jobs.base import JOB_SUCCESS, JOB_ERROR,up_to_date,job_done,up_to_date_job
from exome.jobs.job_runner import qsub_cac, qsub_loc, run_local
from exome.jobs.config import ExomePipelineConfig, ExomeAnalysisConfig

# define base paths to gatk realignment job scripts
_jobs_dir = os.path.abspath(os.path.dirname(__file__))


# Global Variables
# Cluster node geometry used to size PBS resource requests.
NODE_MEM=45000.0  # total memory per node (presumably MB -- matches pmem usage; confirm)
NODE_CORES=12  # physical cores per node
SINGLE_CORE=1  # convenience constant for single-core job submissions
MEM_PER_CORE= int(float(NODE_MEM) / NODE_CORES)  # memory available per core
NODE_CORES2=NODE_CORES/2  # half a node (integer division under Python 2)
#EXTRA_MEM2=MEM_PER_CORE*NODE_CORES2
EXTRA_MEM2=16000  # per-core memory passed to the recalibration job script
# wt=walltime
WT_SHORT= "24:00:00"
# NOTE(review): the chained assignment below also creates a module-level
# name `wt_realign` that is never referenced elsewhere in this file.
WT_LONG=wt_realign="100:00:00"


def check_create_dir(root_path, dir_name=None):
    '''
    Ensure that root_path exists (creating it if missing) and, when
    dir_name is given, ensure the subdirectory root_path/dir_name exists
    as well.

    Parameters:
        root_path -- directory to create if missing.  Its parent must
                     already exist, since os.mkdir (not os.makedirs) is used.
        dir_name  -- optional name of a subdirectory to create under
                     root_path.

    Returns:
        The deepest directory ensured: os.path.join(root_path, dir_name)
        when dir_name is given, otherwise root_path itself.
    '''
    if not os.path.isdir(root_path):
        os.mkdir(root_path)
    if dir_name is not None:
        subfolder = os.path.join(root_path, dir_name)
        if not os.path.isdir(subfolder):
            os.mkdir(subfolder)
    else:
        subfolder = root_path
    return subfolder

def remove_temp_files_lane(thlane):
    '''
    Build an ``rm`` command (as an argument list) for the temporary and
    redundant lane-level files: the realigned BAM, the mate-fixed BAM and
    the unindexed recalibrated BAM (the indexed recalibrated file is the
    one preserved).

    Parameters:
        thlane -- lane object exposing realigned_bam_file,
                  matefixed_bam_file and recal_bam_file_name attributes.

    Returns:
        ['rm', realigned_bam, matefixed_bam, recal_bam]
    '''
    return ['rm', thlane.realigned_bam_file, thlane.matefixed_bam_file,
            thlane.recal_bam_file_name]


def remove_temp_files_sample(thsp):
    '''
    Build an ``rm`` command (as an argument list) for the temporary and
    redundant sample-level files: the merged recalibrated BAM and the
    merged duplicate-marked BAM (the sorted/indexed merged file is the
    one preserved).

    Parameters:
        thsp -- sample object exposing merged_recalibrated_bam and
                merged_markdup_file attributes.

    Returns:
        ['rm', merged_recalibrated_bam, merged_markdup_bam]
    '''
    return ['rm', thsp.merged_recalibrated_bam, thsp.merged_markdup_file]


def create_symbolic_link(ifile, sfile):
    '''
    Return a shell command string that creates a symbolic link named
    sfile pointing at ifile (``ln -s ifile sfile``).

    Note: the previous implementation joined the arguments on ',' and then
    replaced every ',' with a space, which corrupted any path containing a
    comma.  Joining directly on a space avoids that.
    '''
    return ' '.join(['ln', '-s', ifile, sfile])



def fast_lane_level_processing(analysis, configrun, num_processors, jobrunfunc, depends, config_file=None, First=True):
    '''
    Input: a sample lane dictionary with key:sample, values: lanes that belong to that sample
    By default this realignment method uses only known indel Sites to realign. 
    The known indel sites are provided in the indeldb_file
    gatk_run_dict is a dictionary with the parameters to run the gatk realignment walker
        indeldb_file
        
    '''
    
    #### Header Run parameters
    
    extra_mem, num_cores = configrun.gatk_use_mem, configrun.gatk_num_cores
    path_to_gatk, path_to_picard, path_to_sam = configrun.gatk_path, configrun.picard_path, \
                                                configrun.samtools_path
    genomes = configrun.genomes['human']
    ref_genome, snpdb_file, indeldb_file = genomes.gatk_ref_genome, genomes.snpdb, \
                                         genomes.indeldb        
    recal_analysis_outputdir=configrun.gatk_recal_anal_dir
    temp_dir =  configrun.gatk_temp_dir
    my_email=configrun.email_addresses
    
    # cluster parameters        
    # Lists
    new_sample_lanes_dict=defaultdict(deque)
    merged_sample_files_dict=defaultdict()
    if depends is not None:
        prev_deps=depends[:]
    else:
        prev_deps=[]
        
    #First=True#False
    #jobidks=None
    real_deps=[]
    for sp in analysis.samples:
            # Cluster parameters
            jname='rra'
            sp_name=sp.name
            check_create_dir(sp.output_dir)
            # Check if create known realignment sites is needed
            if configrun.gatk_known_realignment_sites is not None:
                intervals_to_realign = configrun.gatk_known_realignment_sites

            else:
                if First:
                    intervals_to_realign = analysis.known_realignment_sites
                    # Create realignment intervals at known indel positions\n
                    # gatk does not support multi-core here yet
                    this_analysis_dir, dir_name = analysis.analysis_dir,analysis.name
                    check_create_dir(this_analysis_dir, dir_name)
                    
                    command = gra.intvTorealign_only_knownSites(ref_genome, extra_mem, SINGLE_CORE, 
                                                                         intervals_to_realign, path_to_gatk, 
                                                                         indeldb_file)
                    
                    jobidks = jobrunfunc(jname+'_createIntervals', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem, 
                                      deps=None, stdout=None, email_addresses=my_email)            
                    First=False
                    prev_deps.extend([jobidks])
                
            
            # Start Realignment
            for i,thlane in enumerate(sp.lanes):
                
                jname=jname+'_'+thlane.lib_id #rra_SI_#
                check_create_dir(thlane.gatk_dir)
                #output the lane information to a separate directory
                xmlstring = thlane.to_xml()
                lane_file = os.path.join(thlane.gatk_dir, thlane.name + ".xml")
                
                f = open(lane_file, "w")
                print >>f, xmlstring
                f.close()

                # Realign Bam files at indel sites. Usually using indel knownSites db
                # gatk does not support multi-core here yet
                # This step could include full realignment but it would time intensive
                
                # Package 1 starts:  
                indexed_bam_file = thlane.align_bam_file
                realigned_bam_file = thlane.realigned_bam_file
                dedup_bam = thlane.markdup_bam_file
                recal_bam_file=thlane.recal_bam_indexed_name
                
                if up_to_date(dedup_bam,dedup_bam) and up_to_date_job(dedup_bam):
                    logging.info("[SKIPPED] GATK Realignment of BAM file. %s file is up to date" % (dedup_bam))
                    jobidmd=[]
                    jobidmd.extend(prev_deps)
                else:

                    logging.info("Starting GATK Realignment of BAM file %s" % (realigned_bam_file))
                    
                    ### Run Script: realinga_around_indels
                    
                    py_script = os.path.join(_jobs_dir, "realing_around_indels.py")
                    args = [sys.executable, py_script, 
                            "--config_file", config_file,
                            "--jname",jname,
                            "--lane_file", lane_file,
                            "--lane_root_dir",configrun.lane_dir,
                            "--intervals",intervals_to_realign,
                            "--ncores",str(SINGLE_CORE)]
                     
                    logging.info("Realigning Around Known indels for sample %s" % (jname))
                    
                    print jname, args, SINGLE_CORE, thlane.gatk_dir
                    jobidmd = jobrunfunc(jname, args, SINGLE_CORE, cwd=thlane.gatk_dir, 
                                     walltime=WT_LONG,
                                     pmem=extra_mem*2,
                                     stdout="realignment_job.log", 
                                     deps=prev_deps, 
                                     email_addresses=my_email)
                    
                    
                    dedup_bam = thlane.markdup_bam_file
                    
                    
                    
                    '''
                    command = gra.realign_bam_only_knownSites(ref_genome, indexed_bam_file, realigned_bam_file,
                                                                   intervals_to_realign, indeldb_file, 
                                                                   extra_mem*2, SINGLE_CORE, temp_dir,path_to_gatk)
                    
                    
                    jobidra = jobrunfunc(jname+'ra', command, SINGLE_CORE, cwd=None, walltime=WT_LONG, pmem=extra_mem*2, 
                                      deps=prev_deps, stdout=None, email_addresses=my_email) #depends.extends(jobidks)            
                    
                    
                    # Fix Mate information in the Realigned Bam file. It syncronizes the
                    # information of mate pairs
                    matefixed_bam = thlane.matefixed_bam_file
                    command = mypicard.fixmateInformation(realigned_bam_file, matefixed_bam, 
                                                                         extra_mem, path_to_picard, temp_dir)
                    
                    jobidfm = jobrunfunc(jname+'fm', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem, 
                                      deps=jobidra, stdout=None, email_addresses=my_email)
                    
                    
                    # Mark duplicates in the Realigned Bam file. It only mark the duplicates
                    dedup_bam = thlane.markdup_bam_file
                    command = mypicard.markDuplicates(matefixed_bam, dedup_bam, extra_mem, path_to_picard)
                    jobidmd = jobrunfunc(jname+'md', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem, 
                                      deps=jobidfm, stdout=None, email_addresses=my_email)            
                                    
                    # Note: 3/2/11: I can package the last 3 commands into one script that runs in 
                    #               a single core once it was provided 
                    # Package 1 ends:
                    '''
                
                if up_to_date(recal_bam_file,recal_bam_file) and up_to_date_job(recal_bam_file):
                    logging.info("[SKIPPED] GATK Base quality recalibration BAM file %s. File is up to date" % (recal_bam_file))
                    jobidqc=[]
                    jobidqc.extend(jobidmd)
                else:    
                    logging.info("Starting GATK Base quality recalibration of BAM file %s" % (dedup_bam))
                    # GET MORE CORES
                    jname='qrecal'+'_'+thlane.lib_id
                    py_script = os.path.join(_jobs_dir, "base_quality_recalibration.py")
                    args = [sys.executable, py_script, 
                            "--config_file", config_file,
                            "--jname",jname,
                            "--lane_file", lane_file,
                            "--lane_root_dir",configrun.lane_dir,
                            "--bamfile",dedup_bam,
                            "--ncores",str(NODE_CORES2),
                            "--core_mem",str(EXTRA_MEM2)]
                     
                    
                    print jname, args, NODE_CORES2, thlane.gatk_dir
                    jobidqc = jobrunfunc(jname, args, NODE_CORES2, cwd=thlane.gatk_dir, 
                                     walltime=WT_LONG,
                                     pmem=MEM_PER_CORE,
                                     stdout="baseq_recalibration_job.log", 
                                     deps=jobidmd, 
                                     email_addresses=my_email)

                
                ##########            
                # Create a new pbs script for running this job
                # This function launch several qsub jobs for doing the quality recalibration
                # of the bases.
                
                '''                
                jobidqc, recal_bam_file = gqc.main_baseQ_recalibration(ref_genome, snpdb_file, 
                                                                   dedup_bam, thlane, NODE_MEM, NODE_CORES,
                                                                   configrun, thlane.recal_anal_dir, 
                                                                   jobidmd, jname+'qc', my_email, jobrunfunc)
                
                '''
                # new dictionary with the realigned and 
                # recalibrated bam files paths for each lane that belongs to sample
                new_sample_lanes_dict[sp_name].append(recal_bam_file)
                
                # Remove temporary files at the lane level.    
                jname='rm_temp_'+thlane.lib_id
                command = remove_temp_files_lane(thlane)
                jobidrm = jobrunfunc(jname, command, SINGLE_CORE, cwd=thlane.gatk_dir, walltime=WT_SHORT, pmem=MEM_PER_CORE, 
                                     deps=jobidqc, stdout='remove_tmpfiles.log', email_addresses=my_email)            

                jname='rra'
            
            # This should become another independent script to merge lanes at the sample level
            # If a particular sample has only one lane, skip all sample merging process.
            if len(sp.lanes) == 1:
                print "Skiping the realigining sample steps"
                sorted_mdedup_bam = sp.sorted_mmarkdup_bam
                cmd_bam = create_symbolic_link(recal_bam_file, sorted_mdedup_bam)                
                # Create symbolic links to the index files
                recal_bai_file = recal_bam_file.replace('.bam','.bai')
                sorted_mdedup_bai = sorted_mdedup_bam.replace('.bam', '.bai')
                cmd_bai = create_symbolic_link(recal_bai_file, sorted_mdedup_bai)
                cmd = ",".join([cmd_bam,cmd_bai]).replace(',','\n')
                jobidln = jobrunfunc(jname+'_ln', cmd, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=MEM_PER_CORE, 
                                     deps=jobidqc, stdout=None, email_addresses=my_email)                            
                
                real_deps.extend([jobidln])
                
                continue
            
                    
            
                        
            jname='rra'+'_'+sp_name
            recalibrated_lanes = new_sample_lanes_dict[sp_name]
            #out_dir outdir for the analysis
            sample_recal_merged_bam = sp.merged_recalibrated_bam        
            
            # Package 2 Starts:
            # Merge realigned, recalibrated lanes that belong to sample sp
            #command = mysam.merge_bam_files(list(recalibrated_lanes), sample_recal_merged_bam, path_to_sam)        
            command = mypicard.picard_mergebam(list(recalibrated_lanes), sample_recal_merged_bam, MEM_PER_CORE, path_to_picard)
            
            jobimd = jobrunfunc(jname+'mergebam', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=None, 
                                  deps=jobidqc, stdout=None, email_addresses=my_email)            
    
            # Mark duplicates in the merged Bam file\n
            merged_dedup_bam=sp.merged_markdup_file
            command = mypicard.markDuplicates(sample_recal_merged_bam, merged_dedup_bam, 
                                                                MEM_PER_CORE, path_to_picard)
            
            jobidmmd = jobrunfunc(jname+'mdup', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=None, 
                                  deps=jobimd, stdout=None, email_addresses=my_email)            
            
            # Finally sort and index the merged file using picard
            sorted_mdedup_bam = sp.sorted_mmarkdup_bam
            command  = mypicard.sortIndexSam(merged_dedup_bam, sorted_mdedup_bam, MEM_PER_CORE, path_to_picard)
            jobidps = jobrunfunc(jname+'ps', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=None, 
                                  deps=jobidmmd, stdout=None, email_addresses=my_email)
            
            merged_sample_files_dict[sp_name]=sorted_mdedup_bam
            real_deps.extend([jobidps])
            
            # Remove temporary files at the sample level
            command = remove_temp_files_lane(thlane)
            jobidrms = jobrunfunc(jname+'_rm_temp', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=MEM_PER_CORE, 
                                 deps=jobidps, stdout=None, email_addresses=my_email)            

            # Package 2 Ends:
            
            
    return  real_deps


def fast_lane_level_processing2(analysis, configrun, num_processors, jobrunfunc):
    '''
    Variant of fast_lane_level_processing that submits every pipeline step
    (realign around known indels, fix mates, mark duplicates, base quality
    recalibration, per-sample merge/dedup/sort) as individual cluster jobs
    instead of using the packaged per-lane job scripts.

    Input: a sample lane dictionary with key:sample, values: lanes that belong to that sample
    By default this realignment method uses only known indel Sites to realign.
    The known indel sites are provided in the indeldb_file.

    Returns:
        (jobidps, merged_sample_files_dict) where jobidps is the job id of
        the LAST sample's sort/index job and merged_sample_files_dict maps
        sample name -> sorted, duplicate-marked, merged BAM path.
    '''
    
    #### Header Run parameters
    
    # Tool paths, genome resources and run settings from the pipeline config.
    extra_mem, num_cores = configrun.gatk_use_mem, configrun.gatk_num_cores
    path_to_gatk, path_to_picard, path_to_sam = configrun.gatk_path, configrun.picard_path, \
                                                configrun.samtools_path
    genomes = configrun.genomes['human']
    ref_genome, snpdb_file, indeldb_file = genomes.gatk_ref_genome, genomes.snpdb, \
                                         genomes.indeldb        
    recal_analysis_outputdir=configrun.gatk_recal_anal_dir
    temp_dir =  configrun.gatk_temp_dir
    my_email=configrun.email_addresses
    
    # cluster parameters        
    # Lists
    # sample name -> deque of recalibrated per-lane BAM paths
    new_sample_lanes_dict=defaultdict(deque)
    # sample name -> final sorted, duplicate-marked, merged BAM path
    merged_sample_files_dict=defaultdict()
    
    # NOTE(review): First is hard-coded to False, so the interval-creation
    # branch below never runs in this variant; jobidks stays None and the
    # realignment jobs are submitted with no extra dependency.
    First=False#True
    jobidks=None
    
    #Call a first pass of indels in order to create a cohort indels set and 
    # use the set in known sites
    
    for sp in analysis.samples:
            # Cluster parameters
            jname='rra'
            sp_name=sp.name
            check_create_dir(sp.output_dir)
            
            intervals_to_realign = analysis.known_realignment_sites
            
            if First:
                # Create realignment intervals at known indel positions\n
                # gatk does not support multi-core here yet
                this_analysis_dir, dir_name = analysis.analysis_dir,analysis.name
                check_create_dir(this_analysis_dir, dir_name)
                
                command = gra.intvTorealign_only_knownSites(ref_genome, extra_mem, SINGLE_CORE, 
                                                                     intervals_to_realign, path_to_gatk, 
                                                                     indeldb_file)
                
                jobidks = jobrunfunc(jname+'ks', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem, 
                                  deps=None, stdout=None, email_addresses=my_email)            
                First=False
                
                
            for i,thlane in enumerate(sp.lanes):
                
                indexed_bam_file = thlane.align_bam_file
                # Build a short job name from pieces of the lane name;
                # assumes thlane.name has at least three '_'-separated
                # fields -- TODO confirm against the lane naming scheme.
                lname = thlane.name.split('_')
                jname=jname+lname[1][:5]+'_'+lname[2] #str(i)
                
                check_create_dir(thlane.gatk_dir)
                # Realign Bam files at indel sites. Usually using indel knownSites db
                # gatk does not support multi-core here yet
                # This step could include full realignment but it would time intensive
                 
                realigned_bam_file = thlane.realigned_bam_file

                command = gra.realign_bam_only_knownSites(ref_genome, indexed_bam_file, realigned_bam_file,
                                                               intervals_to_realign, indeldb_file, 
                                                               extra_mem*2, SINGLE_CORE, temp_dir,path_to_gatk)
                
                
                jobidra = jobrunfunc(jname+'ra', command, SINGLE_CORE, cwd=None, walltime=WT_LONG, pmem=extra_mem*2, 
                                  deps=jobidks, stdout=None, email_addresses=my_email)            
                 
                
                # Fix Mate information in the Realigned Bam file. It syncronizes the
                # information of mate pairs
                matefixed_bam = thlane.matefixed_bam_file
                command = mypicard.fixmateInformation(realigned_bam_file, matefixed_bam, 
                                                                     extra_mem, path_to_picard, temp_dir)
                
                jobidfm = jobrunfunc(jname+'fm', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem, 
                                  deps=jobidra, stdout=None, email_addresses=my_email)            
               
                # Mark duplicates in the Realigned Bam file. It only mark the duplicates
                dedup_bam = thlane.markdup_bam_file
                command = mypicard.markDuplicates(matefixed_bam, dedup_bam, extra_mem, path_to_picard)
                jobidmd = jobrunfunc(jname+'md', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=extra_mem, 
                                  deps=jobidfm, stdout=None, email_addresses=my_email)            
                                
                
                ##########            
                # Create a new pbs script for running this job
                # This function launch several qsub jobs for doing the quality recalibration
                # of the bases.
                jobidqc, recal_bam_file = gqc.main_baseQ_recalibration(ref_genome, snpdb_file, 
                                                                   dedup_bam, thlane, NODE_MEM, NODE_CORES,
                                                                   configrun, thlane.recal_anal_dir, 
                                                                   jobidmd, jname+'qc', my_email, jobrunfunc)
                
                # new dictionary with the realigned and 
                # recalibrated bam files paths for each lane that belongs to sample
                new_sample_lanes_dict[sp_name].append(recal_bam_file)
                
                jname='rra'
            
            
            jname='rra'+sp_name
            recalibrated_lanes = new_sample_lanes_dict[sp_name]
            #out_dir outdir for the analysis
            sample_recal_merged_bam = sp.merged_recalibrated_bam        
            # Merge realigned, recalibrated lanes that belong to sample sp
            #command = mysam.merge_bam_files(list(recalibrated_lanes), sample_recal_merged_bam, path_to_sam)        
            command = mypicard.picard_mergebam(list(recalibrated_lanes), sample_recal_merged_bam, MEM_PER_CORE, path_to_picard)
            
            jobimd = jobrunfunc(jname+'mb', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=None, 
                                  deps=jobidqc, stdout=None, email_addresses=my_email)            
    
            # Mark duplicates in the merged Bam file\n
            merged_dedup_bam=sp.merged_markdup_file
            command = mypicard.markDuplicates(sample_recal_merged_bam, merged_dedup_bam, 
                                                                MEM_PER_CORE, path_to_picard)
            
            jobidmmd = jobrunfunc(jname+'mmd', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=None, 
                                  deps=jobimd, stdout=None, email_addresses=my_email)            
            
            # Finally sort and index the merged file using picard
            sorted_mdedup_bam = sp.sorted_mmarkdup_bam
            command  = mypicard.sortIndexSam(merged_dedup_bam, sorted_mdedup_bam, MEM_PER_CORE, path_to_picard)
            jobidps = jobrunfunc(jname+'ps', command, SINGLE_CORE, cwd=None, walltime=WT_SHORT, pmem=None, 
                                  deps=jobidmmd, stdout=None, email_addresses=my_email)
            
            merged_sample_files_dict[sp_name]=sorted_mdedup_bam
            
            
    # NOTE(review): jobidps is only the last sample's sort/index job id, and
    # is unbound (NameError) when analysis.samples is empty.
    return  jobidps, merged_sample_files_dict



def fast_sample_realignment(sample_lanes_dict, gatk_run_dict):
    '''
    Planned lane-then-sample realignment workflow (not yet implemented).

    Intended flow:
        lane level:
            realigned.bam <- realign(lane.bam) [at only known sites]
            dedup.bam     <- MarkDuplicate(realigned.bam)
            recal.bam     <- recal(dedup.bam)
        sample level:
            recals.bam    <- merged lane-level recal.bam's for sample
            dedup.bam     <- MarkDuplicates(recals.bam)
            realigned.bam <- realign(dedup.bam) [with known sites if possible]

    Currently a stub: calling it performs no work and returns None.
    '''
    return None


def sample_level_processing(sample_lanes_dict, gatk_run_dict):
    '''
    Planned sample-level processing workflow (not yet implemented).

    Intended flow, for each sample:
        lanes.bam     <- merged lane.bam's for sample
        dedup.bam     <- MarkDuplicates(lanes.bam)
        realigned.bam <- realign(dedup.bam) [with known sites if possible]
        recal.bam     <- recal(realigned.bam)

    Currently a stub: calling it performs no work and returns None.
    '''
    return None



# NOTE(review): the triple-quoted string below is commented-out legacy code
# (an earlier run_analysis/realignment_job driver plus an argparse entry
# point).  It is a bare string expression, never executed, and is kept only
# for reference; consider deleting it.
'''
def run_analysis(analysis_file, config_file, num_processors, runfunc):
    #
    # Read configuration
    #
    config = ExomePipelineConfig()
    config.from_xml(config_file)
    analysis = ExomeAnalysisConfig()
    analysis.from_xml(analysis_file, config.output_dir)
    # Main loop for all samples
    for sample in analysis.samples:
        #
        # Process lanes
        #
        lane_deps = []
        for lane in sample.lanes:
            logging.info("Processing lane %s" % (lane.name))
            # write lane configuration to a separate XML file
            xmlstring = lane.to_xml()
            lane_file = os.path.join(sample.output_dir, lane.name + ".xml")
            f = open(lane_file, "w")
            print >>f, xmlstring
            f.close()
            # run lane job
            lane_deps.extend(run_lane.run_job(lane_file, config_file, num_processors, runfunc))
        #
        # Process samples  
        #
        # TODO: add sample processing code here
        pass



def realignment_job(job_file, config_file, num_processors, runfunc, deps=None):
    #TODO: Remember to leave three options for the realignment.
       
    config = ExomePipelineConfig()
    config.from_xml(config_file)
    job  = ExomeJobConfig()
    job.from_cfg(job_file, config.output_dir)
    
    if not os.path.exists(job.output_dir):
        logging.error("%s: output directory %s does not exist" % (job.name, job.output_dir))
        return JOB_ERROR
    # create alignment directory
    if not os.path.exists(job.align_dir):
        logging.info("%s: created alignment directory %s" % (job.name, job.align_dir))
        os.makedirs(job.align_dir)
        
    # run job
    #realignment_method = 
    # run job
    job_id, bam_file = main_bwa_alignment(job, config, num_processors, runfunc, deps)
    return job_id, bam_file
    
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    parser = argparse.ArgumentParser()
    parser.add_argument("--local", dest="local", action="store_true", default=False)
    parser.add_argument("--cluster", dest="cluster", action="store_true", default=False)
    parser.add_argument("-p", "--processes", type=int, dest="num_processors", default=1)
    parser.add_argument("job_file")
    parser.add_argument("config_file")
    options = parser.parse_args()
    if not (options.local ^ options.cluster):
        parser.error("Must set either --local or --cluster to run job")
    if options.local:
        jobrunfunc = run_local
        runmode = "--cluster"
    elif options.cluster:
        jobrunfunc = qsub
        runmode = "--local"
        
    run_job(options.job_file, options.config_file, options.num_processors, jobrunfunc, runmode)
    
    fast_lane_level_processing(options.job_file, options.config_file, sample_lanes_dict, gatk_run_dict)
'''
    
if __name__ == '__main__':
    # Command-line entry point: parse options, load the pipeline and
    # analysis configuration, and submit the lane-level processing jobs.
    optionparser = OptionParser("usage: %prog [options] ")
    optionparser.add_option("-r", "--config_file", dest="config_file",
                            help="file with run configuration")
    optionparser.add_option("-a", "--analysis_file", dest="analysis_file",
                            help="file with experiment configuration")
    optionparser.add_option("--local", dest="local", action="store_true", default=False)
    optionparser.add_option("--cluster", dest="cluster", action="store_true", default=False)
    optionparser.add_option("-p", "--processes", type=int, dest="num_processors", default=1)
    optionparser.add_option("-f", dest="First", action="store_false", default=True)

    (options, args) = optionparser.parse_args()

    # Validate the command line BEFORE loading any configuration so that a
    # missing option produces a clear usage error instead of a traceback
    # from from_xml(None).
    if options.config_file is None or options.analysis_file is None:
        optionparser.error("Both --config_file and --analysis_file are required")
    if not (options.local ^ options.cluster):
        optionparser.error("Must set either --local or --cluster to run job")
    if options.local:
        jobrunfunc = run_local
    else:
        jobrunfunc = qsub_cac

    # Load the pipeline and analysis configuration.
    config = ExomePipelineConfig()
    config.from_xml(options.config_file)
    analysis = ExomeAnalysisConfig()
    analysis.from_xml(options.analysis_file, config.output_dir)

    # TODO: check job dependencies and that the files they need exist.
    depends = None
    #depends=['5217086.nyx.engin.umich.edu']
    fast_lane_level_processing(analysis, config, options.num_processors,
                               jobrunfunc, depends, options.config_file, options.First)