'''
Created on Nov 18, 2009

@author: mkiyer
'''
from optparse import OptionParser
import subprocess
import logging
import os
import sys
import multiprocessing
import time

from phdb import PileupDB
import veggie.alignment.sam as samtools
from veggie.db.sample.samplegroup import parse_samplegroups_xml

def make_phdb_worker(analysis_info):
    """Adapter for multiprocessing.Pool: expand a single task tuple into
    a make_phdb call.

    'analysis_info' is a (analysis_id, samfile, tmp_path) tuple; the
    return value is the path of the phd file that was written.
    """
    return make_phdb(*analysis_info)

def make_phdb(analysis_id, insamfile, tmp_path, force=False):
    """Build a per-analysis pileup database (.phd) file from a SAM/BAM file.

    Parameters:
        analysis_id: identifier used for logging, as the group name inside
            the database, and to name the output file.
        insamfile: path to the input SAM/BAM alignment file.
        tmp_path: directory where the .phd file is written.
        force: when False (default), an existing .phd file is reused instead
            of being rebuilt; when True the file is always rewritten.

    Returns the path of the .phd file.
    """
    # deferred import keeps the genome tables off the module import path
    import veggie.genome.chrom as genome
    # per-analysis child logger so interleaved worker output is attributable
    logger = logging.getLogger('.'.join([__name__, analysis_id]))
    phdfile = os.path.join(tmp_path, analysis_id + '.phd')
    # honor 'force': the original accepted this flag but ignored it, always
    # rebuilding; now an existing output is reused unless force is set
    if not force and os.path.exists(phdfile):
        logger.debug("%s: %s: phd file exists, skipping" % (os.getpid(), analysis_id))
        return phdfile
    # insert into phd database file
    logger.debug("%s: %s: inserting into database" % (os.getpid(), analysis_id))
    phdb = PileupDB(phdfile, 'w')
    # restrict insertion to the hg18 reference chromosomes
    phdb.insert_sam(insamfile, group_name=analysis_id,
                    references=genome.HG18.chrom_sizes.keys())
    phdb.close()
    return phdfile

def merge_phdb_file(inphd, outphd):
    '''
    Copy one phd file into another using the 'ptrepack' utility; used to
    fold many per-analysis phd files into a single master database.
    Returns the ptrepack process exit status.
    '''
    # compression settings mirror what PileupDB itself writes with
    cmd = ['ptrepack', '-v',
           '--complib', PileupDB.default_complib,
           '--complevel', str(PileupDB.default_complevel),
           inphd + ':/', outphd + ':/']
    return subprocess.call(cmd)

def get_sample_alignments(sample_xmlfile, rnaseq_path, sdbi_path,
                          samfile_name="aligned_reads.bam"):
    """Collect one (library id, alignment file path) pair per job.

    Walks the sample groups described in 'sample_xmlfile', resolves each
    sample's best libraries through the sample database at 'sdbi_path',
    and keeps only those libraries whose alignment file exists under
    'rnaseq_path'.  Missing files are logged as warnings and skipped.
    """
    logger = logging.getLogger(__name__)
    # deferred import matches this module's lazy-import convention
    import veggie.db.sample as sdb
    sample_db = sdb.get_sampledb_instance(sdbi_path)
    alignments = []
    # parse the sample xml file and retrieve all the desired samples
    for group in parse_samplegroups_xml(sample_xmlfile):
        for sample_name in group.samples:
            # only the 'best' library for each sample is considered
            for lib in sample_db.get_libraries_by_sample_name(sample_name, best=True):
                samfile = os.path.join(rnaseq_path, lib.id, samfile_name)
                if os.path.exists(samfile):
                    alignments.append((lib.id, samfile))
                else:
                    logger.warning("SAM file %s does not exist, skipping this library" % samfile)
    return alignments


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG)

    optionparser = OptionParser("usage: %prog [options] [-s <samples.xml>] <rnaseq_path> <db.phd>")
    optionparser.add_option("-s", "--samples", dest="samples",
                            help="process the samples defined in the sample XML file")
    optionparser.add_option("-p", "--processors", dest="processors", type="int", default=2,
                            help="divide the job into n processes")
    optionparser.add_option("--tmp-path", dest="tmp_path", default=None,
                            help="path to store intermediate results")
    optionparser.add_option("--sampledb", dest="sample_db_file")
    (options, args) = optionparser.parse_args()
    # validate everything up front before doing any work
    if options.samples is None:
        optionparser.error("No samples specified")
    if len(args) != 2:
        # original indexed args[0]/args[1] unchecked and died with a raw
        # IndexError when the positional arguments were missing
        optionparser.error("Expected <rnaseq_path> and <db.phd> positional arguments")
    if options.processors < 1:
        optionparser.error("Must specify at least 1 processor with the -p flag")
    elif options.processors > multiprocessing.cpu_count():
        # clamp to the machine's actual core count
        logging.warning("Requested %d processors, but only %d available\n" % (options.processors,
                                                                              multiprocessing.cpu_count()))
        options.processors = multiprocessing.cpu_count()

    # positional arguments
    rnaseq_path = args[0]
    masterdb = args[1]
    # intermediate per-analysis phd files default to the master db's directory
    if options.tmp_path is None:
        tmp_path = os.path.abspath(os.path.dirname(masterdb))
    else:
        tmp_path = options.tmp_path
    # log the command-line arguments
    logging.info('rnaseq_path: %s', rnaseq_path)
    logging.info('db: %s', masterdb)
    logging.info('tmp path: %s', tmp_path)
    logging.info('samples: %s', options.samples)
    # read the samples file
    samfiles = get_sample_alignments(options.samples, rnaseq_path, options.sample_db_file)
    logging.debug('alignment files to add to database:')
    for saminfo in samfiles:
        logging.debug('\t%s', ' '.join(saminfo))
    # make directory for intermediate results
    if not os.path.exists(tmp_path):
        logging.debug('Creating tmp file path %s' % tmp_path)
        os.makedirs(tmp_path)
    # create the master database if it does not exist yet
    if not os.path.exists(masterdb):
        phdb = PileupDB(masterdb, 'w')
        phdb.close()
    # build one phd file per alignment in parallel, merging each into the
    # master database as soon as it completes
    tasks = [(library_id, samfile, tmp_path) for (library_id, samfile) in samfiles]
    pool = multiprocessing.Pool(options.processors)
    imap_unordered_it = pool.imap_unordered(make_phdb_worker, tasks)
    for inphd in imap_unordered_it:
        merge_phdb_file(inphd, masterdb)
    pool.close()
    pool.join()
