'''
Created on Nov 10, 2009

@author: mkiyer
'''

from veggie.sequence.io import get_seq_length
from veggie.app.alignment.novoalign import Novoalign

from sqlalchemy import create_engine, MetaData, Table, Column, \
    String, Integer, Numeric, Date
from sqlalchemy.sql import distinct, select, outerjoin, exists, and_, or_, not_

import operator
import glob
import datetime
import os
import sys
import logging
import collections
import re

# sequence file formats where '%d' will be substituted with the lane id 
# and '?' is a wildcard character to match any single character
# (the '?' position holds the mate number for paired-end runs)
_paired_seq_glob_str = 's_%d_?_sequence.txt'
_single_seq_glob_str = 's_%d_sequence.txt'

# dict that tells us whether an analysis should have a
# paired end naming scheme or a single read naming scheme
# (keyed by the 'analysis' column of the pre_analysis_summary table)
_is_paired_end_filename = {'default': False,
                           'eland': False,
                           'eland_rna': False,
                           'eland_extended': False,
                           'eland_pair': True,
                           'eland_tag': False}

# map the software versions to the appropriate bowtie
# quality scores parameter to use
# (keyed by the 'sw_version' column; GAPipeline >= 1.3 produces
# Illumina 1.3+ quality encoding, older pipelines produce Solexa encoding)
_bowtie_quals = {'/Pipeline/GAPipeline-0.3.0.1/Goat/../Gerald': 'solexa-quals', 
                 '/Pipeline/SolexaPipeline-0.2.2.6/Goat/../Gerald': 'solexa-quals',
                 '/Pipeline/GAPipeline-1.1rc1/bin': 'solexa-quals',
                 '/Pipeline/SolexaPipeline-0.2.2.6/Gerald': 'solexa-quals',
                 '/Pipeline/GAPipeline-0.3.0/Goat/../Gerald': 'solexa-quals',
                 '/Pipeline/GAPipeline-0.3.0.1/Gerald': 'solexa-quals',
                 '/Pipeline/GAPipeline-0.3.0/Gerald': 'solexa-quals',
                 '/Pipeline/GAPipeline-1.4.0/bin': 'solexa1.3-quals',
                 '/Pipeline/GAPipeline-1.1rc1p4/bin': 'solexa-quals',
                 '/Pipeline/GAPipeline-1.3.2/bin': 'solexa1.3-quals'
                 }

# some of the data paths in sample db are wrong and need to be fixed
# for now, get around this by hacking the correct paths in the dictionary
# below and remove this when the database is corrected
# HACK: maps wrong db path -> correct on-disk path
hacked_data_paths = {
    '/archive04/090115_PATHBIO-SOLEXA2_30JD2AAXX_B/Data/IPAR_1.01/Bustard1.9.6_24-01-2009_root/GERALD_24-01-2009_root': '/archive04/090115_PATHBIO-SOLEXA2_30JD2AAXX/Data/IPAR_1.01/Bustard1.9.6_24-01-2009_root/GERALD_24-01-2009_root',
}
# extra mount points searched in addition to the /archive* pattern
hacked_archives = ['/data2']


def find_pe_sequence_files(flowcell, lane, sequence_path,
                           prefix_fmt="%s_%s",
                           seq_suffix="best_sequence.txt",
                           bowtie_quals_suffix='best_bowtiequals.txt'):
    '''
    Locate the mate1/mate2 fastq files for a paired-end flowcell lane.

    Searches sequence_path for exactly one mate1 and one mate2 sequence
    file matching the analysis id, reads the bowtie quality-score
    parameter from each companion quals file, and determines the read
    length with get_seq_length.

    Returns a (fastq_files, quals_param, read_length) tuple, or None on
    any error (missing/ambiguous files, missing quals file, inconsistent
    quals params or read lengths across mates).
    '''
    analysis_id = prefix_fmt % (flowcell, lane)
    # build glob patterns used to search for the two mate files
    mate1_search_string = ''.join([analysis_id, '*_1_', seq_suffix])
    mate2_search_string = ''.join([analysis_id, '*_2_', seq_suffix])
    # glob to get the sequence file matches
    mate1_fastq_files = glob.glob(os.path.join(sequence_path, mate1_search_string))
    mate2_fastq_files = glob.glob(os.path.join(sequence_path, mate2_search_string))
    # fix: require exactly one match for EACH mate.  The original used
    # 'and', which let a lane with one empty mate list fall through and
    # crash on the [0] indexing below.
    if len(mate1_fastq_files) != 1 or len(mate2_fastq_files) != 1:
        logging.error('Error finding paired-end sequences for flowcell %s lane %s' % (flowcell, lane))
        return None
    fastq_files = [mate1_fastq_files[0], mate2_fastq_files[0]]
    # regexp capturing the filename prefix preceding seq_suffix, used to
    # build the matching bowtie quals filename.  The suffix is escaped so
    # the '.' in e.g. 'best_sequence.txt' is matched literally.
    prefix_re = re.compile(r'(.*)' + re.escape(seq_suffix))
    quals_params = set()
    read_lengths = set()
    for fastq_file in fastq_files:
        m = prefix_re.match(os.path.basename(fastq_file))
        if not m:
            logging.critical("regexp match failed on file %s" % fastq_file)
            continue
        # get the quals param from the bowtie file
        fastq_prefix = m.group(1)
        quals_file = os.path.join(sequence_path, fastq_prefix + bowtie_quals_suffix)
        if not os.path.exists(quals_file):
            logging.error("quality score file %s not found... skipping %s" %
                          (quals_file, fastq_file))
            return None
        # the first line of the quals file holds the bowtie quals parameter
        with open(quals_file) as quals_fhd:
            quals_params.add(quals_fhd.readline().strip())
        # get the read length
        read_lengths.add(get_seq_length(fastq_file))

    # check to see that read lengths and quality scoring system is the same
    # for all the fastq files
    if len(quals_params) > 1:
        logging.error('quality scores not consistent across samples: %s' % quals_params)
        return None
    if len(read_lengths) > 1:
        logging.error('read lengths not consistent across samples: %s' % read_lengths)
        return None
    # return tuple with sequence files, quals param, and read length
    return fastq_files, quals_params.pop(), read_lengths.pop()

def find_fastq_files(data_path, file_pattern):
    '''
    Glob for sequence files matching file_pattern under data_path.

    Falls back first to a Temp subdirectory, then to the same relative
    path under every /archive* mount (plus the hacked_archives list),
    because some data paths recorded in sample db are stale.  Returns
    the (possibly empty) list of matching file paths.
    '''
    # force the path to be absolute
    if not data_path.startswith(os.path.sep):
        data_path = os.path.join(os.path.sep, data_path)

    # some of the data paths in sample db are flat out wrong.  a dict
    # of corrections is maintained and used as a hack to get to the
    # correct data paths in the interim while sample db is fixed
    if data_path in hacked_data_paths:
        logging.warning("Using hacked data path %s instead of %s" % (hacked_data_paths[data_path], data_path))
        data_path = hacked_data_paths[data_path]

    # first attempt: the recorded data path itself
    fastq_files = glob.glob(os.path.join(data_path, file_pattern))

    # second attempt: some sequences are located in a /Temp dir
    if not fastq_files:
        fastq_files = glob.glob(os.path.join(data_path, 'Temp', file_pattern))

    # last resort: try the same relative path on every archive mount
    if not fastq_files:
        archives = glob.glob(os.path.join(os.path.sep, "archive*"))
        # add archives that are not part of the typical name
        archives.extend(hacked_archives)
        path_wo_archive = os.path.sep.join(data_path.split(os.path.sep)[2:])

        for archive in archives:
            base = os.path.join(archive, path_wo_archive)
            # try the bare path, then the /Temp variant
            for candidate in (base, os.path.join(base, 'Temp')):
                fastq_files = glob.glob(os.path.join(candidate, file_pattern))
                if fastq_files:
                    logging.error("Expected data_path %s, but found at %s" %
                                  (data_path, candidate))
                    break
            if fastq_files:
                break
    return fastq_files


class Library(object):
    '''
    A single sequencing library (one flowcell/lane analysis result).

    Instances with the same id are compared by sequencing quality via
    __cmp__ so that sorting a list of candidate analyses surfaces the
    best one.
    '''

    # valid values for the read_type attribute
    read_types = set(['single_read', 'paired_end', 'paired_end_ditags'])

    def __init__(self):
        self.id = None
        self.fastq_files = None         # list of fastq file paths
        self.fastq_format = None        # bowtie quality-score parameter
        self.read_type = None           # one of Library.read_types
        self.read_length_variable = False
        self.read_length = 0
        self.cluster_raw = 0            # raw cluster count for the lane
        self.cluster_pf = 0             # clusters passing filter
        self.dna_conc = None
        self.rin = None
        self.protocol = None
        self.platform_name = None
        self.platform_version = None
        self.run_date = None

    def __cmp__(self, other):
        '''
        currently used to sort libraries by quality: clusters passing
        filter first, then raw clusters, then read length.  Only analyses
        of the SAME library may be compared, hence the id assertion.
        '''
        assert self.id == other.id
        # first sort by clusters that pass filter
        if (self.cluster_pf != 0) and (other.cluster_pf != 0):
            cluster_pf = self.cluster_pf - other.cluster_pf
            if cluster_pf != 0:
                return cluster_pf
        if (self.cluster_raw != 0) and (other.cluster_raw != 0):
            cluster_raw = self.cluster_raw - other.cluster_raw
            if cluster_raw != 0:
                return cluster_raw
        if (self.read_length != 0) and (other.read_length != 0):
            read_length = self.read_length - other.read_length
            if read_length != 0:
                return read_length
        # fix: the original fell off the end and implicitly returned None,
        # which is not a valid __cmp__ result; tied on all criteria -> 0
        return 0

    def __str__(self):
        return ('<Library (id=%s fastq_files=%s fastq_format=%s read_type=%s'
                ' read_length_variable=%s read_length=%d cluster_raw=%d'
                ' cluster_pf=%d)>' % 
                (self.id, self.fastq_files, self.fastq_format, 
                 self.read_type, self.read_length_variable, self.read_length,
                 self.cluster_raw, self.cluster_pf)) 

class ChIPSeqLibrary(Library):
    '''Library subclass that also records the ChIP antibody used.'''
    def __init__(self):
        Library.__init__(self)
        # id of the ChIPAntibody record associated with this library
        self.chip_ab_id = None
        
class ChIPAntibody(object):
    '''Record describing a ChIP antibody product.'''
    def __init__(self):
        # all fields start empty and are populated by the caller
        for field in ('id', 'vendor', 'product_no', 'monoclonal'):
            setattr(self, field, None)

class SampleDB(object):
    '''
    class for connecting to and retrieving data from sampleDB tables
    '''
    def __init__(self, echo=False):
        '''
        creates an interface to sampleDB

        Opens a read-only connection to the lab Oracle server and reflects
        the 'solexa_dp' schema so tables can be looked up by name later.

        echo -- when True, SQLAlchemy logs every SQL statement it emits
        '''
        user = 'solexa_dp_ro'
        password = 'readonly'
        host = 'pathbio-db1'
        port = 1521
        service_name = 'O9DB1'
        # oracle
        oracle_db = create_engine('oracle://' + user + ':' + password + '@' + \
                                  host + ':' + str(port) + '/' + service_name,
                                  echo=echo)        
        self.schema='solexa_dp'
        self.engine = oracle_db
        self.meta = MetaData()
        self.meta.bind = self.engine
        # reflect() pulls table/column definitions from the live database
        self.meta.reflect(bind=oracle_db, schema=self.schema)

    def _get_table(self, tablename):
        '''return the reflected Table object for the schema-qualified name'''
        return self.meta.tables['%s.%s' % (self.schema, tablename)]
    
    def _find_fastq_from_query_results(self, res):
        '''
        locate the fastq files for one pre_analysis_summary result row

        Picks the paired-end or single-read filename pattern from the
        row's analysis type, then globs under its data_path.

        Raises KeyError if the analysis type is not recognized.
        '''
        # determine the file pattern of the analysis based on
        # single-read vs. paired-end
        if res.analysis not in _is_paired_end_filename:
            logging.error('analysis field not recognized in sample %s: %s' % 
                          (res.solexa_sample_id, res.analysis))
            raise KeyError(res.analysis)
        elif _is_paired_end_filename[res.analysis]:
            glob_str = _paired_seq_glob_str % res.lane_id
        else:
            glob_str = _single_seq_glob_str % res.lane_id 
        # search for files
        return find_fastq_files(res.data_path, glob_str)

    def _get_mctp_id(self, flowcell, lane):
        '''
        MCTP samples are identified flowcell/lane
        '''
        return '%s_%d' % (flowcell, lane)

    def get_mctp_libraries(self, flowcell, lane):
        '''
        generates Library objects that match the specified flowcell and lane

        Rows with no data_path are excluded by the query; rows whose
        fastq files cannot be located or whose sw_version has no known
        quality-score encoding are logged and skipped.
        '''
        table = self._get_table('pre_analysis_summary')       
        stmt = select([table],
                      and_(table.c.flowcell == flowcell,
                           table.c.lane_id == lane,
                           table.c.data_path != None))
        for res in stmt.execute():
            # search for the fastq files
            fastq_files = self._find_fastq_from_query_results(res)
            if len(fastq_files) == 0:                    
                logging.error("Flowcell: %s lane: %d data_path: %s "
                              "could not be found on any of the /archive "
                              "directories.. skipping this result..." %
                              (res.flowcell, res.lane_id, res.data_path))
                continue
            # get the fastq quality score format
            if not res.sw_version in _bowtie_quals:
                logging.error("Flowcell: %s lane: %d does not have a valid"
                              " sw_version field (%s could not be "
                              " recognized)... skipping this results..." %
                              (res.flowcell, res.lane_id, res.sw_version))
                continue
            fastq_format = _bowtie_quals[res.sw_version]
            # construct the Library object for this result
            library = Library()
            library.id = self._get_mctp_id(flowcell, lane)            
            library.fastq_files = fastq_files
            library.fastq_format = fastq_format
            library.read_length_variable = False
            # read length is taken from the first fastq file of the pair
            library.read_length = get_seq_length(fastq_files[0])
            if len(fastq_files) == 1:
                library.read_type = 'single_read'
            elif len(fastq_files) == 2:
                library.read_type = 'paired_end'
            library.cluster_raw = res.cluster_raw
            library.cluster_pf = res.cluster_pf
            library.run_date = res.run_date
            # TODO: these fields are not yet populated from the database
            library.dna_conc = None
            library.rin = None
            library.protocol = None
            library.platform_name = None
            library.platform_version = None
            yield library

# module-level singleton connection to sampleDB, created on first use
__sdb = None

def get_sampledb():
    '''return the shared SampleDB instance, connecting lazily on first call'''
    global __sdb
    if __sdb is not None:
        return __sdb
    __sdb = SampleDB()
    return __sdb
            
def get_best_analysis_for_library(flowcell, lane):
    '''
    return the single best Library for the given flowcell and lane

    Queries sampleDB for all analyses of the lane and, when more than
    one matches, picks the best one using the quality heuristics
    encoded in Library.__cmp__ (clusters passing filter, then raw
    clusters, then read length).

    Raises IndexError if no matching analysis exists.
    '''
    sdb = get_sampledb()
    libraries = list(sdb.get_mctp_libraries(flowcell, lane))
    for library in libraries:
        # fix: was a bare Python-2 'print' to stdout; route through logging
        logging.debug(str(library))
    if len(libraries) > 1:
        logging.warning('flowcell %s lane %d has >1 matching analysis' 
                        ' choosing best one based on heuristics...' %
                        (flowcell, lane))
    # sort best-first using Library.__cmp__ and take the top hit
    return sorted(libraries, reverse=True)[0]
        
class Experiment(object):
    '''Pairs a treatment library with its control library under a name.'''
    def __init__(self, name, treatment, control):
        self.name = name
        # Library objects chosen by the caller
        self.treatment_library = treatment
        self.control_library = control

def get_chipseq_experiment(t_flowcell, t_lane,
                           c_flowcell, c_lane,
                           name=None):
    '''
    build a ChIP-seq Experiment from a treatment lane and a control lane

    Each lane is resolved to its best analysis.  When name is None, a
    default '<treat>_vs_<control>' name is generated.
    '''
    # fix: the original called a nonexistent function
    # get_library_with_best_analysis(), raising NameError at runtime
    t_library = get_best_analysis_for_library(t_flowcell, t_lane)
    c_library = get_best_analysis_for_library(c_flowcell, c_lane)
    if name is None:
        # fix: the control lane slot previously used t_lane by mistake
        name = '%s_%d_vs_%s_%d' % (t_flowcell, t_lane,
                                   c_flowcell, c_lane)
    return Experiment(name, t_library, c_library)


def run_single_read_novoalign(library, results_cwd):
    '''
    align a library with novoalign, writing SAM output into results_cwd

    Output goes to '<library.id>.sam' inside results_cwd via novoalign's
    stdout.  The library's fastq files and quality-score format are
    passed through to the Novoalign app controller.
    '''
    logging.debug("%s: preparing to run novoalign on files: %s quals: %s" % 
                  (library.id, library.fastq_files, library.fastq_format))    
    # alignment options: SAM output, discard reads with multiple
    # alignment locations ('-r None')
    novoalign_options = {'-o': 'SAM',
                         '-r': 'None'}

    output_file = os.path.join(results_cwd, library.id + '.sam')
    # fix: use a context manager so the SAM file handle is closed even
    # if Novoalign raises (the original leaked it on the error path)
    with open(output_file, 'w') as output_fhd:
        # create the app controller
        novoalign_app = Novoalign(options=novoalign_options, cwd=results_cwd,
                                  stdout=output_fhd)
        # run
        novoalign_app.run_default(library.fastq_files, 
                                  quals_format=library.fastq_format)

def run_alignment(libraries, results_path):
    '''
    run novoalign on each library, writing results under results_path

    The results directory is created if missing; an existing directory
    is reused as-is.
    '''
    if not os.path.exists(results_path):
        # make directory for results
        logging.debug('Creating alignment results path %s' % (results_path))
        os.makedirs(results_path)
    else:
        logging.debug("Alignment results path %s exists, skipping..." % (results_path))
    for lib in libraries:
        run_single_read_novoalign(lib, results_path)
    
def run_chipseq_pipeline(experiments, results_path, force=False):
    '''
    align every unique library referenced by the given experiments

    Treatment and control libraries are collected into a set so each
    library is aligned only once even when shared across experiments.
    '''
    unique_libs = set()
    for experiment in experiments:
        unique_libs.update((experiment.treatment_library,
                            experiment.control_library))
    run_alignment(unique_libs, results_path)


# hard-coded sample tables: each maps an experiment name to a list of
# (flowcell, lanes-tuple, analysis-format) triples
vcap_pol2 = {'VCaP_PolII_all': [('20E6WAAXX', ('6',), 'eland_result'),
                                 ('20E6WAAXX', ('7',), 'eland_result')]}
vcap_input = {'VCaP_Input': [('301YWAAXX', ('4_1', '4_2',), 'export')]}


lncap_ethl_h3k27 = {'LNCaP_ethl_H3K27': [('20FALAAXX', ('3',), 'export')]}
lncap_r1881_h3k27 = {'LNCaP_R1881_H3K27': [('20FALAAXX', ('4',), 'export')]}

vcap_ethl_h3k4 = {'VCaP_ethl_H3K4': [('205HYAAXX', ('2',), 'export')]}
vcap_r1881_h3k4 = {'VCaP_R1881_H3K4': [('205HYAAXX', ('6',), 'export'),
                                       ('20A0MAAXX', ('1',), 'export')]}

vcap_ethl_h3k27 = {'VCaP_ethl_H3K27': [('205HYAAXX', ('3',), 'export')]}
vcap_r1881_h3k27 = {'VCaP_R1881_H3K27': [('205HYAAXX', ('7',), 'export')]}

wa1831_h3k27 = {'WA18-31_H3K27_all': [('20BPVAAXX', ('3',), 'export'),
                                      ('20BPVAAXX', ('7',), 'export')]}

wa1831_igg = {'WA18-31_IgG': [('20LVAAAXX', ('1',), 'export')]}

wa1831_h3k4 = {'WA18-31_H3K4': [('20LVAAAXX', ('4',), 'export'),
                                ('20LVAAAXX', ('8',), 'export')]}

vcap_h3k27 = {'VCaP_H3K27_all': [('20E6WAAXX', ('3',), 'eland_result'),
                                 ('20E6WAAXX', ('4',), 'eland_result')]}
# fix: the lane entry for lane 1 was written ('1') -- a plain string,
# not a one-element tuple like every other entry; add the missing comma
vcap_h3k4 = {'VCaP_H3K4_all': [('20E6WAAXX', ('1',), 'eland_result'),
                               ('20E6WAAXX', ('2',), 'eland_result')]}

# more hard-coded sample tables in the same
# (flowcell, lanes-tuple, analysis-format) layout
vcap_h3k36 = {'VCaP_H3K36_all': [('20F66AAXX', ('2',), 'export'),
                                 ('20F0CAAXX', ('6',), 'export')]}

vcap_panh3 = {'VCaP_PanH3_all': [('20E46AAXX', ('6',), 'export'),
                                 ('20E6WAAXX', ('8',), 'eland_result')]}
vcap_aceh3 = {'VCaP_AceH3': [('20F0BAAXX', ('2',), 'export')]}
vcap_h3_ctrl = {'VCaP_h3_ctrl': [('20E46AAXX', ('6',), 'export'),
                                 ('20E6WAAXX', ('8',), 'eland_result'),
                                 ('20F0BAAXX', ('2',), 'export')]}


lncap_h3k4 = {'LNCaP_H3K4_all': [('207B8AAXX', ('8',), 'export'),
                                 ('20F0CAAXX', ('3',), 'export'),
                                 ('20F5GAAXX', ('7',), 'export')]}

lncap_h3k36 = {'LNCaP_H3K36_all': [('20F66AAXX', ('1',), 'export'),
                                   ('20F0CAAXX', ('4',), 'export')]}

lncap_panh3 = {'LNCaP_PanH3': [('20FFJAAXX', ('4',), 'export')]}
lncap_aceh3 = {'LNCaP_AceH3': [('20F0BAAXX', ('1',), 'export')]}
lncap_h3_ctrl = {'LNCaP_h3_ctrl': [('20FFJAAXX', ('4',), 'export'),
                                   ('20F0BAAXX', ('1',), 'export')]}

h3k36 = {'H3K36_all': [('20F66AAXX', ('2',), 'export'),
                       ('20F0CAAXX', ('6',), 'export'),
                       ('20F66AAXX', ('1',), 'export'),
                       ('20F0CAAXX', ('4',), 'export')]}

vcap_h3k36_1 = {'VCaP_H3K36_1': [('20F66AAXX', ('2',), 'export')]}
vcap_h3k36_2 = {'VCaP_H3K36_2': [('20F0CAAXX', ('6',), 'export')]}

lncap_h3k36_1 = {'LNCaP_H3K36_1': [('20F66AAXX', ('1',), 'export')]}
lncap_h3k36_2 = {'LNCaP_H3K36_2': [('20F0CAAXX', ('4',), 'export')]}

vcap_control = {'VCaP_control': [('301YWAAXX', ('4_1', '4_2',), 'export'),
                                 ('20E46AAXX', ('6',), 'export'),
                                 ('20E6WAAXX', ('8',), 'eland_result'),
                                 ('206KMAAXX', ('7',), 'export')]}

vcap_h3k4_samples = {'VCaP_H3K4_all': [('205HYAAXX', ('6',), 'export'),
                                       ('20A0MAAXX', ('1',), 'export')]}

h3k4_all = {'H3K4_all': [('205HYAAXX', ('6',), 'export'),
                         ('20A0MAAXX', ('1',), 'export'),
                         ('207B8AAXX', ('8',), 'export'),
                         ('20F0CAAXX', ('3',), 'export'),
                         ('20F5GAAXX', ('8',), 'export')]} 

if __name__ == '__main__':
    # usage: <prog> <flowcell> <lane>
    logging.basicConfig(level=logging.DEBUG)
    # fix: validate argv instead of crashing with a bare IndexError
    if len(sys.argv) < 3:
        sys.stderr.write('usage: %s <flowcell> <lane>\n' % sys.argv[0])
        sys.exit(1)
    flowcell = sys.argv[1]
    lane = int(sys.argv[2])
    # align the single best analysis for the lane into the current dir
    libraries = [get_best_analysis_for_library(flowcell, lane)]
    run_alignment(libraries, '.')
