'''
Created on Feb 26, 2010

@author: mkiyer
'''
# built-in python imports
import logging
import collections

# globally installed packages
import ruffus
from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, \
    Numeric, Date
from sqlalchemy.sql import distinct, select, outerjoin, exists, and_, or_, not_
import sqlalchemy as sqla

# project imports

# local imports

class ChIPSeqDB(object):
    '''
    Interface for connecting to and retrieving data from a chipseq database.

    Table objects are obtained by schema reflection, either eagerly
    (``reflect=True`` / ``_reflect_all``) or lazily on first use
    (``_reflect_table``).  Queries use the bound-MetaData implicit
    execution style (``stmt.execute()``).
    '''
    # values stored in the class-type flag column (analysis_class_map.s_flag)
    CLASS_TYPE_EXPERIMENT = "Experiment"
    CLASS_TYPE_CONTROL = "Control"
    OUTPUT_TYPE_NORMALIZED = "normalized"
    OUTPUT_TYPE_RAW = "raw"
    
    # values stored in analysis_main.analysis_status
    ANALYSIS_STATUS_COMPLETE = "Complete"
    ANALYSIS_STATUS_FAILED = "Failed"
    ANALYSIS_STATUS_PENDING = "Pending"
    ANALYSIS_STATUS_RUNNING = "Running"
    ANALYSIS_STATUS_HOLD = "hold"
    
    # values stored in sample_pool_main.status_flag
    SAMPLE_POOL_STATUS_PENDING = 'pending'
    SAMPLE_POOL_STATUS_COMPLETE = 'complete'
    
    # table names
    TABLE_SAMPLE_POOL_MAIN = 'sample_pool_main'
    TABLE_ANALYSIS_MAIN = 'analysis_main'
    TABLE_ANALYSIS_PEAK_DETAIL = 'analysis_peak_detail'
    
    def __init__(self, connection_string, schema_name, echo=False, reflect=False):
        '''
        Create an interface to the chipseq db.

        connection_string: SQLAlchemy database URL
        schema_name: schema that owns the chipseq tables
        echo: if True, log all emitted SQL
        reflect: if True, reflect all tables immediately
        '''
        # connect to database; binding the MetaData enables the implicit
        # stmt.execute() style used throughout this class
        self.engine = create_engine(connection_string, echo=echo)
        self.schema = schema_name
        self.meta = MetaData()
        self.meta.bind = self.engine
        # table placeholders, filled in by _reflect_all / _reflect_table
        self.analysis_methods = None
        self.analysis_peak_detail = None
        self.analysis_signal = None
        self.analysis_class_map = None
        self.class_sample_map = None
        self.sample_pool_main = None
        self.sample_pool_library_map = None
        self.sample_pool_output = None
        # reflect tables eagerly if requested
        if reflect:
            self._reflect_all()

    def _reflect_table(self, table_name):
        '''
        Return the Table object for table_name, reflecting it on demand
        and caching it as an instance attribute.

        BUGFIX: the previous hasattr() test was defeated by the None
        placeholders assigned in __init__ (hasattr was True while the
        value was still None), so callers could receive None instead of
        a reflected Table.  Test the attribute value instead.
        '''
        tbl = getattr(self, table_name, None)
        if tbl is None:
            tbl = Table(table_name, self.meta, autoload=True,
                        autoload_with=self.engine)
            setattr(self, table_name, tbl)
        return tbl

    def _reflect_all(self):
        '''Reflect every table in the schema and cache shortcut attributes.'''
        self.meta.reflect(bind=self.engine, schema=self.schema)
        # reflected tables are keyed as "<schema>.<tablename>"
        def get_table(tablename):
            return self.meta.tables['%s.%s' % (self.schema, tablename)]
        self.analysis_main = get_table(self.TABLE_ANALYSIS_MAIN)
        self.analysis_methods = get_table('analysis_methods')
        self.analysis_peak_detail = get_table(self.TABLE_ANALYSIS_PEAK_DETAIL)
        self.analysis_signal = get_table('analysis_signal')
        self.analysis_class_map = get_table('analysis_class_map')
        self.class_sample_map = get_table('class_sample_map')
        self.sample_pool_main = get_table(self.TABLE_SAMPLE_POOL_MAIN)
        self.sample_pool_library_map = get_table('sample_pool_library_map')
        self.sample_pool_output = get_table('sample_pool_output')

    def get_sample_pool_main(self):
        '''Yield every row of the sample_pool_main table.'''
        tbl = self.sample_pool_main
        stmt = select([tbl])
        for res in stmt.execute():
            yield res
            
    def get_sample_pool_status_flag(self, sp_id):
        '''Return the status_flag of sample pool sp_id.'''
        # _reflect_table returns the cached table when available,
        # otherwise reflects it on demand
        tbl = self._reflect_table(self.TABLE_SAMPLE_POOL_MAIN)
        stmt = select([tbl.c.status_flag], 
                      tbl.c.sample_pool_id == sp_id)
        return stmt.execute().fetchone().status_flag
    
    def set_sample_pool_status_flag(self, sp_id, flag):
        '''Set the status_flag of sample pool sp_id to flag.'''
        tbl = self._reflect_table(self.TABLE_SAMPLE_POOL_MAIN)
        stmt = tbl.update().where(tbl.c.sample_pool_id == sp_id).values(status_flag=flag)
        stmt.execute()

    def get_sample_pool_libraries(self, sp_id):
        '''Yield (flowcell_id, lane_id) tuples for sample pool sp_id.'''
        tbl = self.sample_pool_library_map
        stmt = select([tbl.c.flowcell_id, tbl.c.lane_id],
                      tbl.c.sample_pool_id == sp_id)
        for res in stmt.execute():
            yield res.flowcell_id, res.lane_id

    def get_all_sample_pool_libraries(self):
        '''Yield (sample_pool_id, flowcell_id, lane_id) for every mapping row.'''
        tbl = self.sample_pool_library_map
        stmt = select([tbl.c.sample_pool_id, tbl.c.flowcell_id, tbl.c.lane_id])
        for res in stmt.execute():
            yield res.sample_pool_id, res.flowcell_id, res.lane_id

    def get_sample_pool_output(self, sp_id):
        '''Return a dict mapping tag_name -> file_name for sample pool sp_id.'''
        tbl = self.sample_pool_output
        stmt = select([tbl.c.tag_name, tbl.c.file_name],
                      tbl.c.sample_pool_id == sp_id)
        output_files = {}
        for res in stmt.execute():
            output_files[res.tag_name] = res.file_name
        return output_files

    def get_analysis_info(self):
        '''
        Yield one attribute-holder object per analysis, carrying
        id/name/status/method information plus class -> sample-pool maps.

        Analyses with no associated method row are logged and skipped.
        '''
        logger = logging.getLogger(__name__)
        # reflect all tables
        self._reflect_all()
        # examine all analyses        
        tbl = self.analysis_main
        stmt = select([tbl.c.analysis_id, 
                       tbl.c.analysis_name,
                       tbl.c.analysis_status,
                       tbl.c.method_id])
        for ares in stmt.execute():
            # throwaway class used purely as an attribute namespace;
            # callers only read attributes from it
            analysis = type('AnalysisInfo', (object,), dict())
            analysis.id = ares.analysis_id
            analysis.name = ares.analysis_name
            analysis.status = ares.analysis_status
            analysis.method_id = ares.method_id

            # get method associated with this analysis
            tbl = self.analysis_methods
            stmt = select([tbl], tbl.c.method_id == analysis.method_id)
            mres = stmt.execute().fetchone()
            if mres is None:
                logger.error("%s (%s): No method associated with this analysis" % 
                             (analysis.id, analysis.name))
                continue
            analysis.method_name = mres.method_name
            analysis.method_sw_version = mres.sw_version
            analysis.method_command_options = mres.command_options

            # get "classes" associated with this analysis
            tbl = self.analysis_class_map
            stmt = select([tbl.c.class_id, tbl.c.s_flag],
                          tbl.c.analysis_id == analysis.id)
            class_type_id_map = {}
            class_type_sample_pool_map = collections.defaultdict(list)
            for class_res in stmt.execute():
                # class id plus its type flag (experiment vs control)
                class_id, class_type = class_res.class_id, class_res.s_flag
                class_type_id_map[class_type] = class_id
                # get sample pools associated with this class
                tbl = self.class_sample_map
                stmt = select([tbl.c.sample_pool_id, tbl.c.sample_pool_name],
                              tbl.c.class_id == class_id)
                for sample_res in stmt.execute():
                    # add sample pool id to class
                    class_type_sample_pool_map[class_type].append(sample_res.sample_pool_id)
            # save analysis -> class -> sample pool information
            analysis.class_type_id_map = class_type_id_map
            analysis.class_type_sample_pool_map = dict(class_type_sample_pool_map)
            # done building the analysis object
            yield analysis

    def analysis_previously_completed(self, a_id):
        '''Return True if analysis a_id completed successfully or failed.'''
        status_flag = self.get_analysis_status_flag(a_id)
        return status_flag in (self.ANALYSIS_STATUS_COMPLETE,
                               self.ANALYSIS_STATUS_FAILED)

    def get_analysis_status_flag(self, a_id):
        '''Return the analysis_status of analysis a_id.'''
        tbl = self._reflect_table(self.TABLE_ANALYSIS_MAIN)
        stmt = select([tbl.c.analysis_status], 
                      tbl.c.analysis_id == a_id)
        return stmt.execute().fetchone().analysis_status
    
    def set_analysis_status_flag(self, a_id, flag):
        '''Set the analysis_status of analysis a_id to flag.'''
        tbl = self._reflect_table(self.TABLE_ANALYSIS_MAIN)
        stmt = tbl.update().where(tbl.c.analysis_id == a_id).values(analysis_status=flag)
        stmt.execute()

    def insert_peaks(self, analysis_id, peaks, replace=True):
        '''
        Bulk-insert peak records into analysis_peak_detail in chunks.

        analysis_id: owning analysis id
        peaks: iterable of PeakInfo-like objects
        replace: if True, delete existing rows for analysis_id first
        '''
        tbl = self._reflect_table(self.TABLE_ANALYSIS_PEAK_DETAIL)
        if replace:
            # delete any existing peaks from this analysis ID            
            tbl.delete().where(tbl.c.analysis_id == analysis_id).execute()
        # insert peaks in fixed-size chunks to bound memory use
        records = []
        chunksize = 1000
        conn = self.engine.connect()
        try:
            for peak in peaks:
                records.append({'analysis_id': analysis_id,
                                'chr_no': peak.chrom,
                                'peak_start': int(peak.start),
                                'peak_end': int(peak.end),
                                'normalized_rank': peak.normalized_rank,
                                'peak_max_pos': peak.summit,
                                't_tags': peak.t_tags,
                                'c_tags': peak.c_tags,
                                'pvalue_neglog10': peak.neglog10p,
                                'fdr_neglog10': peak.neglog10fdr,
                                'fold_change': peak.fold_change})
                # BUGFIX: was '>' which flushed at chunksize+1 rows
                if len(records) >= chunksize:
                    conn.execute(tbl.insert(), records)
                    records = []
            if records:
                conn.execute(tbl.insert(), records)
        finally:
            # BUGFIX: close the connection even if an insert raises
            conn.close()


class PeakInfo(object):
    '''
    Plain attribute holder for a single detected peak, matching the
    columns written by ChIPSeqDB.insert_peaks.
    '''
    def __init__(self):
        # all fields start unset except normalized_rank, which defaults to 0
        for attr in ('chrom', 'start', 'end', 'analysis_id', 'summit',
                     't_tags', 'c_tags', 'neglog10p', 'neglog10fdr',
                     'fold_change'):
            setattr(self, attr, None)
        self.normalized_rank = 0