'''
Created on Nov 14, 2009

@author: mkiyer
'''
from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, \
    Numeric, Date
from sqlalchemy.sql import distinct, select, outerjoin, exists, and_, or_, not_
from veggie.sequence.io import get_seq_length
import collections
import datetime
import glob
import logging
import operator
import os
import re
import sys
import datetime
import pickle
from HTMLParser import HTMLParser

# dict that tells us whether an analysis should have a
# paired end naming scheme or a single read naming scheme
_is_paired_end_filename = {'default': False,
                           'eland': False,
                           'eland_rna': False,
                           'eland_extended': False,
                           'eland_pair': True,
                           'eland_tag': False}

# map the software versions to the appropriate bowtie
# quality scores parameter to use
_bowtie_quals = {'/Pipeline/GAPipeline-0.3.0.1/Goat/../Gerald': 'solexa-quals', 
                 '/Pipeline/SolexaPipeline-0.2.2.6/Goat/../Gerald': 'solexa-quals',
                 '/Pipeline/GAPipeline-1.1rc1/bin': 'solexa-quals',
                 '/Pipeline/SolexaPipeline-0.2.2.6/Gerald': 'solexa-quals',
                 '/Pipeline/GAPipeline-0.3.0/Goat/../Gerald': 'solexa-quals',
                 '/Pipeline/GAPipeline-0.3.0.1/Gerald': 'solexa-quals',
                 '/Pipeline/GAPipeline-0.3.0/Gerald': 'solexa-quals',
                 '/Pipeline/GAPipeline-1.4.0/bin': 'solexa1.3-quals',
                 '/Pipeline/GAPipeline-1.1rc1p4/bin': 'solexa-quals',
                 '/Pipeline/GAPipeline-1.3.2/bin': 'solexa1.3-quals',
                 '/Pipeline/GAPipeline-1.5.1/bin': 'solexa1.3-quals'
                 }

# sequence file formats where '%d' will be substituted with the lane id 
# and '?' is a wildcard character to match any single character
_paired_seq_glob_str = 's_%d_?_sequence.txt'
_single_seq_glob_str = 's_%d_sequence.txt'
# some of the data paths in sample db are wrong and need to be fixed
# for now, get around this by hacking the correct paths in the dictionary
# below and remove this when the database is corrected
hacked_data_paths = {
    '/archive04/090115_PATHBIO-SOLEXA2_30JD2AAXX_B/Data/IPAR_1.01/Bustard1.9.6_24-01-2009_root/GERALD_24-01-2009_root': '/archive04/090115_PATHBIO-SOLEXA2_30JD2AAXX/Data/IPAR_1.01/Bustard1.9.6_24-01-2009_root/GERALD_24-01-2009_root',
}
hacked_archives = ['/data2', '/archive/solexa']

class OncoseqMappingStats(object):
    '''
    Mapping/QC statistics for one Oncoseq flowcell lane, scraped from the
    lab's HTML lane-info report (see fetch_oncoseq_mapping_stats).
    '''

    def __init__(self):
        # identifying fields and QC outcome; populated by the fetch method
        self.flowcell = None
        self.lane = None
        self.sample = None
        self.qc_status = None

    class HTMLDataParser(HTMLParser):
        '''Minimal HTML table scraper: accumulates the text of each <td>
        and collects the cells of every non-empty <tr> into self.rows.'''
        def __init__(self):
            HTMLParser.__init__(self)
            self.tag_stack = []   # currently-open tags, innermost last
            self.tag_data = ''    # text seen since the most recent start tag
            self.row_fields = []  # cells of the <tr> being parsed
            self.rows = []        # completed rows (list of cell-text lists)
        def handle_starttag(self, tag, attrs):
            self.tag_stack.append(tag)
            self.tag_data = ''
            if tag == 'tr':
                self.row_fields = []
        def handle_endtag(self, tag):
            if tag == 'tr':
                # only keep rows that actually contained <td> cells
                if len(self.row_fields) > 0:
                    self.rows.append(self.row_fields)
            elif tag == 'td':
                self.row_fields.append(self.tag_data)
            # fix: guard against malformed HTML with stray end tags, which
            # previously raised IndexError on pop from an empty stack
            if self.tag_stack:
                self.tag_stack.pop()
        def handle_data(self, data):
            self.tag_data += data

    @staticmethod
    def fetch_oncoseq_mapping_stats():
        '''
        Fetch the lane-info table and yield one OncoseqMappingStats per row.

        Format of the table:
        0. Flowcell
        1. Lane
        2. Sample
        3. Old Sample
        4. TissueType
        5. CellLineTissue
        6. Matched
        7. Progression
        8. Treatment
        9. Analysis Date
        10. Paired-End
        11. Max Read Length
        12. QC Status
        13. Total Reads
        14. Mapped Reads
        15. Human-mapped Reads
        16. Human-mapped/Total
        17. Splice-mapped Reads
        18. Splice-mapped/Human-mapped
        '''
        import urllib2
        #url = 'http://141.214.6.100/cgi-bin/laneInfoTable.py?plaintext=True'
        #url = 'http://141.214.6.100/cgi-bin/laneInfoTable.py'
        url = 'http://141.214.8.19/cgi-bin/laneInfoTable.py'
        f = urllib2.urlopen(url)
        try:
            htmlparser = OncoseqMappingStats.HTMLDataParser()
            htmlparser.feed(f.read())
        finally:
            # fix: the url handle was previously never closed
            f.close()
        for row in htmlparser.rows:
            mappingstats = OncoseqMappingStats()
            mappingstats.sample = row[2]
            mappingstats.flowcell = row[0]
            mappingstats.lane = int(row[1])
            mappingstats.qc_status = row[12]
            yield mappingstats

# lazily-populated cache: library_id -> QC pass/fail flag
_oncoseq_qc_status = None

def get_mctp_library_qc(mctp_id):
    '''Return True if the given library passed Oncoseq QC.

    The full QC table is downloaded and cached on the first call; any
    library without an explicit 'QC-FAIL' status counts as a pass, and
    unknown libraries default to pass (with a warning).
    '''
    global _oncoseq_qc_status
    if _oncoseq_qc_status is None:
        logging.info('Loading Oncoseq Mapping Stats...')
        _oncoseq_qc_status = {}
        for stats in OncoseqMappingStats.fetch_oncoseq_mapping_stats():
            library_id = SampleDB.get_mctp_id(stats.flowcell, stats.lane)
            # anything other than an explicit QC-FAIL counts as a pass
            _oncoseq_qc_status[library_id] = (stats.qc_status != 'QC-FAIL')
        logging.info('Finished loading Oncoseq Mapping Stats...')
    if mctp_id not in _oncoseq_qc_status:
        logging.warning('%s: No Oncoseq QC status, assuming QC-PASS' % mctp_id)
        return True
    return _oncoseq_qc_status[mctp_id]

def reflect_sample_report_main_view(meta):
    '''
    Build (by explicit declaration, not DB reflection) the SQLAlchemy Table
    for the read-only solexa_dp.sample_report_main view and register it on
    the given MetaData.  Column definitions mirror the Oracle view below.

    "Column Name","Data Type","Nullable","Data Default","COLUMN ID","COMMENTS","INSERTABLE","UPDATABLE","DELETABLE"
    "SOLEXA_SAMPLE_ID","VARCHAR2(50)","No","","1","","NO","NO","NO"
    "SAMPLE_SOURCE_TYPE","VARCHAR2(50)","Yes","","2","","NO","NO","NO"
    "SAMPLE_SOURCE_TYPE_LOOKUP","NUMBER(10,0)","Yes","","3","","NO","NO","NO"
    "SAMPLE_SOURCE_ID","VARCHAR2(50)","No","","4","","NO","NO","NO"
    "SAMPLE_TYPE","VARCHAR2(50)","Yes","","5","","NO","NO","NO"
    "SAMPLE_TYPE_LOOKUP","NUMBER(10,0)","Yes","","6","","NO","NO","NO"
    "SAMPLE_DESC","VARCHAR2(255)","Yes","","7","","NO","NO","NO"
    "APP_TYPE","VARCHAR2(50)","Yes","","8","","NO","NO","NO"
    "APP_TYPE_LOOKUP","NUMBER(10,0)","Yes","","9","","NO","NO","NO"
    "SUB_DATE","VARCHAR2(19)","Yes","","10","","NO","NO","NO"
    "OWNER","VARCHAR2(50)","No","","11","","NO","NO","NO"
    "ND_CONC","NUMBER(10,2)","Yes","","12","","NO","NO","NO"
    "COMMENTS","VARCHAR2(3000)","Yes","","13","","NO","NO","NO"
    "SAMPLE_NAME","VARCHAR2(100)","No","","14","","NO","NO","NO"
    "EXP_DESIGN","VARCHAR2(50)","Yes","","15","","NO","NO","NO"
    "EXP_DESIGN_LOOKUP","NUMBER(10,0)","Yes","","16","","NO","NO","NO"
    "TISSUE_TYPE","VARCHAR2(50)","Yes","","17","","NO","NO","NO"
    "TISSUE_TYPE_LOOKUP","NUMBER(10,0)","Yes","","18","","NO","NO","NO"
    "TECH_TYPE","VARCHAR2(50)","Yes","","19","","NO","NO","NO"
    "TECH_TYPE_LOOKUP","NUMBER(10,0)","Yes","","20","","NO","NO","NO"
    "SAMPLE_STATUS","VARCHAR2(50)","Yes","","21","","NO","NO","NO"
    "SAMPLE_STATUS_LOOKUP","NUMBER(10,0)","Yes","","22","","NO","NO","NO"
    "PRE_ANALYSIS_ID","VARCHAR2(50)","Yes","","23","","NO","NO","NO"
    "RUN_DATE","DATE","Yes","","24","","NO","NO","NO"
    "GA","VARCHAR2(50)","Yes","","25","","NO","NO","NO"
    "FLOWCELL","VARCHAR2(50)","Yes","","26","","NO","NO","NO"
    "LANE_ID","NUMBER(10,0)","Yes","","27","","NO","NO","NO"
    "ANALYSIS_DATE","DATE","Yes","","28","","NO","NO","NO"
    "SW_VERSION","VARCHAR2(500)","Yes","","29","","NO","NO","NO"
    "DATA_PATH","VARCHAR2(2000)","Yes","","30","","NO","NO","NO"
    "ANALYSIS","VARCHAR2(100)","Yes","","31","","NO","NO","NO"
    "USE_BASE","VARCHAR2(2000)","Yes","","32","","NO","NO","NO"
    "READ_LENGTH","NUMBER(10,0)","Yes","","33","","NO","NO","NO"
    "SEED_LENGTH","NUMBER(10,0)","Yes","","34","","NO","NO","NO"
    "MAX_MATCH","NUMBER(10,0)","Yes","","35","","NO","NO","NO"
    "ELAND_GENOME","VARCHAR2(2000)","Yes","","36","","NO","NO","NO"
    "GENOME_CONTAM","VARCHAR2(2000)","Yes","","37","","NO","NO","NO"
    "GENOME_SPLICE","VARCHAR2(2000)","Yes","","38","","NO","NO","NO"
    "CLUSTER_RAW","NUMBER(10,0)","Yes","","39","","NO","NO","NO"
    "CLUSTER_PF","NUMBER(10,0)","Yes","","40","","NO","NO","NO"
    "ALIGN_PF","NUMBER(10,0)","Yes","","41","","NO","NO","NO"
    "ERR_RATE_PF","NUMBER(10,0)","Yes","","42","","NO","NO","NO"
    "SAMPLE_ID","VARCHAR2(25)","Yes","","43","","NO","NO","NO"
    "PARAM_ID","VARCHAR2(25)","Yes","","44","","NO","NO","NO"
    "PARAM_ORI_NAME","VARCHAR2(100)","Yes","","45","","NO","NO","NO"
    "PARAM_ORI_VALUE","VARCHAR2(100)","Yes","","46","","NO","NO","NO"
    '''
    mytable = Table('sample_report_main', meta,
                    Column('solexa_sample_id', String(50), nullable=False),
                    Column('sample_source_type', String(50), nullable=True),
                    Column('sample_source_type_lookup', Numeric(10,0), nullable=True),
                    Column('sample_source_id', String(50), nullable=False),
                    Column('sample_type', String(50), nullable=True),
                    Column('sample_type_lookup', Numeric(10,0), nullable=True),
                    Column('sample_desc', String(255), nullable=True),
                    Column('app_type', String(50), nullable=True),
                    Column('app_type_lookup', Numeric(10,0), nullable=True),
                    Column('sub_date', String(19), nullable=True),
                    Column('owner', String(50), nullable=False),
                    Column('nd_conc', Numeric(10,2), nullable=True),
                    Column('comments', String(3000), nullable=True),
                    Column('sample_name', String(100), nullable=False),
                    Column('exp_design', String(50), nullable=True),
                    Column('exp_design_lookup', Numeric(10,0), nullable=True),
                    Column('tissue_type', String(50), nullable=True),
                    Column('tissue_type_lookup', Numeric(10,0), nullable=True),
                    Column('tech_type', String(50), nullable=True),
                    Column('tech_type_lookup', Numeric(10,0), nullable=True),
                    Column('sample_status', String(50), nullable=True),
                    Column('sample_status_lookup', Numeric(10,0), nullable=True),
                    Column('pre_analysis_id', String(50), nullable=True),
                    Column('run_date', Date, nullable=True),
                    Column('ga', String(50), nullable=True),
                    Column('flowcell', String(50), nullable=True),
                    Column('lane_id', Numeric(10,0), nullable=True),
                    Column('analysis_date', Date, nullable=True),
                    Column('sw_version', String(500)),
                    Column('data_path', String(2000)),
                    Column('analysis', String(100)),
                    Column('use_base', String(2000)),
                    Column('read_length', Numeric(10,0)),
                    Column('seed_length', Numeric(10,0)),
                    Column('max_match', Numeric(10,0)),
                    Column('eland_genome', String(2000)),
                    Column('genome_contam', String(2000)),
                    Column('genome_splice', String(2000)),
                    Column('cluster_raw', Numeric(10,0)),
                    Column('cluster_pf', Numeric(10,0)),
                    Column('align_pf', Numeric(10,0)),
                    Column('err_rate_pf', Numeric(10,0)),
                    Column('sample_id', String(25)),
                    Column('param_id', String(25)),
                    Column('param_ori_name', String(100)),
                    Column('param_ori_value', String(100)),
                    schema='solexa_dp')
    return mytable

def reflect_sample_peaks_main_view(meta):
    '''
    Build (by explicit declaration, not DB reflection) the SQLAlchemy Table
    for the read-only solexa_dp.sample_peaks_main bioanalyzer view and
    register it on the given MetaData.  Columns mirror the Oracle view:

    "Column Name"    "Data Type"    "Nullable"    "Data Default"    "COLUMN ID"    "COMMENTS"    "INSERTABLE"    "UPDATABLE"    "DELETABLE"
    "BA_ID"    "VARCHAR2(50)"    "No"    ""    "1"    ""    "NO"    "NO"    "NO"
    "SAMPLE_ID"    "VARCHAR2(1000)"    "No"    ""    "2"    ""    "NO"    "NO"    "NO"
    "PEAK_SIZE"    "NUMBER(10,0)"    "No"    ""    "3"    ""    "NO"    "NO"    "NO"
    "PEAK_CONC"    "NUMBER(10,2)"    "No"    ""    "4"    ""    "NO"    "NO"    "NO"
    "MOLARITY"    "NUMBER(10,2)"    "No"    ""    "5"    ""    "NO"    "NO"    "NO"
    "OBSERVATIONS"    "VARCHAR2(1000)"    "Yes"    ""    "6"    ""    "NO"    "NO"    "NO"
    "AREA"    "NUMBER(10,2)"    "No"    ""    "7"    ""    "NO"    "NO"    "NO"
    "AM_TIME"    "NUMBER(10,2)"    "No"    ""    "8"    ""    "NO"    "NO"    "NO"
    "PEAK_HEIGHT"    "NUMBER(10,2)"    "No"    ""    "9"    ""    "NO"    "NO"    "NO"
    "PEAK_WIDTH"    "NUMBER(10,2)"    "No"    ""    "10"    ""    "NO"    "NO"    "NO"
    "PERCENTAGE_TOTAL"    "NUMBER(10,2)"    "No"    ""    "11"    ""    "NO"    "NO"    "NO"
    "TIME_CORRECTED"    "NUMBER(10,2)"    "No"    ""    "12"    ""    "NO"    "NO"    "NO"
    "PEAK_STATUS"    "VARCHAR2(50)"    "No"    ""    "13"    ""    "NO"    "NO"    "NO"
    "BA_SAMPLE_PEAK_ID"    "VARCHAR2(50)"    "No"    ""    "14"    ""    "NO"    "NO"    "NO"
    "QC_STATUS"    "VARCHAR2(10)"    "Yes"    ""    "15"    ""    "NO"    "NO"    "NO"
    "COMMENTS"    "VARCHAR2(3000)"    "Yes"    ""    "16"    ""    "NO"    "NO"    "NO"
    "POS_COUNT"    "VARCHAR2(3)"    "Yes"    ""    "17"    ""    "NO"    "NO"    "NO"
    "APP_TYPE"    "VARCHAR2(50)"    "No"    ""    "18"    ""    "NO"    "NO"    "NO"
    '''
    mytable = Table('sample_peaks_main', meta,
                    Column('ba_id', String(50), nullable=False),
                    Column('sample_id', String(1000), nullable=False),
                    Column('peak_size', Numeric(10,0), nullable=False),
                    Column('peak_conc', Numeric(10,2), nullable=False),
                    Column('molarity', Numeric(10,2), nullable=False),
                    Column('observations', String(1000), nullable=True),
                    Column('area', Numeric(10,2), nullable=False),
                    Column('am_time', Numeric(10,2), nullable=False),
                    Column('peak_height', Numeric(10,2), nullable=False),
                    Column('peak_width', Numeric(10,2), nullable=False),
                    Column('percentage_total', Numeric(10,2), nullable=False),
                    Column('time_corrected', Numeric(10,2), nullable=False),
                    Column('peak_status', String(50), nullable=False),
                    Column('ba_sample_peak_id', String(50), nullable=False),
                    Column('qc_status', String(10), nullable=True),
                    Column('comments', String(3000), nullable=True),
                    Column('pos_count', String(3), nullable=True),
                    Column('app_type', String(50), nullable=False),
                    schema='solexa_dp')
    return mytable

    
def check_path_for_files(mypath, filenames):
    '''Return True iff every name in `filenames` exists under `mypath`
    (vacuously True for an empty list).'''
    return all(os.path.exists(os.path.join(mypath, name))
               for name in filenames)

def find_fastq_path(data_path, fastq_filenames):
    '''Locate the directory that actually contains `fastq_filenames`.

    Tries, in order: the recorded data path itself, its 'Temp' subdirectory,
    then the same relative path (with and without 'Temp') under every known
    archive volume.  Returns the first directory holding all the files, or
    None when nothing matches.
    '''
    # correct data path if it does not start with '/' (or equiv for other OS)
    if not data_path.startswith(os.path.sep):
        data_path = os.path.join(os.path.sep, data_path)
    # some of the data paths in sample db are flat out wrong.  a dict
    # of corrections is maintained and used as a hack to get to the 
    # correct data paths in the interim while sample db is fixed
    if data_path in hacked_data_paths:
        logging.warning("Using hacked data path %s instead of %s" % (hacked_data_paths[data_path], data_path))
        data_path = hacked_data_paths[data_path]
    # first two candidates: the recorded path, then its /Temp subdirectory
    for candidate in (data_path, os.path.join(data_path, 'Temp')):
        if check_path_for_files(candidate, fastq_filenames):
            return candidate
    # expand the search to every known archive volume
    archives = glob.glob(os.path.join(os.path.sep, "archive*"))
    # add archives that are not part of the typical naming scheme
    archives.extend(hacked_archives)
    # path relative to its original archive mount point
    path_wo_archive = os.path.sep.join(data_path.split(os.path.sep)[2:])
    for archive in archives:
        base = os.path.join(archive, path_wo_archive)
        # try both without and with a trailing /Temp
        for candidate in (base, os.path.join(base, 'Temp')):
            if check_path_for_files(candidate, fastq_filenames):
                logging.error("Expected data_path %s, but found at %s" %
                              (data_path, candidate))
                return candidate
    # every search came up empty
    return None

class Sample(object):
    '''A sequencing sample plus its free-form parameter dictionary.'''

    def __init__(self):
        # core identifying attributes, populated from sampledb records
        self.id = None
        self.params = {}
        self.app_type = None
        self.sample_type = None
        self.tissue_type = None

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        fmt = 'Sample(id=%s, sample_type=%s, tissue_type=%s params=%s)'
        return fmt % (self.id, self.sample_type, self.tissue_type, self.params)

    @property
    def name(self):
        '''The oncoseq sample name parameter (KeyError when unset).'''
        return self.params['oncoseq_sample_name']

class Library(object):
    '''A sequencing library analysis (one flowcell lane) and its metadata.'''

    # recognized values for the read_type attribute
    read_types = set(['single_read', 'paired_end', 'paired_end_ditags'])

    @staticmethod
    def choose_best_library(libraries):
        '''use heuristics to choose best library'''
        ranked = sorted(libraries, reverse=True)
        return ranked[0]

    def __init__(self):
        # identity
        self.id = None
        self.sample_id = None
        # sequence data files and format
        self.fastq_files = None
        self.fastq_format = None
        self.read_type = None
        self.read_length_variable = False
        self.read_length = 0
        # cluster counts: raw and passing-filter
        self.cluster_raw = 0
        self.cluster_pf = 0
        # wet-lab / platform metadata
        self.dna_conc = None
        self.rin = None
        self.protocol = None
        self.platform_name = None
        self.platform_version = None
        self.run_date = None
        self.insert_size = None
        self.qc_status = None

    def __cmp__(self, other):
        '''
        currently used to sort libraries by quality
        '''
        assert self.id == other.id
        # compare quality metrics in priority order; a metric only counts
        # when both libraries report a non-zero value for it
        metric_pairs = ((self.cluster_pf, other.cluster_pf),
                        (self.cluster_raw, other.cluster_raw),
                        (self.read_length, other.read_length))
        for mine, theirs in metric_pairs:
            if mine != 0 and theirs != 0:
                delta = mine - theirs
                if delta != 0:
                    return delta
        # last resort: order by run date
        if self.run_date > other.run_date:
            return 1
        if self.run_date < other.run_date:
            return -1
        return 0

    def __str__(self):
        return ('<Library (id=%s run_date=%s fastq_files=%s fastq_format=%s read_type=%s'
                ' read_length_variable=%s read_length=%d cluster_raw=%d'
                ' cluster_pf=%d)>' % 
                (self.id, self.run_date, self.fastq_files, self.fastq_format, 
                 self.read_type, self.read_length_variable, self.read_length,
                 self.cluster_raw, self.cluster_pf))

    def is_paired_end(self):
        '''True when this library was sequenced as a paired-end run.'''
        return self.read_type == 'paired_end'

class ChIPSeqLibrary(Library):
    '''Library subtype for ChIP-seq runs; adds the antibody reference.'''
    def __init__(self):
        super(ChIPSeqLibrary, self).__init__()
        # id of the ChIPAntibody used for immunoprecipitation
        self.chip_ab_id = None
        
class ChIPAntibody(object):
    '''Record describing an antibody used for a ChIP-seq experiment.'''
    def __init__(self):
        # vendor/product identification; monoclonal is a yes/no flag
        self.id = None
        self.vendor = None
        self.product_no = None
        self.monoclonal = None


class SampleDBInstance(object):
    '''
    Serializable snapshot of sampleDB: samples, libraries, the many-to-many
    links between them, sample groupings by name, and the schema of known
    sample parameters and their observed values.
    '''
    def __init__(self,
                 samples,
                 libraries,
                 sample_to_library,
                 library_to_sample,
                 sample_groups,
                 param_schema):
        # timestamp of when this snapshot was taken
        self.creation_date = datetime.datetime.now()
        # copy into plain dicts so defaultdicts (with unpicklable lambda
        # factories) are not stored on the instance
        self.samples = dict(samples)
        self.libraries = dict(libraries)
        self.sample_to_library = dict(sample_to_library)
        self.library_to_sample = dict(library_to_sample)
        self.sample_groups = dict(sample_groups)
        self.param_schema = dict(param_schema)

    @staticmethod
    def load(filename):
        '''Load a previously saved snapshot (pickle format).

        NOTE: pickle is unsafe on untrusted data; only load files produced
        by save().
        '''
        # fix: use a context manager so the file handle is always closed
        with open(filename, 'rb') as fileh:
            return pickle.load(fileh)

    def save(self, filename):
        '''Persist this snapshot to `filename` using pickle.'''
        # fix: use a context manager so the file handle is always closed
        with open(filename, 'wb') as fileh:
            pickle.dump(self, fileh)

    def contains_sample_id(self, sample_id):
        '''True if a sample with this id is in the snapshot.'''
        return sample_id in self.samples

    def contains_library_id(self, library_id):
        '''True if a library with this id is in the snapshot.'''
        return library_id in self.libraries

    def contains_sample_name(self, sample_name):
        '''True if a sample group with this name is in the snapshot.'''
        return sample_name in self.sample_groups

    def itersamplenames(self):
        '''Iterate over all known sample (group) names.'''
        for sample_name in self.sample_groups.keys():
            yield sample_name

    def get_samples_by_name(self, sample_name):
        '''Return the Sample objects grouped under `sample_name`.'''
        assert sample_name in self.sample_groups
        return [self.samples[s] for s in self.sample_groups[sample_name]]

    def get_libraries_by_sample_name(self, sample_name, best=True):
        '''Return libraries for all samples grouped under `sample_name`.

        best: if True keep only the best analysis per library (see
        Library.choose_best_library); otherwise keep every analysis.
        '''
        libraries = []
        for sample_id in self.sample_groups[sample_name]:
            library_groups = self.get_sample_libraries(sample_id)
            for library_analyses in library_groups:
                if best:
                    libraries.append(Library.choose_best_library(library_analyses))
                else:
                    libraries.extend(library_analyses)
        return libraries

    def get_sample_libraries(self, sample_id):
        '''Return the library analysis lists linked to `sample_id`.'''
        return [self.libraries[x] for x in self.sample_to_library[sample_id]]

    def get_libraries(self, library_id):
        '''Return the analysis list for a single library id.'''
        return self.libraries[library_id]

    def get_library_samples(self, library_id):
        '''Return the Sample objects linked to `library_id`.'''
        return [self.samples[x] for x in self.library_to_sample[library_id]]

    def _validate_cond(self, cond):
        '''True if every key/value of `cond` is a known parameter/value.'''
        for k, v in cond.items():
            if k not in self.param_schema:
                return False
            if v not in self.param_schema[k]:
                return False
        return True

    def _sample_matches_cond(self, sample, cond):
        '''True if `sample` satisfies every key/value pair in `cond`.

        Keys are looked up first in sample.params, then as plain sample
        attributes (app_type, tissue_type, ...).
        '''
        for k, v in cond.items():
            if k in sample.params:
                if sample.params[k] != v:
                    return False
            else:
                # check sample attributes instead of params
                if not hasattr(sample, k):
                    return False
                if getattr(sample, k) != v:
                    return False
        return True

    def _find_samples_matching_cond(self, cond):
        '''Return names of sample groups with a sample matching `cond`.

        fix: the match check used to sit outside the per-sample loop, so
        only the LAST sample of each group was ever tested (and an empty
        group read a stale `match` value).  A group is now included as soon
        as any one of its samples matches, at most once per group.
        '''
        if cond is None:
            return []
        if not self._validate_cond(cond):
            return []
        matching_samples = []
        for sample_name, sample_ids in self.sample_groups.items():
            for sample_id in sample_ids:
                if self._sample_matches_cond(self.samples[sample_id], cond):
                    matching_samples.append(sample_name)
                    break
        return matching_samples

    def get_sample_names(self, samples=None, conds=None):
        '''Return the sorted union of the explicitly named sample groups
        (filtered to known names) and the groups matching any condition
        dict in `conds`.'''
        matching_samples = set([])
        if samples is not None:
            matching_samples.update([s for s in samples if self.contains_sample_name(s)])
        if conds is not None:
            for cond in conds:
                if self._validate_cond(cond):
                    cond_samples = self._find_samples_matching_cond(cond)
                    matching_samples.update(cond_samples)
                else:
                    logging.error("invalid cond: %s" % (cond))
        return sorted(matching_samples)

    def get_sample_properties(self, sample_names):
        '''Return {sample_name: {property: value}} for every property in
        param_schema with a usable value for at least one sample.

        A group whose samples carry multiple distinct values for a property
        gets them comma-joined (with a warning).  Properties whose value is
        missing or the string 'none' (any case) everywhere are dropped.
        '''
        propdict = collections.defaultdict(lambda: {})
        # write all sample properties in param_schema
        for prop in sorted(self.param_schema.keys()):
            prop_values = []
            for sample_name in sample_names:
                matched_samples = self.get_samples_by_name(sample_name)
                sample_prop_values = set()
                for s in matched_samples:
                    if prop in s.params:
                        prop_val = s.params[prop]
                        # assumes parameter values are strings; 'none' is
                        # treated as missing
                        if prop_val.lower() != 'none':
                            sample_prop_values.add(prop_val)
                if len(sample_prop_values) == 0:
                    prop_values.append(None)
                else:
                    if len(sample_prop_values) > 1:
                        logging.warning('Sample name %s has multiple values for property %s: %s' % (sample_name, prop, str(sample_prop_values)))
                    prop_values.append(','.join(sample_prop_values))
            # if any of the samples have a valid value for this property then
            # include it in the returned dictionary
            if set(prop_values) == set([None]):
                logging.debug('Skipping property %s' % prop)
            else:
                for sample_name, prop_value in zip(sample_names, prop_values):
                    propdict[sample_name][prop] = prop_value
        return propdict

    def write_sample_properties(self, sample_names, outfhd, left_cols=0):
        '''Write a tab-separated property table (one row per property) to
        `outfhd`, left-padded with `left_cols` empty columns.'''
        propdict = self.get_sample_properties(sample_names)
        proplist = set()
        for s in sample_names:
            proplist.update(propdict[s].keys())
        proplist = sorted(proplist)
        for prop in proplist:
            prop_values = [propdict[s][prop] for s in sample_names]
            outfhd.write('\t' * left_cols)
            outfhd.write('%s\t%s\n' % (prop, '\t'.join(map(str,prop_values))))


class SampleDB(object):
    '''
    class for connecting to and retrieving data from sampleDB tables
    '''
    sr_sequence_fmt = "s_%d_sequence.txt"
    pe_sequence_fmt = "s_%d_%d_sequence.txt"

    @staticmethod
    def get_mctp_id(flowcell, lane):
        '''MCTP samples are identified flowcell/lane'''
        return 'mctp_%s_%d' % (flowcell, lane)

    @staticmethod
    def get_mctp_flowcell_lane(mctp_id):
        return mctp_id.split('_')[1:]

    def __init__(self, echo=False):
        '''
        creates an interface to sampleDB

        echo: passed to sqlalchemy create_engine to control SQL logging.
        Connects to the Oracle instance, reflects all tables in the
        solexa_dp schema, and declares the two report views used here.
        '''
        # NOTE(review): read-only credentials are hard-coded; consider
        # moving them to configuration
        user = 'solexa_dp_ro'
        password = 'readonly'
        host = 'pathbio-db1'
        port = 1521
        service_name = 'O9DB1'
        # oracle
        oracle_db = create_engine('oracle://' + user + ':' + password + '@' + \
                                  host + ':' + str(port) + '/' + service_name,
                                  echo=echo)        
        self.schema='solexa_dp'
        self.engine = oracle_db
        self.meta = MetaData()
        self.meta.bind = self.engine
        # only reflect if we start to use other tables
        self.meta.reflect(bind=oracle_db, schema=self.schema)
        # reflect tables
        self.sample_table = reflect_sample_report_main_view(self.meta)        
        self.bioanalyzer_table = reflect_sample_peaks_main_view(self.meta)

    def _get_table(self, tablename):
        '''Look up a reflected table by bare name within our schema.'''
        qualified_name = '{0}.{1}'.format(self.schema, tablename)
        return self.meta.tables[qualified_name]

    def _transform_param_name(self, k):
        '''Normalize a raw parameter name: lowercase, collapse whitespace
        runs to single underscores.'''
        return '_'.join(k.lower().split())

    def load(self):
        '''
        Loads ALL relevant information from SampleDB
        - Libraries
        - Samples and their parameters

        Returns a SampleDBInstance snapshot.  Each query row carries both
        library/analysis fields and at most one sample parameter pair, so
        samples and libraries are accumulated incrementally across rows.
        '''
        # create the data structures used to store the sampledb snapshot
        samples = collections.defaultdict(lambda: Sample())
        libraries = collections.defaultdict(lambda: [])
        library_to_sample = collections.defaultdict(lambda: set())
        sample_to_library = collections.defaultdict(lambda: set())
        # fastq file tuples already seen; used to dedupe repeated analyses
        all_fastq_files = set()
        sample_groups = collections.defaultdict(lambda: set())
        param_schema = collections.defaultdict(lambda: set())
        # get the insert size lookup table for all samples
        insert_sizes = self._init_sample_insert_sizes()        
        # query the sample report main table for all records where
        # a legitimate sequencing run has taken place
        table = self.sample_table
        stmt = select([table],
                      and_(table.c.flowcell != None,
                           table.c.data_path != None,
                           table.c.tissue_type != "PhiX Control"))
        for res in stmt.execute():
            # make library
            library_id = self.get_mctp_id(res.flowcell, res.lane_id)                
            library = self._make_library(res)
            # if library is None could mean no fastq files were found
            if library is None:
                logging.warning("%s: No fastq files found" % library_id)
                continue
            # add additional library attributes
            # TODO: fix this so it is not added haphazardly
            library.insert_size = insert_sizes[res.sample_id]    
            # check the fastq files, which define unique library/analysis 
            # instances
            if tuple(library.fastq_files) not in all_fastq_files:
                # add the library
                all_fastq_files.add(tuple(library.fastq_files))
                libraries[library.id].append(library)
            # get new sample if one with this id doesn't already exist
            # (defaultdict creates an empty Sample on first access)
            s = samples[res.sample_id]
            # add sample attributes
            s.id = res.sample_id
            s.app_type = res.app_type
            s.sample_type = res.sample_type
            s.tissue_type = res.tissue_type
            # each row may carry one (name, value) sample parameter pair
            if res.param_ori_name != None:
                param_name = self._transform_param_name(res.param_ori_name)
                if param_name in s.params and res.param_ori_value != s.params[param_name]:
                    logging.error('sample %s: parameter inconsistent param %s (%s != %s)' %
                                  (s.id, param_name, res.param_ori_value, s.params[param_name]))
                s.params[param_name] = res.param_ori_value
                # keep track of all available values for parameters
                param_schema[param_name].add(res.param_ori_value)
                # pool transcriptome samples by the oncoseq sample name field
                if res.param_ori_name == "Oncoseq Sample Name":
                    sample_groups[res.param_ori_value].add(s.id)
            # keep track of all available values for parameters
            param_schema['app_type'].add(s.app_type)
            param_schema['tissue_type'].add(s.tissue_type)
            param_schema['sample_type'].add(s.sample_type)
            # mapping libraries to samples and vice versa (many-to-many)
            library_to_sample[library_id].add(res.sample_id)
            sample_to_library[res.sample_id].add(library_id)
        # return database snapshot
        return SampleDBInstance(samples,
                                libraries,
                                sample_to_library,
                                library_to_sample,
                                sample_groups,
                                param_schema)

    def search_libraries(self, flowcell, lane, best=True):
        '''
        Return Library objects that match the specified flowcell and lane.

        flowcell: flowcell identifier to match against the
            pre_analysis_summary table
        lane: lane id to match
        best: when True (default), return a one-element list containing
            the single library chosen by Library.choose_best_library();
            otherwise return every match.

        Rows with a NULL data_path are excluded up front; rows that
        _make_library() cannot resolve (missing fastq files or unknown
        sw_version) are silently skipped because it returns None for them.
        '''
        table = self._get_table('pre_analysis_summary')
        # NOTE: '!= None' is intentional -- SQLAlchemy overloads the
        # operator to emit 'IS NOT NULL'; 'is not None' would not work here.
        stmt = select([table],
                      and_(table.c.flowcell == flowcell,
                           table.c.lane_id == lane,
                           table.c.data_path != None))
        matches = []
        for res in stmt.execute():
            library = self._make_library(res)
            if library is not None:
                matches.append(library)
        # idiom fix: test truthiness directly instead of '== True'
        if best:
            return [Library.choose_best_library(matches)]
        return matches

    def _init_sample_insert_sizes(self, read_length=50):
        '''
        Estimate the insert size of each sample from bioanalyzer peak data.

        read_length: assumed read length used to convert a fragment (peak)
            size into an insert size (insert = peak - 2*read_length).
            Defaults to 50, preserving the original hard-coded behavior
            (which subtracted 2*50 = 100).

        Returns a defaultdict mapping sample_id -> estimated insert size;
        missing samples default to 0.

        For each sample the peak with the highest 'percentage_total' wins;
        ties on percentage keep the largest peak size seen so far.
        '''
        table = self.bioanalyzer_table
        stmt = select([table])
        percents = collections.defaultdict(lambda: 0)
        insert_sizes = collections.defaultdict(lambda: 0)
        for res in stmt.execute():
            if res.percentage_total >= percents[res.sample_id]:
                if res.peak_size > insert_sizes[res.sample_id]:
                    insert_sizes[res.sample_id] = res.peak_size
                percents[res.sample_id] = res.percentage_total
        del percents
        # TODO: this is a poor man's way of determining the insert size
        # the ideal way would be to statistically profile based on
        # paired-end mappings
        # plain 'for key in dict' replaces the py2-only iterkeys(); it is
        # safe to assign to existing keys during iteration because no keys
        # are added or removed
        for sample_id in insert_sizes:
            # approximate insert size as fragment length - 2*read_length
            insert_sizes[sample_id] = max(0, insert_sizes[sample_id]
                                          - 2 * read_length)
        return insert_sizes

    def _find_fastq_from_query_results(self, res):
        '''
        Build the list of absolute fastq file paths for one query result.

        Raises KeyError when the result's analysis type is not recognized;
        returns an empty list (after logging) when the directory holding
        the fastq files cannot be located.
        '''
        # EAFP lookup of the analysis type: unknown analyses raise the
        # same KeyError(res.analysis) the explicit check used to raise
        try:
            paired = _is_paired_end_filename[res.analysis]
        except KeyError:
            logging.error('analysis field not recognized in sample %s: %s' %
                          (res.solexa_sample_id, res.analysis))
            raise
        # choose the filename pattern for paired-end vs. single-read data
        if paired:
            basenames = [self.pe_sequence_fmt % (res.lane_id, mate)
                         for mate in (1, 2)]
        else:
            basenames = [self.sr_sequence_fmt % res.lane_id]
        # locate the directory that actually contains these files
        directory = find_fastq_path(res.data_path, basenames)
        if directory is None:
            logging.error("fastq_path not found")
            return []
        return [os.path.join(directory, name) for name in basenames]

    def _make_library(self, res):
        '''
        Construct a Library object from one pre_analysis_summary row.

        Returns None (after logging an error) when the fastq files cannot
        be located on disk or when the pipeline sw_version has no known
        bowtie quality-score mapping.
        '''
        # search for the fastq files
        fastq_files = self._find_fastq_from_query_results(res)
        if len(fastq_files) == 0:
            logging.error("Flowcell: %s lane: %d data_path: %s "
                          "could not be found on any of the /archive "
                          "directories.. skipping this result..." %
                          (res.flowcell, res.lane_id, res.data_path))
            return None
        # get the fastq quality score format
        # idiom fix: 'x not in d' instead of 'not x in d'
        if res.sw_version not in _bowtie_quals:
            logging.error("Flowcell: %s lane: %d does not have a valid"
                          " sw_version field (%s could not be "
                          " recognized)... skipping this result..." %
                          (res.flowcell, res.lane_id, res.sw_version))
            return None
        fastq_format = _bowtie_quals[res.sw_version]
        # construct the Library object for this result
        library = Library()
        library.id = self.get_mctp_id(res.flowcell, res.lane_id)
        library.fastq_files = fastq_files
        library.fastq_format = fastq_format
        library.read_length_variable = False
        # read length is probed from the first fastq file on disk
        library.read_length = get_seq_length(fastq_files[0])
        # _find_fastq_from_query_results returns exactly 1 (single-read)
        # or 2 (paired-end) files when it succeeds
        if len(fastq_files) == 1:
            library.read_type = 'single_read'
        elif len(fastq_files) == 2:
            library.read_type = 'paired_end'
        library.cluster_raw = int(res.cluster_raw)
        library.cluster_pf = int(res.cluster_pf)
        library.run_date = res.run_date
        # TODO: get qc status from oncoseq for now, but switch to new quality
        # method in the future
        library.qc_status = get_mctp_library_qc(library.id)
        # TODO: get this information
        library.dna_conc = None
        library.rin = None
        library.protocol = None
        library.platform_name = None
        library.platform_version = None
        return library


__sdb = None
def get_sampledb():
    '''
    Return the module-wide SampleDB instance, constructing it lazily on
    the first call and reusing it thereafter.
    '''
    global __sdb
    # early return when the singleton already exists
    if __sdb is not None:
        return __sdb
    __sdb = SampleDB()
    return __sdb

