'''
Created on Aug 21, 2009

@author: mkiyer
'''

from sqlalchemy import create_engine, MetaData, Table, Column, \
    String, Integer, Numeric, Date
from sqlalchemy.sql import distinct, select, outerjoin, exists, and_, or_, not_

import operator
import glob
import datetime
import os
import sys
import logging
import collections
import re

# sequence file formats where '%d' will be substituted with the lane id 
# and '?' is a wildcard character to match any single character
paired_seq_glob_str = 's_%d_?_sequence.txt'
single_seq_glob_str = 's_%d_sequence.txt'

# regexp for finding mate pair # in paired end files
# NOTE: '.' is a regex wildcard here, so any single lane character matches
# and group(1) captures the mate number from names like 's_3_1_sequence.txt'
pair_re = re.compile(r's_._(.)_sequence.txt')

# map the software versions to the appropriate bowtie
# quality scores parameter to use
# (keys are SW_VERSION strings as they appear in sampleDB; per this table
# the GAPipeline 1.3.x/1.4.x versions use 'solexa1.3-quals', everything
# older uses 'solexa-quals')
bowtie_quals = {'/Pipeline/GAPipeline-0.3.0.1/Goat/../Gerald': 'solexa-quals', 
                '/Pipeline/SolexaPipeline-0.2.2.6/Goat/../Gerald': 'solexa-quals',
                '/Pipeline/GAPipeline-1.1rc1/bin': 'solexa-quals',
                '/Pipeline/SolexaPipeline-0.2.2.6/Gerald': 'solexa-quals',
                '/Pipeline/GAPipeline-0.3.0/Goat/../Gerald': 'solexa-quals',
                '/Pipeline/GAPipeline-0.3.0.1/Gerald': 'solexa-quals',
                '/Pipeline/GAPipeline-0.3.0/Gerald': 'solexa-quals',
                '/Pipeline/GAPipeline-1.4.0/bin': 'solexa1.3-quals',
                '/Pipeline/GAPipeline-1.1rc1p4/bin': 'solexa-quals',
                '/Pipeline/GAPipeline-1.3.2/bin': 'solexa1.3-quals'
                }

# dict that tells us whether an analysis should have a
# paired end naming scheme or a single read naming scheme
# (keys are the ANALYSIS field values; only 'eland_pair' is paired-end)
is_paired_end_filename = {'default': False,
                          'eland': False,
                          'eland_rna': False,
                          'eland_extended': False,
                          'eland_pair': True,
                          'eland_tag': False}


def reflect_sample_report_main_view(meta):
    '''
    Build a sqlalchemy Table describing the read-only oracle view
    solexa_dp.sample_report_main (sample / lane / analysis / parameter
    rows used by SampleDB queries).

    meta: sqlalchemy MetaData object the Table is attached to
    returns: the Table object

    Column dump from the oracle data dictionary:
    "Column Name","Data Type","Nullable","Data Default","COLUMN ID","COMMENTS","INSERTABLE","UPDATABLE","DELETABLE"
    "SOLEXA_SAMPLE_ID","VARCHAR2(50)","No","","1","","NO","NO","NO"
    "SAMPLE_SOURCE_TYPE","VARCHAR2(50)","Yes","","2","","NO","NO","NO"
    "SAMPLE_SOURCE_TYPE_LOOKUP","NUMBER(10,0)","Yes","","3","","NO","NO","NO"
    "SAMPLE_SOURCE_ID","VARCHAR2(50)","No","","4","","NO","NO","NO"
    "SAMPLE_TYPE","VARCHAR2(50)","Yes","","5","","NO","NO","NO"
    "SAMPLE_TYPE_LOOKUP","NUMBER(10,0)","Yes","","6","","NO","NO","NO"
    "SAMPLE_DESC","VARCHAR2(255)","Yes","","7","","NO","NO","NO"
    "APP_TYPE","VARCHAR2(50)","Yes","","8","","NO","NO","NO"
    "APP_TYPE_LOOKUP","NUMBER(10,0)","Yes","","9","","NO","NO","NO"
    "SUB_DATE","VARCHAR2(19)","Yes","","10","","NO","NO","NO"
    "OWNER","VARCHAR2(50)","No","","11","","NO","NO","NO"
    "ND_CONC","NUMBER(10,2)","Yes","","12","","NO","NO","NO"
    "COMMENTS","VARCHAR2(3000)","Yes","","13","","NO","NO","NO"
    "SAMPLE_NAME","VARCHAR2(100)","No","","14","","NO","NO","NO"
    "EXP_DESIGN","VARCHAR2(50)","Yes","","15","","NO","NO","NO"
    "EXP_DESIGN_LOOKUP","NUMBER(10,0)","Yes","","16","","NO","NO","NO"
    "TISSUE_TYPE","VARCHAR2(50)","Yes","","17","","NO","NO","NO"
    "TISSUE_TYPE_LOOKUP","NUMBER(10,0)","Yes","","18","","NO","NO","NO"
    "TECH_TYPE","VARCHAR2(50)","Yes","","19","","NO","NO","NO"
    "TECH_TYPE_LOOKUP","NUMBER(10,0)","Yes","","20","","NO","NO","NO"
    "SAMPLE_STATUS","VARCHAR2(50)","Yes","","21","","NO","NO","NO"
    "SAMPLE_STATUS_LOOKUP","NUMBER(10,0)","Yes","","22","","NO","NO","NO"
    "PRE_ANALYSIS_ID","VARCHAR2(50)","Yes","","23","","NO","NO","NO"
    "RUN_DATE","DATE","Yes","","24","","NO","NO","NO"
    "GA","VARCHAR2(50)","Yes","","25","","NO","NO","NO"
    "FLOWCELL","VARCHAR2(50)","Yes","","26","","NO","NO","NO"
    "LANE_ID","NUMBER(10,0)","Yes","","27","","NO","NO","NO"
    "ANALYSIS_DATE","DATE","Yes","","28","","NO","NO","NO"
    "SW_VERSION","VARCHAR2(500)","Yes","","29","","NO","NO","NO"
    "DATA_PATH","VARCHAR2(2000)","Yes","","30","","NO","NO","NO"
    "ANALYSIS","VARCHAR2(100)","Yes","","31","","NO","NO","NO"
    "USE_BASE","VARCHAR2(2000)","Yes","","32","","NO","NO","NO"
    "READ_LENGTH","NUMBER(10,0)","Yes","","33","","NO","NO","NO"
    "SEED_LENGTH","NUMBER(10,0)","Yes","","34","","NO","NO","NO"
    "MAX_MATCH","NUMBER(10,0)","Yes","","35","","NO","NO","NO"
    "ELAND_GENOME","VARCHAR2(2000)","Yes","","36","","NO","NO","NO"
    "GENOME_CONTAM","VARCHAR2(2000)","Yes","","37","","NO","NO","NO"
    "GENOME_SPLICE","VARCHAR2(2000)","Yes","","38","","NO","NO","NO"
    "CLUSTER_RAW","NUMBER(10,0)","Yes","","39","","NO","NO","NO"
    "CLUSTER_PF","NUMBER(10,0)","Yes","","40","","NO","NO","NO"
    "ALIGN_PF","NUMBER(10,0)","Yes","","41","","NO","NO","NO"
    "ERR_RATE_PF","NUMBER(10,0)","Yes","","42","","NO","NO","NO"
    "SAMPLE_ID","VARCHAR2(25)","Yes","","43","","NO","NO","NO"
    "PARAM_ID","VARCHAR2(25)","Yes","","44","","NO","NO","NO"
    "PARAM_ORI_NAME","VARCHAR2(100)","Yes","","45","","NO","NO","NO"
    "PARAM_ORI_VALUE","VARCHAR2(100)","Yes","","46","","NO","NO","NO"
    '''
    # column list mirrors the view dump above exactly; columns without an
    # explicit nullable flag use the sqlalchemy default (nullable=True)
    mytable = Table('sample_report_main', meta,
                    Column('solexa_sample_id', String(50), nullable=False),
                    Column('sample_source_type', String(50), nullable=True),
                    Column('sample_source_type_lookup', Numeric(10,0), nullable=True),
                    Column('sample_source_id', String(50), nullable=False),
                    Column('sample_type', String(50), nullable=True),
                    Column('sample_type_lookup', Numeric(10,0), nullable=True),
                    Column('sample_desc', String(255), nullable=True),
                    Column('app_type', String(50), nullable=True),
                    Column('app_type_lookup', Numeric(10,0), nullable=True),
                    Column('sub_date', String(19), nullable=True),
                    Column('owner', String(50), nullable=False),
                    Column('nd_conc', Numeric(10,2), nullable=True),
                    Column('comments', String(3000), nullable=True),
                    Column('sample_name', String(100), nullable=False),
                    Column('exp_design', String(50), nullable=True),
                    Column('exp_design_lookup', Numeric(10,0), nullable=True),
                    Column('tissue_type', String(50), nullable=True),
                    Column('tissue_type_lookup', Numeric(10,0), nullable=True),
                    Column('tech_type', String(50), nullable=True),
                    Column('tech_type_lookup', Numeric(10,0), nullable=True),
                    Column('sample_status', String(50), nullable=True),
                    Column('sample_status_lookup', Numeric(10,0), nullable=True),
                    Column('pre_analysis_id', String(50), nullable=True),
                    Column('run_date', Date, nullable=True),
                    Column('ga', String(50), nullable=True),
                    Column('flowcell', String(50), nullable=True),
                    Column('lane_id', Numeric(10,0), nullable=True),
                    Column('analysis_date', Date, nullable=True),
                    Column('sw_version', String(500)),
                    Column('data_path', String(2000)),
                    Column('analysis', String(100)),
                    Column('use_base', String(2000)),
                    Column('read_length', Numeric(10,0)),
                    Column('seed_length', Numeric(10,0)),
                    Column('max_match', Numeric(10,0)),
                    Column('eland_genome', String(2000)),
                    Column('genome_contam', String(2000)),
                    Column('genome_splice', String(2000)),
                    Column('cluster_raw', Numeric(10,0)),
                    Column('cluster_pf', Numeric(10,0)),
                    Column('align_pf', Numeric(10,0)),
                    Column('err_rate_pf', Numeric(10,0)),
                    Column('sample_id', String(25)),
                    Column('param_id', String(25)),
                    Column('param_ori_name', String(100)),
                    Column('param_ori_value', String(100)),
                    schema='solexa_dp')
    return mytable


def reflect_sample_peaks_main_view(meta):
    '''
    Build a sqlalchemy Table describing the read-only oracle view
    solexa_dp.sample_peaks_main (bioanalyzer peak data).

    meta: sqlalchemy MetaData object the Table is attached to
    returns: the Table object

    Oracle view columns (name, type, nullable):
      BA_ID VARCHAR2(50) NOT NULL; SAMPLE_ID VARCHAR2(1000) NOT NULL;
      PEAK_SIZE NUMBER(10,0) NOT NULL; PEAK_CONC NUMBER(10,2) NOT NULL;
      MOLARITY NUMBER(10,2) NOT NULL; OBSERVATIONS VARCHAR2(1000) NULL;
      AREA NUMBER(10,2) NOT NULL; AM_TIME NUMBER(10,2) NOT NULL;
      PEAK_HEIGHT NUMBER(10,2) NOT NULL; PEAK_WIDTH NUMBER(10,2) NOT NULL;
      PERCENTAGE_TOTAL NUMBER(10,2) NOT NULL;
      TIME_CORRECTED NUMBER(10,2) NOT NULL; PEAK_STATUS VARCHAR2(50) NOT NULL;
      BA_SAMPLE_PEAK_ID VARCHAR2(50) NOT NULL; QC_STATUS VARCHAR2(10) NULL;
      COMMENTS VARCHAR2(3000) NULL; POS_COUNT VARCHAR2(3) NULL;
      APP_TYPE VARCHAR2(50) NOT NULL
    '''
    # (column name, sqlalchemy type, nullable) for each column of the view,
    # in the view's column order
    column_spec = [
        ('ba_id', String(50), False),
        ('sample_id', String(1000), False),
        ('peak_size', Numeric(10, 0), False),
        ('peak_conc', Numeric(10, 2), False),
        ('molarity', Numeric(10, 2), False),
        ('observations', String(1000), True),
        ('area', Numeric(10, 2), False),
        ('am_time', Numeric(10, 2), False),
        ('peak_height', Numeric(10, 2), False),
        ('peak_width', Numeric(10, 2), False),
        ('percentage_total', Numeric(10, 2), False),
        ('time_corrected', Numeric(10, 2), False),
        ('peak_status', String(50), False),
        ('ba_sample_peak_id', String(50), False),
        ('qc_status', String(10), True),
        ('comments', String(3000), True),
        ('pos_count', String(3), True),
        ('app_type', String(50), False),
    ]
    columns = [Column(name, coltype, nullable=nullable)
               for (name, coltype, nullable) in column_spec]
    return Table('sample_peaks_main', meta, *columns, schema='solexa_dp')



# some of the data paths in sample db are wrong and need to be fixed
# for now, get around this by hacking the correct paths in the dictionary
# below and remove this when the database is corrected
# (maps wrong data_path as stored in sampleDB -> actual path on disk;
# consulted by find_fastq_files before searching)
hacked_data_paths = {
    '/archive04/090115_PATHBIO-SOLEXA2_30JD2AAXX_B/Data/IPAR_1.01/Bustard1.9.6_24-01-2009_root/GERALD_24-01-2009_root': '/archive04/090115_PATHBIO-SOLEXA2_30JD2AAXX/Data/IPAR_1.01/Bustard1.9.6_24-01-2009_root/GERALD_24-01-2009_root',
}
# extra archive roots that do not match the '/archive*' glob used by
# find_fastq_files but should still be searched
hacked_archives = ['/data2']

def find_fastq_files(data_path, file_pattern):
    '''
    Search for sequence files matching file_pattern under data_path.

    The search falls back in order: the path itself, its 'Temp'
    subdirectory, then the same relative path under every /archive*
    root (plus the hacked_archives roots), with and without 'Temp',
    because runs have been moved between archive volumes over time.

    data_path: directory recorded in sampleDB (corrected via
               hacked_data_paths when known to be wrong)
    file_pattern: glob pattern for the sequence file names
    returns: a (possibly empty) list of matching file paths
    '''
    # make sure the path is rooted (some db entries lack the leading sep)
    if not data_path.startswith(os.path.sep):
        data_path = os.path.join(os.path.sep, data_path)

    # some of the data paths in sample db are flat out wrong.  a dict
    # of corrections is maintained and used as a hack to get to the 
    # correct data paths in the interim while sample db is fixed
    if data_path in hacked_data_paths:
        logging.warning("Using hacked data path %s instead of %s" % (hacked_data_paths[data_path], data_path))
        data_path = hacked_data_paths[data_path]

    # first attempt: the recorded path itself
    fastq_files = glob.glob(os.path.join(data_path, file_pattern))

    # second attempt: some sequences are located in a /Temp dir
    if not fastq_files:
        fastq_files = glob.glob(os.path.join(data_path, 'Temp', file_pattern))

    # last resort: scan every archive root for the same relative path
    if not fastq_files:
        archives = glob.glob(os.path.join(os.path.sep, "archive*"))
        # add archives that are not part of the typical name
        archives.extend(hacked_archives)
        # strip the leading '/<archive>' component from the original path
        rel_path = os.path.sep.join(data_path.split(os.path.sep)[2:])

        for archive in archives:
            # try the plain path first, then with /Temp appended
            for candidate in (os.path.join(archive, rel_path),
                              os.path.join(archive, rel_path, 'Temp')):
                fastq_files = glob.glob(os.path.join(candidate, file_pattern))
                if fastq_files:
                    logging.error("Expected data_path %s, but found at %s" %
                                  (data_path, candidate))
                    break
            if fastq_files:
                break
    return fastq_files

def make_file_prefix(flowcell, lane, analysis_date, data_path, pair=0):
    '''
    Build a file name prefix identifying one lane's sequence files.

    flowcell: flowcell name
    lane: integer lane id
    analysis_date: analysis date (formatted with %s)
    data_path: analysis directory; its extension (text after the last
               '.' in the basename) disambiguates multiple analyses of
               the same flowcell/lane, defaulting to '1' when absent
    pair: mate number for paired-end data; 0 means single-read and
          omits the pair field from the prefix
    returns: string like '<flowcell>_<lane>[_<pair>]_<date>_<ext>'
    '''
    ext = os.path.splitext(os.path.basename(data_path))[1]
    # strip the leading '.' from the extension; '1' when there is none
    path_ext = ext[1:] if ext else '1'
    if pair > 0:
        return '%s_%d_%d_%s_%s' % (flowcell, lane, pair, analysis_date, path_ext)
    return '%s_%d_%s_%s' % (flowcell, lane, analysis_date, path_ext)

class Sample:
    '''
    plain container for per-sample metadata collected from sampleDB

    attributes:
      name         - sample name (None until set)
      params       - dict of parameter name -> value
      lanes        - set of (flowcell, lane_id) tuples
      app_type     - application type field
      sample_type  - sample type field
      tissue_type  - tissue type field
      sample_ids   - list of sample id strings
    '''

    def __init__(self):
        self.name = None
        self.app_type = None
        self.sample_type = None
        self.tissue_type = None
        self.params = {}
        self.lanes = set([])
        self.sample_ids = []

    def __repr__(self):
        return str(self)

    def __str__(self):
        fmt = 'Sample(name=%s, sample_type=%s, tissue_type=%s lanes=%s params=%s)'
        return fmt % (self.name, self.sample_type, self.tissue_type,
                      self.lanes, self.params)

# module-level singleton SampleDB connection (lazily created)
__sdb = None
def get_sampledb():
    '''return the shared SampleDB instance, constructing it on first use'''
    global __sdb
    if __sdb is not None:
        return __sdb
    __sdb = SampleDB()
    return __sdb

class SampleDB(object):
    '''
    class for connecting to and retrieving data from sampleDB tables

    opens a read-only connection to the oracle sample database on
    construction, reflects the two views used by this module, and
    caches the per-sample parameter dictionaries
    '''
    def __init__(self, echo=False):
        '''
        creates an interface to sampleDB

        echo: if True, sqlalchemy echoes every SQL statement it issues
        '''
        # read-only account on the oracle sample database
        user = 'solexa_dp_ro'
        password = 'readonly'
        host = 'pathbio-db1'
        port = 1521
        service_name = 'O9DB1'
        # oracle connection URL: oracle://user:password@host:port/service
        oracle_db = create_engine('oracle://%s:%s@%s:%d/%s' %
                                  (user, password, host, port, service_name),
                                  echo=echo)
        meta = MetaData()
        meta.bind = oracle_db
        self.engine = oracle_db
        self.meta = meta
        # reflect the views used by the queries below
        self.sample_table = reflect_sample_report_main_view(meta)
        self.bioanalyzer_table = reflect_sample_peaks_main_view(meta)
        # load sample parameters once up front
        self._params, self._param_schema = self.get_sample_parameters()

    @property
    def params(self):
        '''dict mapping oncoseq sample name -> dict of sample parameters'''
        return self._params

    @property
    def param_schema(self):
        '''dict mapping parameter name -> set of all observed values'''
        return self._param_schema

    def _find_fastq_from_query_results(self, res):
        '''
        return the list of fastq files on disk for one sample_report_main
        row, using the paired-end or single-read file naming scheme
        implied by the row's analysis field

        raises KeyError if the analysis field is not recognized
        '''
        # determine the file pattern of the analysis based on
        # single-read vs. paired-end
        if res.analysis not in is_paired_end_filename:
            logging.error('analysis field not recognized in sample %s: %s' % 
                          (res.solexa_sample_id, res.analysis))
            raise KeyError(res.analysis)
        elif is_paired_end_filename[res.analysis]:
            glob_str = paired_seq_glob_str % res.lane_id
        else:
            glob_str = single_seq_glob_str % res.lane_id
        # search for files
        return find_fastq_files(res.data_path, glob_str)

    def find_fastq(self, flowcell, lane, best=True):
        '''
        returns the best scoring set of fastq files based on # of clusters
        and alignment # in sampledb

        if best is False, returns the full list of
        (fastq_files, score) tuples instead of just the best set

        NOTE: raises IndexError when best is True and no analysis of
        this flowcell/lane could be located on disk
        '''
        table = self.sample_table
        stmt = select([table],
                      and_(table.c.flowcell.like(flowcell),
                           table.c.lane_id.like(lane),
                           table.c.data_path != None))
        stmt = stmt.distinct()
        matches = []
        for res in stmt.execute():
            # search for the fastq files
            fastq_files = self._find_fastq_from_query_results(res)
            if len(fastq_files) == 0:
                logging.error("Flowcell: %s lane: %d data_path: %s "
                              "could not be found on any of the /archive "
                              "directories.. skipping this sample..." %
                              (res.flowcell, res.lane_id, res.data_path))
                continue
            # compute score - for now multiply cluster_pf by alignment %
            score = int(res.cluster_pf) * float(res.align_pf)
            # store match
            matches.append((fastq_files, score))
        if best is False:
            return matches
        # sort to determine the best scoring set of sequences
        matches = sorted(matches, key=operator.itemgetter(1), reverse=True)
        # return the best match for now
        return matches[0][0]

    def _build_analysis_select(self, where_clauses):
        '''
        build the DISTINCT select shared by get_analyses and
        get_analyses_w_insert_size: rows with a flowcell and data path
        that are not PhiX controls, filtered by the column -> value
        pairs in where_clauses

        raises ValueError if a where_clauses key is not a column of the
        sample table
        '''
        table = self.sample_table
        # NOTE: '!= None' on the columns is intentional - sqlalchemy
        # overloads it to generate 'IS NOT NULL'
        stmt = select([table],
                      and_(table.c.flowcell != None,
                           table.c.data_path != None,
                           table.c.tissue_type != "PhiX Control"))
        for col, value in where_clauses.iteritems():
            if col in table.c:
                stmt = stmt.where(table.c[col] == value)
            else:
                logging.error('table has no column %s' % col)
                raise ValueError('table has no column %s' % col)
        return stmt.distinct()

    def get_analyses(self, where_clauses):
        '''
        return a sorted list of unique (flowcell, lane_id) tuples for
        analyses matching the column -> value filters in where_clauses
        '''
        stmt = self._build_analysis_select(where_clauses)
        analyses = set([])
        for res in stmt.execute():
            analyses.add((str(res.flowcell), int(res.lane_id)))
        return sorted(analyses)

    def get_analyses_w_insert_size(self, where_clauses):
        '''
        same as get_analyses, but returns (flowcell, lane_id,
        estimated_fragment_length) tuples
        '''
        stmt = self._build_analysis_select(where_clauses)
        analyses = set([])
        for res in stmt.execute():
            analyses.add((str(res.flowcell), int(res.lane_id), 
                          int(self.get_estimated_fragment_length(res.solexa_sample_id))))
        return sorted(analyses)

    def get_samples(self):
        '''
        return a defaultdict mapping sample_id -> Sample object for
        every non-PhiX sample with a flowcell and data path
        '''
        # get the insert size lookup table first
        insert_sizes = self.get_sample_insert_sizes()
        # make a sample dictionary
        raw_samples = collections.defaultdict(lambda: Sample())
        # organize samples by sample id field
        table = self.sample_table
        stmt = select([table],
                      and_(table.c.flowcell != None,
                           table.c.data_path != None,
                           table.c.tissue_type != "PhiX Control"))
        for res in stmt.execute():
            s = raw_samples[res.sample_id]
            s.sample_id = res.sample_id
            s.app_type = res.app_type
            s.sample_type = res.sample_type
            s.tissue_type = res.tissue_type
            s.lanes.add((res.flowcell.strip(), res.lane_id))
            s.sample_ids.append(res.sample_id)
            if res.param_ori_name is not None:
                s.params[res.param_ori_name] = res.param_ori_value
            s.insert_size = insert_sizes[res.sample_id]
        return raw_samples

    def get_sample_parameters(self):
        '''
        return (sample_params, param_schema): sample_params maps oncoseq
        sample name -> dict of parameters, param_schema maps parameter
        name -> set of all observed values

        samples without an "Oncoseq Sample Name" parameter are skipped
        '''
        raw_samples = self.get_samples()
        # organize samples according to oncoseq sample name
        # TODO: this is dirty.. objects should not keep dictionaries 
        # of their attributes.. these should become attributes too..
        # in fact we should not be creating this separate set of 
        # samples but need to do so due to lack of appropriate 
        # sample tissue id field
        sample_params = collections.defaultdict(lambda: {})
        param_schema = collections.defaultdict(lambda: set([]))
        for s in raw_samples.itervalues():
            # skip samples without an oncoseq name
            if "Oncoseq Sample Name" not in s.params:
                continue
            # (renamed from 'os' which shadowed the os module)
            osamp = sample_params[s.params["Oncoseq Sample Name"]]
            osamp['name'] = s.params["Oncoseq Sample Name"]
            if 'lanes' not in osamp:
                osamp['lanes'] = set([])
            osamp['lanes'].update(s.lanes)
            if 'ids' not in osamp:
                osamp['ids'] = []
            osamp['ids'].append(s.sample_id)
            osamp['app_type'] = s.app_type
            osamp['tissue_type'] = s.tissue_type
            osamp['sample_type'] = s.sample_type
            osamp['insert_size'] = s.insert_size
            for k, v in s.params.iteritems():
                # normalize parameter names: lowercase, underscores
                k = '_'.join(k.lower().split())
                if k in osamp and v != osamp[k]:
                    logging.error('sample parameter inconsistency: sample %s name %s param %s (%s != %s)' %
                                  (s.sample_id, osamp['name'], k, osamp[k], v))
                osamp[k] = v
                # keep track of all available values for parameters
                param_schema[k].add(v)
            # keep track of all available values for parameters
            param_schema['app_type'].add(s.app_type)
            param_schema['tissue_type'].add(s.tissue_type)
            param_schema['sample_type'].add(s.sample_type)
        return sample_params, param_schema

    def get_sample_insert_sizes(self):
        '''
        return a defaultdict mapping sample_id -> estimated insert size
        (0 when unknown) derived from the bioanalyzer peak data
        '''
        table = self.bioanalyzer_table
        stmt = select([table])
        percents = collections.defaultdict(lambda: 0)
        insert_sizes = collections.defaultdict(lambda: 0)
        # keep the largest peak size among the highest-percentage peaks
        for res in stmt.execute():
            if res.percentage_total >= percents[res.sample_id]:
                if res.peak_size > insert_sizes[res.sample_id]:
                    insert_sizes[res.sample_id] = res.peak_size
                percents[res.sample_id] = res.percentage_total
        del percents
        # TODO: this is a poor man's way of determining the insert size
        # the ideal way would be to statistically profile based on 
        # paired-end mappings
        for sample_id in insert_sizes.iterkeys():
            # approximate insert size as fragment length - 2*50
            insert_sizes[sample_id] = max(0, insert_sizes[sample_id] - 100)
        return insert_sizes

    def get_estimated_fragment_length(self, sample_id):
        '''
        Look at the bioanalyzer results to determine the approximate fragment
        length of the library

        returns 0 when sample_id is None or no peaks were recorded
        '''
        if sample_id is None:
            return 0
        table = self.bioanalyzer_table
        stmt = select([table],
                      table.c.sample_id == sample_id)
        best_length = 0
        best_percent_total = 0
        for res in stmt.execute():
            if res.percentage_total >= best_percent_total:
                if res.peak_size > best_length:
                    best_length = res.peak_size
                best_percent_total = res.percentage_total
        # was a stray debug 'print' statement; log instead of writing
        # to stdout
        logging.debug('estimated fragment length for sample %s: %s' %
                      (sample_id, best_length))
        return best_length

    def create_analysis_symlinks(self, root_path):
        '''
        create symbolic links to the data path for every analysis in sampleDB

        two kinds of links are made under root_path: one per sequence
        file using the full flowcell/lane/date prefix, plus a
        '<flowcell>_<lane>[_<pair>]_best_*' link pointing at the
        highest-scoring analysis of each lane; a bowtie quality-score
        parameter file accompanies each sequence link
        '''
        # keep track of best scoring lanes:
        # (flowcell, lane, pair) -> (score, file prefix)
        best_lanes = collections.defaultdict(lambda: (0, 'jomama'))

        # statistics
        # TODO: phix_control_lanes is never incremented (PhiX lanes are
        # filtered out in the query below) so it always logs as 0
        phix_control_lanes = 0
        flowcell_lanes_skipped = set([])
        symlinks_created = 0

        # ignore "PhiX Control" lanes
        # only look at the samples with analysis
        table = self.sample_table
        stmt = select([table.c.solexa_sample_id,
                       table.c.tissue_type,
                       table.c.flowcell,
                       table.c.lane_id,
                       table.c.analysis_date,
                       table.c.data_path,
                       table.c.analysis,
                       table.c.sw_version,
                       table.c.cluster_pf,
                       table.c.align_pf],
                      and_(table.c.flowcell != None,
                           table.c.data_path != None,
                           table.c.tissue_type != "PhiX Control"))
        stmt = stmt.distinct()

        for res in stmt.execute():
            fastq_files = self._find_fastq_from_query_results(res)
            if len(fastq_files) == 0:
                logging.error("Flowcell: %s lane: %d data_path: %s "
                              "could not be found on any of the /archive "
                              "directories.. skipping this sample..." %
                              (res.flowcell, res.lane_id, res.data_path))
                flowcell_lanes_skipped.add((res.flowcell, res.lane_id))
                continue

            # compute score - for now multiply cluster_pf by alignment %
            score = int(res.cluster_pf) * float(res.align_pf)

            for f in fastq_files:
                # paired-end file names carry the mate number
                if is_paired_end_filename[res.analysis]:
                    pair = int(pair_re.match(os.path.basename(f)).group(1))
                else:
                    pair = 0

                fileprefix = make_file_prefix(res.flowcell,
                                              res.lane_id,
                                              res.analysis_date,
                                              res.data_path,
                                              pair)
                # create symlinks to these sequence files
                symlinkpath = os.path.join(root_path, 
                                           fileprefix + '_sequence.txt')
                if not os.path.exists(symlinkpath):
                    logging.debug("creating symlink %s -> %s" % (f, symlinkpath))
                    os.symlink(f, symlinkpath)
                    # stats
                    symlinks_created += 1

                # create a file specifying bowtie quality score param to use
                qualpath = os.path.join(root_path,
                                        fileprefix + '_bowtiequals.txt')
                if not os.path.exists(qualpath):
                    logging.debug("creating bowtiequals file %s" % 
                                  bowtie_quals[res.sw_version])
                    qualf = open(qualpath, 'w')
                    qualf.write('%s' % bowtie_quals[res.sw_version])
                    qualf.close()

                # remember the best scoring analysis per
                # flowcell/lane/pair
                fc_lane_pair = (res.flowcell, res.lane_id, pair)
                if score >= best_lanes[fc_lane_pair][0]:
                    best_lanes[fc_lane_pair] = (score, fileprefix)
                    logging.debug('    choosing %s as best' % fileprefix)

        # symlinks to best lanes path
        dest_path = root_path
        # now make symlinks to the best scoring lanes
        for k, v in best_lanes.iteritems():
            flowcell, lane, pair = k
            score, prefix = v

            if pair > 0:
                tgt_prefix = '%s_%d_%d' % (flowcell, lane, pair)
            else:
                tgt_prefix = '%s_%d' % (flowcell, lane)

            seq_path = os.path.join(root_path,
                                    prefix + '_sequence.txt')
            quals_path = os.path.join(root_path,
                                      prefix + '_bowtiequals.txt')
            tgt_seq_path = os.path.join(dest_path, 
                                        tgt_prefix + '_best_sequence.txt')
            tgt_quals_path = os.path.join(dest_path,
                                          tgt_prefix + '_best_bowtiequals.txt')

            logging.debug('flowcell %s lane %d pair %d (%s) --> %s' %
                          (flowcell, lane, pair, tgt_seq_path, seq_path))
            os.symlink(seq_path, tgt_seq_path)
            os.symlink(quals_path, tgt_quals_path)

        logging.debug("phix control lanes: %d" % phix_control_lanes)
        logging.debug("# of flowcell/lanes skipped: %d" % len(flowcell_lanes_skipped))
        logging.debug("symlinks created: %d" % symlinks_created)
