#!/usr/bin/env python
"""
Feature Matrix Parser for TCGA data

Parses data from the Data Coordination Center (DCC) currently hosted at
https://tcga-data.nci.nih.gov into a feature matrix suitable for processing
through analysis tools such as our pairwise p-valueizer
(https://en.wikipedia.org/wiki/Pairwise_comparison) or RF-ACE
(https://code.google.com/p/rf-ace/). 

This script may be invoked from command line as a standalone tool.

For more information, see echoHelp() below.


Design notes:

* Global regular expressions are used in lieu of an LALR parser and
  context-free grammar to maximize expressiveness and ease of use, at the
  expense of performance during the initial matching. This is deliberate.

* This tool, despite being multiply-staged, is assumed to be used in a single
  pass against its input, resulting in a reusable, robust feature matrix
  suitable for post-processing. 

* Files that fail parsing repudiate themselves by blocking the feature matrix
  generation process from completing successfully. These can be filtered out by
  way of regular expression for now, but an explicit anti-regexp may be
  desirable at some point in the future for filtering, for example, all of the
  normal tissue samples from the matrix.

* The complete EBNF for feature identifiers, including data type, is: 

feature_identifier = data_type, ":", feature_type, ":", location, ":", context,
  ":", source, optional_note ; 
data_type = "B" | "C" | "N" ;
feature_type = "CLIN" | "CNVR" | "GEXP" | "GNAB" | "METH" | "MIRN" | "RPPA" |
  "SAMP"; 
location = ( "" | gene | "(", chr, ",", start, ",", stop, ",", strand, ")" ) ;
gene = { all_characters - ( ":" | "(" ) } ;
chr = "chr" , ( { digit } | "X" | "Y" | "M" ) ;
start = { digit } ;
stop = { digit } ; # Start MUST be <= stop, always
strand = "+" | "-" ;
context = { all_characters - ( ":" ) } ;
source = { all_characters - ( ":" ) } ;
build_identifier = { alphanumeric_characters } ;
optional_note = "" | ( ":", { all_characters - ( ":" ) } ) ;
all_characters = ? all visible characters ? ;
alphanumeric_characters = ? all letters and digits ? ;

TODOs:
* Support threading
* For jobs that eclipse system memory, expect an Apache Pig-backed version
* Intra-file averaging, inter-file flattening when timestamp reordering is set
* Generic getopt/ConfigParser support for overriding a config file

!! TODOs:
* Support more data files (input feature matrices; Brady's MAF file)
* Add support for sorting selected files by creation or update time,
   allowing multiple files with the same data to be overridden by the newest
   data points
* Add command-line switch to enable said sort operation, with fmatrix averaging
   turned off
* Add support for parsing feature matrix files using our defined output
   pseudo-mimetype and ColumnarKVGenerator
* Add isClinData(...), isCopyNumberData(...), to unify these definitions
   sprinkling the code
* Variable-length scoping, vis TCGA barcode merges. For now, we keep everything
(except clinical data) in separate data rows


Author: Tom Robinson <trobinson@systemsbiology.org>
Initial Revision: December 19th, 2011

"""

## Imports

import ConfigParser
import csv
#import logging
import os
import re
import string
import sys
from datetime import datetime
from random import shuffle
from xml.etree import cElementTree

# From https://code.google.com/p/golem/source/browse/src/python
import fake_golemize
import golemize

# These should be included as part of this package:
from fmatrix import *
from ftranslator import *
from kvparser import *
from sdrf import *


## Credit documentation

__author__ = "Tom Robinson"
__copyright__ = "Institute for Systems Biology, 2011"
__credits__ = ["Tom Robinson"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Tom Robinson"
__email__ = "trobinson@systemsbiology.org"
__status__ = "Production"
__platform__ = "Multi-OS, Python 2.6+"


## Constants

# Key connector for composite key data (joins the pieces of a feature
#  identifier, e.g. "GEXP:TP53:...")
CONNECTOR=':'

# Error codes
ERRCO_SUCCESS = 0               # Successful return
ERRCO_PRINTHELP = 1             # Help was printed, for some reason
ERRCO_UNDEFINED = 0xffffffff    # Undefined error with an expressive sentinel

# Canonical form for feature identifiers, sans the data type, which is inferred
#  and part of the feature matrix parsing code
# No optional note information should be included in the output of this tool
#  Shape enforced: FTYPE:location:context:source, where location may be empty
#  (matches the EBNF in the module docstring)
FEATURE_CANONICAL_REGEXP = r"^[A-Z]{4}:(|[^:]+):[^:]+:[^:]+$"

# Regexp for TCGA patient barcodes
#  Lowercase only: callers are expected to lowercase their input first
#  (see the re.findall usage in doStep4)
PATIENT_BARCODE_RE = r"(tcga-[0-9a-z]{2}-[0-9a-z]{4})"

# Pseudo-mimetypes, by {type : ext, ..}
#  Consumed by identifyAs(...) to classify files by filename suffix
PSEUDO_MIMETYPES = {
    'SDRF': ['.sdrf.txt'],
    'TSV': ['.tsv'],
    'XML': ['.xml'],
    'TXT': ['.txt']
}


## Mutable Globals

# List of matched files, by regular expression, before filtering
gLMatchedFiles = []

# Logger (for lazy logging)
#  We implement our own small version here, because the logger class cannot be
#  serialized for parallel operation via Golemize, due to an unpicklable
#  attribute lookup on thread.lock.
#  (See: http://stackoverflow.com/questions/3375443/cant-pickle-loggers)
class Logger(object):
    def __init__(self):
        pass
    def info(self,msg):
        print datetime.now(), "INFO", msg
    def warn(self,msg):
        print datetime.now(), "WARN", msg
    def error(self,msg):
        print datetime.now(), "ERROR", msg

# Module-level logger instance; see the Logger class above for why the
#  stdlib logging module is not used here (pickling for parallel workers)
logger = Logger()
#logger = logging.getLogger('fmParse')
#handler = logging.StreamHandler(sys.stdout)
#formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
#handler.setFormatter(formatter)
#logger.addHandler(handler)

# Set a permissive log level; this will !! be configurable later
#logger.setLevel(logging.INFO)

## Class Definitions

class CopyNumberParser(object):
    """ Parse copy number segment data into a comparable representation,
    based on fixed tiling or a dictionary of breakpoints

    Consumes keys of the form source:chromosome:start:end:dataType (e.g.
    Genome Wide SNP "seg.mean"/"num.mark" records) and re-emits binned
    key-value pairs via the iterator protocol.

    See __init__ for more details

    """

    # NOTE(review): these are CLASS-level attributes. __n and __keys are
    #  rebound per instance on first use, but __segments and __denom are
    #  only ever mutated in place and never rebound, so they are shared by
    #  every CopyNumberParser in this process. Confirm only one instance is
    #  ever live at a time, or initialize these inside __init__.
    __n = 0             ## The current position in the results array ##
    __logger = None     ## The logger paired to this object ##
    __segments = {}     ## The segment data for this parser ##
    __denom = {}        ## The rolling denominator for these same data ##
    __keys = None       ## Cached keys from our segments dictionary ##

    def __init__(self,generator,binWidth=1000,partitionDict=None,logger=None):
        """ Create a new instance of this class

         Notes: 

         * The partition dictionary will be used for bin coalesence if
            given. Note that this dictionary must be comprehensive in order for
            initialization to complete successfully. If it isn't, the initial
            creation of this object will loudly and violently explode.

         * This class is, for the most part, implementation locked to Genome
           Wide SNP data. Additional logic will be necessary to make it
           generic.  

        Parameters
        ----------
        generator : Python generator
            ColumnarKVGenerator or equivalent that returns key-value pairs

        binWidth : int
            Width of each bin during the aliasing stage of this parser

        partitionDict : dictionary
            Optional dictionary of partitions to coalesce the created bins into 
       
        logger : logger
            Optional logger for verbose informational messages

        Return Value
        ------------
        CopyNumberParser
            A new instance of this class, configured with the given params

        Raises
        ------
        ValueError
            If binWidth is not a positive integer, if an input key does not
            look like CNVR data, or if partitionDict is non-comprehensive

        Examples
        --------
        cnp = CopyNumberParser(myGen)         # A new instance is created with 
                                              #  otherwise default parameters
                                              #  wrapping the given reader

        """
        if not isinstance(binWidth,int) or binWidth <= 0:
            raise ValueError( \
              "binWidth must be an integer value greater than zero")

        self.__logger=logger

        # Grab and cache all of the input from our generator, since we can't
        #  guarantee it'll appear in anything approaching sorted order
        for data in generator:
            # Expected key shape: source:chromosome:start:end:dataType
            #  NOTE(review): "[^-]+" does not exclude ':', so correct
            #  grouping relies on regex backtracking around the literal
            #  colons -- verify against keys whose fields contain colons
            matches = \
              re.match("([^-]+):([^-]+):([0-9]+):([0-9]+):([^-]+)",data[0])

            if matches:
                groups = matches.groups()
                
                # Grab the data from the groups matched above
                sSourceIdentifier = groups[0]   # (captured but unused below)
                sChromosome = self.normalize(groups[1])
                iLocStart = int(groups[2])
                iLocEnd = int(groups[3])
                sDataTypeIdentifier = groups[4]
                fContainedData = float(data[1])

                # Detect the data type, using it to infer any required
                #  transformations
                if sDataTypeIdentifier == "seg.mean":
                    # Treat this as a mean across the entire length of the
                    # sequence, by multiplying the value by the length
                    fContainedData *= (iLocEnd - iLocStart)

                elif sDataTypeIdentifier != "num.mark":
                    if self.__logger:
                        self.__logger.warn( \
                          "Identifier %s not recognized." % \
                          sDataTypeIdentifier \
                          + " Treating generically!")

                # Preserve unidirectionality: virtually reorder iLocStart,
                #  iLocEnd if the latter is greater than the former.
                if iLocStart > iLocEnd:
                    iLocStart,iLocEnd = iLocEnd,iLocStart
                    
                # Now, bin the data, assuming an even read probability across
                #  the entire length of the detected segment. In other words,
                #  take each subsegment, compute its contained length in each
                #  of our bins, and use that to extract a proportional amount
                #  from fContainedData.

                # Chain of responsibility: if the partitions are explicitly
                #  given, use those. If not and a binWidth is, use that. If
                #  neither are given, use the start and end locations instead.
                #partitions = [[iLocStart,iLocEnd,iLocEnd - iLocStart + 1]]
                partitions = self.binPartition(iLocStart,iLocEnd,binWidth) 
                if partitionDict:
                    # Extract only the applicable partitions for this
                    #  chromosome from the given partition dictionary

                    # PS: This will violently and deliberately explode if the
                    #  given partition dictionary isn't comprehensive.
                    #  (Raises KeyError when sChromosome is absent.)
                    candidatePartitions = dict([[i,0] for i in \
                                                partitionDict[sChromosome]])
                    
                    # The following invariant must hold true for this
                    #  algorithm to work correctly:
                    #
                    # * partitions is in sorted order
                    # * candidatePartitions is in sorted order
                    # * candidatePartitions is comprehensive, containing every
                    #    partition generated by binPartition, above
                    # * candidatePartitions and partitions exist in the same
                    #    pixelation frame, relative to binWidth, with alignment
                    #    at the edges of each segment
                    #
                    # The algorithm walks both lists simultaneously, aligning
                    #  each segment to its candidate. If the dictionary is
                    #  non-comprehensive, this algorithm will fail.
                    idx = 0
                    for candidate in sorted(candidatePartitions.keys()):
                        # Consume every generated partition contained within
                        #  this candidate's [start, end] span
                        while idx < len(partitions) and \
                          partitions[idx][0] >= candidate[0] and \
                          partitions[idx][1] <= candidate[1]:

                            if self.__logger:
                                self.__logger.info( \
                                  "Resegmenting along (chr%s,%d,%d)" % \
                                  (sChromosome, candidate[0], candidate[1]))

                            candidatePartitions[candidate] += partitions[idx][2]
                            idx += 1
                    
                    if idx != len(partitions):
                        raise ValueError( \
                          "Given partition dictionary was non-comprehensive." \
                          + " Please inspect the results for %s (%d)." % \
                          (str(partitions[idx]), idx))

                    # And now, repartition the data
                    partitions = []
                    for item in candidatePartitions.items():
                        partitions.append((item[0][0],item[0][1],item[1]))

                for part in partitions:
        
                    # Append the counts into our segments and, if necessary for
                    #  averaging, our rolling denominator as well.
                    #  Key shape: (chrN, start, stop, strand, dataType)
                    key = \
                      ('chr' + sChromosome, \
                        str(part[0]),str(part[1]),'+',sDataTypeIdentifier) 

                    if key in self.__segments:
                        self.__segments[key] += \
                          fContainedData * \
                            float(part[2])/(iLocEnd - iLocStart + 1)
                    else:
                        self.__segments[key] = \
                          fContainedData * \
                            float(part[2])/(iLocEnd - iLocStart + 1)

                    # seg.mean values get a rolling denominator so next()
                    #  can emit an average rather than a raw sum
                    if sDataTypeIdentifier == "seg.mean":
                        if key in self.__denom:
                            self.__denom[key] += part[2]
                        else:
                            self.__denom[key] = part[2]

            else:
                raise ValueError( \
                  "%s doesn't seem to be valid CNVR data" % \
                  str(data))

    def normalize(self,chrom):
        """ Normalize the given chromosome string by removing any extraneous
        identifiers, such as "chr", "chrom", and translating 23/24 to X/Y

        Parameters
        ----------
        chrom : string
            Chromosome identifier to normalize (e.g. "chr1", "23")

        Return Value
        ------------
        string
            Normalized representation of this chromosome's identifier, or
            None when chrom is empty/falsy (execution falls off the end)

        Examples
        --------
        assert(cnp.normalize('chr1') == '1')    # Normalize this chromosome

        """
        # NOTE(review): the .replace('chrom','') below can never fire: the
        #  preceding .replace('chr','') already rewrites "chrom" to "om",
        #  so e.g. "chrom1" normalizes to "OM1". The 'chrom' replacement
        #  likely needs to run before the 'chr' one -- confirm intent.
        if chrom:
            return chrom.lower()\
              .replace('hs','')\
              .replace('chr','')\
              .replace('chrom','')\
              .replace('23','X')\
              .replace('24','Y')\
              .upper()

    def binPartition(self,locStart,locEnd,binWidth):
        """ Generate partitions from a start position, end position, and the
        partition width, assuming no offset 

        In other words, a fixed-width aliasing partitioner.

        Parameters
        ----------
        locStart : int
            Start position of the segment to partition

        locEnd : int
            End position of the segment to partition

        binWidth : int
            Width of each output partition

        Return Value
        ------------
        list of list
            List of [binStart, binEnd, fill] partitions, along with their
            allocated fill from the input segment. These will usually be of
            amount binWidth, except for the first and last partitions.

        Examples
        --------
        cnp.binPartition(0,10000,1000)  # Should return ten partitions of 1k

        """

        # Hash the positions by binWidth. We'll be using the range of
        #  hash positions between these values to partition our reads
        #  and segment mean
        #  (Python 2 integer division: these are floor-divided bin indices)
        hashStart = locStart / binWidth
        hashEnd = locEnd / binWidth

        ret = []
        for idx in xrange(hashStart,hashEnd+1):
            # NOTE(review): both selector conditions compare a bin INDEX
            #  (hashStart/hashEnd) against a genomic COORDINATE
            #  (binWidth*idx), so the partial-bin branches do not trigger
            #  where presumably intended (compare with idx == hashStart /
            #  idx == hashEnd). E.g. the docstring example
            #  binPartition(0,10000,1000) yields a fill of 9000 for the
            #  second bin, not 1000. Confirm before relying on bin fills.
            bottom = \
              [0,(locStart - (binWidth*idx))][hashStart > binWidth*idx]
            top = \
              [binWidth,(locEnd - (binWidth*idx))][hashEnd < (binWidth+1)*idx]

            ret.append([(idx*binWidth),((idx+1)*binWidth),top-bottom])

        return ret

    def __iter__(self):
        """ Returns our iterator; namely, a pointer to self

        Parameters
        ----------
        None (other than the default, "self")

        Return Value
        ------------
        CopyNumberParser
            A pointer back to this class

        Examples
        --------
        assert(cnp == cnp.__iter__())   # Points to the same thing

        """
        return self

    def next(self):
        """ Returns the next element, if one exists

        (Python 2 iterator protocol; this would be __next__ in Python 3.)

        Parameters
        ----------
        None (other than the default, "self")

        Return Value
        ------------
        list of string
            The next element, in form [key, value], where key is the
            comma-joined (chrN,start,stop,strand,dataType) tuple

        Examples
        --------
        kv = cnp.next()     # Returns the next element, if it exists
                            #  Otherwise, raises StopIteration()

        """
        if self.__n == len(self.__segments):
            raise StopIteration()
        else:
            self.__n += 1
            # Lazily snapshot the key order on first call so iteration is
            #  stable for this instance
            if not self.__keys:
                self.__keys = self.__segments.keys()

            # Return the key with its target data
            key = self.__keys[self.__n-1]

            # Only use a denominator if it's given for this value
            #  (only seg.mean entries accumulate one; see __init__)
            denom = 1.0
            if key in self.__denom:
                denom = float(self.__denom[key])

            # Return the key, along with the contained segment data, if and
            #  only if the denominator exceeds zero.
            if denom > 0.0:
                return [string.join(key,','),self.__segments[key] / denom]
            else:
                return [string.join(key,','),self.__segments[key]]


    
## Function Definitions

def doStep3(sFilepath,dataTuple):
    """ Harvest feature names and sample identifiers from one data file.

    Step 3 of the pipeline, shaped as a golemize-compatible worker: scans
    every key-value pair extracted from sFilepath, validates feature names
    against FEATURE_CANONICAL_REGEXP, resolves each entry to a sample
    identifier (via the SDRF index, the filepath, or the feature name
    itself), strips embedded TCGA identifiers from feature names, and
    accumulates CNVR read counts separately for later resegmentation.

    Parameters
    ----------
    sFilepath : string
        Path of the data file to scan

    dataTuple : tuple
        Per-job parameters carrying the SDRF indexer used for
        filepath-to-identifier lookups (see NOTE below on unpacking)

    Return Value
    ------------
    list
        [stFeatures, stRawIdentifiers, dCnvrData, stCnvrFeatures]: the set
        of canonical feature names, the set of uppercased raw sample
        identifiers, the binned CNVR read counts keyed by genomic-coord
        tuple, and the set of CNVR (context, source) pairs. A dud of empty
        containers is returned when the filename cannot be determined.
    """

    # First, unpack our parameters from our data tuple
    #  NOTE(review): "(cSdrfIndexer) = dataTuple" is NOT a 1-tuple unpack;
    #  the parentheses are redundant, so cSdrfIndexer is bound to dataTuple
    #  itself. This only works if the caller passes the indexer directly
    #  rather than wrapped in a tuple (compare doStep4's genuine unpacking)
    #  -- confirm against the caller, or write "(cSdrfIndexer,) = dataTuple".
    (cSdrfIndexer) = dataTuple    

    # Inner barcodizer for raw lookups; we defer the web hit to our
    #  synchronized one, to synchronize its cache file
    innerLogger = Logger()
    innerBarcodizer = Barcodizer(logger=innerLogger) 

    # Then, establish our local variables
    stFeatures = set()
    stRawIdentifiers = set()
    dCnvrData = {}
    stCnvrFeatures = set()

    # And our dud return, when we bomb out early
    lDud = [set(),set(),{},set()]

    # ... and proceed to walk them.
    if True: # !!  (scaffolding guard retained; to be replaced by a real test)
        innerLogger.info('Extracting identifiers from ' + sFilepath)    

        # Based upon each class of file, parse out the salient data using a
        #  series of simplified rules. These rules boil down to: "create
        #  usable key-value pairs that uniquely identify the patient and
        #  uniquely identify the column identifier, followed by the data
        #  point. These will be collapsed into our merge file in the next step.

        # Grab the filename from this path. 
        #  If it cannot be found, return early.
        sFilename = getFilename(sFilepath)
        if not sFilename:
            return lDud

        # First, see if we can find it in our SDRF lookup table
        uniqueIdentifier = cSdrfIndexer.lookup(sFilepath)

        # Next, check to see if our file contains a valid unique identifier
        if not uniqueIdentifier:
            rawIdentifiers = innerBarcodizer.extractRawIdentifiersFromString(sFilepath)

            # Caveat: if the file is misidentified, throw a warning and ignore
            #  the rawIdentifier found from the filename.
            if len(rawIdentifiers) == 1:
                uniqueIdentifier = rawIdentifiers[0]
                innerLogger.info('Identified as %s by filepath' % uniqueIdentifier)
            elif len(rawIdentifiers) > 0:
                # More than one identifier in the path: ambiguous, so leave
                #  uniqueIdentifier unset and rely on per-entry resolution
                innerLogger.warn('Too many identifiers for file' \
                  + ' %s. Continuing happily...' % \
                  (sFilepath))

        kv = extractData(sFilepath)
        if kv:
            for elem in kv:
                if elem[0]:
                    # Enforce the canonical feature form up front; anything
                    #  else indicates an upstream parsing failure
                    if not re.match(FEATURE_CANONICAL_REGEXP,elem[0]):
                        raise ValueError( \
                          'Feature name was not in canonical form: '
                          + elem[0])
                    innerLogger.info('Processing key: ' + str(elem[0]))

                    # Finally, see if the unique identifier is in the key. If
                    #  we fail at this point, we warn and return early.
                    innerIdentifier = uniqueIdentifier

                    # Clinical features get an explicit passthrough, because
                    #  they lack SDRF files AND point to collections of sample
                    #  rawIdentifiers. They're handled separately at a later step.
                    if 'CLIN:' not in elem[0] or identifyAs('TSV',sFilename):
                        rawIdentifiers = \
                          innerBarcodizer.extractRawIdentifiersFromString(elem[0]) 
                        if not innerIdentifier:
                            if not rawIdentifiers:
                                innerLogger.warn('Could not find identifier for' \
                                  + ' %s : %s. Skipping this entry.' % \
                                  (sFilepath, elem[0]))    
                                continue
                            if len(rawIdentifiers) != 1:
                                innerLogger.warn('Too many identifiers for' \
                                  + ' %s : %s. Skipping this entry.' % \
                                  (sFilepath, elem[0]))
                                continue

                        # Any sample rawIdentifier that appears in the feature name
                        #  takes precedence over the original identification
                        #  occurring
                        if len(rawIdentifiers) == 1:
                            innerIdentifier = rawIdentifiers[0]
                            innerLogger.info('Identified as %s by feature name' % \
                              innerIdentifier) 

                        innerIdentifier_upper = innerIdentifier.upper()
                        stRawIdentifiers.add(innerIdentifier_upper)
                    else:
                        innerIdentifier = ''

                    # Normalize the feature by removing TCGA sample rawIdentifiers
                    #  from it; UUID removal isn't supported at this time
                    #  (both lower- and upper-case forms, with or without a
                    #  neighboring comma, are stripped)
                    rawIdentifiers = innerBarcodizer.extractRawIdentifiersFromString(elem[0]) 
                    for rawIdentifier in rawIdentifiers:
                        rawIdentifier_upper = rawIdentifier.upper()
                        elem[0] = elem[0] \
                          .replace(',' + rawIdentifier,'') \
                          .replace(rawIdentifier + ',','') \
                          .replace(rawIdentifier,'') \
                          .replace(',' + rawIdentifier_upper,'') \
                          .replace(rawIdentifier_upper + ',','') \
                          .replace(rawIdentifier_upper,'')
                            
                    # CNVR features are treated awkwardly, because they require
                    #  special handling using a clustering algorithm. So, we
                    #  load these up in a list for later sorting, using a
                    #  sort-and-sweep approach to partitioning the space in
                    #  linear space.
                    if 'CNVR:' in elem[0] and not identifyAs('TSV',sFilename):

                        # Extract only the genomic coordinates and use that for
                        #  binning our data. We impose no ordering at this
                        #  point, deferring that to the clustering algorithm.

                        # CNVR data must always contain genomic coordinates. If
                        #  this fails, it implies that assumption also fails. 
                        split = elem[0].split(':')

                        # Maintain read data only
                        if split[2].endswith('num.mark'):
                            stCnvrFeatures.add((split[2],split[3]))

                            # Unpack split[1], converting its coords to
                            #  integer values
                            split[1] = \
                              split[1].replace('(','').replace(')','').split(',')
                            split[1][1] = int(split[1][1])
                            split[1][2] = int(split[1][2])
                            split[1] = tuple(split[1])

                            # Note: a non-integer num.mark is only valid if we
                            #  receive input from an upstream feature
                            #  matrix. Thus, we float the reads before we
                            #  convert them to int, to support both cases.
                            if split[1] in dCnvrData:
                                dCnvrData[split[1]] += int(float(elem[1]))
                            else:
                                dCnvrData[split[1]] = int(float(elem[1]))

                    else:
                        innerLogger.info('Adding key: ' + str(elem[0]))
                        stFeatures.add(elem[0])
    return [stFeatures,stRawIdentifiers,dCnvrData,stCnvrFeatures]


def doStep4(sFilepath,dataTuple):
    """ Propagate data points from a single file into feature matrix rows.

    Step 4 of the pipeline, shaped as a golemize-compatible worker: walks
    every key-value pair extracted from sFilepath, resolves the sample each
    pair belongs to (via the SDRF index, the filepath, or the feature name
    itself), strips embedded TCGA identifiers from the feature name, and
    emits (identifier, feature, value) triples. Clinical (CLIN) features
    are fanned out to every sample recorded for the matching patient.

    Parameters
    ----------
    sFilepath : string
        Path of the data file to process

    dataTuple : tuple
        (cSdrfIndexer, dReseg, stFeatures, stBarcodes,
         dPatientRawIdentifiers), as packed by the caller

    Return Value
    ------------
    list of tuple
        (rawIdentifier, feature, value) triples destined for the feature
        matrix; empty on any unrecoverable per-file problem
    """

    # First, unpack our parameters from our data tuple
    (cSdrfIndexer, dReseg, stFeatures, stBarcodes, \
      dPatientRawIdentifiers) = dataTuple  
    
    # Inner barcodizer for raw lookups; we defer the web hit to our
    #  synchronized one, to synchronize its cache file
    innerLogger = Logger()
    innerBarcodizer = Barcodizer(logger=innerLogger) 
        
    # Set up our return list
    ret = []

    if True: # !!  (scaffolding guard retained; to be replaced by a real test)
        innerLogger.info('Propagating data from ' + sFilepath)

        # Grab the filename from this path. 
        #  If it cannot be found, return early.
        sFilename = getFilename(sFilepath)
        if not sFilename:
            return []

        # First, see if we can find it in our SDRF lookup table
        uniqueIdentifier = cSdrfIndexer.lookup(sFilepath)

        # Next, check to see if our file contains a valid unique identifier
        if not uniqueIdentifier:
            rawIdentifiers = innerBarcodizer.extractRawIdentifiersFromString(sFilepath)

            # Caveat: if the FILE is misidentified, throw a warning and ignore
            #  the rawIdentifier found from the filename.
            if len(rawIdentifiers) == 1:
                uniqueIdentifier = rawIdentifiers[0]
                innerLogger.info('Identified as %s by filepath' % uniqueIdentifier)
            elif len(rawIdentifiers) > 0:
                innerLogger.warn('Too many identifiers for file' \
                  + ' %s. Continuing happily...' % \
                  (sFilepath))
        
        kv = extractData(sFilepath,dReseg)                        

        # See if we can match a patient rawIdentifier from the filename
        sPbcode = None
        matches = re.findall(PATIENT_BARCODE_RE, sFilepath.lower()) 
        if not matches:
            innerLogger.info('No patient barcode found in file %s' % sFilepath)
        else:
            # Reconcile the found rawIdentifiers if we receive too many
            #  unique ones. This implies an XML file was
            #  double-filed dubiously.
            stPbcodes = set(matches)
            if len(stPbcodes) != 1:
                # Bugfix: the previous message was built as
                #  '...%s' % + sFilepath, which raised TypeError (unary
                #  plus on a string) and never filled its '(%d)'
                #  placeholder. Format both fields properly instead.
                innerLogger.info( \
                  'Irreconcilable number of rawIdentifiers for' \
                  + ' file %s (%d). Continuing happily...' % \
                  (sFilepath, len(stPbcodes)))
            else:
                # Oh, we just have one. Good. Grab the unique rawIdentifier
                #  we received, then and store it for later use.
                sPbcode = stPbcodes.pop().upper()

        if kv:
            for elem in kv:
                if elem[0]:
                    innerLogger.info('Processing matrix with key: ' + str(elem))

                    # Finally, see if the unique identifier is in the key. If
                    #  we fail at this point, we warn and return early.
                    innerIdentifier = uniqueIdentifier

                    # Clinical features get an explicit passthrough, because
                    #  they lack SDRF files AND point to collections of sample
                    #  rawIdentifiers. They're handled separately at a later step.
                    if 'CLIN:' not in elem[0] or identifyAs('TSV',sFilename):
                        rawIdentifiers = \
                          innerBarcodizer.extractRawIdentifiersFromString(elem[0]) 
                        if not innerIdentifier:
                            if not rawIdentifiers:
                                innerLogger.warn('Could not find identifier for' \
                                  + ' %s : %s. Skipping this entry.' % \
                                  (sFilepath, elem[0]))
                                continue
                            if len(rawIdentifiers) != 1:
                                innerLogger.warn('Too many identifiers for' \
                                  + ' %s : %s. Skipping this entry.' % \
                                  (sFilepath, elem[0]))
                                continue

                        # Any sample rawIdentifier that appears in the feature name
                        #  takes precedence over the original identification
                        #  occurring
                        if len(rawIdentifiers) == 1:
                            innerIdentifier = rawIdentifiers[0]
                            innerLogger.info('Identified as %s by feature name' % \
                              innerIdentifier) 
                    else:
                        innerIdentifier = ''

                    # Normalize the feature by removing TCGA sample rawIdentifiers
                    #  from it; UUID removal isn't supported at this time
                    rawIdentifiers = innerBarcodizer.extractRawIdentifiersFromString(elem[0]) 
                    for rawIdentifier in rawIdentifiers:
                        rawIdentifier_upper = rawIdentifier.upper()
                        elem[0] = elem[0] \
                          .replace(',' + rawIdentifier,'') \
                          .replace(rawIdentifier + ',','') \
                          .replace(rawIdentifier,'') \
                          .replace(',' + rawIdentifier_upper,'') \
                          .replace(rawIdentifier_upper + ',','') \
                          .replace(rawIdentifier_upper,'')

                    # Alias the feature and capitalize the rawIdentifier
                    feature = elem[0]
                    innerIdentifier = innerIdentifier.upper()
                
                    # Determine the value format for this cell: prefer float,
                    #  fall back to int for pure-digit strings, else keep the
                    #  raw string
                    value = elem[1]
                    try:
                        value = float(value)
                    except (TypeError, ValueError):
                        # Narrowed from a bare except; float() raises only
                        #  these for unconvertible input
                        if value.isdigit():
                            value = int(value)
                
                    # Deposit this entry into the feature matrix we've
                    #  allocated             
                    if 'CLIN:' in feature:

                        # Did we fail patient rawIdentifier lookup with a clinical
                        #  XML file? If so, oops.
                        if not sPbcode:
                            innerLogger.warn('Patient rawIdentifier not present for ' \
                              + sFilepath + '. Skipping this XML' \
                              + ' file and continuing happily...')
                            return []

                        # See if we have a match in our patient rawIdentifiers dict
                        if sPbcode not in dPatientRawIdentifiers:
                            innerLogger.warn('Patient rawIdentifier ' + sPbcode \
                              + ' not in list of samples. Skipping this XML' \
                              + ' file and continuing happily...')
                            return []

                        # And use it to walk our patient-to-sample rawIdentifier
                        #  lookup table, propagating patient-level results to
                        #  all of the samples contained under it.
                        for rawIdentifier in dPatientRawIdentifiers[sPbcode]:
                            ret.append((rawIdentifier.upper(), feature, value))
                    else:
                        if innerIdentifier in stBarcodes and \
                          feature in stFeatures:
                            innerLogger.info('Adding to matrix: ' + str(elem))
                            ret.append((innerIdentifier, feature, value))
    return ret


def echoHelp():
    """Echos help information to STDOUT
    This outputs CLI syntax and tool usage information. 

    Parameters
    ----------
    None

    Return Value
    ------------
    rcode : int
        ERRCO_PRINTHELP, always

    Examples
    --------
    echoHelp()

    """
    print \
    '''
    Generate a feature matrix using the context information in the given
    configuration file, writing output into the given work directory.

    Usage: %s <configpath> <workDir>
     (eg. %s myconfig.conf /tmp/myWorkDir)
    ''' % (sys.argv[0],sys.argv[0])

    return ERRCO_PRINTHELP
    

def extractData(sFilepath,partitionDict=None):
    """ Extracts key-value pairs from the given file, via pseudo-mimetyping

    These definitions are yield-shaped to allow for spot filtering done on a
    per-file basis. These rules are expected to be modularized in the future.

    !! Modularize
    
    Parameters
    ----------
    sFilepath : string
        String filepath to extract data from

    partitionDict : dict, optional
        Partition lookup forwarded to CopyNumberParser for CNVR segment
        data; when None, raw (unresegmented) segment records are emitted.

    Return Value
    ------------
    generator
        Python generator that emits [feature, value] records from the file

    Examples
    --------
    for elem in extractData('/path/to/my/file'):
        print elem     

    """

    sFilename = getFilename(sFilepath)
    if not sFilename:
        return

    hFileHandle = open(sFilepath,'rU')

    if identifyAs('SDRF',sFilename):
        logger.info('Identified as SDRF file; skipping')

    elif identifyAs('TSV',sFilename):
        # !! Include other files, including (for example) Brady's MAF files!
        if sFilename.endswith('fmatrix.tsv'):
            logger.info('Identified as upstream feature matrix file')

            for rst in ColumnarKVGenerator( \
              hFileHandle, \
              keycols=[0], \
              additionalSkipLines=[0,0], \
              connector=CONNECTOR):
                if rst and rst[1].strip() not in EMPTY_VALUES:
                    bdf = rst[0].strip().split(':',2)  
                                          # As [barcode : data_type : feature]
                                          #  Data type is dropped, as this will
                                          #  be inferred by the available data
                                          #  and MAY change.
                    #yield [bdf[2] + ',' + bdf[0][0:15],rst[1].strip()]
                    yield [bdf[2] + ',' + bdf[0],rst[1].strip()]

        if sFilename.endswith('legacy.tsv'):
            logger.info('Identified as legacy pipeline TSV file')

            for rst in ColumnarKVGenerator( \
              hFileHandle, \
              keycols=[0], \
              additionalSkipLines=[0,0], \
              connector=CONNECTOR):
                if rst and rst[1].strip() not in EMPTY_VALUES:
                    # Convert from the legacy feature name format + barcode:
                    #  DTYPE:FTYPE:FNAME:CHR:START:STOP:STRAND:EXTRA:BARCODE

                    # ... into our current one, as above.
                    legacy = rst[0].split(':')

                    # The chromosome takes precedence over the location
                    #  information. Notably, due to complex column FNAME, we
                    #  can't determine which names are gene names and which are
                    #  some other form of identifier without an unbounded LUT,
                    #  so these get glommed into the "context" field in the new
                    #  format.
                    if ''.join(legacy[3:7]):
                        yield ['%s:(%s,%s,%s,%s):%s:LegacyDataFile,%s' % \
                          (legacy[1],legacy[3],legacy[4],legacy[5],legacy[6], \
                          legacy[2],legacy[8]), rst[1].strip()] 
                    else:
                        yield ['%s::%s:LegacyDataFile,%s' % \
                          (legacy[1],legacy[2],legacy[7]), rst[1].strip()] 

        if sFilename.endswith('legacy.transpose.tsv'):
            logger.info('Identified as legacy (transposed) pipeline TSV file')

            for rst in ColumnarKVGenerator( \
              hFileHandle, \
              keycols=[0], \
              additionalSkipLines=[0,0], \
              connector=CONNECTOR):
                if rst and rst[1].strip() not in EMPTY_VALUES:
                    # Convert from the legacy feature name format:
                    #  BARCODE:DTYPE:FTYPE:FNAME:CHR:START:STOP:STRAND:EXTRA

                    # ... into our current one, as above.
                    legacy = rst[0].split(':')

                    # The chromosome takes precedence over the location
                    #  information. Notably, due to complex column FNAME, we
                    #  can't determine which names are gene names and which are
                    #  some other form of identifier without an unbounded LUT,
                    #  so these get glommed into the "context" field in the new
                    #  format.
                    if ''.join(legacy[4:8]):
                        yield ['%s:(%s,%s,%s,%s):%s:LegacyDataFile,%s' % \
                          (legacy[2],legacy[4],legacy[5],legacy[6],legacy[7], \
                          legacy[1],legacy[0]), rst[1].strip()]
                    else:
                        yield ['%s::%s:LegacyDataFile,%s' % \
                          (legacy[2],legacy[3],legacy[0]), rst[1].strip()] 
        '''
        else:
            logger.info('Identified as generic TSV file')            
            for rst in ColumnarKVGenerator( \
              hFileHandle, \
              keycols=[0], \
              additionalSkipLines=[0,0], \
              connector=CONNECTOR):
                if rst[1].strip() not in EMPTY_VALUES:
                    yield ['GNRC::%s:UnidentifiedGenericTSVFile' % \
                      rst[0].replace(CONNECTOR,','),rst[1]]
        '''
        

    elif identifyAs('XML',sFilename):
        # Verify this is a clinical XML file
        if 'clin' in sFilepath:
            logger.info('Identified as Clinical XML file')
        
            # !! Parse this type of file
            # !! This will need to be done automatically using the system in
            # !! place in Dataport, or an equivalent! Compound ID parsing,
            # !! go! 
            for rst in parseXml(hFileHandle):
                tag = rst[0].replace(':',',')
                if '/' in tag:
                    tag = tag.rsplit('/',1)[1]
                yield ['CLIN::%s:ClinXML' % tag,rst[1]]

    # NOTE: normalized to sFilename for consistency with the branches above
    #  (identifyAs() matches by extension, so the result is identical).
    elif identifyAs('TXT',sFilename):
        if sFilename.startswith('mut_bin_') and '_transpose_' not in sFilename:
            logger.info('Identified as binarized mutation (GNAB) data')
            
            for rst in ColumnarKVGenerator( \
              hFileHandle, \
              keycols=[0], \
              additionalSkipLines=[0,0], \
              connector=CONNECTOR):
                blf = rst[0].strip().split(':',1) # As [barcode : loc_feature]
                blf[1] = blf[1].split('_',1)
                #yield ['GNAB:%s:%s:ISBGenerated,%s' % \
                #  (blf[1][0],blf[1][1],blf[0][0:15]),rst[1]]
                yield ['GNAB:%s:%s:ISBGenerated,%s' % \
                  (blf[1][0],blf[1][1],blf[0]),rst[1]]
                

        # BUGFIX: the original condition was "'illumina' and 'rnaseq' in
        #  sFilepath"; the string literal 'illumina' is always truthy, so
        #  only 'rnaseq' was actually tested. Both substrings are required.
        if 'illumina' in sFilepath and 'rnaseq' in sFilepath:
            
            # !! Parse this type of file
            # !! KV files! Use generic KV compound parser!
            # Spec: (keylength, token)
            # !! For example:
            # !! ./public/tumor/coad/cgcc/unc.edu/illuminaga_rnaseq/rnaseq/unc.edu_COAD.IlluminaGA_RNASeq.Level_3.3.3.0/UNCID_291745.TCGA-AA-3525-01A-02R-0826-07.100603_UNC3-RDR300156_0007_61MM1AAXX.5.trimmed.annotated.translated_to_genomic.exon.quantification.txt 
                
            if sFilename.endswith('.quantification.txt'):
                ftype = "GEXP"
                if "mirna" in sFilepath:
                    ftype = "MIRN"

                subtype = None
                if sFilename.endswith('mirna.quantification.txt'):
                    subtype = "miRNA"
                elif sFilename.endswith('mrna.quantification.txt'):
                    subtype = "mRNA"
                elif sFilename.endswith('isoform.quantification.txt'):
                    subtype = "Isoform"
                elif sFilename.endswith('exon.quantification.txt'):
                    subtype = "Exon"
                else:
                    return
                    #subtype = "Unknown"
                logger.info('Identified as Illumina miRNASeq MIRN data')

                subplat = None
                # BUGFIX: the lowercase platform token ('illuminaga', ...)
                #  appears in the directory path, not the mixed-case
                #  filename -- see the example path above. Match against
                #  sFilepath, as the sibling checks in this branch do.
                if 'illuminaga' in sFilepath:
                    subplat = "IlluminaGA"
                elif 'illuminahiseq' in sFilepath:
                    subplat = "IlluminaHiSeq"
                else:
                    subplat = "IlluminaUnknown"

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[0,0], \
                  connector=CONNECTOR):
                    if re.match("[^:]+:[0-9]+-[0-9]+:[-+]", rst[0]):
                        dataTuple = \
                          tuple(rst[0].replace('-',',',1).replace(':',',',2).rsplit(':',1))
                        yield ['%s:(%s):%s:%s,%sQuantification' % \
                          (ftype,dataTuple[0],dataTuple[1],subtype,subplat),rst[1]]
                    else:
                        dataTuple = tuple(rst[0].split(':',1))
                        yield ['%s:%s:%s:%s,%sQuantification' % \
                          (ftype,dataTuple[0],dataTuple[1],subtype,subplat),rst[1]]

        if 'ht_hg-u133a' in sFilepath:
            
            # !! Parse this type of file
            # !! KV files! Use generic KV compound parser!
            # !! For example:
            # !! ./public/tumor/gbm/cgcc/broad.mit.edu/ht_hg-u133a/transcriptome/broad.mit.edu_GBM.HT_HG-U133A.Level_3.8.1007.0/BONES_p_TCGA_Batch8_9_RNA_HT_HG-U133A_96-HTA_A01_298034.level3.data.txt

            if sFilename.endswith('.data.txt'):
                logger.info('Identified as U133A platform GEXP data')

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[1,0], \
                  connector=CONNECTOR):
                    yield ['GEXP:%s:U133AExprProfile' % rst[0], rst[1]]

        if 'agilentg4502a' in sFilepath:
            
            # !! Parse this type of file
            # !! KV files! Use generic KV compound parser!
            # !! For example:
            # !! ./public/tumor/coad/cgcc/unc.edu/agilentg4502a_07_3/transcriptome/unc.edu_COAD.AgilentG4502A_07_3.Level_3.2.0.0/US82800149_251976013160_S01_GE2_105_Dec08.txt_lmean.out.logratio.gene.tcga_level3.data.txt

            if sFilename.endswith('.data.txt'):
                logger.info('Identified as Agilent GEXP data')

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[1,0], \
                  connector=CONNECTOR):
                    yield ['GEXP:%s:AgilentExprProfile' % rst[0], rst[1]]

        if 'h-mirna' in sFilepath:
            
            # !! Parse this type of file
            # !! KV files! Use generic KV compound parser!
            # !! For example:
            # !! ./public/tumor/ov/cgcc/unc.edu/h-mirna_8x15kv2/mirna/unc.edu_OV.H-miRNA_8x15Kv2.Level_3.1.8.0/TCGA-61-2614-01A-01T-1142-07.gene.tcga_level3.data.txt

            if sFilename.endswith('.data.txt'):
                logger.info('Identified as H-miRNA MIRN data')

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[1,0], \
                  connector=CONNECTOR):
                    yield ['MIRN:%s:%s:hmiRNAExprProfile' % tuple(rst[0].split(':',1)), rst[1]]

        if 'humanmethylation' in sFilepath:
            
            # !! Parse this type of file
            # !! KV files! Use generic KV compound parser!
            # !! Platform reconciliation: just use feature masking and
            # !! RF-ACE
            # !! For example:
            # !! ./public/tumor/blca/cgcc/jhu-usc.edu/humanmethylation450/methylation/jhu-usc.edu_BLCA.HumanMethylation450.Level_3.3.0.0/jhu-usc.edu_BLCA.HumanMethylation450.3.lvl-3.TCGA-BT-A20W-11A-11D-A14Z-05.txt

            if 'humanmethylation' in sFilename.lower() and \
              sFilename.endswith('.txt'):
                logger.info('Identified as Human Methylation METH data')
                platform = '(UnknownCount)'
                sFilename_lower = sFilename.lower()
                if 'humanmethylation27' in sFilename_lower:
                    platform = '27k'
                if 'humanmethylation450' in sFilename_lower:
                    platform = '450k'

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[1,0], \
                  connector=CONNECTOR):
                    yield ['METH:%s:HumanMethylation%s' % (rst[0],platform), \
                      rst[1]]

        if 'genome_wide_snp' in sFilepath:
            
            # !! Parse this type of file
            # !! Mixture! .seg.data.txt is KV matrix, the rest is a dict!
            # !! Example:
            # !! ./public/tumor/ov/cgcc/broad.mit.edu/genome_wide_snp_6/snp/broad.mit.edu_OV.Genome_Wide_SNP_6.Level_3.9.1002.0/TOUSE_p_TCGAaffxB9_10a_N_GenomeWideSNP_6_E10_396990.seg.data.txt

            if sFilename.endswith('.seg.data.txt'):
                logger.info('Identified as Genome CNVR (SNP) data')

                if partitionDict:                
                    for rst in CopyNumberParser(ColumnarKVGenerator( \
                      hFileHandle, \
                      keycols=[0,1,2,3], \
                      additionalSkipLines=[0,0], \
                      connector=CONNECTOR), \
                      partitionDict=partitionDict,logger=logger):
                        if rst and rst[0] and rst[1]: # !! Why is this required?
                            yield ['CNVR:(%s):%s:GenomeWideSNP6' % \
                              tuple(rst[0].rsplit(',',1)),rst[1]]
                else:
                    for rst in ColumnarKVGenerator( \
                      hFileHandle, \
                      keycols=[0,1,2,3], \
                      additionalSkipLines=[0,0], \
                      connector=CONNECTOR):
                        if rst:
                            pieces = rst[0].split(':')
                            pieces[1] = normalizeChrom(pieces[1])
                            yield ['CNVR:(%s):%s:GenomeWideSNP6' % \
                              tuple([','.join(pieces[1:4]),pieces[4]]),rst[1]]

        if 'mda_rppa_core' in sFilepath:
            
            # !! Parse this type of file
            # !! Mixture! .seg.data.txt is KV matrix, the rest is a dict!
            # !! Examples:
            # !! ./public/tumor/gbm/cgcc/mdanderson.org/mda_rppa_core/protein_exp/mdanderson.org_GBM.MDA_RPPA_Core.Level_3.1.0.0/mdanderson.org_GBM.MDA_RPPA_Core.protein_expression.Level_3.ff4890d1-67e8-4d62-b116-2b049753f6ee.txt

            if 'rppa' in sFilename.lower() and 'Level_3' in sFilename and \
              sFilename.endswith('.txt'):
                logger.info('Identified as MD Anderson RPPA data')

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[1,0], \
                  connector=CONNECTOR):
                    yield ['RPPA:%s:MDAnderson' % rst[0], rst[1]]
                    
        # !! Also handle, for example:
        #./public/tumor/brca/cgcc/mdanderson.org/mda_rppa_core/protein_exp/mdanderson.org_BRCA.MDA_RPPA_Core.Level_2.1.0.0/mdanderson.org_BRCA.MDA_RPPA_Core.SuperCurve.Level_2.fe1d6a4b-4950-48e6-92fd-7ddc10953a83.txt 
        #elif '??' in sFilename: ## !! Handle the secure data, too

        if 'microsat_i' in sFilepath:
            if sFilename.startswith('TCGA'):
                logger.info('Identified as Level 1 Microsat data')

                # !! Parse this type of file:
                # !! Examples:
                # !! ./secure/tumor/ucec/cgcc/nationwidechildrens.org/microsat_i/fragment_analysis/nationwidechildrens.org_UCEC.microsat_i.Level_1.49.0.0/TCGA-AX-A064-01A-01D-YYYY-23.txt

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[], \
                  additionalSkipLines=[0,0], \
                  connector=CONNECTOR):
                    yield ['GEXP::%s:MicrosatelliteFragmentAnalysis' % \
                      rst[0].split(':',1)[1], rst[1]]

        if 'stddata' in sFilepath:
            # Firehose "Standard" data files feature a sparse set of formats
            #  identified by file name and header structure. We use the former
            #  for simplicity; the latter requires introspection into the first
            #  few lines of the files themselves.
            if sFilename.endswith('.data.txt'):
                logger.info('Identified as Broad Firehose "Standard" Data')
                if "miR_isoform_expression" in sFilename:
                    for rst in ColumnarKVGenerator( \
                      hFileHandle, \
                      keycols=[0,1], \
                      headerCount=1, \
                      additionalSkipLines=[0,0], \
                      connector=CONNECTOR):
                        blf = rst[0].split(':',2) # As [barcode : feature : loc]
                        yield ['FRHS:%s:%s:FirehoseStdData,%s' % \
                          (blf[2],blf[1],blf[0]),rst[1]]
                else:
                    for rst in ColumnarKVGenerator( \
                      hFileHandle, \
                      keycols=[0], \
                      headerCount=2, \
                      additionalSkipLines=[0,0], \
                      connector=CONNECTOR):
                        lbf = rst[0].split(':',2) # As [loc : barcode : feature]
                        yield ['FRHS:%s:%s:FirehoseStdData,%s' % \
                          (lbf[0],lbf[2],lbf[1]),rst[1]]

        if 'analyses' in sFilepath:
            if sFilename.endswith('.bestclus.txt'):
                logger.info( \
                  'Identified as Broad Firehose Cluster Analysis Data')
                
                ## !! Duplicate header (they have a bug)
                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[1,0], \
                  connector=CONNECTOR):
                    if rst[1].strip() not in EMPTY_VALUES:
                        yield ['SAMP::%s:BroadFirehoseBestClus' % \
                          rst[0].replace(CONNECTOR,','),rst[1]]

            if sFilename.endswith('.patients.counts_and_rates.txt'):
                logger.info( \
                  'Identified as Broad Firehose MutSig Rate Analysis Data')

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[0,0], \
                  connector=CONNECTOR):
                    if rst[1].strip() not in EMPTY_VALUES:
                        yield ['SAMP::%s:BroadFirehoseMutSigRate' % \
                          rst[0].replace(CONNECTOR,','),rst[1]]

            if sFilename.endswith('.sample_sig_gene_table.txt'):
                logger.info( \
                  'Identified as Broad Firehose MutSig Gene Analysis Data')

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[0,0], \
                  connector=CONNECTOR):
                    rst[0] = rst[0].replace('UCEC','TCGA')
                    if rst[1].strip() not in EMPTY_VALUES:
                        yield ['SAMP::%s:BroadFirehoseMutSigGene,%s' % \
                          tuple(rst[0].split(':')),rst[1]]

            if 'all_lesions' in sFilename and sFilename.endswith('.txt'):
                logger.info( \
                  'Identified as Broad Firehose Lesions Gistic Analysis Data')

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[0,0], \
                  connector=CONNECTOR):
                    if rst[1].strip() not in EMPTY_VALUES:
                        yield ['SAMP::%s:BroadFirehoseLesionsGistic' % \
                          rst[0].replace(CONNECTOR,','),rst[1]]

            if 'broad_values_by_arm.txt' in sFilename and sFilename.endswith('.txt'):
                logger.info( \
                  'Identified as Broad Firehose Arm Gistic Analysis Data')

                for rst in ColumnarKVGenerator( \
                  hFileHandle, \
                  keycols=[0], \
                  additionalSkipLines=[0,0], \
                  connector=CONNECTOR):
                    if rst[1].strip() not in EMPTY_VALUES:
                        yield ['SAMP::%s:BroadFirehoseArmGistic' % \
                          rst[0].replace(CONNECTOR,','),rst[1]]

    # NOTE(review): this close only runs when the generator is exhausted;
    #  abandoning the generator early leaves the handle to the GC.
    hFileHandle.close()


def getFilename(sFilepath):
    """ Returns the canonical filename from a given path, if one exists

    Parameters
    ----------
    sFilepath : string
        String filepath to extract the filename from

    Return Value
    ------------
    string
        The extracted filename. None if a filename couldn't be found
        (no separator in the path, or an empty trailing component).

    Examples
    --------
    assert(getFilename('/path/to/myfile.tsv') != None)
    myFilename = getFilename('/path/to/myfile.tsv')

    """
            
    try:
        sFilename = sFilepath.rsplit(os.sep,1)[1]
    except (IndexError, AttributeError):
        # BUGFIX: the original message combined '+' and '%' so that '%'
        #  bound only to the second (placeholder-free) literal, raising
        #  TypeError inside this handler; it then fell through to an
        #  unbound sFilename (NameError). Log correctly and return None.
        logger.warn('Malformed sFilename detected: %s. Skipping this file!' \
            % sFilepath)
        return None

    if sFilename:
        return sFilename
    # else, return None


def identifyAs(sMime, sFilepath):
    """Performs pseudo-mime identification, by extension

     We prefer this approach over Python's mimetype libraries due to the
     need to strictly define custom types without the aid of other files, 
     including (but not limited to) /etc/mime.types. Therefore, we define 
     our own greatly simplified routine instead of relying on turning off
     the default imports.

    Parameters
    ----------
    sMime : string
        String pseudo-mimetype used to query our global dictionary

    sFilepath : string
        String filepath to perform pseudo-mimetyping against

    Return Value
    ------------
    bool
        True if the pseudomime matches by extension; false otherwise 

    Examples
    --------
    assert(identifyAs('TSV','myfilepath.tsv') == True)

    """
    # str.endswith accepts a tuple of suffixes, so one call tests every
    #  extension registered for this pseudo-mimetype at once.
    tExtensions = tuple(PSEUDO_MIMETYPES[sMime])
    return sFilepath.endswith(tExtensions)


def normalizeChrom(chrom):
    """ Normalize the given chromosome string by removing any extraneous
    identifiers, such as "chr", "chrom", and translating X and Y

    Parameters  
    ----------
    chrom : string
        Raw chromosome identifier, e.g. 'chr1', 'chrom12', 'chrX', '23'

    Return Value
    ------------
    string
        Normalized representation of this chromosome's identifier.
        None when the input is empty or None.

    Examples
    --------
    >>> assert(normalizeChrom('chr1') == '1') # Normalize this chromosome

    """
    if not chrom:
        return None

    sNorm = chrom.lower()

    # Strip the longest prefix first. BUGFIX: the original replace() chain
    #  removed 'chr' before 'chrom', so the 'chrom' rule could never match
    #  and 'chromN' was mangled to 'OMN'. Prefix matching also avoids
    #  clobbering these letter sequences mid-string.
    for sPrefix in ('chrom', 'chr', 'hs'):
        if sNorm.startswith(sPrefix):
            sNorm = sNorm[len(sPrefix):]
            break

    # Translate the numeric sex-chromosome aliases only on an exact match,
    #  so longer tokens containing '23'/'24' can never be corrupted.
    if sNorm == '23':
        sNorm = 'x'
    elif sNorm == '24':
        sNorm = 'y'

    return sNorm.upper()


def parseXml(reader):
    """ Generate flattened key-value pairs from an XML file

    Parameters
    ----------
    reader : streamlike
        An open read handle, passed to Python's data object model

    Return Value
    ------------
    generator of string
        A generator that may be used to extract key-value pairs from this
        XML input, with performance guarantees synonymous with those
        provided by the selected buffering mechanism.

    Examples
    --------
    parseXml(open('myfile.xml','rU'))   # myfile.xml is opened and parsed
   
    """ 
    for event in cElementTree.iterparse(reader):
        elem = event[1]
        # Only elements with non-whitespace text content are emitted
        if elem.text and elem.text.strip():
            # BUGFIX: split('}',1)[-1] strips a '{namespace}' prefix when
            #  one is present and leaves the tag untouched otherwise; the
            #  original [1] index raised IndexError on non-namespaced
            #  documents.
            yield [elem.tag.split('}',1)[-1],elem.text.strip()]

            # To parse attribute data as well, uncomment here
            #for attr in elem.attrib.items():
            #    yield ["%s/%s" % \
            #      (elem.tag.split('}',1)[-1],attr[0]), attr[1]]

    
def processByConfigFile(sConfigFile, sWorkDir):
    """Parses a config file pertinent to this system, performing any defined
    actions

    Parameters
    ----------
    sConfigFile : string
        Relative or absolute path spec pointing to a valid configuration file

    sWorkDir : string
        Work directory used to perform data transformations and post-
        processing operations, including the final feature matrix merge. 

    Return Value
    ------------
    int
        A CLI-conformant return code 
        0 for success; enumerated failure code otherwise 

    Examples
    --------
    assert(processByConfigFile( \
        '/path/to/my/config/file/here.conf','/path/to/my/work/dir') == 0)
    
    """
    # Read our configuration file
    config = ConfigParser.ConfigParser()
    config.read(sConfigFile)

    # Variables from our configuration file
    #lSdrfPaths = config.get("Main","sdrfPaths").split(',')
    lDataPaths = config.get("Main","dataPaths").split(',')
    lCancerTypes = config.get("Main","cancerTypes").split(',')
    lCannedRegexps = config.get("Main","cannedRegexps").split(',')
    iMaxSymTraversal = int(config.get("Main","maxSymTraversal"))
    sFeatureFilename = os.path.join(sWorkDir,config.get("Main","featureFile"))

    # ---
    # ! Step 0: sanity checks
    # Attempt to create our work directory, if it doesn't already exist
    if not os.path.exists(sWorkDir):
        os.mkdir(sWorkDir)  # Exceptions here will fail to the top level

    # Ensure we have some type of directory there now
    if not os.path.isdir(sWorkDir):
        raise OSError( \
            'The given workdir does not appear to actually be a directory')

    # Variables that require our work directory to actually exist
    cBarcodizer = Barcodizer( \
      os.path.join(sWorkDir,config.get("Main","cacheFile")), \
      logger) 
    cSdrfIndexer = SdrfIndexer(cBarcodizer,logger)

    sTranslationFilename = os.path.join(sWorkDir,config.get("Main","translationFile"))
    cFeatureTranslator = FeatureTranslator(logger) 
    if os.path.exists(sTranslationFilename) and \
      not os.path.isdir(sTranslationFilename):
        cFeatureTranslator.appendFromFile(sTranslationFilename)
    else:
        logger.warn("%s didn't exist for translating. Continuing happily..." % \
          sTranslationFilename)

    sGolemizeConfig =\
      os.path.join(sWorkDir,config.get("Main","golemizeConfig"))
    cGolemizer = None
    if os.path.exists(sGolemizeConfig) and \
      not os.path.isdir(sGolemizeConfig):
        logger.info("Creating REAL golemizer for parallel jobs.")
        cGolemizer = golemize.dictToGolemizer( \
          json.load(open(sGolemizeConfig,"rU")))
    else:
        logger.info("Creating FAKE golemizer for parallel jobs" \
          " (parallelization disabled).") 
        cGolemizer = fake_golemize.Golemizer()

    # ---
    # ! Step 1: Find all of the files we're interested in
    # Walk our repository 
    for sDataPath in lDataPaths:
        for sCancerType in lCancerTypes:
            dirPath = os.path.join(sDataPath,sCancerType)
            if not os.path.exists(dirPath):
                logger.error("%s does not exist" % dirPath)
                exit(-1)
                
            os.path.walk( \
                dirPath, \
                    treewalk, [sWorkDir, lCannedRegexps, iMaxSymTraversal]) 

    # As a post-processing step, shuffle the list of identified files. This
    #  will allow us to get stochastically even load distributions in our later
    #  parallelization steps, erasing any unexpected ill effects on file size
    #  or complexity that traversal order might have.
    shuffle(gLMatchedFiles)


    # ---
    # ! Step 2: Read in only the most current SDRF files for each selected
    # archive, extracting metadata into a lookup table for identification

    archives = {}
    for filepath in gLMatchedFiles:
        if identifyAs('SDRF',filepath):

            # SDRF files are found in mage-tab directories containing critical
            #  path characteristics. Among these are the archive version and
            #  revision, which much hullabaloo has been made of previously.
            #
            #  We take a very simple approach to parsing: select only archives
            #  of the same type at maximal revision. This is handled for us
            #  thanks to dcc-snapshot, so we assume all incoming archives are
            #  ones we're interested in.

            #  Grab the interesting, useful sections from each SDRF file (ie, the
            #  ones containing actual data sections) by parsing the header. It
            #  is established that the TCGA headers are consistent within each
            #  SDRF file, such that parsing them gives us consistent contents
            #  (see
            #  https://wiki.nci.nih.gov/display/TCGA/RNASeq+Data+Format+Specification,
            #  https://wiki.nci.nih.gov/display/TCGA/Sample+and+Data+Relationship+Format). 

            logger.info('Found SDRF file: ' + filepath)
                
            cSdrfIndexer.appendFromFile(filepath)


    # ---
    # ! Step 3: Organize all of the features and barcodes we'll need in our matrix
    stFeatures = set()
    stBarcodes = set()
    dPatientBarcodes = {}
    dCnvrData = {}
    stCnvrFeatures = set()
    
    dataTuple = (cSdrfIndexer)
    myResults = \
      cGolemizer.goDoIt ( gLMatchedFiles, dataTuple, doStep3, \
      label="fmParse Step 3" )#, binplace=False, alternateSource="/users/trobinso/staging" )
    for s in myResults:
        stFeatures.update(s[0])
        for identifier in s[1]:
            rstBarcodes = cBarcodizer.extractBarcodesFromString(identifier)
            if len(rstBarcodes) != 1:
                raise AssertionError( \
                  'Precondition failed: returned identifiers should always' \
                  ' be of length 1') 
            barcode = rstBarcodes[0].upper()
            stBarcodes.add(barcode)
    
            patientBarcode = barcode[0:12]
            if patientBarcode not in dPatientBarcodes:
                dPatientBarcodes[patientBarcode] = set()

            dPatientBarcodes[patientBarcode].add(barcode)
        for key in s[2]: # Note: for C compilation reasons, this may be a slower
                         #   operation than list.extend(). We still need to go
                         #   over this anyway for our residual and incremental.
            if key not in dCnvrData:
                dCnvrData[key] = s[2][key]
            else:
                dCnvrData[key] += s[2][key]
        stCnvrFeatures.update(s[3])

    # As a post-processing substep of Step 3, cluster the CNVR data and use it
    #  to extract the correct feature matrix identifiers and contained
    #  data. We'll use this again in Step 4.

    lStarts = []            # Starts for each original breakpoint
    lStops = []             # Stops for each original breakpoint

    dReseg = {}             # Per-chromosome dictionary for CNVR partitioning
    if dCnvrData:

        # Before we continue, we require two walks of our data to fill our
        #  tiling bins before resegmentation. This was originally abstracted
        #  away into a separate class that performed fundamentally the same
        #  operation, but due to large amounts of memory being passed around (in
        #  spite of parallelism), this was slow. This is a shame, because it's
        #  the elegant online solution to this problem.

        # (To be fair, the problem there is a design invariant that did not
        #  hold true, as we're using a pathological split size.)

        # !! The hardcoded constant factor here will need to be removed.

        # Spread each segment's value uniformly per base: keys look like
        #  (chrom, start, stop) and the value is divided by segment length
        #  (guarded to 1.0 for zero-length segments).
        for item in dCnvrData.items():
            # Sort order is lexicographic key, then position, then ID, then
            # value. Given our invariants, this ordering is fine.
            divisor = abs(float(item[0][2] - item[0][1]))
            if not divisor:
                divisor = 1.0
            lStarts.append((item[0][0],item[0][1],item[1] / divisor))
            lStops.append((item[0][0],item[0][2],item[1] / divisor))
        lStarts.sort()
        lStops.sort()

        # Sweep-line over sorted starts/stops, binning per-base signal into
        #  fixed-width tiles keyed by (chrom, tile_start, tile_end).
        #  step is presumably the hardcoded constant flagged above.
        chromosome = None
        step = 1000
        position = 0
        incremental = 0.0
        tiles = {}
        startIdx = 0
        stopIdx = 0
        current = 0.0
        p = 0                   # Count of segments currently open at position
        while startIdx < len(lStarts):
            if not p:
                # No open segments: jump to the next start, snapping position
                #  to a tile boundary (relies on Python 2 integer division).
                start = lStarts[startIdx]
                chromosome = start[0]
                position = (start[1] / step) * step
                incremental += start[2]
                startIdx += 1
                p += 1
                logger.info("Now working on chromosome " + chromosome)
            # Open every segment whose (floored) start falls in this tile.
            while startIdx < len(lStarts) and \
              lStarts[startIdx][0] == chromosome and \
              (lStarts[startIdx][1] / step) * step <= position:
                incremental += lStarts[startIdx][2]
                startIdx += 1
                p += 1
            # Close every segment whose (floored) stop falls in this tile.
            while stopIdx < len(lStops) and \
              lStops[stopIdx][0] == chromosome and \
              (lStops[stopIdx][1] / step) * step <= position:
                incremental -= lStops[stopIdx][2]
                stopIdx += 1
                p -= 1 # Invariant: p can never be less than 0

            # Record the tile's total signal; current is reset every tile, so
            #  this is effectively abs(incremental * step).
            current += incremental * step
            tiles[(chromosome,position - step,position)] = abs(current)
            current = 0.0
            position += step
    
        # Our clustering routine features a fixed cutoff value derived
        #  from the sorted difference vector of our results. It's essentially
        #  naive slope detection, ideally augmented by gaussian_kde or JPEG
        #  decomposition for more robust results, with additional
        #  implementations of this logic provided by, for example,
        #  aroma.affymetrix in R.
        logger.info("Resegmenting...")
        # Python 2 only: .values() returns a list, mutated in place below.
        # NOTE(review): values is in arbitrary dict order, but keys (below) is
        #  sorted; values[i] is later compared against cutoff as if it were
        #  aligned with keys[i]. Unless tiles' insertion order happens to
        #  match sorted order, this mis-aligns the diff vector -- verify.
        values = tiles.values()
        idx = int(len(values) * 0.8)    # Cutoff at the 80th percentile

        # Perform in-place transformation of values into our diffVec
        for i in reversed(xrange(1,len(values))):

            # Walk downwards, transforming our values
            values[i] = abs(values[i] - values[i-1])

        # Grab the cutoff
        cutoff = sorted(values)[idx]

        # Now, walk our data, forming new segment breakpoints
        lBreaks = []                    # List of segment breakpoints
        chrom = None                    # The current chromosome
        lastElem = len(tiles) - 1   # The last index in the array
        keys = sorted(tiles.keys())
        for i in xrange(len(keys)):
            k = keys[i]

            # A small sanity check
            if k[0] == None:
                raise ValueError("Given chromosome cannot be None")

            # A small check for the first entry
            if i == 0:
                # If this is the first entry, store it.
                lBreaks.append(k) 
            else:  
                # Otherwise...
                # Add any entries where we have a chromosome break 
                if k[0] != chrom:
                    chrom = k[0]
                    lBreaks.append(keys[i-1])
                    lBreaks.append(k)

                # Add any entry with difference on or exceeding our cutoff
                elif values[i] >= cutoff:
                    lBreaks.append(keys[i-1])
                    lBreaks.append(k)

            # Add the last entry of any sequence
            if i == lastElem:
                lBreaks.append(k)

        # Add these as key partitions for later processing
        #  Breakpoints are consumed pairwise: (segment start tile, segment end
        #  tile). Python 2 integer division on len(lBreaks)/2.
        for i in xrange(0,len(lBreaks)/2):
            first = lBreaks[i*2]
            second = lBreaks[(i*2)+1]

            # Only record pairs on the same chromosome; the segment spans from
            #  the first tile's lower bound to the second tile's upper bound.
            if first[0] == second[0]:
                if first[0] not in dReseg:
                    dReseg[first[0]] = []
                dReseg[first[0]].append((first[1],second[2]))


    # ---
    # ! Step 4: Create the initial merge file

    # Remove features if they match entries in our feature translator
    # NOTE(review): removing from stFeatures while iterating it will raise
    #  RuntimeError ("Set changed size during iteration") the first time a
    #  translator matches -- iterate a copy (e.g. list(stFeatures)) instead.
    for feature in stFeatures:
        logger.info('Trying translators on: %s' % feature)

        # Match this feature
        if cFeatureTranslator.match(feature):
            logger.info('Filtered out: %s' % feature)
            stFeatures.remove(feature)

    # Cross every CNVR feature stub with every resegmented interval, forming
    #  full "CNVR:(chrom,start,stop,+):context:source" identifiers.
    for cnvrFeature in stCnvrFeatures:
        for chrom in dReseg.keys():
            for seg in dReseg[chrom]:
                feature = "CNVR:(%s,%d,%d,+):%s:%s" % \
                  (chrom,seg[0],seg[1],cnvrFeature[0],cnvrFeature[1])

                logger.info('Trying translators on: %s' % feature)

                # Match this feature
                if cFeatureTranslator.match(feature):
                    logger.info('Filtered out: %s' % feature)
                    continue

                # Perform any necessary feature name translating
                stFeatures.add(feature)

    # Build our feature matrix with these translations
    cFeatureMatrix = FeatureMatrix( \
                       sorted(stBarcodes), sorted(stFeatures), logger=logger) 

    # Walk our files again, populating the feature matrix
    dataTuple = (cSdrfIndexer, dReseg, stFeatures, stBarcodes, \
      dPatientBarcodes)
    myResults = \
      cGolemizer.goDoIt ( gLMatchedFiles, dataTuple, doStep4, \
      label="fmParse Step 4" )#, binplace=False, alternateSource="/users/trobinso/staging" )
    # Each t appears to be (barcode-bearing identifier, feature name, value);
    #  inferred from the t[0]/t[1]/t[2] usage below -- verify against doStep4.
    for s in myResults:
        for t in s:        
            rstBarcodes = cBarcodizer.extractBarcodesFromString(t[0])
            if len(rstBarcodes) != 1:
                raise AssertionError( \
                  'Precondition failed: returned identifiers should always' \
                  ' be of length 1') 
            barcode = rstBarcodes[0].upper()
    
            try:
                cFeatureMatrix.set(barcode,t[1],t[2])
            except ValueError:
                logger.warn( \
                  'Skipping (%s,%s) ' % (barcode,t[1]) + ' due to' \
                  + ' non-deterministic averaging in matrix creation!' \
                  + ' Continuing happily...')

                # A ValueError signals duplicate data, in which we mask the
                #  data and log a warning about it.
                cFeatureMatrix.setVal2DMask(barcode, t[1], True)

    # Since we can now assume that we have a valid unique identifier,
    #  we can use it to find the appropriate row for this record
    cFeatureMatrix.writeFeatureMatrix(sFeatureFilename,\
                                        treatMasked2DAsNA=True,\
                                        removeMaskedCols=True, \
                                        clobber=True, transpose=True) 


    # ---
    # ! Step 5: Run analyses, collecting the output in our work directory and
    #  performing secondary filtering
    
    # We assume this happens as part of a different process, inclusive of the
    #  filtering of the feature matrix. Therefore, we just output our
    #  top-level, tersely-filtered matrix here.

    return ERRCO_SUCCESS


def treewalk(arg, dirname, names):
    """Attempts to match canned regular expressions during tree traversal,
    additionally walking a specified depth of symlinks

    Matched full pathnames are appended to the module-global gLMatchedFiles.

    Parameters
    ----------
    arg : list
        Argument list passed in for processing: 
        [sWorkDir, lCannedRegexps, iMaxSymTraversal]
    
        See callers for these definitions.

    dirname : string
        Pathspec for the current working directory
        Note that this differs from our "work directory," sWorkDir, above.

    names : list
        List containing the bare entry names (not full paths) in the current
        directory, as supplied by os.path.walk().

    Return Value
    ------------
    None (appends files matching our regular expressions to gLMatchedFiles)

    """

    # If symlink traversal has been exhausted, return early
    if arg[2] == 0:
        return

    # Otherwise, perform matching
    for name in names:

        # os.path.walk() hands us bare entry names; join with the containing
        #  directory up front so filesystem checks below resolve against
        #  dirname rather than silently against the process's CWD.
        name = os.path.join(dirname, name)

        # Complex loop continuation that amounts to: consider symlinks as
        #  directories to a given tolerable depth, don't consider directories
        #  themselves during regexp matching. (os.path.walk() itself uses
        #  lstat and will not descend into symlinked directories, so we must
        #  recurse into them manually, decrementing the depth budget.)
        if os.path.isdir(name):
            if os.path.islink(name):
                os.path.walk(name, treewalk, [arg[0], arg[1], arg[2] - 1])
            continue

        # Perform RE matching for the given canned regular expressions
        #  Note that this must match from the start of the full pathspec, in
        #  line with the design of re.match(...).
        for regexp in arg[1]:

            # If we match one of our regular expressions, perform symlinking
            #  with copy failover.
            if re.match(regexp,name):

                logger.info('Matched %s; adding it to our file list' % (name))
                gLMatchedFiles.append(name)
                break


def _fmain():
    """Function main: analog for the "main" function in other languages

     This is non-standard, and should not be confused with __main__, 
     the (otherwise anonymous) scope in which the Python interpreter's main
     program executes. We call this function through a check for it, though.
    
     If the right number of arguments are given to sys.argv, perform
     operation. Otherwise, echo usage text and exit.

    Parameters
    ----------
    None (arguments supplied by sys.argv)

    Return Value
    ------------
    int
        A CLI-conformant return code 
        0 for success; enumerated failure code otherwise 

    Examples
    --------
    assert(_fmain() == 0)

    """
    # Guard clause: anything but exactly two CLI arguments gets usage text.
    if len(sys.argv) != 3:
        return echoHelp()

    # Unpack the config file and work directory, then begin operation.
    sConfigFile, sWorkDir = sys.argv[1:3]
    return processByConfigFile(sConfigFile, sWorkDir)


# Initialization and Execution
#  Direct invocation
if __name__ == "__main__":
    # Run the inline doctests before handing control to _fmain(): the
    #  previous layout placed the doctest invocation in a second __main__
    #  guard AFTER exit(_fmain()), which never returns, making the doctests
    #  unreachable dead code.
    import doctest
    doctest.testmod()
    exit(_fmain())

# Library invocation
#  If included as part of a package, the caller is responsible for
#  initialization, setup, and any additional operations.
#
#  (For example, executing _fmain())