__author__ = 'anorberg'

import tsv
import csv
import os
import glob

"""
A library of loosely-related utility methods to perform various operations on data tables.
"""

def cleave(table, columnId):
    """
    Split a TSV-like data table into an unordered collection of tables based on the value of a given column.

    Parameters:
      table -- a sequence of objects that support the index operator with the value columnId.
      columnId -- an object to use as an index into the objects contained in table.

    Returns:
      A dict mapping the possible values of columnId to lists of elements in table. The value of item[columnId] is
      guaranteed to have equaled the value in the key at the time the cleave was performed. The lists are
      guaranteed to be in the same order the elements were retrieved from table, simply collated out into
      different collections.

    Mutates:
      table will be iterated over, but not queried in any other way. Each element in table will be indexed
      exactly once, by column ID. Objects are not being copied in any way- the cleaved sublists are
      shallow copies, so changes to the elements in them will write through to the original table, although
      changes to the sublist structure will not.
    """
    ret = {}
    for record in table:
        # setdefault performs the "create bucket on first sight" check and the
        # lookup in a single operation; append preserves encounter order.
        ret.setdefault(record[columnId], []).append(record)
    return ret

def WeldFiles(sourceFiles, schema, overrideHeader=None, dialect=csv.excel_tab, missingfill="", all_headerness = None):
    """
    Take a splat path, a pathsep-delimited list of splat paths, or a sequence of the former and iterate them as a single data set.

    WeldFiles is simply a deeply-nested generator shell over tsv.TsvReader. Using the
    glob library and some typical abuses of os.path and type sensitivity, the sourceFiles
    parameter becomes a sequence of things to feed to the sourceFile parameter of a
    tsv.TsvReader constructor. schema, overrideHeader, dialect, and missingfill are fed
    directly to each constructor and thus have the meaning they have for TsvReader, with
    the proviso that they will be attached to several files.

    WeldFiles consecutively iterates across each TsvReader, moving onto the next when
    and only when the previous is exhausted. It follows the patterns in order.
    Errors in a path will be discovered late, not early. WeldFiles makes absolutely
    no effort to confirm that it is sane to interpret a file as a TSV. Good luck.

    all_headerness is passed into tsv.TsvReader's hasHeader value every time. If not
    all files have the same headeredness, this must be None, even if you have some
    way of knowing on a per-file basis. None causes autodetection.
    """
    # Normalize sourceFiles into a flat list of glob patterns. A bare string is
    # split on os.pathsep; a sequence has each of its members split the same way.
    if isinstance(sourceFiles, str):
        splatList = sourceFiles.split(os.path.pathsep)
    else:
        splatList = []
        for filename in sourceFiles:
            splatList.extend(filename.split(os.path.pathsep))

    for splat in splatList:
        for matchedPath in glob.iglob(splat):
            if os.path.isdir(matchedPath):
                # Bug fix: os.listdir returns bare names relative to the listed
                # directory, so they must be rejoined with matchedPath before
                # open() can find them from any working directory.
                target = [os.path.join(matchedPath, name)
                          for name in os.listdir(matchedPath)]
            else:
                target = [matchedPath]
            for thing in target:
                # Bug fix: the with-statement guarantees the file handle is
                # closed even if TsvReader raises or the caller abandons the
                # generator mid-file (the old explicit close() would be skipped).
                with open(thing, "rb") as inFile:
                    for row in tsv.TsvReader(inFile, schema, overrideHeader, dialect, missingfill, all_headerness):
                        yield row
            
def ReadSchemaFile(schemaFile):
    """
    Read a schema out of a schema file. Takes open file-like, returns tuple: (schema format list, override header list)

    A schema file is two newline-delimited lists, separated by a blank line,
    the second of which may be empty and is terminated by EOF.

    The first list defines the schema. Each line is itself a tab-delimited
    list of synonymous headers. Quotes are respected, since the schema
    file is parsed with csv.Reader using the excel_tab schema and an
    expectation of ragged lines.

    The second list must contain one string per line and defines the
    override header: a literal header to forcibly inflict on the table. Most
    schemata should omit this list, which will result in None as the
    header override, which is correct for every TSV that actually specifies
    its own header correctly, which will hopefully be the majority of them.
    """
    parsed = csv.reader(schemaFile, dialect=csv.excel_tab)

    # Section one: synonym rows. Leading blank lines are spurious and skipped;
    # the first blank line *after* real content terminates the section.
    schema = []
    for row in parsed:
        if row:
            schema.append(row)
        elif schema:
            break

    # Section two: the forced header, one value per line. If the reader was
    # already exhausted above, this loop simply never runs.
    forcedHeader = []
    for row in parsed:
        if not row:
            continue
        if len(row) > 1:
            raise ValueError("Improperly formatted schema file. The Forced Header section must not contain more than one value per line.")
        forcedHeader.append(row[0])

    # tsv.Reader distinguishes None from []; an absent section means "no override".
    return (schema, forcedHeader or None)