#!/usr/bin/env python
"""

    parse_delimited.py

        Given a stream (usually an open file),
        will read through each line, splitting into delimited fields

        You can optionally specify 
            1) a list of data types, into which the values will be converted
               (If there are more values than specified, these will be retained as strings)
               
            2) That the 1st line in the data represents column headers
            
                Preceding hashes in the header line are ignored
            
                in which case, the function will return

                A) if a list of field_names is not specified,
                    a dictionary of values indexed by field name in the headers line
                B) if a list of field_names is specified,
                    only the values which correspond to this list
                    Missing values are returned as None
        
        Empty lines at the end of a file are ignored
        Empty lines are otherwise returned as empty ``list``\ s (no header) or
        as ``None``-filled ``list``\ s / ``dict``\ s (with a header)
        
"""

################################################################################
#
#   parse_delimited
#
#
#   Copyright (C) 2009 Leo Goodstadt
#
#   This program is free software; you can redistribute it and/or
#   modify it under the terms of the GNU General Public License
#   as published by the Free Software Foundation; version 2
#   of the License
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program; if not, write to the Free Software
#   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#################################################################################

import sys, os
from collections import defaultdict


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#   Functions        


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
def check_data_types(data_types, cnt_fields):
    """
    Validate data_types, supplying a default when it is None.

    :param data_types: list of types (e.g. [int, str, int]) or None
    :param cnt_fields: number of fields expected in the data
    :returns: data_types unchanged, or ``[str] * cnt_fields`` when data_types
              is None (``str`` means "no conversion")
    :raises TypeError: if data_types is neither None nor a list
    """
    #
    #   if no "data_types", use data_type = str which means no conversion
    if data_types is None:
        return [str] * cnt_fields

    # data_types should be list of types
    if not isinstance(data_types, list):
        raise TypeError("The data_types parameter should be a list of "
                        "types. E.g. [int, str,int]")

    return data_types
    
class MissingFieldError (RuntimeError):
    """Raised when a requested field name is absent from the header line."""
    
def parse_delimited_with_header (datastream, data_types = None, field_names = None, 
                                 delimiter = "\t", comments = '#'):
    """
    Parses delimited streams of data whose first line is a header of
    column names.

    :param datastream: iterable of lines (e.g. an open file)
    :param data_types: types into which each data values should be converted.
    :type: list of types (e.g. [int, float, str]); defaults to str (no conversion)
    :param field_names: fields (specified in the header line) which are retained;
                        if None, each line yields a dict keyed by header name
    :type: list of names (e.g. ["1st", "second", "chromosome"])
    :param delimiter: column separator. Defaults to tab character
    :param comments: list of letters which preface ignored comment lines
    :raises MissingFieldError: if a name in field_names is absent from the header
    :raises ValueError: if a value cannot be cast to its specified type

    Terminal empty lines are discarded.  Interior empty lines yield
    None-filled results (dict or list, depending on field_names).
    """
    # convert comments to set so that we can look it up faster
    comments = set(comments)

    #
    #   get header line
    #   (next(datastream) rather than datastream.next(): works on Python 2 and 3)
    #
    datastream = iter(datastream)
    columns = next(datastream).rstrip()
    # strip a single leading comment character, e.g. "#chrom\tstart"
    # (guard against a completely empty header line)
    if columns and columns[0] in comments:
        columns = columns[1:]
    header_field_names = columns.split(delimiter)

    # use default of str for data_types
    data_types = check_data_types(data_types, len(header_field_names))

    #
    #   No specified fields:
    #
    #       Match corresponding fields to header field names
    #       Return each line as a dictionary keyed by header name
    #
    if field_names is None:
        pending_blanks = 0
        padding_missing_fields = [None] * len(header_field_names)
        blank_line_result = dict(zip(header_field_names, padding_missing_fields))
        for line_index, line in enumerate(datastream):

            line = line.rstrip()

            #
            #   We only want to discard terminal empty lines:
            #   Skip blanks but reinsert them if we then discover that we are
            #   not at the stream end
            #
            if not line:
                pending_blanks += 1
                continue
            # yield a fresh copy each time so one caller mutation cannot
            # corrupt other yielded results
            for _ in range(pending_blanks):
                yield dict(blank_line_result)
            pending_blanks = 0

            #   ignore comment line
            if line[0] in comments:
                continue

            #
            #   1. Split into fields
            #   2. Map values to type
            #   3. Match to corresponding field name
            #
            try:
                values = line.split(delimiter)
                values = [tt(vv) for tt, vv in zip(data_types, values)]
                # padding supplies None for any missing trailing fields
                yield dict(zip(header_field_names, values + padding_missing_fields))
            except ValueError:
                # +2 = convert 0-based index to 1-based and account for header
                raise ValueError("Could not cast value to the specified type in line "
                                 "#%d: '%s'" % (line_index + 2, line))

        return

    #
    #   Specified fields:
    #
    #       Match corresponding fields to header field names
    #       Return specified fields as a list, with missing values as None
    #   N.B. Blank lines yield lists of None (one per retained field)
    #

    #
    #   Lookup specified positions and types of desired fields
    #       from the list of field names in the header line
    #
    retained_fields = list()
    header_field_positions = dict(zip(header_field_names, range(len(header_field_names))))
    for field_name, data_type in zip(field_names, data_types):
        if field_name not in header_field_positions:
            raise MissingFieldError("Field [%s] is missing from the data" % field_name)
        retained_fields.append((header_field_positions[field_name], data_type))

    pending_blanks = 0
    for line_index, line in enumerate(datastream):

        #
        #   We only want to discard terminal empty lines:
        #   Skip blanks but reinsert them if we then discover that we are
        #   not at the stream end
        #
        line = line.rstrip()
        if not line:
            pending_blanks += 1
            continue
        for _ in range(pending_blanks):
            yield [None] * len(retained_fields)
        pending_blanks = 0

        #   ignore comment line
        if line[0] in comments:
            continue

        #
        #   1. Split into fields
        #   2. Map desired values to type; columns missing from short lines
        #      are returned as None
        #
        values = line.split(delimiter)

        try:
            yield [data_type(values[pos]) if pos < len(values) else None
                   for pos, data_type in retained_fields]
        except ValueError:
            # +2 = convert 0-based index to 1-based and account for header
            raise ValueError("Could not cast value to the specified type in line "
                             "#%d: '%s'" % (line_index + 2, line))

    return
        

def _parse_delimited_sans_header (datastream, data_types = None, delimiter = "\t", comments = '#'):
    """
    Parses delimited streams of data with no header line.

    :param datastream: iterable of lines (e.g. an open file)
    :param data_types: types into which each data values should be converted.
    :type: list of types (e.g. [int, float, str]); values beyond the
           specified types are retained as strings
    :param delimiter: column separator. Defaults to tab character
    :param comments: list of letters which preface ignored comment lines
    :raises ValueError: if a value cannot be cast to its specified type

    Terminal empty lines are discarded; interior empty lines yield [].
    """
    # convert comments to set so that we can look it up faster
    comments = set(comments)

    # use default of str for data_types; start with a generous column count
    # and extend below if a line turns out to be even wider
    data_types = check_data_types(data_types, 100)

    pending_blanks = 0
    for line_index, line in enumerate(datastream):

        #
        #   We only want to discard terminal empty lines:
        #   Skip blanks but reinsert them if we then discover that we are
        #   not at the stream end
        #
        line = line.rstrip()
        if not line:
            pending_blanks += 1
            continue
        for _ in range(pending_blanks):
            yield []
        pending_blanks = 0

        #   ignore comment line
        if line[0] in comments:
            continue

        #
        #   1. Split into fields
        #   2. Map values to type: any extra values are kept as strings
        #
        try:
            values = line.split(delimiter)
            # extend only by the shortfall so data_types does not grow
            # unboundedly across wide lines
            if len(data_types) < len(values):
                data_types = data_types + [str] * (len(values) - len(data_types))
            yield [tt(vv) for tt, vv in zip(data_types, values)]
        except ValueError:
            # +1 converts the 0-based index to a 1-based file line number
            raise ValueError("Could not cast value to the specified type in line "
                             "#%d: '%s'" % (line_index + 1, line))
    

def parse_delimited (datastream, header = False, data_types = None, field_names = None, delimiter = "\t", comments = "#"):
    """
    Parses delimited streams of data

    :param header: Whether the first line contains column names
    :type header: True/False
    :param data_types: types into which each data values should be converted.
    :type: list of types (e.g. [int, double, str])
    :param field_names: fields (specified in the header line) which are retained 
                    Ignored if there is no header line
    :type: list of names (e.g. ["1st", "second", "chromosome"])
    :param delimiter: column separator. Defaults to tab character
    :param comments: list of letters which preface ignored comment lines 
    """
    # dispatch to the appropriate parser: field_names only make sense
    # when there is a header line to match them against
    if header:
        return parse_delimited_with_header (datastream, data_types, field_names, delimiter, comments)
    return _parse_delimited_sans_header (datastream, data_types, delimiter, comments)

#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888

#   Testing


#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import unittest, json
class Test_parse_delimited(unittest.TestCase):
    """
    Exercise parse_delimited() over headerless, headed, typed and
    comment/delimiter variations of tab- and comma-separated data.

    Fixtures are triple-quoted literals indented 8 spaces in this source
    file; ``.replace("\\n        ", "\\n")`` strips that indentation.
    """



    def test_no_header_without_types(self):
        """
        parsing file without column heading: every value stays a string;
        interior blank lines yield [] and trailing blanks are discarded
        """
        lines = \
        """a\tb\t1\t2\tc
        a\te\t3\t4\td
        
        
        #
        zz\tgg\t4\t7\tending
        
        """.replace("\n        ", "\n")
        lines =[l + "\n" for l in lines.split("\n")]

        results = list(parse_delimited(lines))
        self.assertEqual(results, 
                            [["a", "b", "1", "2", "c"], 
                             ["a", "e", "3", "4", "d"], 
                             [], 
                             [], 
                             ["zz", "gg", "4", "7", "ending"]])

    def test_no_header_with_types(self):
        """
        parsing file without column heading but with data_types:
        values beyond the specified types are retained as strings
        """
        lines = \
        """1\t2\t4.5\ta\tc
        2\t4\t3.6\tdd\tff

        7\t8\t9.5\tzz\tending

        """.replace("\n        ", "\n")
        lines =[l + "\n" for l in lines.split("\n")]

        results = list(parse_delimited(lines, data_types = [int, int, float]))
        self.assertEqual(results, 
                            [[1, 2, 4.5, 'a', 'c'], 
                             [2, 4, 3.6, 'dd', 'ff'], 
                             [], 
                             [7, 8, 9.5, 'zz', 'ending']])
    def test_headed_data_parse(self):
        """
        parsing file with column heading: each line yields a dict keyed by
        header name; blank interior lines yield all-None dicts
        """
        lines = \
        """1st\t2nd\t3rd\t4th\t5th
        a\tb\t1\t2\tc
        a\te\t3\t4\td


        zz\tgg\t4\t7\tending

        """.replace("\n        ", "\n")
        lines =[l + "\n" for l in lines.split("\n")]

        results = list(parse_delimited(lines, header = True))
        self.assertEqual(results, 
                            [{'4th': '2', '5th': 'c', '2nd': 'b', '3rd': '1', '1st': 'a'}, 
                             {'4th': '4', '5th': 'd', '2nd': 'e', '3rd': '3', '1st': 'a'}, 
                             {'4th': None, '5th': None, '2nd': None, '3rd': None, '1st': None}, 
                             {'4th': None, '5th': None, '2nd': None, '3rd': None, '1st': None}, 
                             {'4th': '7', '5th': 'ending', '2nd': 'gg', '3rd': '4', '1st': 'zz'}
                             ])

    def test_headed_data_parse_without_data_types(self):
        """
        parsing file with column heading and field_names but without
        specifying data_types: retained values stay strings
        """
        lines = \
        """1st\t2nd\t3rd\t4th\t5th
        a\tb\t1\t2\tc
        a\te\t3\t4\td


        zz\tgg\t4\t7\tending

        """.replace("\n        ", "\n")
        lines =[l + "\n" for l in lines.split("\n")]

        results = list(parse_delimited(lines, header = True, 
                                        field_names = ["1st", "3rd"]))
        self.assertEqual(results, 
                            [['a',  '1'], 
                             ['a',  '3'], 
                             [None, None], 
                             [None, None], 
                             ['zz', '4']
                             ])

    def test_headed_data_parse_with_data_types(self):
        """
        parsing file with column heading, field_names and data_types:
        retained values are converted; mismatched types raise ValueError
        """
        lines = \
        """1st\t2nd\t3rd\t4th\t5th
        a\tb\t1\t2\tc
        a\te\t3\t4\td


        zz\tgg\t4\t7\tending

        """.replace("\n        ", "\n")
        lines =[l + "\n" for l in lines.split("\n")]

        results = list(parse_delimited(lines, header = True, 
                                        field_names = ["1st", "3rd"], 
                                        data_types = [str, int]))
        self.assertEqual(results, 
                            [['a',  1], 
                             ['a',  3], 
                             [None, None], 
                             [None, None], 
                             ['zz', 4]
                             ])
    
        #
        #   wrong data types should raise ValueError
        #   (the "1st" column holds letters which cannot become ints)
        # 
        def call_parse_delimited ():
            results = list(parse_delimited(lines, header = True, 
                                            field_names = ["3rd", "1st"], 
                                            data_types = [int, int]))
            
        self.assertRaises(ValueError, call_parse_delimited)


    def test_headed_data_parse_with_field_names_in_wrong_order(self):
        """
        parsing file with column heading specifying field_names out of
        header order: values come back in the requested order
        """
        lines = \
        """1st\t2nd\t3rd\t4th\t5th
        a\tb\t1\t2\tc
        a\te\t3\t4\td


        zz\tgg\t4\t7\tending

        """.replace("\n        ", "\n")
        lines =[l + "\n" for l in lines.split("\n")]

        results = list(parse_delimited(lines, header = True, 
                                        field_names = ["3rd", "1st"], 
                                        data_types = [int, str]))
        self.assertEqual(results, 
                            [[1, 'a' ], 
                             [3, 'a' ], 
                             [None, None], 
                             [None, None], 
                             [4, 'zz']
                             ])
        results = list(parse_delimited(lines, header = True, 
                                        field_names = ["3rd", "1st"]))
        self.assertEqual(results, 
                            [['1', 'a' ], 
                             ['3', 'a' ], 
                             [None, None], 
                             [None, None], 
                             ['4', 'zz']
                             ])
        
    def test_headed_data_parse_with_missing_columns(self):
        """
        parsing file with column heading specifying field_names out of order
        and with missing columns in the data: missing values become None
        """
        lines = \
        """1st\t2nd\t3rd\t4th\t5th
        a\tb\t1\t2\tc
        a\te\t3\t4\td


        zz

        """.replace("\n        ", "\n")
        lines =[l + "\n" for l in lines.split("\n")]

        results = list(parse_delimited(lines, header = True, 
                                        field_names = ["3rd", "1st"], 
                                        data_types = [int, str]))
        self.assertEqual(results, 
                            [[1, 'a' ], 
                             [3, 'a' ], 
                             [None, None], 
                             [None, None], 
                             [None, 'zz']
                             ])
        results = list(parse_delimited(lines, header = True, 
                                        field_names = ["3rd", "1st", "missing"], 
                                        data_types = [int, str, int]))
        self.assertEqual(results, 
                            [[1, 'a'    , None], 
                             [3, 'a'    , None], 
                             [None, None, None], 
                             [None, None, None], 
                             [None, 'zz', None]
                             ])

    def test_alternative_comment_lines_separators(self):
        """
        parsing file with column heading, field_names out of order,
        missing columns in the data, alternative comment symbols "%;"
        and separator ","
        """
        lines = \
        """%1st,2nd,3rd,4th,5th
        %a\tb\t1\t2\tc
        ;a\tb\t1\t2\tc
        a,b,1,2,c
        a,e,3,4,d


        zz

        """.replace("\n        ", "\n")
        lines =[l + "\n" for l in lines.split("\n")]

        results = list(parse_delimited(lines, header = True, 
                                        field_names = ["3rd", "1st"], 
                                        data_types = [int, str],
                                        comments = "%;",
                                        delimiter = ",",
                                        ))
        self.assertEqual(results, 
                            [[1, 'a' ], 
                             [3, 'a' ], 
                             [None, None], 
                             [None, None], 
                             [None, 'zz']
                             ])
        results = list(parse_delimited(lines, header = True, 
                                        field_names = ["3rd", "1st", "5th"], 
                                        data_types = [int, str, str],
                                        comments = "%;",
                                        delimiter = ",",))
        self.assertEqual(results, 
                            [[1, 'a'    , "c"], 
                             [3, 'a'    , "d"], 
                             [None, None, None], 
                             [None, None, None], 
                             [None, 'zz', None]
                             ])

#
#   debug code not run if called as a module
#     
if __name__ == '__main__':
    # strip our private --debug flag before unittest parses the arguments
    if "--debug" in sys.argv:
        sys.argv.remove("--debug")
    sys.argv.append("--verbose")
    unittest.main()

    
