import collections
import contextlib
import cStringIO
import csv
import itertools
import operator
import re
import StringIO

import relation as rel

# Shadow the builtin zip with itertools.izip so loops below iterate lazily
# instead of materializing intermediate lists (Python 2 idiom).
zip = itertools.izip

# Public API of this module. _loader is exported deliberately, despite the
# leading underscore, so callers can retrieve the last loader for diagnosis.
__all__ = ['_loader', 'loadcsv', 'CSVLoader', 'savecsv']


# ------------------------------------------------------------------------
# decoders

def int_comma_separator(s):
    """Parse an integer that may use ',' as a thousands separator, e.g. '1,234'."""
    return int(''.join(s.split(',')))

def float_comma_separator(s):
    """Parse a float that may use ',' as a thousands separator, e.g. '1,234.5'."""
    return float(''.join(s.split(',')))

def percentage(s):
    """
    Parse a percent string such as ' 12.5% ' into a fraction (0.125).

    @param s: string ending (after stripping whitespace) in '%'.
    @return  : the numeric part times 0.01.

    Raises ValueError if s does not end with '%' (including empty input) or
    if the numeric part is not a valid float.  A ValueError (rather than the
    previous ``assert``) still fires under ``python -O``, and is caught by
    the broad exception handling in the sniffing/decoding code, so failed
    candidates are eliminated exactly as before.
    """
    s = s.strip()
    # endswith also handles the empty string, which s[-1] would not
    if not s.endswith('%'):
        raise ValueError("not a percentage: %r" % (s,))
    return 0.01 * float(s[:-1])

# Candidate decoders tried against sampled values during sniffing.  For each
# column, _sniff() drops every candidate that raises on some sampled value and
# keeps the topmost survivor, so the list is ordered most- to least-specific.
CANDIDATE_VALIDATORS = [
    int,
    float,
    int_comma_separator,
    float_comma_separator,
    percentage,
    #iso8601 parser?
    #boolean parser?
]


# last_loader is for diagnosis
last_loader = None
def _loader():
    return last_loader


def loadcsv(*args, **kwargs):
    """
    Convenience function: build a CSVLoader and load the CSV into a relation.

    All arguments are forwarded to CSVLoader; see CSVLoader for documentation.

    @return - the loaded relation
    """
    global last_loader
    loader = CSVLoader(*args, **kwargs)
    # remember the loader so _loader() can expose it for debugging
    last_loader = loader
    try:
        return loader.prepare_and_load()
    finally:
        loader.close()



class CSVLoader(object):
    """
    Load CSV into a relation.

    Most parameters are optional. The data format is deduced from the
    information available. Most importantly it supports sniffing to deduce the
    data format by reading the beginning of the file.

    Notice the two sets of parameters. The CSV parsing parameters describe the
    format used to parse and interpret the CSV file. The relation spec
    describes the desired output relation. The two sets of parameters do not
    necessarily match exactly. For example, a CSV file may have 50 columns of
    data, but when we load it we may only be interested in 10 of them. So by
    specifying col_names, the caller can select a subset of columns and return
    it in the desired order.

    The deduction of the header is particularly flexible. There are many
    combinations based on the information available and the subset of data the
    user wants.

    First note that while most parameters describe the format of the CSV file,
    there are two parameters that describe the relation the caller wants.
    `col_names` defines the column names and `default_values` defines the
    filler to use for missing or invalid values. `col_names` can be a subset
    of the CSV header; in that case the caller wants to select a subset of
    columns from the CSV file. `col_names` can also be a superset of the CSV
    header, with the extra columns filled with default values.

    After gathering data from sniffing, there are 4 possibilities
    (note: if we sniff but find no header, we will generate default names based
    on the number of columns found. In this case it ends up in the third or
    fourth type, never the first.)

    csv_header    col_names
    -----------------------
    null          null            this is currently not supported (only happens when not sniffing)
    null          values          col_names apply to csv in order, possibly skipping column
    values        null            instantiate relation with all columns in csv_header
    values        values          col_names apply to select csv column in any order

    """

    # hard cap on the number of columns considered while sniffing
    MAX_COLUMNS = 1000

    # number of bytes read from the head of the input for format sniffing
    SNIFF_FMT_SIZE = 8192

    # number of data rows sampled to deduce per-column decoders
    INSPECT_ROWS = 100

    def __init__(self,
        inputfile,
        col_names        = None,
        default_values   = None,
        has_header       = None,
        sniff            = True,
        delimiter        = None,
        doublequote      = None,
        escapechar       = None,
        lineterminator   = None,
        quotechar        = None,
        quoting          = None,
        skipinitialspace = None,
        ):
        """
        Load CSV data into a relation. Optionally sniff the file to deduce the data format.

        @param inputfile:       filename or file like object or the text data itself. If the
                                input contains \n, it is considered text data instead of a filename.

        @param col_names:       list or comma separated attribute names. For a CSV
                                without header, it can indicate columns to be
                                skipped by inserting a blank name in a list or
                                consecutive ',' in a comma separated list.

        @param default_values:  a scalar default value or a list of default
                                values. If it is a scalar, the default value
                                applies to all columns. If it is a list, each
                                element is the default of the corresponding column.

        @param has_header:      a flag to indicate if the first line contains the header.

        @param sniff     :      a flag to indicate if it should read the beginning
                                of the file to deduce the format.

        @param delimiter
        @param doublequote
        @param escapechar
        @param lineterminator
        @param quotechar
        @param quoting
        @param skipinitialspace:    format parameters as in csv.reader().

        @return - a relation loaded from the CSV file
        """

        self.inputfile = inputfile
        self.fp = None

        # relation parameters, they should have the same length
        self.col_names      = self._parse_col_names(col_names)
        self.default_values = default_values
        # for each relation column, the index of the CSV column it reads from
        self.rel_csv_index  = []

        # collect only the CSV format parameters the caller actually supplied,
        # so sniffing can fill in the rest
        self.csv_fmtparam = {}
        if delimiter        is not None: self.csv_fmtparam['delimiter'       ] = delimiter
        if doublequote      is not None: self.csv_fmtparam['doublequote'     ] = doublequote
        if escapechar       is not None: self.csv_fmtparam['escapechar'      ] = escapechar
        if lineterminator   is not None: self.csv_fmtparam['lineterminator'  ] = lineterminator
        if quotechar        is not None: self.csv_fmtparam['quotechar'       ] = quotechar
        if quoting          is not None: self.csv_fmtparam['quoting'         ] = quoting
        if skipinitialspace is not None: self.csv_fmtparam['skipinitialspace'] = skipinitialspace

        # CSV parsing parameters
        self.has_header   = has_header
        self.sniff        = sniff
        self.sniff_error  = None
        self.csv_has_header = self.has_header
        self.csv_max_width  = 0
        self.csv_headers    = []
        self.csv_decoders   = []
        self.csv_defaults   = []
        self.csv_first_row  = []

        # place holder for sniffed rows (kept for diagnosis)
        self.inspected_rows  = []


    @staticmethod
    def _parse_col_names(col_names):
        """Normalize col_names: split a comma separated string into a list.

        Consecutive commas produce blank names, which mark columns to skip.
        """
        if isinstance(col_names,basestring):
            col_names = [item.strip() for item in col_names.split(',')]
        return col_names


    def close(self):
        """Close the underlying file, but only if this loader opened it."""
        if self.fp and (self.fp is not self.inputfile):
            self.fp.close()


    def prepare_and_load(self):
        """Open the input, optionally sniff it, then parse and return the relation."""
        self._open_inputfile()

        if self.sniff:
            self.csv_has_header, self.csv_max_width, self.csv_headers, self.csv_decoders = self._sniff()

        # open fp, it should be at beginning position
        reader = csv.reader(self.fp, **self.csv_fmtparam)


        # Consume the header line. Note self.has_header takes precedence over sniffing.
        if self.has_header is None:
            if self.csv_has_header:
                # just consume the header line
                self.csv_first_row = reader.next()
        elif self.has_header:
            self.csv_first_row = reader.next()
            # csv_headers should be defined already if sniffing
            if not self.csv_headers:
                self.csv_headers = self.csv_first_row


        # fill in defaults for self.col_names and csv_headers
        if not (self.col_names or self.csv_headers):
            if self.csv_max_width:
                # use default names
                self.col_names = rel.Heading._generate_default_names(self.csv_max_width)
                self.csv_headers= self.col_names
            else:
                raise IOError("Unable to read header. Please define col_names explicitly")

        elif not self.col_names:
            self.col_names = self.csv_headers

        elif not self.csv_headers:
            self.csv_headers = self.col_names


        # Create blank relation.
        # A blank name signifies skipping of a CSV column. Now that blanks have
        # been assigned to csv_headers, remove them from col_names; we never
        # create a relation with a blank column in it.
        self.col_names = filter(None,self.col_names)
        if not self.col_names:
            raise ValueError("Unable to load csv as a 0 width relation")

        R = rel.relation(heading=self.col_names)


        # prepare default_values: broadcast a scalar default to every column
        if isinstance(self.default_values, basestring) or (not isinstance(self.default_values, collections.Iterable)):
            self.default_values = [self.default_values] * len(self.col_names)

        if len(self.default_values) != len(self.col_names):
            raise ValueError("Unmatch number of default %s, expects %s" % (len(self.default_values), len(self.col_names)))


        # build map of csv_name -> (csv_idx, decoder)
        csv_column_data = dict(
            (name, (csv_idx, decoder))
            for csv_idx, (name, decoder) in enumerate(
                itertools.izip_longest(self.csv_headers, self.csv_decoders, fillvalue=None)
                )
            if name
            )
            # izip_longest: be careful when csv_headers and csv_decoders differ in length

        # prepare getter and decoders. This also reconciles self.col_names with csv_headers
        self.rel_csv_index = []
        decoders = []
        # note: csv_defaults is self.default_values but ordered in csv column order
        self.csv_defaults = [None] * len(self.csv_headers)

        for col_name, default in zip(self.col_names, self.default_values):

            csv_idx, csv_decoder = csv_column_data.get(col_name, (None, None))

            if csv_idx is not None:
                self.csv_defaults[csv_idx] = default

            else:
                # Column not present in the CSV: synthesize a decoder that
                # always yields this column's default.  Bind `default` as a
                # default argument -- a plain `lambda s: default` would
                # late-bind the loop variable and make every missing column
                # produce the *last* column's default.
                csv_idx = 0
                csv_decoder = lambda s, default=default: default

            self.rel_csv_index.append(csv_idx)
            decoders.append(csv_decoder)

        assert len(self.col_names) == len(decoders) == len(self.default_values) == len(self.rel_csv_index)

        if len(self.rel_csv_index) == 1:
            # operator.itemgetter with a single index returns a scalar rather
            # than a tuple; wrap in a list to keep the row shape uniform
            getter = lambda v, idx=self.rel_csv_index[0]: [v[idx]]
        else:
            getter = operator.itemgetter(*self.rel_csv_index)

        # loading and decoding
        for csv_row in reader:
            # pad short csv rows on the right with the csv-ordered defaults
            csv_row += self.csv_defaults[len(csv_row):]
            out_tuple = []
            # note: the 3 iterators should have same length by construction
            for v, decoder, default in zip(getter(csv_row), decoders, self.default_values):
                if decoder:
                    try:
                        v = decoder(v)
                    except Exception:
                        # best-effort decode: fall back to the default
                        # TODO: have strict mode?
                        v = default
                out_tuple.append(v)

            # pad with default_values
            out_tuple += self.default_values[len(out_tuple):]

            R.append(out_tuple)

        return R


    def _open_inputfile(self):
        """
        Prepare an input file, which is either a filename or a file like object.

        A string containing a newline is treated as the CSV text itself.
        """
        if isinstance(self.inputfile, basestring):
            if '\n' in self.inputfile:
                self.fp = StringIO.StringIO(self.inputfile)
            else:
                self.fp = open(self.inputfile, 'rb')
        else:
            self.fp = self.inputfile

        if self.sniff:
            # make sure this file like object can be rewound and is currently at the beginning
            # this precludes us from using a compressed data stream?
            assert hasattr(self.fp, 'seek') and hasattr(self.fp, 'tell'), "inputfile need to support seek and tell for sniffing: %s" % self.inputfile


    def _sniff(self):
        """
        1. Sniff the CSV format
        2. Sniff the column data format

        @return - csv_has_header, csv_max_width, csv_headers, csv_decoders
        """

        # 0. read a sample, then rewind so loading starts from the top again
        original_pos = self.fp.tell()
        sample = self.fp.read(self.SNIFF_FMT_SIZE)
        self.fp.seek(original_pos)

        # 1. sniff the CSV format
        sniffer = csv.Sniffer()
        delimiters = self.csv_fmtparam.get('delimiter', None)

        try:
            dialect = sniffer.sniff(sample, delimiters)
            # override user supplied parameters with those sniffed
            self.csv_fmtparam['dialect'] = dialect
        except csv.Error, e:
            # too bad sniff fails; record the reason for diagnosis
            self.sniff_error = str(e)


        # 2. second round, read header and sample data rows
        reader = csv.reader(cStringIO.StringIO(sample), **self.csv_fmtparam)

        # check for csv_headers
        csv_has_header = self.has_header
        if self.has_header is None:
            try:
                csv_has_header = sniffer.has_header(sample)
            except csv.Error, e:
                # too bad sniff fails
                self.sniff_error = str(e)

        if csv_has_header:
            csv_headers = reader.next()
        else:
            csv_headers = []

        # 3. sniff the column data format: csv_decoders

        # inspect the rows from the beginning (there can be fewer rows than
        # INSPECT_ROWS; zip with xrange simply stops early)
        self.inspected_rows = [row for i, row in zip(xrange(self.INSPECT_ROWS), reader)]

        csv_max_width = max(map(len,self.inspected_rows)) if self.inspected_rows else 0
        csv_max_width = max(csv_max_width, len(csv_headers))
        csv_max_width = min(csv_max_width, self.MAX_COLUMNS)

        # Initially fill in CANDIDATE_VALIDATORS for each column.
        # Then iterate the inspected_rows and eliminate validators that fail.
        candidate_validators = [CANDIDATE_VALIDATORS[:] for i in xrange(csv_max_width)]

        for i, row in enumerate(self.inspected_rows):
            for value, candidates in zip(row, candidate_validators):
                # notice if the row is shorter than the decoders, the extra
                # validators are not included in the loop (the behavior of izip)

                # blank value should not invalidate anything
                value = value.strip()
                if not value: continue

                # check the candidate_validators starting from the end, so that
                # popping an item does not disturb the indexes still to visit
                for k in xrange(len(candidates)-1,-1,-1):
                    validator = candidates[k]
                    try:
                        validator(value)
                    except Exception:
                        candidates.pop(k)

        # choose the topmost item remaining in candidate_validators
        csv_decoders = [(v[0] if v else None) for v in candidate_validators]

        return csv_has_header, csv_max_width, csv_headers, csv_decoders


    def __unicode__(self):
        """Render a multi-section diagnostic report of the loader state."""

        dialect = self.csv_fmtparam.get('dialect')

        def _fmtparam_str(attr):
            # explicit fmtparam wins over the sniffed dialect; '?' if neither
            if attr in self.csv_fmtparam:
                v = self.csv_fmtparam[attr]
            else:
                v = getattr(dialect, attr, '?')
            return str(v).encode('string_escape')

        def _format_default(default):
            return ('(%s)' % default) if default else ''

        msg_csv_dialect = \
u"""\
  delimiter        = %s
  doublequote      = %s
  escapechar       = %s
  lineterminator   = %s
  quotechar        = %s
  quoting          = %s
  skipinitialspace = %s
""" % (
    _fmtparam_str('delimiter'       ),
    _fmtparam_str('doublequote'     ),
    _fmtparam_str('escapechar'      ),
    _fmtparam_str('lineterminator'  ),
    _fmtparam_str('quotechar'       ),
    _fmtparam_str('quoting'         ),
    _fmtparam_str('skipinitialspace'),
)

        msg_csv_inspect = \
u"""\
  sniff            = %s%s
  has_header       = %s
    %s
  max_width        = %s
  header : decoder <- csv_defaults
    %s
""" % (
            self.sniff,
            ' (%s)' % self.sniff_error if self.sniff_error else '',
            self.csv_has_header,
            repr(self.csv_first_row),
            self.csv_max_width,
            '\n    '.join('%2d - %s : %s <- %s' % (i,h,d,df)
                for i,(h,d,df) in enumerate(
                    itertools.izip_longest(self.csv_headers, self.csv_decoders, self.csv_defaults, fillvalue=None))),
        )

        msg_rel_struct = '    ' + '\n    '.join('%2d - %s [%s] <- %s' % (i,n,idx,d)
                for i,(n,idx,d) in enumerate(
                    itertools.izip_longest(self.col_names, self.rel_csv_index, self.default_values, fillvalue=None)))

        return u"""CSVLoader

Dialect
%s
CSV Inspection
%s
Relation
  col_name [csv_index] <- default
%s
""" % (msg_csv_dialect, msg_csv_inspect,msg_rel_struct)


    def __repr__(self):
        return unicode(self).encode('utf-8', 'xmlcharrefreplace')



def savecsv(R, pathname):
    """
    Save relation R to pathname as CSV: the heading row first, then every tuple.
    """
    fp = open(pathname, 'wb')
    try:
        out = csv.writer(fp)
        out.writerow(list(R.heading))
        for row in R:
            out.writerow(row)
    finally:
        fp.close()
