"""
import relation as rel
R = rel.loadcsv('res/movie.tsv')


Accessing a column
------------------

R.age

Note that you cannot assign to an column attribute
R.age = [10,20,30]

Alternate syntax

R.age[:] = [10,20,30]
R.col['age'] = [10,20,30]

You can use augmented assignment statement, e.g.

R.age += 1

"""

import collections
import itertools
import json
import numbers
import operator
import os
import re
import string
import sys
from StringIO import StringIO

import rel_series


range = xrange
zip = itertools.izip
imap = itertools.imap

def regular_itemgetter(*items):
    """
    Regularized version of operator.itemgetter.

    In case when items' length is 1 or 0, return a 1-tuple and 0-tuple
    respectively. This patch the special behavior of operator.itemgetter when
    there is one argument.
    """
    if len(items) > 1:
        return operator.itemgetter(*items)
    elif items:
        item0 = items[0]
        # unforunately this lambda function is slower than operator.itemgetter.
        return lambda x: (x[item0],)
    else:
        return lambda x: ()


def regular_attrgetter(*items):
    """
    Regularized version of operator.attrgetter.

    In case when items' length is 1 or 0, return a 1-tuple and 0-tuple
    respectively. This patch the special behavior of operator.attrgetter when
    there is one argument.
    """
    if len(items) > 1:
        return operator.attrgetter(*items)
    elif items:
        item0 = items[0]
        # unforunately this lambda function is slower than operator.itemgetter.
        return lambda x: (getattr(x,item0),)
    else:
        return lambda x: ()


def _parse_comma_separated(s):
    s = s.strip()
    if s:
        items = [item.strip() for item in s.split(',')]
        # there should not be any blank item
        if not all(items):
            raise ValueError('Invalid specifier: %s' % s)
        return items
    else:
        return []


class MetaNamedTuple(type):
    """
    MetaNamedTuple generates getter and setter properties for each attr
    """
    def __new__(cls, classname, bases, classdict):
        attrs = classdict['_attrs']

        classdict['__slots__'] = ()
        classdict['_default'] = [None for _ in attrs]
        classdict['_attr_index'] = dict(zip(attrs, xrange(len(attrs))))

        for i, attr in enumerate(attrs):
            # do not overwrite any existing attrbiutes.
            if attr in classdict:
                continue

            get_attr = operator.itemgetter(i)
            # note: using default parameter is faster than closure.
            def set_attr(self, value, i=i):
                self.__setitem__(i,value)

            # TODO: should check if attr is valid identifier. But what if it is not? does it hurt?
            classdict[attr] = property(get_attr, set_attr)


        return type.__new__(cls, classname, bases, classdict)


    def __repr__(cls):
        return 'Tuple<%s>' % ','.join(cls._attrs)



class BaseTuple(list):
    """
    This is the base class of NamedTuple. Use _subclass() to create actual
    NamedTuple class with attribute properties.

    >>> T = BaseTuple._subclass(['x','y','z'])

    >>> t=T((1,2,3))
    >>> t
    (x=1,y=2,z=3)

    >>> t.x,t.y,t.z
    (1, 2, 3)


    These are the various input we can use to create a NamedTuple

    >>> T._create(t)
    (x=1,y=2,z=3)

    >>> T._create({'x':1,'y':2})
    (x=1,y=2,z=None)

    >>> T._create([3,2])
    (x=3,y=2,z=None)

    >>> class Obj(object): pass
    >>> a=Obj(); a.x=1; a.y=2; a.z=3
    >>> T._create(a)
    (x=1,y=2,z=3)


    Note, the constructor of BaseTuple is not overriden because of its superior
    performance. You should always call it with a iterable of the length of the
    Tuple. More customized construction, including filling of default value,
    should be done by the _create() method.
    """
    __metaclass__ = MetaNamedTuple
    __slots__ = ()
    _attrs = ()
    # Does it make sense to have a default always?
    _default = []
    _attr_index = {}


    @staticmethod
    def _subclass(attrs):
        """
        Class factory to create a concrete NamedTuple with supplied attributes.

        @param attrs - a list of attribute names

        """
        if isinstance(attrs, basestring):
            raise ValueError('_subclass() expect a list of attribute names, not: %s' % repr(attrs))
        return type('NamedTuple',(BaseTuple,),dict(_attrs=list(attrs)))


    # Note: We deliberately create a separate _create() method rather than using
    # __init__ for performance reason. We want to keep the built-in
    # list.__init__ because it is the fastest. The _create method is flexible to
    # support multiple data type with custom logic but it is much slower.
    @classmethod
    def _create(cls, atuple):
        """
        Create a NamedTuple from atuple.

        `atuple` can be a NamedTuple, a map, a sequence, or an object. It
        support the same range of input as the initializer of relation.
        """
        if isinstance(atuple, cls):
            obj = cls(atuple)

        elif isinstance(atuple, collections.Mapping):
            obj = cls(atuple.get(attr, default) for attr, default in zip(cls._attrs,cls._default))

        elif isinstance(atuple, collections.Sequence):
            # note atuple can be 0-tuple
            degree = len(cls._attrs)
            obj = cls(atuple[:degree])
            # fill with default if necessary
            if len(obj) < degree:
                obj += cls._default[len(obj):]

        else:
            # treat as an object with attributes map to NamedTuple's attributes
            # break the symmetry by not supporting default
            # if seems too permissive if we are pass in anything, like None or int, and have them filled by default
            obj = cls(getattr(atuple,attr) for attr in cls._attrs)

        return obj


    def __eq__(self, other):
        """
        For Two NamedTuple, return True if both their value and attributes name
        are the same.

        For sequence or tuple, return True if their values are
        the same (lenient mode?)
        """

        # Be careful with the pitfall of `NotImplemented`
        #
        # If `other` is non-list, it cannot be compared to list. instead of
        # returning False, it returns `NotImplemented`, which is True!?
        # Therefore we compares it with True explicitly.
        #
        # 3. Data model - Python v2.7.2 documentation
        # http://docs.python.org/reference/datamodel.html#the-standard-type-hierarchy
        if list.__eq__(self, other) == True:
            try:
                # The default path assumes other is a NamedTuple
                return self._attrs == other._attrs
            except AttributeError:
                # AttributeError implies other has no _attrs. Here we assume it
                # is a list and compare only by value.
                return True

        if isinstance(other, tuple):
            return tuple(self) == other
        else:
            return False


    def __ne__(self, other):
        return not (self == other)


    # TODO: why do we have get and set here? should vacate the namespace to user's attribute?
    def get(self, key):
        return self[self._attr_index[key]]


    # TODO:
    # t.set('foo', 'bar') syntax is not ideal
    # consider something like t.attr['foo'] = 'bar'
    def set(self, key, value):
        self[self._attr_index[key]] = value


    def to_dict(self):
        return dict(zip(self._attrs, self))


    def __repr__(self):
        # For special case when one of the value is itself a relation, just show high level info.
        values = [(
                    '[%s rows]' % len(v)
                    if isinstance(v, basic_relation)
                    else v
                  )
                  for v in self]
        return '(%s)' % ','.join(
            '%s=%s' % (n,repr(v)) for n,v in zip(self._attrs, values)
        )



class Heading(collections.Sequence):
    """
    Heading represents an ordered list of attribute names.
    """
    def __init__(self, heading_spec):
        """
        Instantiate Heading by heading_spec.

        @param heading_spec: the heading can be specify by

            1. a string of attribute names separated by comma
            2. an integer, this generates n attribute names as 'A','B',...,
               then 'AA', 'AB' and so on.
            3. a list of attribute names.
            4. another Heading object

        """
        if isinstance(heading_spec, basestring):

            # string of commas/space separated attribute names
            self.attrs = _parse_comma_separated(heading_spec)

        elif isinstance(heading_spec, int):

            self.attrs = self._generate_default_names(heading_spec)

        # list of attribute names or another Heading object
        elif isinstance(heading_spec, collections.Iterable):

            self.attrs = list(heading_spec)

        else:
            raise ValueError('heading_spec must be a string, Sequence of names or an integer: %s' % heading_spec)

        counts = collections.Counter(self.attrs)
        if len(counts) < len(self.attrs):
            dups = ','.join(a for a,c in counts.items() if c >1)
            raise ValueError('Duplicated attributes: %s' % dups)      # TODO: <<<<<<<<<<<<<<<<<<<<<<<<<< TEST

        # attribute name to position index
        self.attr_to_index = dict(zip(self.attrs, range(len(self.attrs))))


    @staticmethod
    def _generate_default_names(count):
        """
        A scheme to generate list of names - A, B, ..., Z, AA, AB, ...
        """
        names = []
        for n in xrange(count):
            A = ord('A')
            n,d = divmod(n,26)
            name = chr(A+d)
            while n:
                n,d = divmod(n-1,26)
                name = chr(A+d) + name
            names.append(name)
        return names


    def __len__(self):
        return len(self.attrs)


    def __bool__(self):
        return bool(self.attrs)


    def __iter__(self):
        return iter(self.attrs)


    def __eq__(self, other):
        return hasattr(other,'attrs') and (self.attrs == other.attrs)


    def __ne__(self, other):
        return (not hasattr(other,'attrs')) or (self.attrs != other.attrs)


    def __contain__(self, name):
        return name in self.attr_to_index


    def __getitem__(self, key):
        return self.attrs[self.index(key)]


    def index(self, key):
        """
        Return the 0-based index of attr in heading.

        `key` can be attribute name or integer index to support flexible general
        indexing. When it is an integer, the return value is `key` itself.

        @raise IndexError if the attribute is not valid
        """
        if isinstance(key, basestring):
            try:
                return self.attr_to_index[key]
            except KeyError:
                raise IndexError(key)
        return key


    def select_attrs(self, attrs):
        """
        Returns a list of attribute index specified by `attrs`.

        Supports flexible attribute selection by accepting several parameter format.

        @param attrs: can be in one of these format

            an integer
                select_attrs(3) -> [3]

            a string of comma separated attribute names
                select_attrs("name,age,salary") -> [0, 1, 4]

            a sequence of integer index
                select_attrs((3,1,4)) -> [3, 1, 4]

            a sequence of attribute names
                select_attrs(["name","age","salary"]) -> [0, 1, 4]

            Note it is also valid to use 0-tuple
                select_attrs(()) -> []

        @raise IndexError: when there are any invalid attribute.
        """

        if isinstance(attrs, int):
            # a single attrbiute index
            attrs = [attrs]

        elif isinstance(attrs, basestring):
            # string of commas/space separated attribute names
            attrs = _parse_comma_separated(attrs)

        return map(self.index, attrs)


    def select_attrs_getter(self, attrs):
        """
        Returns a getter function to the attributes specified.

        @param attr: <attribute selector> as in select_attrs()

        @return: attribute getter, or none if attrs is empty
        """
        if attrs or isinstance(attrs, int):
            # note: attrs = 0 is also a valid index
            return operator.itemgetter(*self.select_attrs(attrs))
        else:
            return None


    @staticmethod
    def make_unique_name(attrs, new_attr):
        """
        Find a unique `new_attr` to add into `attrs`.

        If `new_attr` is already exist in attrs, mangle it (by adding a
        sequential number) to make it unique in attrs.
        """
        name = new_attr
        for i in xrange(1,sys.maxint):
            if name not in attrs:
                return name
            name = '%s_%s' % (new_attr, i)


    def __repr__(self):
        return 'Heading<%s>' % ', '.join(self.attrs)



class ColumnCollection(object):

    def __init__(self, relation):
        self.relation = relation
        self.col_views = [rel_series.ColumnView(relation, i) for i in range(len(relation.heading))]


    def __getitem__(self, key):
        """
        You can access the column by either its 0-based index or the attribute name.

        Note if you apply slicing on cols, you get a list of individual columns, not a relation.
        """
        index = self.relation.heading.index(key)
        return self.col_views[index]


    def __setitem__(self, key, value):
        try:
            idx = self.relation.heading.index(key)
        except IndexError:
            # key does not exist, append as new column
            self.relation._append_column(key,value)
            return

        # syntatic sugar for col[:] = value
        # pros: it does length validation and scalar assignment
        # cons: this does an unnecessary fancy indexing
        self[idx][:] = value


    def rename(self, old_name, new_name):
        """
        Rename attribute of `old_name` to `new_name`.
        """
        heading = self.relation.heading
        index = heading.index(old_name)
        new_attrs = heading.attrs[:]
        new_attrs[index] = new_name
        self.rename_all(new_attrs)


    def rename_all(self, attr_names):
        """
        Rename all attributes by `attr_names`.

        @param attr_names: a comma separated attribute names string or a list of names.

        """
        # validation the syntax and number of attributes
        _heading = Heading(attr_names)
        if len(self.relation.heading) != len(_heading):
            raise ValueError("Needs %s attribute to rename" % len(self.relation.heading))

        # the actual renaming is implement in the relation
        self.relation._rename(attr_names)



# Note: I have consider to inherit from collections.Sequence. I decided against
# it because I rather not have method like .count() to pollute the name space.
class basic_relation(collections.Iterable):
    """
    Basic model of relation as a list of tuples.

    basic_relation add to a list of tuples by allowing user to specify the
    attributes of the relation. By creating and instantiating `namedtuple`, it
    allows user to reference attribute value by name.

    """
    RAISE_ERROR = object()

    class NO_INIT_DATA(RuntimeError): pass

    def __init__(self, init_data=[], heading=None, default=RAISE_ERROR):
        """
        Create a basic relation.

        The __init__ method allows initializing a relation from a number of
        common data type. It also look into the first row of `init_data` to
        deduce the format and minimize manual speficication. The heading
        parameter is leave optional because sometimes it can be deduced from the
        data row or generated.


        @param init_data: data used to initialize the relation.

            Several format are accepted for `init_data`. They can be sequence or
            iterator of rows, where rows are:

            - NamedTuple, with same attributes in the same order
            - NamedTuple, not necessary with same attributes or order
            - sequence
            - map, not necessary has the same set of attributes.
            - object, those attributes corresponds to the columns.

            Rows of `init_data` should be of homogeneous type. I.e. if the type
            of first row is NameTuple<x,y,z>, then all rows are expected to be
            NameTuple<x,y,z>. Rows of map and sequence can be of varying size.
            However if the first row is a map or a sequence, then all rows
            should be a map or sequence.

            If the row contains more data than needed, they are discard.

            If the row contains less data than needed, they are filled in with
            `default`. If `default` is not specified, ValueError is raised.

        @param heading: Specifies the attributes of the relations.

            None     - deduce degree from init_data and generates attr names
            integer  - define relation's degree, generate attr names as A,B,C,...
            string   - comma separated list of attribute names
            iterable - sequence of attribute names
            Heading  - used as iterable

        @param default: default value to fill into missing attributes.

            If `default` is not specified, the initialization is done in strict
            mode. When there are missing values, an Exception is raised.

        """

        # first of all initialize heading by assigning to __dict__
        # this setup is required to make our __setattr__ works
        self.__dict__['heading'] = []

        self.data = []

        try:
            if not init_data:
                raise self.NO_INIT_DATA()
            assert not isinstance(init_data, basestring)
            assert isinstance(init_data, collections.Iterable)
            row0, fetched_first_row = self._inspect_first_row(init_data)
        except self.NO_INIT_DATA:
            # Note: there are a few ways there can be NO_INIT_DATA. `init_data` can be either None or [] or iter([])
            # In this case just initialize the heading
            self._init_heading(heading or 0)
            return

        # Base on the data type of the first row, apply a fast vectorized
        # initialization function. Also the heading is not yet fixed at this
        # point.
        if isinstance(row0, collections.Mapping):
            self._init_list_of_map(row0, fetched_first_row, init_data, heading, default)

        elif isinstance(row0, BaseTuple):
            self._init_list_of_base_tuple(row0, fetched_first_row, init_data, heading, default)

        elif isinstance(row0, collections.Sequence):
            self._init_list_of_sequence(row0, fetched_first_row, init_data, heading, default)

        else:
            self._init_list_of_object(row0, fetched_first_row, init_data, heading, default)


    def _inspect_first_row(self, init_data):
        """
        Look at the first row from `init_data` to deduce how to initialize the relation.

        `init_data` can be a Sequence or a iterator. If it is a Sequence we just
        look at init_data[0]. If it is a iterator, we pop a row from it and there
        is no easy way to push it back (without a performance penalty).
        Therefore we return a flag `fetched_first_row` to remind caller a row is
        popped.

        If `init_data` is an empty iterator, raise NO_INIT_DATA.
        """
        try:
            # either it supports __getitem__
            row0 = init_data[0]
            fetched_first_row = False
        except TypeError:
            try:
                # or it is a iterator
                row0 = init_data.next()
                fetched_first_row = True
            except StopIteration:
                raise self.NO_INIT_DATA()

        return row0, fetched_first_row


    def _init_heading(self, heading):
        # note: this is called during object initialization and also when column is added or renamed
        self.heading = Heading(heading)
        self.Tuple = BaseTuple._subclass(self.heading.attrs)

        # accessor to ColumnView
        self.col = ColumnCollection(self)


    def _init_list_of_map(self, row0, fetched_first_row, init_data, heading, default):
        self._init_heading(heading or sorted(row0.keys()))

        map_getter = regular_itemgetter(*self.heading)

        if default is not self.RAISE_ERROR:
            # Use defaultdict to assign default value. This adds significant performance penalty.
            #
            # for 10 item dict x 100*1000:
            # - straight init by itemgetter - 77ms
            # - default with no replacement - 186ms
            # - default with 1 replacement - 240ms
            class ddict(dict):
                def __missing__(self,key):
                    return default
            init_data = imap(ddict,init_data)

        tt = self.Tuple

        # pass 1 - initialize self.data (only need 1 pass)
        if fetched_first_row:
            self.data = [tt(map_getter(row0))]
            self.data.extend(imap(tt,imap(map_getter, init_data)))
        else:
#            attrs = self.heading.attrs
#            self.data = map(tt,([row.get(a,default) for a in attrs] for row in init_data))
            self.data = map(tt,imap(map_getter, init_data))


    def _init_list_of_base_tuple(self, row0, fetched_first_row, init_data, heading, default):
        # default heading is auto label of width of row0 (can be 0)
        self._init_heading(heading or row0._attrs)

        # from index to this relation from row0
        from_heading = Heading(row0._attrs)
        mission_columns = []

        # special case: heading are compatible. Use faster _init_list_of_sequence.
        if self.heading == from_heading:
            self._init_list_of_sequence(row0, fetched_first_row, init_data, self.heading, default)
            return

        # 0 - create getter on initialization data
        if not self.heading:
            getter = lambda x: ()

        elif not from_heading:
            if default is self.RAISE_ERROR:
                raise ValueError(row0)
            # return default value for all attr and all rows
            default_col = [default] * len(self.heading)
            getter = lambda x: default_col

        elif default is self.RAISE_ERROR:
            # from_heading must have all attr from self.heading
            if len(self.heading) > 1:
                getter = from_heading.select_attrs_getter(self.heading)
            else:
                # special case: need to define our own getter because itemgetter
                #               doesn't work for us when there are just 1 item.
                index = from_heading.index(self.heading[0])
                def getter(r, index=index):
                    return (r[index],)

        else:
            # try our best to map from_heading to self.heading. Fill in default value for missing column
            from_idx = []
            for i, attr in enumerate(self.heading):
                try:
                    idx = from_heading.index(attr)
                except IndexError:
                    # 0 is filler value
                    idx = 0
                    mission_columns.append(i)
                from_idx.append(idx)
            getter = regular_itemgetter(*from_idx)

        # pass 1 - initialize self.data
        tt = self.Tuple
        if fetched_first_row:
            self.data = [tt(getter(row0))]
            self.data.extend(imap(tt,imap(getter, init_data)))
        else:
            self.data = map(tt,imap(getter, init_data))

        # pass 2 - back fill mission column with default value
        if mission_columns:
            default_col = [default] * len(self)
            for idx in mission_columns:
                self.col[idx] = default_col


    def _init_list_of_sequence(self, row0, fetched_first_row, init_data, heading, default):
        # default heading is auto label of width of row0 (can be 0)
        self._init_heading(heading or len(row0))

        # first pass - initialize self.data
        tt = self.Tuple
        if fetched_first_row:
            self.data = [tt(row0)]
            self.data.extend(imap(tt,init_data))
        else:
            self.data = map(tt,init_data)

        # second pass - check for missing value
        width = len(self.heading)
        default_tuple = [default] * width
        STRICT_MODE = bool(default is self.RAISE_ERROR)
        for i, row in enumerate(self):
            l = len(row)
            if l == width:
                # validated row
                continue
            elif l < width:
                if STRICT_MODE:
                    raise ValueError('Missing value in row %s: %s' % (i,row))
                else:
                    # pad row
                    row.extend(default_tuple[l:])
            else:
                # truncate row (or do we want a strict mode to raise exception?)
                del row[width:]

        # 2012-01-18 Performance snapshot [Python 2.7 Windows 7]
        #
        # l = 10-tuple x 1000*1000
        #
        # ms
        # 328   map - build list
        # 155   for loop - no missing value
        # 320   for loop - truncate 100% of rows
        # 580   for loop - pad 100% of rows
        # +70   overhead for if STRICT_MODE check


    def _init_list_of_object(self, row0, fetched_first_row, init_data, heading, default):
        self._init_heading(heading)

        attr_getter = regular_attrgetter(*self.heading)

        if default is not self.RAISE_ERROR:
            # Use defaultdict to assign default value. This likely adds significant performance penalty.
            class default_obj_wrapper(object):
                def __init__(self, obj):
                    self.obj = obj
                def __getattr__(self, attr):
                    return getattr(self.obj, attr, default)
            init_data = imap(default_obj_wrapper,init_data)

        tt = self.Tuple

        if fetched_first_row:
            self.data = [tt(attr_getter(row0))]
            self.data.extend(imap(tt,imap(attr_getter, init_data)))
        else:
            self.data = map(tt,imap(attr_getter, init_data))


    def clone(self, init_data=None, deepcopy=False):
        """
        Clone the relation with the same heading. Initialize the new relation
        with any subset of the rows.

        @param init_data    - iterable of rows. This should be a subset of self
                              with the same Tuple type.

        @param deepcopy     - default False mean tuples are shared between self and
                              clone. Update to clone shows up in self and vice
                              versa. deepcopy=True creates new tuple. Update to
                              the clone does not changes self. Deepcopy does not
                              go recursively however.
        """
        clone = self.__class__(heading=self.heading)
        if init_data:
            if deepcopy:
                # deep copy
                clone.data = map(clone.Tuple, init_data)
            else:
                # shallow copy
                clone.data = list(init_data)
        return clone


    def __len__(self):
        return len(self.data)


    def __iter__(self):
        return iter(self.data)


    def __eq__(self, other):
        return isinstance(other, basic_relation) \
            and (self.heading == other.heading) \
            and (self.data == other.data)


    def __ne__(self, other):
        return not self == other


    def __getitem__(self, key):
        """
        Get rows of a relation

        `key` can be an integer (incl. negative), a slice or a boolean array. If
        `key` is an integer, a row is returned. For slice and fancy indexing, a
        relation that is a subset of the original is returned.

        Array Fancy Indexing
        --------------------
        It supports numpy style fancy indexing. If `key` is an boolean array, a
        subset of the relation that corresponds to true rows are returned. The
        result is like this list:

            [row for row, flag in zip(self, key) if flag]
        """
        try:
            # This is the most optimal way to return the item when key is an integer.
            # Experiment on Python 2.7 shows that it beats isinstance(key,int) checking
            # key+0 is a trick to separate int key from slice key.
            return self.data[key+0]
        except TypeError:
            pass

        if isinstance(key, collections.Sequence):
            # fancy indexing
            # only boolean array of the same size is supported
            if all(isinstance(k,bool) for k in key):
                if len(key) != len(self):
                    raise IndexError(
                        'Fancy indexing with boolean array incorrect key size: %s, expects %s'
                        % (len(key), len(self)))
                return self.filter(key)
            else:
                raise TypeError('Fancy indexing requires boolean array: %s' % key)

        # else assume it is a slice
        return self.clone(self.data[key])


    def filter(self, predicate):
        if isinstance(predicate, collections.Sequence):
            # assume it is boolean array of the same size as self
            iterator = (row for row, flag in zip(self,predicate) if flag)
        elif callable(predicate):
            iterator = (row for row in self if predicate(row))
        else:
            raise ValueError('predicate should be a boolean array or a callable, not: %s' % predicate)
        return self.clone(iterator)


    def __getattr__(self, attr):
        """
        Return ColumnView of the column `attr`.

        This is an opportunistic convenient function. If the attr is a Python
        keyword or if it collide with an object attribute, you will not be able
        to access it as an attribute. You must retrive the ColumnView via the
        .col attribute.
        """
        try:
            return self.col[attr]
        except IndexError, e:
            # Refine the error message because there is some ambiguity. The
            # caller maybe access a column_view of he maybe access a regular
            # relation attribute.
            raise AttributeError('Undefined column or attribute: "%s"' % attr)


    def __setattr__(self, attr, value):
        if (attr in self.heading) and (attr not in self.__dict__):
            # assign column data
            # note attr in __dict__ take precedence to col attr
            self.col[attr] = value
        else:
            # otherwise assign attribute normally
            super(basic_relation, self).__setattr__(attr,value)


    def _rename(self, new_attrs):
        self._init_heading(new_attrs)
        # replace tuples' __class_ attribute so that the have new attribute accessor
        for row in self:
            row.__class__ = self.Tuple


    def extend_attrs(self, attributes, default=None):
        """
        Extend the relation with new attributes.

        @param attributes: a comma separated string, or a list of attribute names

        @param default: optional default value assign for the new attributes
            if one attribute is added, it should be the default value
            if multiple attributes are added, it should be a tuple of default values
            if this is not specified, the new attributes are default to None.
        """

        # parse the attributes parameter
        if isinstance(attributes, basestring):
            attributes = _parse_comma_separated(attributes)
        if not attributes:
            raise ValueError('No attributes defined')

        N = len(attributes)

        # find a N-tuple default
        if default == None:
            default = (None,) * N
        elif N == 1:
            # special case, default should be a "scalar" value
            # wrapped it into a 1-tuple
            default = (default,)
        try:
            assert N == len(default)
        except:
            raise ValueError('Default should be a %s-tuple' % N)

        return relation(
            (tuple(r) + default for r in self),
            self.heading.attrs + attributes,
        )


    def _append_column(self, key, value):
        if (not key) or (not isinstance(key,basestring)) or (key in self.heading):
            raise ValueError('Invalid column to append: %s' % repr(key))

        if isinstance(value, collections.Iterator):
            value = list(value)

        if isinstance(value, collections.Sequence) and not isinstance(value, basestring):
            if len(value) != len(self):
                raise ValueError('require `value` of size %s' % len(self))
        else:
            # broadcast scalar value
            value = itertools.repeat(value)

        # append `key` as the last attribute
        new_attrs = self.heading.attrs + [key]
        self._rename(new_attrs)

        # append value as the last column
        for row, v in zip(self, value):
            row.append(v)


    def append(self, tuple):
        t = self.Tuple._create(tuple)
        self.data.append(t)


    def insert(self, i, tuple):
        t = self.Tuple._create(tuple)
        self.data.insert(i,t)


    class ColFormatter(object):

        def __init__(self, count):
            self.count = count
            self.data = []

        def write(self, row):
            self.data.append(row)

        def format(self):
            # conversion - left adjusted by default
            conversion = ['-'] * self.count
            if len(self.data) > 1:
                for i, v in enumerate(self.data[1]):
                    if isinstance(v, numbers.Number):
                        conversion[i] = ''

            # convert everything to str
            self.data = [map(str,row) for row in self.data]

            # calculate the max width
            width = [0] * self.count
            for row in self.data:
                if len(row) != self.count:
                    raise RuntimeError("Expect %s-tuple: %s" % ((self.count), row))
                for i, s in enumerate(row):
                    l = len(s)
                    if l > width[i]:
                        width[i] = l

            # add a separator under the attribute name
            sep = ['-' * w for w in width]
            self.data.insert(1,sep)

            format_str = ' '.join('%%%s%ds' % (c,w) for c, w in zip(conversion,width)) + '\n'

            return ''.join(format_str % tuple(row) for row in self.data)


    def dump(self, count=1000, file=None):
        if file is None:
            file = sys.stdout

        writer = self.ColFormatter(1+len(self.heading))
        writer.write([' '] + self.heading.attrs)

        i = -1
        for i, row in enumerate(self):
            if count and i >= count:
                break
            fields = [i]
            fields.extend(
                '[%s rows]' % len(v) if isinstance(v, basic_relation) else v
                for v in row
            )
            writer.write(fields)

        file.write(writer.format())
        if count and i >= count:
            file.write('...%s of %s rows...\n' % (count, len(self)))


    def json_dumps(self):
        """ Encode as JSON string. Assume elements are JSON primitive objects """
        return RelationJSONEncoder().encode(self)


    def __str__(self):
        fp = StringIO()
        self.dump(file=fp)
        return fp.getvalue()


    def __repr__(self):
        return 'relation<%s x %s>' % (self.Tuple, len(self))



class RelationJSONEncoder(json.JSONEncoder):
    """ Custom JSONEncoder for relation data types """
    def default(self, obj):
        if isinstance(obj, basic_relation):
            return [dict(zip(row._attrs, row)) for row in obj]
        return json.JSONEncoder.default(self, obj)



class relation(basic_relation):
    """

    Instantiate a

        r = relation(data, heading)

    relation provides query functions on top of basic_relation.

    select()  - query
    groupby() - summarize and aggregate data
    where()   - filter
    orderby() - sort
    join()    - combine relation

    get()

    document <attribute selector>
    """

    def select(self, attrs):
        """
        @param attrs: comma separated or list of attributes.
        """
        index_list = self.heading.select_attrs(attrs)
        attr_list = [self.heading[i] for i in index_list]
        return self.__class__(self, heading=attr_list)


    _agg_regex = re.compile("(?P<count>count\(\*\)$)|(?P<agg>(?P<agg_func>sum|avg|min|max)\((?P<agg_item>\w+)\)$)")

    def _parse_agg_expr(self, agg_expr):
        if not agg_expr:
            return []
        agg_expr_list = []
        for expr in agg_expr.split(','):
            expr = expr.strip()
            m = self._agg_regex.match(expr)
            if not m:
                raise SyntaxError("Invalid aggregate expression: %s" % expr)

            if m.group('count'):
                agg_expr_list.append(('count',None,'count'))
            else:
                func = m.group('agg_func')
                item = m.group('agg_item')
                attr = '%s_%s' % (func, item)
                index = self.heading.index(item)
                agg_expr_list.append((func,index,attr))

        return agg_expr_list


    def groupby(self, key, agg_expr=None):
        """
        @param key - 1. one or more key attributes specified as comman separated
                        string or list of attribute name, or
                     2. a function.
        @param agg_expr - note aggregate expression only support alphanumeric attribute name.
        """
        single_key = False
        if callable(key):
            keyfunc = key
            # Cannot validate attributes are in group by key in this case.
            # Because we cannot deduce the key used from the custom function.
            key_indices = []
            single_key = True
            attrs = ['key']
        else:
            key_indices = self.heading.select_attrs(key)
            if not key_indices:
                raise ValueError("Please define one or more keys")
            keyfunc = operator.itemgetter(*key_indices)
            single_key = len(key_indices) == 1
            attrs = [self.heading.attrs[idx] for idx in key_indices]

        # optional aggregate_ex
        agg_expr_list = self._parse_agg_expr(agg_expr)

        # Build the result attribute list. If there are duplicated items, it will be checked below.
        attrs.extend(attr for _,_,attr in agg_expr_list)
        attrs.append('group')

        # build output relation grouped
        grouped = relation(heading=attrs)

        # assign rows into buckets keyed
        key_values = []
        key2grouped = collections.defaultdict(self.clone)
        for r in self:
            key = keyfunc(r)
            key_group = key2grouped[key]
            if not key_group:
                key_values.append(key)
            key_group.data.append(r)

        # aggregation
        for key in key_values:
            key_group = key2grouped[key]

            # 1. tuple start with keys
            t = [key] if single_key else list(key)

            # 2. then aggregated items
            for func, index, attr in agg_expr_list:
                if func == 'count':
                    t.append(len(key_group))
                elif func == 'sum':
                    t.append(sum(key_group.col[index]))
                elif func == 'avg':
                    t.append(float(sum(key_group.col[index])) / len(key_group))
                elif func == 'min':
                    t.append(min(key_group.col[index]))
                elif func == 'max':
                    t.append(max(key_group.col[index]))

            # 3. third group
            t.append(key_group)

            grouped.append(t)

        return grouped



    def _where_cond(self, _predicate=None, **kwargs):
        if (_predicate is not None) and kwargs:
            raise RuntimeError('Please provide either _predicate or kwargs')

        if callable(_predicate):
            return _predicate

        elif _predicate is not None:
            indices = self.heading.select_attrs(_predicate)
            getter = operator.itemgetter(*indices)
            if len(indices) == 0:
                _predicate = lambda r: True
            elif len(indices) == 1:
                _predicate = getter
            else:
                _predicate = lambda r: all(getter(r))

        elif kwargs:
            keys = kwargs.keys()
            values = tuple(kwargs.values())

            getter = self.heading.select_attrs_getter(keys)

            # special case - for single key, unpack the first value instead of
            # using the sequence to match the result of operator.itemgetter()
            if len(keys) == 1:
                values = values[0]

            _predicate = lambda r: getter(r) == values

        else:
            #TODO: should we raise Exception? Any use case?
            _predicate = lambda r: True

        return _predicate


    def where(self, _predicate=None, **kwargs):
        """
        Return a new relation with rows filtered by the _predicate.

        The _predicate can be a function that evaluate a row and return a truth
        value, e.g

            r.where(lambda r: r.age > 18)           # rows with age > 18

        Alternatively it can be a <attribute selector>. In this case a row is
        selected if all attributes evaluate to True. E.g.

            r.where(['married', 'children_count'])  # married with kids

        The _predicate can also be defined in keyword arguments syntax instead.
        This can be used to select rows with one or more attributes that equals
        to some values. E.g.

            r.where(gender='M', married=False)      # unmarried man

        Note that the keyword arguments syntax can only be used to specify
        equality condition. It cannot be used to expression other comparisons
        like !=, <, >, etc.

        @param _predicate: a function that computes a truth value from the row.
                          It can also be an <attribute selector>.

        """
        _predicate = self._where_cond(_predicate, **kwargs)
        return self.filter(_predicate)


    def get(self, _predicate=None, **kwargs):
        _predicate = self._where_cond(_predicate, **kwargs)
        for r in self:
            if _predicate(r):
                 return r
        return None


    def orderby(self, key=None, reverse=False):
        """
        Returns a new relation with rows sorted in the order specified.

        @param key:         A function return comparison key from each list
                            element. It can also be a <attribute selector>.

        @param reversed:    If set, sort in reverse order.
        """
        if not callable(key):
            key = self.heading.select_attrs_getter(key)

        R2 = self.clone(sorted(self, key=key, reverse=reverse))

        return R2


    def join(self, R2, keys=None):
        # note: if R2 has attr with the same name in R1 that's not in keys, R1's attribute will shadow R2's.

        R1attrs = set(self.heading)
        R2attrs = set(R2.heading)

        if keys is None:
            # default to find common attribute name
            key_attrs1 = [attr for attr in self.heading if attr in R2attrs]
            key_attrs2 = key_attrs1

        else:
            # decompose keys in two list of key_attrs1 and key_attrs2
            key_attrs1 = []
            key_attrs2 = []
            for k in keys:
                if isinstance(k, basestring):
                    key_attrs1.append(k)
                    key_attrs2.append(k)
                elif isinstance(k, collections.Sequence) and (len(k) == 2):
                    key_attrs1.append(k[0])
                    key_attrs2.append(k[1])
                else:
                    raise ValueError('keys can be an attribute name or a 2-tuple')


        assert len(key_attrs1) == len(key_attrs2)

        if key_attrs1:
            key_getter1 = self.heading.select_attrs_getter(key_attrs1)
            key_getter2 = R2.heading.select_attrs_getter(key_attrs2)
        else:
            # if keys is empty, this rule generates a cartesian product
            key_getter1 = key_getter2 = lambda r: ()

        # only R2 attributes not in R1 will contribute to the joined list
        attr2_subset = [attr for attr in R2.heading if (attr not in R1attrs)]
        attr2_subset_is_single = len(attr2_subset) == 1
        if attr2_subset:
            attr2_getter = R2.heading.select_attrs_getter(attr2_subset)
        else:
            attr2_getter = lambda r: ()

        # build joined relation
        attrs12 = list(self.heading)
        attrs12.extend(attr2_subset)
        R12 = relation(heading=attrs12)

        # implementation specific strategy - build a throwaway index of keys on R2
        index2 = collections.defaultdict(list)
        for row2 in R2:
            keys2 = key_getter2(row2)
            index2[keys2].append(row2)

        # TODO: optimization - what if only a small subset of R2 is used in join? An overkill to build an index?
        # TODO: optimization - if rows in index2 is used multiple times, run attr2_getter() one time and save the row in index?

        # loop R1 x index and populated R12
        single_key = len(attr2_subset)
        for row1 in self:
            key1 = key_getter1(row1)
            for row2 in index2.get(key1, []):
                if attr2_subset_is_single:
                    row12 = row1 + [attr2_getter(row2)]
                else:
                    row12 = row1 + list(attr2_getter(row2))
                R12.append(row12)

        return R12


