import sha
import sys
import numrange
from tokens import Tokenizer
import simplejson
import xapian
import math


# Public names exported by this module.  AtomField, SummaryField and the
# MetaIndexable machinery are not exported.
__all__ = [
    'InvalidDocumentError',
    'InvalidInputError',
    'IndexableDocument',
    'FieldList',
    'CompositeField',
    'DateField',
    'IntegerField',
    'PriceField',
    'TextField',
    'WordField',
]

class InvalidDocumentError(StandardError):
    '''
    Raised for an invalid document.  (Not raised within this module;
    presumably reserved for callers building on these classes.)
    '''

class InvalidInputError(StandardError):
    '''
    Raised during document encoding when the input data dictionary
    contains a key that is not a declared field, or is missing a value
    for a required field (see IndexableDocument._encode).
    '''

class Field(object):
    '''
    Common base class for every field type.  Its only job is to let the
    document machinery pick out the field declarations (and nothing
    else) from an IndexableDocument class definition.
    '''
    @classmethod
    def as_json(cls):
        '''Return the JSON-serializable definition: just the class name.'''
        return [cls.__name__]

class AtomField(Field):
    '''
    Atom fields store a single term.
    '''
    @classmethod
    def from_json(cls, json=None):
        '''
        Rebuild an instance from its JSON definition.

        The definition argument is accepted (and ignored) because
        callers such as IndexableDocument.cls_from_json pass the raw
        field definition; the old zero-argument signature made those
        calls raise TypeError.
        '''
        return cls()

class WordField(Field):
    '''
    Stores a single word: only the first token produced by the
    tokenizer is indexed (and optionally summarized).
    '''
    def __init__(self, summarize=False):
        # when True, the encoded token is also added to the summary data
        self._summarize = summarize

    def encode(self, token_name, arg, summary_data):
        '''
        Encode arg into term rows.  Only the first token is kept — this
        is a single-word field; an empty tokenization yields no rows.
        '''
        for token in Tokenizer(arg):
            if self._summarize:
                summary_data.append([((token, ),)])
            # deliberate return inside the loop: keep just the first token
            return [(("%s:%s" % (token_name, token), ),)]
        return []

    @classmethod
    def from_json(cls, json=None):
        '''
        Rebuild from a JSON definition.  The definition argument is
        accepted for caller compatibility (cls_from_json passes it) but
        carries no extra state.
        '''
        return cls()
        

class IntegerField(AtomField):
    '''
    IntegerField instances store integers >= 0.
    '''
    def encode(self, token_name, arg, summary_data):
        '''Encode the value as a single column of numrange terms.'''
        range_terms = numrange.encode(token_name, arg)
        return [(tuple(range_terms),)]

class DateField(IntegerField):
    '''
    A date stored as its integer YYYYMMDD form.

    encode() expects a date-like object exposing strftime(); the value
    is flattened to an int and encoded exactly like an IntegerField.
    '''
    def __init__(self, summarize=False):
        self._summarize = summarize

    def encode(self, token_name, arg, summary_data):
        # e.g. date(2009, 5, 1) -> 20090501
        as_int = int(arg.strftime("%Y%m%d"))
        return IntegerField.encode(self, token_name, as_int, summary_data)




class CompositeField(Field):
    '''
    A field built from named subfields (key/value pairs of other Field
    instances).  Encoding yields one flattened row combining the first
    column of each subfield's encoding, in sorted key order.
    '''
    def __init__(self, **kwargs):
        self._subfields = kwargs

    def field_names(self):
        '''Return the subfield names in sorted (deterministic) order.'''
        return sorted(self._subfields.keys())

    def __getattr__(self, key):
        # Expose subfields as attributes.  Raise AttributeError (not a
        # leaked KeyError) for unknown names so hasattr()/getattr()
        # behave as expected.
        try:
            return self._subfields[key]
        except KeyError:
            raise AttributeError(key)

    def encode(self, token_name, kwargs, summary_data):
        '''
        Encode each subfield under a "<token_name>_<key>" prefix and
        flatten the results into a single row.

        The old implementation flattened dict.values() in arbitrary
        dict order; iterating field_names() makes the column order
        deterministic.
        '''
        columns = []
        for key in self.field_names():
            encoded = self._subfields[key].encode(
                "%s_%s" % (token_name, key), kwargs[key], summary_data)
            # keep only the first column of the first row, as before
            columns.append(encoded[0][0])
        return [tuple(columns)]

    @classmethod
    def from_json(cls, json):
        '''
        Rebuild a CompositeField from its JSON definition.

        Accepts either the full definition produced by as_json() (class
        name followed by the [name, subfield-def] list) or just the
        subfield list itself, since both shapes appear at call sites.
        The old implementation only printed the entries and returned an
        empty composite.
        '''
        cur_module = sys.modules[__name__]
        definitions = json
        if definitions and not isinstance(definitions[0], (list, tuple)):
            # leading element is the class name - skip to the subfields
            definitions = definitions[1]
        field_dict = {}
        for name, subdef in definitions:
            subfield_cls = getattr(cur_module, subdef[0])
            # str() because JSON decoding yields unicode keys and
            # **kwargs requires str keys in Python 2
            field_dict[str(name)] = subfield_cls.from_json(subdef)
        return cls(**field_dict)

    def as_json(self):
        '''Definition is the class name plus [name, subfield-def] pairs.'''
        comp_def = self.__class__.__name__, [[k, v.as_json()] for k, v in self._subfields.items()]
        return comp_def

class PriceField(CompositeField):
    '''
    Shorthand for a CompositeField made of an integer amount and a
    currency word (keys: amount, ccy).
    '''
    def __init__(self):
        CompositeField.__init__(self, amount=IntegerField(), ccy=WordField())

    # encode() is inherited from CompositeField; the old override was a
    # pure pass-through and added nothing.

    @classmethod
    def from_json(cls, json):
        '''The subfield layout is fixed, so the definition is ignored.'''
        return cls()
            
class FieldList(Field):
    '''
    A list of 0 or more values, all encoded by the same subfield.
    '''
    def __init__(self, subfield):
        self._subfield = subfield

    def _get_field(self):
        return self._subfield
    field = property(_get_field)

    def encode(self, token_name, field_list, summary_data):
        '''
        Encode each element of field_list as one result row.

        For a CompositeField subfield each element is a dict whose keys
        are encoded under a "<token_name>_<key>" prefix; otherwise the
        element goes straight to the subfield encoder.  Only the first
        row of each sub-encoding contributes columns, as before.
        '''
        results = []
        for element in field_list:
            row = []
            if isinstance(self._subfield, CompositeField):
                # sorted items so the encoded column order is stable
                # (plain dict iteration order is arbitrary in Python 2).
                for key, value in sorted(element.items()):
                    part = getattr(self._subfield, key)
                    # The original special-cased nested CompositeFields
                    # here, but both branches were byte-identical; one
                    # call covers both cases.
                    for col in part.encode("%s_%s" % (token_name, key), value, summary_data)[0]:
                        row.append(col)
            else:
                # straight field encoding
                for col in self._subfield.encode(token_name, element, summary_data)[0]:
                    row.append(col)
            results.append(tuple(row))
        return results

    @classmethod
    def from_json(cls, json):
        '''
        Rebuild from a definition of the form
        (FieldList, (SubfieldClass, extra-args...)).
        '''
        cur_module = sys.modules[__name__]
        subfield_clsname = json[1][0]
        subfield_json = json[1][1:]
        field_class = getattr(cur_module, subfield_clsname)
        return cls(field_class.from_json(*subfield_json))

    def as_json(self):
        '''Definition is the class name plus the subfield definition.'''
        flist_def = self.__class__.__name__, self._subfield.as_json()
        return flist_def

class TextField(FieldList):
    '''
    Just a stream of text tokens.
    '''
    def __init__(self, summarize=False):
        FieldList.__init__(self, WordField())
        self._summarize = summarize

    def encode(self, token_name, arg, summary_data):
        '''
        Tokenize arg into one row of prefixed terms; when summarizing,
        also collect the bare terms into summary_data.
        '''
        std_row = []
        sum_row = []

        for term in Tokenizer(arg):
            std_row.append(("%s:%s" % (token_name, term),))
            if self._summarize:
                sum_row.append((term,))

        if self._summarize:
            summary_data.append([tuple(sum_row)])

        return [tuple(std_row)]

    @classmethod
    def from_json(cls, json):
        '''Rebuild from (classname, summarize-flag).'''
        return cls(*json[1:])

    def as_json(self):
        '''Definition is the class name plus the summarize flag.'''
        return (self.__class__.__name__, self._summarize)

class SummaryField(Field):
    '''
    Placeholder for the magic summary field, which pulls in all
    summarizable parts of the document to support generic search.
    IndexableDocument._process_declarations attaches one per class.
    '''
    def __init__(self, *field_names):
        self._field_names = field_names

    def as_json(self):
        '''Definition is the class name plus the tracked field names.'''
        return self.__class__.__name__, self._field_names

    def summary_keys(self):
        '''Return the field names this summary covers.'''
        return self._field_names

class MetaIndexable(type):
    '''
    Metaclass that post-processes direct subclasses of
    IndexableDocument, collecting their Field declarations.
    '''
    def __init__(cls, name, bases, d):
        # Only subclasses whose first base is IndexableDocument are
        # processed; the base class itself is left alone.  Guard against
        # an empty bases tuple (e.g. MetaIndexable used directly to
        # build a bare class), which used to raise IndexError.
        if bases and bases[0].__name__ == 'IndexableDocument':
            cls._process_declarations(name, bases, d)


class IndexableDocument(object):
    '''
    Base class for searchable documents.

    Subclasses declare their schema as class-level Field attributes;
    MetaIndexable collects those declarations into _field_dict.  An
    instance takes a raw data dictionary, encodes it through the field
    definitions, and can emit a xapian.Document of term postings.
    '''
    __metaclass__ = MetaIndexable

    def __init__(self, xapian_data, **datadict):
        # xapian_data is the opaque payload stored on the xapian document
        self._raw_doc = datadict
        self._xapian_data = xapian_data
        self._encoded_data = self._encode(datadict)

    @classmethod
    def cls_from_json(cls, json_class_def):
        '''
        Dynamically construct an IndexableDocument subclass from a JSON
        class definition of the form (classname, {fieldname: fielddef}),
        i.e. the structure produced by json_classdef().

        Equivalent to:
            Foo = MetaIndexable('Foo', (IndexableDocument,), {'name': TextField()})
        '''
        cls_name = str(json_class_def[0])
        cur_module = sys.modules[__name__]

        field_dict = {}
        for key, value in json_class_def[1].items():
            # value[0] is the Field subclass name; look it up in this module
            field_class = getattr(cur_module, value[0])
            field_dict[key] = field_class.from_json(value)

        return MetaIndexable(cls_name, (IndexableDocument,), field_dict)

    def tokeniter(self):
        '''
        Return a flat list of the terms posted to the xapian document,
        repeated once per posting position.
        '''
        results = []
        xapdoc = self.as_xapian_doc()
        for terms in list(xapdoc.termlist()):
            # terms[0] is the term string, terms[-1] its posting positions
            for _pos in list(terms[-1]):
                results.append(terms[0])
        return results

    def __iter__(self):
        # NOTE(fix): the old implementation returned self while next()
        # walked a never-assigned attribute (_xaplist), so iterating a
        # document always raised AttributeError and could never stop.
        # Iterate the real term stream instead.
        self._token_iter = iter(self.tokeniter())
        return self

    def next(self):
        # Python 2 iterator protocol: delegate to the term iterator
        # created by __iter__; its StopIteration propagates naturally.
        return next(self._token_iter)

    def as_xapian_doc(self):
        '''
        Convert the encoded data into a xapian.Document, posting every
        term at an increasing position.  After each row the position is
        bumped past the next 100-boundary so that phrase matches cannot
        span unrelated rows.
        '''
        doc = xapian.Document()
        doc.set_data(self._xapian_data)

        term_pos = 1
        for field in self.get_encoded_data():
            for row in field:
                for col in row:
                    for term in col:
                        doc.add_posting(term, term_pos)
                    term_pos += 1
                term_pos = int(math.ceil((term_pos + 100) / 100.0) * 100)
        return doc

    @classmethod
    def _process_declarations(cls, name, bases, d):
        '''Collect every Field attribute declared on the class.'''
        cls._field_dict = {}
        for key, value in d.items():
            if isinstance(value, Field):
                cls._field_dict[key] = value

        # placeholder that aggregates summarizable data during encoding
        cls._summary_field = SummaryField()

    @classmethod
    def field_names(cls):
        '''
        Return the declared field names, sorted.  Exposed to help with
        introspection and dynamically generated interfaces for other
        languages.
        '''
        return sorted(cls._field_dict.keys())

    @classmethod
    def json_classdef(cls):
        '''
        Return the raw field definitions bound to this class as a JSON
        string of the form (classname, {fieldname: fielddef}).

        It's assumed that some external framework can compile the JSON
        string into a proper calling stub; cls_from_json is the inverse.
        '''
        clsdef_dict = {}
        for key, value in cls._field_dict.items():
            clsdef_dict[key] = value.as_json()
        return simplejson.dumps((cls.__name__, clsdef_dict))

    def get_raw_xapian_data(self):
        '''Return the opaque payload given to the constructor.'''
        return self._xapian_data

    def get_xaphash(self):
        '''Return a SHA-1 hex hashcode of the raw xapian data.'''
        return sha.new(self.get_raw_xapian_data()).hexdigest()

    def get_raw_document(self):
        '''Return the raw (unencoded) input dictionary.'''
        return self._raw_doc

    def get_encoded_data(self):
        '''Return the encoded rows computed at construction time.'''
        return self._encoded_data

    def _encode(self, datadict):
        '''
        Pass each entry of datadict down to its field definition for
        encoding.

        Returns a list of encoded row-groups (one per field, in sorted
        field-name order) followed by one summary group collected from
        the summarizing fields, ready to stream into Xapian.

        Raises InvalidInputError for an unrecognized key or a missing
        required field.
        '''
        rows = []
        summary_data = []

        # Reject keys that do not correspond to a declared field
        for input_key in datadict.keys():
            if input_key not in self._field_dict:
                raise InvalidInputError("The key [%s] is not recognized by this document class (%s)" % (input_key, self.__class__.__name__))

        # field_names() is sorted, so the encoding order is deterministic
        for fname in self.field_names():
            field_inst = self._field_dict[fname]

            payload = datadict.get(fname, None)
            if payload is None:
                raise InvalidInputError("The key [%s] is required by this document class (%s)" % (fname, self.__class__.__name__))

            rows.append(field_inst.encode(fname, payload, summary_data))

        # flatten the collected summary rows into a single trailing group
        default_summary = []
        for field in summary_data:
            for row in field:
                default_summary.append(row)
        rows.append(default_summary)
        return rows


