import logging
import math
import re
import copy
from datetime import datetime, timedelta
from google.appengine.ext import db
from google.appengine.ext.db import polymodel
from google.appengine.api.labs import taskqueue
from bts.porter_stemmers.en import Stemmer

# Defines how many terms to index per batch.
# BUG FIX: this was 2000, directly contradicting the stated maximum of 1000
# (the 1000-item batch limit the rest of this module assumes, e.g. the
# fetch(1000) calls and the 1000-key cap in the compaction routines).
TERM_LOAD = 1000

# Dictionary of indexes that have already been defined (keyed to index name)
existing_indexes = dict()

# Currently, we support only English stemming
stemmer = Stemmer('en')
# NOTE(review): the leading comma makes the first element of the split the
# empty string, so '' is a member of the stopword set. tokenize() relies on
# this to filter out empty tokens produced by split() — do not "fix" the
# leading comma without also filtering empty strings explicitly.
stopwords = set(
        ',about,above,above,across,after,afterwards,again,against,all,almost,alone,along,already,also,although,always,am,among,amongst,amoungst,amount,an,and,another,any,anyhow,anyone,anything,anyway,anywhere,are,around,as,at,back,be,became,because,become,becomes,becoming,been,before,beforehand,behind,being,below,beside,besides,between,beyond,bill,both,bottom,but,by,call,can,cannot,cant,co,con,could,couldnt,cry,de,describe,detail,do,done,down,due,during,each,eg,eight,either,eleven,else,elsewhere,empty,enough,etc,even,ever,every,everyone,everything,everywhere,except,few,fifteen,fify,fill,find,fire,first,five,for,former,formerly,forty,found,four,from,front,full,further,get,give,go,had,has,hasnt,have,he,hence,her,here,hereafter,hereby,herein,hereupon,hers,herself,him,himself,his,how,however,hundred,ie,if,in,inc,indeed,interest,into,is,it,its,itself,keep,last,latter,latterly,least,less,ltd,made,many,may,me,meanwhile,might,mill,mine,more,moreover,most,mostly,move,much,must,my,myself,name,namely,neither,never,nevertheless,next,nine,no,nobody,none,noone,nor,not,nothing,now,nowhere,of,off,often,on,once,one,only,onto,or,other,others,otherwise,our,ours,ourselves,out,over,own,part,per,perhaps,please,put,rather,re,same,see,seem,seemed,seeming,seems,serious,several,she,should,show,side,since,sincere,six,sixty,so,some,somehow,someone,something,sometime,sometimes,somewhere,still,such,system,take,ten,than,that,the,their,them,themselves,then,thence,there,thereafter,thereby,therefore,therein,thereupon,these,they,thickv,thin,third,this,those,though,three,through,throughout,thru,thus,to,together,too,top,toward,towards,twelve,twenty,two,un,under,until,up,upon,us,very,via,was,we,well,were,what,whatever,when,whence,whenever,where,whereafter,whereas,whereby,wherein,whereupon,wherever,whether,which,while,whither,who,whoever,whole,whom,whose,why,will,with,within,without,would,yet,you,your,yours,yourself,yourselves,the'.split
                (','))

# This regex is used to split multi-word phrases/sentences into individual
# words: runs of whitespace and brackets, or sentence punctuation followed
# by a space or the end of the string.
# BUG FIX: the original pattern ended with [$ ], a character class matching
# a literal '$' or a space, so the '$' never acted as the intended
# end-of-string anchor and punctuation at the very end of a string was
# never split off (e.g. "end." stayed a single token).
split_regex = re.compile(r'[\(\)\[\]\{\}\t\n ]+|[,\.\!\?](?: |$)')

def split(string):
    """Split string into words using split_regex.

    May yield empty strings at the boundaries (e.g. leading bracket or
    trailing punctuation); tokenize() filters those out via the stopword
    set, which contains the empty string.
    """
    return split_regex.split(string)

def tokenize(value, case_insensitive=True, stemmed=True):
    """Tokenize a value (or list of values) into a list of words.

    Each item of a list value is tokenized individually; a scalar value is
    tokenized as-is. Words whose lowercase form appears in the stopword set
    are dropped. If case_insensitive is True the surviving words are
    lowercased; if stemmed is True they are reduced to their porter2 stems.
    """
    # Normalize to a list so scalars and lists share one code path
    items = value if isinstance(value, list) else [value]
    words = []
    for item in items:
        words.extend(split(unicode(item)))
    # Drop stopwords (comparison is always case-insensitive) and apply the
    # requested casing to the words we keep
    final_words = []
    for word in words:
        lowered = word.lower()
        if lowered in stopwords:
            continue
        final_words.append(lowered if case_insensitive else word)
    if stemmed:
        return stemmer.stemWords(final_words)
    return final_words

class IndexEntry(polymodel.PolyModel):
    """An entry in the inverted index

    The entry is identified by the term (word) and holds a list of model_keys
    pointing to the indexed models that contain this term. Multiple entries may
    exist for the same term, so to find all model keys corresponding to a
    particular term, one should query for all IndexEntries with that term.

    Often, IndexEntry will be subclassed to include additional filter properties
    to allow for faceted search.
    """
    # The indexed term (a single word)
    term = db.StringProperty(required=True)
    # Keys of the indexed model objects containing this term; stored
    # unindexed because it is pure payload, never queried on
    model_keys = db.ListProperty(db.Key, indexed=False, default=[])
    # Cached length of model_keys; queried by the compaction routines
    number_of_model_keys = db.IntegerProperty(required=True)
    # True once this entry has been merged by Index.compact_inverted_index()
    is_compacted = db.BooleanProperty(required=True, default=False)
    dt_created = db.DateTimeProperty(auto_now_add=True)
    dt_updated = db.DateTimeProperty(auto_now=True)

    def __init__(self, parent=None, *args, **kwargs):
        # NOTE(review): super(polymodel.PolyModel, self) starts the MRO
        # lookup *after* PolyModel, i.e. it bypasses PolyModel.__init__
        # entirely and calls db.Model.__init__ directly. This may be
        # deliberate (to pass `parent` positionally, which PolyModel's
        # keyword-only __init__ would reject) but it also skips any
        # initialization PolyModel performs — confirm against the SDK
        # before changing.
        super(polymodel.PolyModel, self).__init__(parent, *args, **kwargs)

class ModelTerms(db.Model):
    """An entry in the forward index

    Records which terms appear with a particular model object in a
    particular index (identified by index_type). A model with a large
    number of terms may have several ModelTerms rows in the forward index.
    Each row is stored in the entity group of the indexed model.
    """
    def __init__(self, parent=None, **kwargs):
        # Default the entity-group parent to the indexed model itself so
        # that forward-index rows can be queried (and compacted) with an
        # ancestor query on the model's key.
        if not parent:
            parent = kwargs['model']
        super(ModelTerms, self).__init__(parent, **kwargs)

    # Name of the Index subclass this row belongs to
    index_type = db.StringProperty(required=True)
    # The indexed model object
    model = db.ReferenceProperty(required=True)
    # Terms recorded for the model; unindexed payload
    indexed_terms = db.StringListProperty(required=True, indexed=False)
    # Cached length of indexed_terms; queried by compact_forward_index()
    number_of_indexed_terms = db.IntegerProperty(required=True)
    # True once this row has been merged by Index.compact_forward_index()
    is_compacted = db.BooleanProperty(required=True, default=False)

class SearchQuery(object):
    """A query on a particular index

    The query must be constructed with a query_string that contains at least
    one word. Currently, the query also requires that an order (for sorting)
    be specified via order() prior to running the query.

    SearchQuery is intended to function similarly to a
    google.appengine.ext.db.Query and it supports filter(), order() and
    fetch() methods that function like the ones from db.Query.
    """
    def __init__(self, index, query_string):
        """Constructs a SearchQuery

        Typically, you would call Index.query(...) instead
        """
        if not query_string:
            raise Exception(
                    "Please specify at least one term in your query_string")
        self.index = index
        self.query_string = query_string
        self.offset = 0
        self.limit = 10
        self.filters = []
        # Set via order(); fetch() refuses to run until this is non-empty.
        # BUG FIX: the original stored the order in self.order, which
        # shadowed the order() method — the "if not self.order" guard in
        # fetch() then always passed (a bound method is truthy), and a
        # second call to order() would have crashed ('str' not callable).
        self.sort_order = None

    def order(self, order):
        """Order the results by the specified order.

        Orders in ascending order by default. To order in descending order,
        prepend '-' to the order, like "-date_created".

        Any field on which the results are to be ordered must appear in the
        index from which this query was created. Returns self for chaining.
        """
        self.sort_order = order
        return self

    def filter(self, condition, value):
        """Adds the specified filter to this query.

        This method functions identically to its analog on
        google.appengine.ext.db.Query. Returns self for chaining.
        """
        self.filters.append((condition, value))
        return self

    def fetch(self, limit=10, offset=0, case_insensitive=True, stemmed=True,
              use_or=False):
        """Executes this query and returns (total_key_count, results).

        This method functions identically to its analog on
        google.appengine.ext.db.Query, except that it also accepts arguments
        to determine whether the query will be run case-insensitively and
        whether the query string should be stemmed, and an argument to use
        boolean OR logic instead of the default AND.

        Raises Exception if limit/offset exceed 1000 or no sort order has
        been set via order().
        """
        if limit > 1000:
            raise Exception("Please specify a limit of 1000 items or fewer")
        if offset > 1000:
            raise Exception("Please specify an offset of 1000 or smaller")
        if not self.sort_order:
            raise Exception("Please specify a sort order")
        logging.debug("Running search '%s'" % self.query_string)
        start = datetime.now()
        last_entry = offset + limit
        # Identify the sort key and direction ('+'/'-' prefix)
        sort_key = self.sort_order
        reverse = False
        if self.sort_order.startswith('+'):
            sort_key = self.sort_order[1:]
        elif self.sort_order.startswith('-'):
            reverse = True
            sort_key = self.sort_order[1:]
        query_terms = list(
                tokenize(self.query_string, case_insensitive, stemmed))
        # A single term is equivalent under AND and OR; use the cheaper path
        if use_or or len(query_terms) == 1:
            model_keys = self.collect_union(query_terms, last_entry)
        else:
            model_keys = self.collect_intersection(query_terms, sort_key,
                                                   last_entry)
        finish = datetime.now()
        logging.debug(
                "Finished processing sort keys, returning whatever search results we can"
                )
        logging.info("Read index entries in %s" % (finish - start))
        start = datetime.now()
        result_list_length = len(model_keys)
        if result_list_length <= offset:
            return (0, [])
        if result_list_length < last_entry:
            last_entry = result_list_length
        results = self.index.model_type.get(model_keys[offset:last_entry])
        # Re-sort the fetched page in memory to honor the requested order
        results.sort(key=lambda value: getattr(value, sort_key),
                     reverse=reverse)
        finish = datetime.now()
        logging.info("Read search results in %s" % (finish - start))
        return (result_list_length, results)

    def collect_union(self, query_terms, last_entry):
        """Collects all documents that match any of the query terms"""
        model_keys = []
        query = self.index.entry_type.all()
        query.filter("term in", query_terms)
        for query_filter in self.filters:
            logging.debug("Adding filter %s" % str(query_filter))
            query.filter(query_filter[0], query_filter[1])
        query.order(self.sort_order)
        # Page through at most 1000 entries (10 pages of 100)
        for page in range(0, 10):
            existing_entries = query.fetch(100, page * 100)
            for entry in existing_entries:
                # Stop collecting if we've already collected enough to
                # retrieve the requested models
                if len(model_keys) > last_entry:
                    break
                for key in entry.model_keys:
                    if key not in model_keys:
                        model_keys.append(key)
            if len(model_keys) > last_entry:
                break
        return model_keys

    def collect_intersection(self, query_terms, sort_key, last_entry):
        """Collects all documents that match all of the query terms.

        Entries for the first term are read in sort order and grouped by
        their sort key value; each finished group is intersected (via
        _match_group) with the entries of the remaining terms that share
        that sort key value, so results come out in sorted order.
        """
        # Fetch existing entries for the first term
        query = self.index.entry_type.all()
        query.filter("term =", query_terms[0])
        for query_filter in self.filters:
            logging.debug("Adding filter %s" % str(query_filter))
            query.filter(query_filter[0], query_filter[1])
        query.order(self.sort_order)
        existing_entries = []
        # BUG FIX: the original reused the loop variable `i` both as the
        # running entry counter and as the inner page index, clobbering the
        # counter; the paging loops now use a distinct name.
        for page in range(0, 10):
            existing_entries.extend(query.fetch(100, page * 100))
        model_keys = []
        last_sort_key_value = None
        accumulated_keys = set()
        for entry in existing_entries:
            sort_key_value = getattr(entry, sort_key)
            logging.debug("Processing sort key %s" % sort_key_value)
            if accumulated_keys and sort_key_value != last_sort_key_value:
                # Sort key changed: flush the finished group.
                # BUG FIX: the original never refreshed last_sort_key_value
                # after a flush, so every later group was matched against
                # the first group's sort key value.
                model_keys.extend(self._match_group(
                        query_terms, sort_key, last_sort_key_value,
                        accumulated_keys))
                if len(model_keys) > last_entry:
                    logging.debug(
                            "Exceeded 1000 result limit on search, finishing up"
                            )
                    return model_keys
                accumulated_keys = set()
            accumulated_keys.update(entry.model_keys)
            last_sort_key_value = sort_key_value
        # Flush the final group
        if accumulated_keys:
            model_keys.extend(self._match_group(
                    query_terms, sort_key, last_sort_key_value,
                    accumulated_keys))
        return model_keys

    def _match_group(self, query_terms, sort_key, sort_key_value,
                     accumulated_keys):
        """Intersects one sort-key group of the first term's keys with the
        entries of the remaining query terms sharing that sort key value."""
        logging.debug("Matching keys ...")
        query = self.index.entry_type.all()
        query.filter("term in", query_terms[1:])
        for query_filter in self.filters:
            query.filter(query_filter[0], query_filter[1])
        query.filter("%s =" % sort_key, sort_key_value)
        other_entries = []
        for page in range(0, 10):
            other_entries.extend(query.fetch(100, page * 100))
        # Merge key sets per term, seeded with the first term's group
        combined_term_keys = dict()
        combined_term_keys[query_terms[0]] = accumulated_keys
        for other_entry in other_entries:
            other_keys = combined_term_keys.get(other_entry.term, set())
            other_keys.update(other_entry.model_keys)
            combined_term_keys[other_entry.term] = other_keys
        return self.match_keys(query_terms, combined_term_keys)

    def match_keys(self, query_terms, combined_term_keys):
        """For a given map of term -> set of model keys, finds the keys that
        appear with all terms.

        Returns the set of matching keys, or an empty list if any term is
        missing from the map or no key appears with every term.
        """
        matching_keys = None
        logging.debug(
                "Ensuring that all query terms '%s' are present in group '%s" % (query_terms, combined_term_keys.keys
                        ()))
        for query_term in query_terms:
            if query_term not in combined_term_keys:
                # A term with no entries in this group means no match
                return []
        for value in combined_term_keys.values():
            if not matching_keys:
                matching_keys = value
            else:
                matching_keys = matching_keys.intersection(value)
        if not matching_keys:
            matching_keys = []
        return matching_keys

class Index(object):
    """A full-text index of model objects

    Use the function create_index() to create an index.
    """
    def query(self, query_string):
        """Query this index using the specified query_string"""
        return SearchQuery(self, query_string)

    def index(self, model, immediate=False, force=False):
        """Adds/updates the specified model in the index

        By default, the actual operation of updating the forward and inverted
        indexes for this model object is handled asynchronously via task queues.
        To index immediately, specify immediate as True - but be aware, this can
        have negative performance implications and is likely to time out in
        production (especially for big model objects).

        If force is True, all terms appearing in the model will be added to the
        inverted index whether or not the forward index indicates that the term
        has already been included.  This is useful when reindexing a model whose
        index entries may have become corrupted. By default, this is False to
        improve performance by not reindexing terms that have already been
        stored.      
        """
        logging.debug(
                'Attempting to index item in index %s' % self.__class__.__name__
                )
        # Collect all terms by tokenizing the indexed fields from the model
        terms = set()
        for name, settings in self.properties.iteritems():
            if not settings.get('filter', False):
                value = getattr(model, name)
                if value:
                    terms.update(tokenize(value,case_insensitive=settings.get(
                            'case_insensitive', True),
                                          stemmed=settings.get('stemmed', True))
                            )
        if not force:
        # Identify which terms need to be added to or removed from the index
            existing_terms = set()
            all_model_terms = ModelTerms.all().ancestor(model.key()).filter(
                    "index_type =", self.__class__.__name__).fetch(1000)
            for model_terms in all_model_terms:
                existing_terms.update(model_terms.indexed_terms)
            terms_to_add = terms - existing_terms
            terms_to_remove = existing_terms - terms
        else:
        # When force is True, add all terms to the index
            terms_to_add = terms
            terms_to_remove = set()
        terms_to_add = list(terms_to_add)
        terms_to_remove = list(terms_to_remove)
        logging.debug(
                "Adding terms %s to index %s" % (terms_to_add, self.__class__.__name__)
                )
        # Queue all terms to add, broken into chunks based on the TERM_LOAD
        start = 0
        for i in range(0,int(
                math.ceil(float(len(terms_to_add) / float(TERM_LOAD))))):
            end = min(start+TERM_LOAD, len(terms_to_add))
            terms = ' '.join(terms_to_add[start:end])
            start = end
            if immediate:
                self.add(model, terms)
            else:
                taskqueue.Task(url='/bts/add-terms',
                               params=dict(index=self.__class__.__name__,
                                           key=model.key(), terms=terms)).add(
                        queue_name='index-terms')
        logging.debug(
                "Removing terms %s from index %s" % (terms_to_remove, self.__class__.__name__)
                )
        # Queue all terms to remove, broken into chunks based on the TERM_LOAD
        start = 0
        for i in range(0,int(
                math.ceil(float(len(terms_to_remove) / float(TERM_LOAD))))):
            end = min(start+TERM_LOAD, len(terms_to_remove))
            terms = ' '.join(terms_to_remove[start:end])
            start = end
            if immediate:
                self.remove(model, terms)
            else:
                taskqueue.Task(url='/bts/remove-terms',
                               params=dict(index=self.__class__.__name__,
                                           key=model.key(), terms=terms)).add(
                        queue_name='index-terms')

    def add(self, model, terms):
        """Add the terms for the specified model to this index, including both
        the forward and the inverted indexes.

        terms is a comma-separated list of words
        """
        logging.debug(
                "Adding terms '%s' to index %s" % (terms, self.__class__.__name__)
                )
        # Identify additional filter properties to include in index
        filter_properties = self.extract_filter_properties(model)
        entries = []
        split_terms = terms.split()
        for term in split_terms:
            entries.append(self.entry_type(term=term, model_keys=[model.key()],
                                           number_of_model_keys=1,
                                           **filter_properties))
        # Break put into 100 record chunks to avoid timeouts
        start = 0
        for i in range(0,int(
                math.ceil(float(len(entries) / float(100))))):
            end = min(start+100, len(entries))
            db.put(entries[start:end])
            start = end
        # Remember which terms were added
        ModelTerms(index_type=self.__class__.__name__, model=model,
                   indexed_terms = split_terms,
                   number_of_indexed_terms=len(split_terms)).put()
        # Execute term_added callback for each added term
        try:
            term_added = self.term_added
            for term in split_terms:
                try:
                    self.term_added(model, term, filter_properties)
                except Exception, e:
                    logging.debug("Unable to process term_added: %s" % e)
        except Exception, e:
            logging.debug("Unable to get term_added callback: %s" % e)

    def remove(self, model, terms):
        logging.debug(
                "Removing terms '%s' from index %s" % (terms, self.__class__.__name__)
                )
    # TODO: figure out remove logic

    def do_remove(self, key, model, term):
        if key:
            entry = self.entry_type.get(key)
            try:
                if entry:
                    model_keys = set(entry.model_keys)
                    model_keys.remove(model.key())
                    entry.model_keys = list(model_keys)
                    entry.put()
            except KeyError, e:
                pass

    def compact_inverted_index(self, start_time):
        while datetime.now() - start_time < timedelta(seconds=15):
            """Compacts terms in the inverted index"""
            # Find an entry that needs compacting
            original_entry_to_compact = self.entry_type.all().filter(
                    'is_compacted =',
                    False
                    ).get()
            # Find similar entries that also need compacting
            if original_entry_to_compact:
                filter_properties = self.extract_filter_properties(
                        original_entry_to_compact)
                entries_to_compact_query = self.entry_type.all().filter(
                        'is_compacted =', False).filter("term =",
                                                        original_entry_to_compact.term
                        )
                for name, value in filter_properties.iteritems():
                    entries_to_compact_query.filter("%s =" % name, value)
                compacted_entries = []
                all_model_keys = set()
                # Collect all model keys for compaction
                for entry_to_compact in entries_to_compact_query.fetch(1000):
                    if len(all_model_keys) + len(entry_to_compact.model_keys
                            ) > 1000:
                        break;
                    all_model_keys.update(entry_to_compact.model_keys)
                    compacted_entries.append(entry_to_compact)
                # Find an existing compacted entry that can be updated
                compacted_entry_query = self.entry_type.all().filter(
                        'is_compacted =', True).filter("term =",
                                                       original_entry_to_compact.term
                        ).filter('number_of_model_keys <=',
                                 1000 - len(all_model_keys))
                for name, value in filter_properties.iteritems():
                    compacted_entry_query.filter("%s =" % name, value)
                compacted_entry = compacted_entry_query.get()
                # Create a new compacted entry if none found
                if not compacted_entry:
                    self.entry_type(term=original_entry_to_compact.term,
                                    model_keys=list(all_model_keys),
                                    number_of_model_keys=len(all_model_keys),
                                    is_compacted=True, **filter_properties).put(
                            )
                # Update the existing compacted entry in a transaction
                else:
                    def update_compacted_entry():
                        reread_compacted_entry = db.get(compacted_entry.key())
                        all_model_keys.update(reread_compacted_entry.model_keys)
                        reread_compacted_entry.model_keys = list(all_model_keys)
                        reread_compacted_entry.number_of_model_keys = len(
                                all_model_keys)
                        reread_compacted_entry.put()
                    db.run_in_transaction(update_compacted_entry)
                # Delete the old entries
                db.delete(compacted_entries)
            else:
                break

    def compact_forward_index(self, start_time):
        while datetime.now() - start_time < timedelta(seconds=15):
            original_entry_to_compact = ModelTerms.all().filter('index_type =',
                                                                self.__class__.__name__
                    ).filter('is_compacted =', False).get()
            if original_entry_to_compact:
                def do_compacting():
                    entries_to_compact_query = ModelTerms.all().ancestor(
                            original_entry_to_compact.model.key()).filter(
                            'index_type =', self.__class__.__name__).filter(
                            'is_compacted =', False)
                    compacted_entries = []
                    all_terms = set()
                    # Collect all model keys for compaction
                    for entry_to_compact in entries_to_compact_query.fetch(1000
                            ):
                        if len(all_terms) + len(entry_to_compact.indexed_terms
                                ) > 1000:
                            break;
                        all_terms.update(entry_to_compact.indexed_terms)
                        compacted_entries.append(entry_to_compact)
                    # Find an existing compacted entry that can be updated
                    compacted_entry_query = ModelTerms.all().ancestor(
                            original_entry_to_compact.model.key()).filter(
                            'index_type =', self.__class__.__name__).filter(
                            'is_compacted =', True).filter(
                            'number_of_indexed_terms <=', 1000 - len(all_terms))
                    compacted_entry = compacted_entry_query.get()
                    # Create a new compacted entry if none found
                    if not compacted_entry:
                        ModelTerms(index_type = self.__class__.__name__,
                                   model=original_entry_to_compact.model,
                                   indexed_terms=list(all_terms),
                                   number_of_indexed_terms=len(all_terms),
                                   is_compacted=True).put()
                    # Update the existing compacted entry in a transaction
                    else:
                        all_terms.update(reread_compacted_entry.indexed_terms)
                        compacted_entry.indexed_terms = list(all_terms)
                        compacted_entry.number_of_indexed_terms = len(all_terms)
                        compacted_entry.put()
                    # Delete the old entries
                    db.delete(compacted_entries)
                db.run_in_transaction(do_compacting)
            else:
                break

    def extract_filter_properties(self, model):
        """Extract the properties used to filter on this index"""
        filter_properties = dict()
        for name, field in self.entry_type.properties().iteritems():
            if name not in ['term', 'model_keys', 'dt_created', 'dt_updated',
                            'is_compacted', 'number_of_model_keys'
                    ] and isinstance(field, db.Property
                    ) and not isinstance(field,
                                         polymodel._ClassKeyProperty
                    ):
                filter_properties[name] = getattr(model, name)
        return filter_properties

def create_index(index_name, model_type, **kwargs):
    """Creates an index for the specified model_type under the specified name.

    If code in this application has already initialized an index with the exact
    same name (case sensitive), this method will simply return the existing
    index without modification.

    For kwargs, specify name value pairs to indicate which properties of the
    model to include in the index. The name should be the property name, and the
    value is a dictionary containing any of the following:

    'filter' - if True, this property will not be parsed into the full-text
    index but will instead be used as a filter property for faceted search.
    Defaults to False.

    'case_insensitive' - if True, this property will be included in the index
    in all lowercase to allow case insensitive searches. Defaults to True.

    'stemmed' - if True, this property will be stemmed to allow for common-root
    searches. Defaults to True.
    """
    full_index_name = "%s_index" % index_name
    index = existing_indexes.get(full_index_name.lower(), None)
    if not index:
        # Create new index: build an IndexEntry subclass carrying a copy of
        # each filter property, then an Index subclass binding it all together
        entry_type_properties = dict()
        for name, settings in kwargs.iteritems():
            if settings.get('filter', False):
                # Renamed from 'property' to avoid shadowing the builtin
                prop = copy.copy(getattr(model_type, name))
                if isinstance(prop, db.ReferenceProperty):
                    # NOTE(review): presumably cleared so the datastore
                    # assigns a fresh reverse collection_name instead of
                    # colliding with the model's - confirm against the SDK
                    prop.collection_name = None
                entry_type_properties[prop.name] = prop
        entry_type = type(index_name, (IndexEntry,), entry_type_properties)
        index = type(full_index_name, (Index,),
                     dict(model_type=model_type, entry_type=entry_type,
                          properties=kwargs))()
        # Register index with model_type
        try:
            model_type.bts_indexes.append(index)
        except AttributeError:
            # First index for this kind
            model_type.bts_indexes = [index]
        # Save index to list of existing indexes
        existing_indexes[full_index_name.lower()] = index
    return index

def request_indexing(model, force=False):
    """Requests that the supplied model object be indexed in all indexes
    for that kind of model.

    The indexing is performed asynchronously via a task queue. Models whose
    kind has no registered indexes (no bts_indexes attribute) are silently
    skipped.
    """
    try:
        indexes = model.bts_indexes
    except AttributeError:
        # BUG FIX: this was a bare except, which would also swallow
        # unrelated failures; only the missing attribute is expected.
        indexes = None
    if indexes:
        taskqueue.Task(url='/bts/index-model',
                       params=dict(key=model.key(), force=force)).add(
                queue_name='index-model')

def add_to_index(**kwargs):
    model = kwargs['instance']
    request_indexing(model)

# Make sure indexes are loaded
import indexes