#import logging
#import math
#import re
#import copy
#from datetime import datetime, timedelta
#from google.appengine.ext import db
#from google.appengine.ext.db import polymodel
#from google.appengine.api.labs import taskqueue
#from bts.porter_stemmers.en import Stemmer
#
## Defines how many terms to index at once (maximum allowed is currently 30)
#TERM_LOAD = 250
#
## Dictionary of indexes that have already been defined (keyed to index name)
#existing_indexes = dict()
#
## Currently, we support only English stemming
#stemmer = Stemmer('en')
#stopwords = set(
#        ',about,above,above,across,after,afterwards,again,against,all,almost,alone,along,already,also,although,always,am,among,amongst,amoungst,amount,an,and,another,any,anyhow,anyone,anything,anyway,anywhere,are,around,as,at,back,be,became,because,become,becomes,becoming,been,before,beforehand,behind,being,below,beside,besides,between,beyond,bill,both,bottom,but,by,call,can,cannot,cant,co,con,could,couldnt,cry,de,describe,detail,do,done,down,due,during,each,eg,eight,either,eleven,else,elsewhere,empty,enough,etc,even,ever,every,everyone,everything,everywhere,except,few,fifteen,fify,fill,find,fire,first,five,for,former,formerly,forty,found,four,from,front,full,further,get,give,go,had,has,hasnt,have,he,hence,her,here,hereafter,hereby,herein,hereupon,hers,herself,him,himself,his,how,however,hundred,ie,if,in,inc,indeed,interest,into,is,it,its,itself,keep,last,latter,latterly,least,less,ltd,made,many,may,me,meanwhile,might,mill,mine,more,moreover,most,mostly,move,much,must,my,myself,name,namely,neither,never,nevertheless,next,nine,no,nobody,none,noone,nor,not,nothing,now,nowhere,of,off,often,on,once,one,only,onto,or,other,others,otherwise,our,ours,ourselves,out,over,own,part,per,perhaps,please,put,rather,re,same,see,seem,seemed,seeming,seems,serious,several,she,should,show,side,since,sincere,six,sixty,so,some,somehow,someone,something,sometime,sometimes,somewhere,still,such,system,take,ten,than,that,the,their,them,themselves,then,thence,there,thereafter,thereby,therefore,therein,thereupon,these,they,thickv,thin,third,this,those,though,three,through,throughout,thru,thus,to,together,too,top,toward,towards,twelve,twenty,two,un,under,until,up,upon,us,very,via,was,we,well,were,what,whatever,when,whence,whenever,where,whereafter,whereas,whereby,wherein,whereupon,wherever,whether,which,while,whither,who,whoever,whole,whom,whose,why,will,with,within,without,would,yet,you,your,yours,yourself,yourselves,the'.split
#                (','))
#
## This regex is used to split multi-word phrases/sentences into individual words
#split_regex = re.compile(r'[\(\)\[\]\{\}\t\n ]+|[,\.\!\?][$ ]')
#
#def split(string):
#    return split_regex.split(string)
#
#def tokenize(value, case_insensitive=True, stemmed=True):
#    """Takes a value or list of values, tokenizes them, and returns a list
#    of words.
#
#    If the value is a list of other values, each of the values in the list
#    will be tokenized individually. If case_insensitive is True, the tokenized
#    words will be all lowercase. If stemmed is True, the tokenized words will be
#    in their stemmed forms, stemmed according to the porter2 algorithm
#    """
#    words = []
#    final_words = []
#    # Collect words
#    if isinstance(value, list):
#        for value_item in value:
#            words.extend(split(unicode(value_item)))
#    else:
#        words.extend(split(unicode(value)))
#    # Collect words that don't appear in stopwords and adjust case if necessary
#    for word in words:
#        if word.lower() not in stopwords:
#            if case_insensitive:
#                final_words.append(word.lower())
#            else:
#                final_words.append(word)
#    return stemmer.stemWords(final_words) if stemmed else final_words
#
#class IndexEntry(polymodel.PolyModel):
#    """An entry in the inverted index
#
#    The entry is identified by the term (word) and holds a list of item_keys
#    pointing to the indexed models that contain this term. Multiple entries may
#    exist for the same term, so to find all item keys corresponding to a
#    particular term, one should query for all IndexEntries with that term.
#
#    Often, IndexEntry will be subclassed to include additional filter properties
#    to allow for faceted search.
#    """
#    term = db.StringProperty(required=True)
#    item_keys = db.ListProperty(db.Key, indexed=False, default=[])
#    dt_created = db.DateTimeProperty(auto_now_add=True)
#    dt_updated = db.DateTimeProperty(auto_now=True)
#
#    def __init__(self, parent=None, *args, **kwargs):
#        super(polymodel.PolyModel, self).__init__(parent, *args, **kwargs)
#
#class ModelTerms(db.Model):
#    """An entry in the forward index
#
#    The entry identifies which terms appear with a particular model object in a
#    particular index (identified by the index_type). If a model has a large
#    number of terms, there may be multiple ModelTerms for this model in the
#    forward index.
#    """
#    index_type = db.StringProperty(required=True)
#    model = db.ReferenceProperty(required=True)
#    indexed_terms = db.StringListProperty(required=True)
#
#class SearchQuery(object):
#    """A query on a particular index
#
#    The query must be constructed with a query_string that contains at least one
#    word. Currently, the query also requires that an order (for sorting) be
#    specified prior to running the query.
#
#    SearchQuery is intended to function similarly to a
#    google.appengine.ext.db.Query and it supports filter(), order() and fetch()
#    methods that function like the ones from db.Query.
#    """
#    def __init__(self, index, query_string):
#        """Constructs a SearchQuery
#
#        Typically, you would call Index.query(...) instead
#        """
#        if not query_string or len(query_string) <= 0:
#            raise Exception(
#                    "Please specify at least one term in your query_string")
#        self.index = index
#        self.query_string = query_string
#        self.offset = 0
#        self.limit = 10
#        self.filters = []
#
#    def order(self, order):
#        """Order the results by the specified order.
#
#        Orders in ascending order by default. To order in descending order,
#        prepend '-' to the order, like "-date_created".
#
#        Any field on which the results are to be ordered must appear in the
#        index from which this query was created.
#        """
#        self.order = order
#        return self
#
#    def filter(self, condition, value):
#        """Adds the specified filter to this query.
#
#        This method functions identically to its analog on
#        google.appengine.ext.db.Query.
#        """
#        self.filters.append((condition, value))
#        return self
#
#    def fetch(self, limit=10, offset=0, case_insensitive=True, stemmed=True,
#              use_or=False):
#        """Executes this query and returns the results as a list.
#
#        This method functions identically to its analog on
#        google.appengine.ext.db.Query, except that it also accepts arguments
#        to determine whether the query will be run case-insensitively and
#        whether or not the query string should be stemmed, and an argument to
#        use boolean OR logic instead of the default AND.
#        """
#        if limit > 1000:
#            raise Exception("Please specify a limit of 1000 items or fewer")
#        if offset > 1000:
#            raise Exception("Please specify an offset of 1000 or smaller")
#        if not self.order:
#            raise Exception("Please specify a sort order")
#        logging.debug("Running search '%s'" % self.query_string)
#        start = datetime.now()
#        last_entry = offset + limit
#        # Identify the sort order
#        sort_key = self.order
#        reverse = False
#        if self.order.startswith('+'):
#            sort_key = self.order[1:len(self.order)]
#        elif self.order.startswith('-'):
#            reverse = True
#            sort_key = self.order[1:len(self.order)]
#        query_terms = list(
#                tokenize(self.query_string, case_insensitive, stemmed))
#        existing_entries = self.read_index_entries(query_terms)
#        if use_or or len(query_terms) == 1:
#            model_keys = self.collect_union(query_terms, existing_entries,
#                                            last_entry)
#        else:
#            model_keys = self.collect_intersection(query_terms, existing_entries
#                                                   , sort_key)
#        finish = datetime.now()
#        logging.debug(
#                "Finished processing sort keys, returning whatever search results we can"
#                )
#        logging.info("Read index entries in %s" % (finish - start))
#        start = datetime.now()
#        result_list_length = len(model_keys)
#        if result_list_length <= offset:
#            return (0, [])
#        if result_list_length < last_entry:
#            last_entry = result_list_length
#        results = self.index.model_type.get(model_keys[offset:last_entry])
#        def key_function(value):
#            return getattr(value, sort_key)
#        results.sort(key=key_function, reverse=reverse)
#        finish = datetime.now()
#        logging.info("Read search results in %s" % (finish - start))
#        return (result_list_length, results)
#
#    def read_index_entries(self, query_terms):
#        """Reads all index entries matching any of the query terms and matching
#        the configured filters on this SearchQuery"""
#        query = self.index.entry_type.all()
#        query.filter("term in", query_terms)
#        for filter in self.filters:
#            logging.debug("Adding filter %s" % str(filter))
#            query.filter(filter[0], filter[1])
#        query.order(self.order)
#        return query.fetch(1000)
#
#    def collect_union(self, query_terms, existing_entries, last_entry):
#        """Collects all documents that match any of the query terms"""
#        model_keys = []
#        for entry in existing_entries:
#        #Stop collecting if we've already collected enough to retrieve
#        #the requested models
#            if len(model_keys) > last_entry:
#                break
#            for key in entry.item_keys:
#                if key not in model_keys:
#                    model_keys.append(key)
#        return model_keys
#
#    def collect_intersection(self, query_terms, existing_entries, sort_key):
#        """Collects all documents that match all of the query terms"""
#        model_keys = []
#        last_sort_key_value = None
#        combined_term_keys = None
#        number_of_entries = len(existing_entries)
#        logging.debug(
#                "Finding intersection of %s entries" % number_of_entries)
#        i = 1
#        combined_term_keys = dict()
#        for entry in existing_entries:
#            sort_key_value = getattr(entry, sort_key)
#            logging.debug("Processing sort key %s" % sort_key_value)
#            if sort_key_value != last_sort_key_value:
#                logging.debug("Matching keys ...")
#                if combined_term_keys:
#                    model_keys.extend(
#                            self.match_keys(query_terms, combined_term_keys))
#                    if len(model_keys) > 1000:
#                        logging.debug(
#                                "Exceeded 1000 result limit on search, finishing up"
#                                )
#                        break
#                combined_term_keys = dict()
#            if combined_term_keys.has_key(entry.term):
#                term_keys = combined_term_keys[entry.term]
#            else:
#                term_keys = set()
#            term_keys.update(entry.item_keys)
#            combined_term_keys[entry.term] = term_keys
#            last_sort_key_value = sort_key_value
#            i += 1
#        if combined_term_keys:
#            model_keys.extend(
#                    self.match_keys(query_terms, combined_term_keys))
#        return model_keys
#
#    def match_keys(self, query_terms, combined_term_keys):
#        """For a given map of terms/model keys, finds the keys that appear
#        with all terms"""
#        matching_keys = None
#        all_terms_present = True
#        logging.debug(
#                "Ensuring that all query terms '%s' are present in group '%s" % (query_terms, combined_term_keys.keys
#                        ()))
#        for query_term in query_terms:
#            if not combined_term_keys.has_key(query_term):
#                all_terms_present = False
#                break
#        if all_terms_present:
#            for value in combined_term_keys.values():
#                if not matching_keys:
#                    matching_keys = value
#                else:
#                    matching_keys = matching_keys.intersection(value)
#        if not matching_keys:
#            matching_keys = []
#        return matching_keys
#
#    # TODO: indexing functionality may need some kind of locking, or just serialize updates to index
#class Index(object):
#    """A full-text index of model objects
#
#    Use the function create_index() to create an index.
#    """
#    def query(self, query_string):
#        """Query this index using the specified query_string"""
#        return SearchQuery(self, query_string)
#
#    def index(self, model, immediate=False, force=False):
#        """Adds/updates the specified model in the index
#
#        By default, the actual operation of updating the forward and inverted
#        indexes for this model object is handled asynchronously via task queues.
#        To index immediately, specify immediate as True - but be aware, this can
#        have negative performance implications and is likely to time out in
#        production (especially for big model objects).
#
#        If force is True, all terms appearing in the model will be added to the
#        inverted index whether or not the forward index indicates that the term
#        has already been included.  This is useful when reindexing a model whose
#        index entries may have become corrupted. By default, this is False to
#        improve performance by not reindexing terms that have already been
#        stored.
#        """
#        logging.debug(
#                'Attempting to index item in index %s' % self.__class__.__name__
#                )
#        # Collect all terms by tokenizing the indexed fields from the model
#        terms = set()
#        for name, settings in self.properties.iteritems():
#            if not settings.get('filter', False):
#                value = getattr(model, name)
#                if value:
#                    terms.update(tokenize(value,case_insensitive=settings.get(
#                            'case_insensitive', True),
#                                          stemmed=settings.get('stemmed', True))
#                            )
#        if not force:
#        # Identify which terms need to be added to or removed from the index
#            existing_terms = set()
#            all_model_terms = ModelTerms.all().filter("index_type =",
#                                                      self.__class__.__name__
#                    ).filter("model =", model).fetch(1000)
#            for model_terms in all_model_terms:
#                existing_terms.update(model_terms.indexed_terms)
#            terms_to_add = terms - existing_terms
#            terms_to_remove = existing_terms - terms
#        else:
#        # When force is True, add all terms to the index
#            terms_to_add = terms
#            terms_to_remove = set()
#        terms_to_add = list(terms_to_add)
#        terms_to_remove = list(terms_to_remove)
#        logging.debug(
#                "Adding terms %s to index %s" % (terms_to_add, self.__class__.__name__)
#                )
#        # Queue all terms to add, broken into chunks based on the TERM_LOAD
#        start = 0
#        for i in range(0,int(
#                math.ceil(float(len(terms_to_add) / float(TERM_LOAD))))):
#            end = min(start+TERM_LOAD, len(terms_to_add))
#            terms = ' '.join(terms_to_add[start:end])
#            start = end
#            if immediate:
#                self.add(model, terms)
#            else:
#                taskqueue.Task(url='/bts/add-terms',
#                               params=dict(index=self.__class__.__name__,
#                                           key=model.key(), terms=terms)).add(
#                        queue_name='index-terms')
#        logging.debug(
#                "Removing terms %s from index %s" % (terms_to_remove, self.__class__.__name__)
#                )
#        # Queue all terms to remove, broken into chunks based on the TERM_LOAD
#        start = 0
#        for i in range(0,int(
#                math.ceil(float(len(terms_to_remove) / float(TERM_LOAD))))):
#            end = min(start+TERM_LOAD, len(terms_to_remove))
#            terms = ' '.join(terms_to_remove[start:end])
#            start = end
#            if immediate:
#                self.remove(model, terms)
#            else:
#                taskqueue.Task(url='/bts/remove-terms',
#                               params=dict(index=self.__class__.__name__,
#                                           key=model.key(), terms=terms)).add(
#                        queue_name='index-terms')
#
#    def add(self, model, terms):
#        """Add the terms for the specified model to this index, including both
#        the forward and the inverted indexes.
#
#    terms is a space-separated list of words
#        """
#        logging.debug(
#                "Adding terms '%s' to index %s" % (terms, self.__class__.__name__)
#                )
#        # Identify additional filter properties to include in index
#        filter_properties = self.extract_filter_properties(model)
#        indexed_terms = set()
#        split_terms = terms.split()
#        added_terms = []
#        updated_entries = []
#        entries = []
#        entries_by_term = {}
#        # Query existing terms in chunks of 30
#        start = 0
#        for i in range(0,int(
#                math.ceil(float(len(split_terms) / float(30))))):
#            end = min(start+30, len(split_terms))
#            in_terms = split_terms[start:end]
#            query = self.entry_type.all()
#            for name, value in filter_properties.iteritems():
#                query.filter("%s =" % name, value)
#            query.filter("term in", in_terms)
#            entries.extend(query.fetch(1000))
#            start = end
#        for entry in entries:
#            if entries_by_term.has_key(entry.term):
#                entries_by_term[entry.term].append(entry)
#            else:
#                entries_by_term[entry.term] = [entry]
#        for term in split_terms:
#            logging.debug(
#                    "Adding term '%s' to index %s" % (term, self.__class__.__name__)
#                    )
#            model_key = model.key()
#            term_found = False
#            shortest_entry = None
#            shortest_entry_length = 1000000
#            for entry in entries_by_term.get(term, []):
#                if model_key in entry.item_keys:
#                    term_found = True
#                    break
#                else:
#                    entry_length = len(entry.item_keys)
#                    if entry_length < shortest_entry_length:
#                        shortest_entry = entry
#                        shortest_entry_length = entry_length
#            if not term_found:
#                if not shortest_entry or shortest_entry_length >= 500:
#                    shortest_entry = self.entry_type(term=term,
#                                                     **filter_properties)
#                    shortest_entry.item_keys = []
#                shortest_entry.item_keys.append(model.key())
#                updated_entries.append(shortest_entry)
#                added_terms.append(term)
#            indexed_terms.add(term)
#        db.put(updated_entries)
#        # Remember which terms were added
#        ModelTerms(index_type=self.__class__.__name__, model=model,
#                   indexed_terms = list(indexed_terms)).put()
#        # Execute term_added callback for each added term
#        for term in added_terms:
#            try:
#                self.term_added(model, term, filter_properties)
#            except Exception, e:
#                logging.debug("Unable to process term_added: %s" % e)
#
#    def remove(self, model, terms):
#        logging.debug(
#                "Removing terms '%s' from index %s" % (terms, self.__class__.__name__)
#                )
#        # Identify additional filter properties to include in index
#        filter_properties = self.extract_filter_properties(model)
#        removed_terms = set()
#        for term in terms.split():
#            logging.debug(
#                    "Removing term '%s' from index %s" % (term, self.__class__.__name__)
#                    )
#            query = self.entry_type.all(keys_only=True)
#            for name, value in filter_properties.iteritems():
#                query.filter("%s =" % name, value)
#            query.filter("term =", term)
#            db.run_in_transaction(self.do_remove, query.get(), model, term)
#            removed_terms.add(term)
#        all_model_terms = ModelTerms.all().filter("index_type =",
#                                                  self.__class__.__name__
#                ).filter(
#                "model =", model).fetch(1000)
#        for model_terms in all_model_terms:
#            original_terms = set(model.indexed_terms)
#            new_terms = original_terms - removed_terms
#            if original_terms != new_terms:
#                model_terms.indexed_terms = list(new_terms)
#                model_terms.put()
#
#    def do_remove(self, key, model, term):
#        if key:
#            entry = self.entry_type.get(key)
#            try:
#                if entry:
#                    model_keys = set(entry.item_keys)
#                    model_keys.remove(model.key())
#                    entry.item_keys = list(model_keys)
#                    entry.put()
#            except KeyError, e:
#                pass
#
#    def extract_filter_properties(self, model):
#        """Extract the properties used to filter on this index"""
#        filter_properties = dict()
#        for name, field in self.entry_type.properties().iteritems():
#            if name not in ['term', 'item_keys', 'dt_created', 'dt_updated'
#                    ] and isinstance(field, db.Property
#                    ) and not isinstance(field,
#                                         polymodel._ClassKeyProperty
#                    ):
#                filter_properties[name] = getattr(model, name)
#        return filter_properties
#
#def create_index(index_name, model_type, **kwargs):
#    """Creates an index for the specified model_type under the specified name.
#
#    If code in this application has already initialized an index with the exact
#    same name (case sensitive), this function will simply return the existing
#    index without modification.
#
#    For kwargs, specify name value pairs to indicate which properties of the
#    model to include in the index. The name should be the property name, and the
#    value is a dictionary containing any of the following:
#
#    'filter' - if True, this property will not be parsed into the full-text
#    index but will instead be used as a filter property for faceted search.
#    Defaults to False.
#
#    'case_insensitive' - if True, this property will be included in the index
#    in all lowercase to allow case insensitive searches. Defaults to True.
#
#    'stemmed' - if True, this property will be stemmed to allow for common-root
#    searches. Defaults to True.
#    """
#    full_index_name = "%s_index" % index_name
#    index = existing_indexes.get(full_index_name.lower(), None)
#    if not index:
#    # Create new index
#        entry_type_properties = dict()
#        for name, settings in kwargs.iteritems():
#            if settings.get('filter', False):
#                property = copy.copy(getattr(model_type, name))
#                if isinstance(property, db.ReferenceProperty):
#                    property.collection_name = None
#                entry_type_properties[property.name] = property
#        entry_type = type(index_name, (IndexEntry,), entry_type_properties)
#        index = type(full_index_name, (Index,),
#                     dict(model_type=model_type, entry_type=entry_type,
#                          properties=kwargs))()
#        # Register index with model_type
#        try:
#            model_type.bigtablesearch_indexes.append(index)
#        except AttributeError, e:
#            model_type.bigtablesearch_indexes = [index]
#        # Save index to list of existing indexes
#        existing_indexes[full_index_name.lower()] = index
#    return index
#
#def request_indexing(model, force=False):
#    """Requests that the supplied model object be indexed in all indexes
#    for that kind of model.
#
#    The indexing is performed asynchronously via a task queue.
#    """
#    indexes = None
#    try:
#        indexes = model.bigtablesearch_indexes
#    except:
#        pass
#    if indexes:
#        taskqueue.Task(url='/bts/index-model',
#                       params=dict(key=model.key(), force=force)).add(
#                queue_name='index-model')
#
#def add_to_index(**kwargs):
#    model = kwargs['instance']
#    request_indexing(model)
#
## Make sure indexes are loaded
#import indexes