#!/usr/bin/python
# -*- coding: utf-8 -*-

from bts.core import tokenize
from bts.models import TermHits, ModelTerms, Field, TermUpdate
from datetime import datetime, date, timedelta
from google.appengine.api.labs import taskqueue
from google.appengine.ext import db
import logging

TERM_DELIMITER = "@|@"
MAX_RUNTIME = timedelta(seconds=15)


def request_indexing(model):
    """Request that the supplied model object be indexed asynchronously. This is
    the preferred mechanism for adding or updating a model in the index.

    Keyword arguments:

    model
        Either a db.Key identifying the model to index, or the model object
        itself. A model object is only queued if it declares a 'bts_fields'
        descriptor and is not already flagged via 'bts_currently_indexing'.
    """

    # Fix: key must be initialized; previously an UnboundLocalError was raised
    # at the final 'if key:' check whenever the model had no descriptor or was
    # currently being indexed.
    key = None
    if isinstance(model, db.Key):
        key = model
    else:
        # Only models that declare a bts_fields descriptor participate in the
        # index. getattr replaces the previous bare except, which could hide
        # unrelated errors.
        descriptor = getattr(model, 'bts_fields', None)
        if descriptor:
            # Skip models that are mid-indexing to avoid queueing redundant
            # work (the flag is set by update_model when it materializes
            # properties).
            currently_indexing = getattr(model, 'bts_currently_indexing',
                                         False)
            if not currently_indexing:
                key = model.key()
    if key:
        taskqueue.Task(url='/bts/index-model',
                       params=dict(key=key)).add(queue_name='index-model')


def request_removal(model):
    """Request that the supplied model object be removed from the index
    asynchronously.  This is the preferred mechanism for removing a model from
    the index.

    Keyword arguments:

    model
        The model object to remove. Only models that declare a 'bts_fields'
        descriptor participate in the index, so others are ignored.
    """

    # getattr replaces the previous bare except, which could silently swallow
    # unrelated errors raised while reading the attribute.
    descriptor = getattr(model, 'bts_fields', None)
    if descriptor:
        taskqueue.Task(url='/bts/remove-model',
                       params=dict(key=model.key())).add(
                       queue_name='index-model')


def remove_model(key):
    """Removes the model identified by the given key from the index, effective
    immediately.

    Keyword arguments:

    key
        The db.Key of the model to remove.
    """
    # Strip this model's key out of every TermHits entry that references it.
    modified_hits = []
    for hit in TermHits.all().filter('model_keys =', key):
        remaining = set(hit.model_keys)
        remaining.discard(key)
        hit.model_keys = list(remaining)
        modified_hits.append(hit)
    db.put(modified_hits)
    # Also delete the per-model bookkeeping of which terms were indexed.
    db.delete(ModelTerms.all(keys_only=True).filter("model =", key).fetch(200))
    

def purge_processed_updates():
    """Purges processed TermUpdates from the database.

    Deletes processed updates in batches of 200, at most 10 batches per
    invocation, to stay within request limits. Returns early once no
    processed updates remain.
    """
    for _ in range(10):
        # Any real timestamp is greater than datetime.min, so this inequality
        # filter matches every processed row while excluding null values.
        # Fix: the previous bound, datetime.now() - timedelta(weeks=10000000),
        # underflowed below datetime.min and raised OverflowError on every
        # call, so this function never actually purged anything.
        keys = TermUpdate.all(keys_only=True).filter(
                "dt_processed >", datetime.min).fetch(200)
        if not keys:
            return
        db.delete(keys)


def update_model(model, descriptor=None):
    """Update the specified model in the index. This has the effect of queuing
    TermUpdates to capture any changes in the terms that are indexed
    for this model.  At this point, the model is not yet searchable by the new
    terms.  For that to happen, the merge_updates function must be run in order
    to merge the queued TermUpdates into the actual index as TermHits.
    
    This method also takes care of materializing any materialized properties
    prior to indexing the model.
    
    Models can optionally define a method called "indexable" to indicate whether
    or not the model is eligible to be added to the index. If indexable returns
    False, the model will still be materialized, but it will not be included
    in the index.
    
    Models can optionally define a method called "term_added" to find out
    whenever new terms are added to the index (for this model). For each added
    term, term_added will be invoked with a single non-keyword argument
    representing the added term. For full text indexed fields, each term will be
    a string, and for other fields it will be the same as the datatype of the
    source property.
    
    This function also makes sure that the synthetic bts_all field contains
    all terms from all indexed fields to allow searching against all fields.
    
    Keyword arguments:
    
    model
        The object to add to the index
    
    descriptor (optional)
        A list of dictionaries identifying which of the model's properties are
        indexable and configuring how they are indexed.  If no descriptor is
        given, this function attempts to find one under the class attribute
        "bts_fields".
        
        Each descriptor dictionary contains the following entries:
        
        field (required)
            The name of the property (field) to include in the index
            
        weight (optional)
            A positive integer weight used in determining strength of match
            during search.  Higher weights mean higher priority in search
            results. The default weight is 1. On full text fields, individual
            words will receive a higher weight based on the number of
            occurrences of the word.  For example, if weight = 2 and the word
            "car" appears 3 times in a field, the word car would be indexed
            with a weight of 2 * 3 = 6.
            
        fulltext (optional)
            True if the field should be indexed as a full text field.  By
            default, text properties like String, Text, StringList, etc. are
            indexed as full-text fields and all other properties (numeric,
            boolean, etc.) are indexed in their original type. To force a non-
            text field to be indexed as full text, set this to True. The default
            value is False.
            
        startswith (optional)
            Applies only to full text indexed fields.  If True, terms will be
            stored for all starting sequences of each word to allow matching
            on starting sequences (e.g. for type-ahead completions).  For
            example, the word "bicycle" would be indexed as:
            "b", "bi", "bic", "bicy", "bicyc", "bicycl" and "bicycle"
            
        stem (optional)
            True if field should be stemmed. This is useful for text fields
            where matching should be performed on word roots instead of whole
            words. For example, if you want a search for "walking" to include
            fields that have the term "walker", set stem to True. The default
            value is True.
             
        keepunstemmed (optional)
            Applies only to fields indexed as full text and stemmed. If True,
            the indexed terms will include both the stemmed and the unstemmed
            versions of words in the original field. 
    """

    if not descriptor:
        try:
            descriptor = model.__class__.bts_fields
        except AttributeError, e:
            raise Exception("To index a model object, please specify a descriptor when calling index or as the property 'bts_fields' on the model class itself")
    model_class_name = model.__class__.__name__
    was_materialized = False

    for (name, property) in model.properties().iteritems():
        # Materialize all properties before we get into a transaction.
        materialized_from = None
        try:
            materialized_from = property.bts_materialized_from
        except AttributeError:
            # This is not a materialized property, just ignore it
            pass
        if materialized_from:
            # Copy the source property's value into the materialized property.
            setattr(model, name, getattr(model, materialized_from))
            was_materialized = True
        else:
            # Touch the property so any lazy loading (e.g. references)
            # happens here, outside of the transaction below.
            getattr(model, name)
    if was_materialized:
        # Some properties were materialized; save the model and flag it so
        # request_indexing does not queue redundant work for this put().
        model.bts_currently_indexing = True
        model.put()

    indexable = True
    try:
        # Identify whether the model is indexable based on its indexable method
        indexable = model.indexable()
    except AttributeError, e:
        # The model does not define an indexable method, just continue
        pass
    if indexable:

        # Pre-load indexed properties outside of the transaction (to make
        # sure references are initialized).

        for descriptor_entry in descriptor:
            field_name = descriptor_entry.get('field', None)
            if not field_name:
                raise Exception("Descriptor entry must include a value for the key 'field'")

            # Read the value before we get into the transaction.

            getattr(model, field_name)

        # Now process TermUpdates in a transaction.

        added_terms = db.run_in_transaction(update_terms, model,
                model_class_name, descriptor)
        try:
            # If model defines a term_added callback, invoke it for every
            # added term.
            term_added = model.term_added
            for term in added_terms:
                term_added(term)
        except AttributeError, e:
            # model does not provide a term_added method, ignore
            pass


def update_terms(model, model_class_name, descriptor):
    """Updates the specified model in the index.

    Intended to run inside a datastore transaction (see update_model).
    Returns the set of terms newly added across all indexed fields.
    """
    new_terms = set()
    for entry in descriptor:
        # Process each indexed field described by the descriptor.
        field_name = entry.get('field', None)
        weight = int(entry.get('weight', 1))
        field = Field(model_class_name, field_name)
        value = getattr(model, field_name)
        if not value:
            continue
        model_terms = ModelTerms.all().ancestor(model).filter(
                'field =', unicode(field)).get()
        if not model_terms:
            # First time this field is indexed for this model: create the
            # record that tracks which terms have been indexed.
            model_terms = ModelTerms(model=model, field=field)
        treat_as_text = isinstance(value, basestring) or \
            entry.get('fulltext', False)
        if treat_as_text:
            # Full-text indexing encodes terms as "<frequency>@|@<word>";
            # report only the word portion to callers.
            for encoded in index_full_text(entry, model, weight, field,
                                           value, model_terms):
                new_terms.add(encoded.split(TERM_DELIMITER)[1])
        else:
            # Native-type indexing preserves the property's original type.
            new_terms.update(index_plain(model, weight, field, value,
                                         model_terms))
    return new_terms


def index_full_text(descriptor_entry, model, weight, field, value,
                    model_terms):
    """Indexes the field as a full-text field (tokenized, stemmed, etc.).

    This method does not actually update the index but simply posts TermUpdates
    which can later be merged into the index. Returns the set of newly added
    encoded terms ("<frequency>@|@<word>").
    """

    want_prefixes = descriptor_entry.get("startswith", False)
    tokens = tokenize(value,
                      stemmed=descriptor_entry.get('stem', True),
                      keep_unstemmed=descriptor_entry.get('keepunstemmed',
                                                          False))
    # Count occurrences of each token and, when startswith is enabled, of
    # every proper prefix of each token (for type-ahead matching).
    frequencies = {}
    for token in tokens:
        frequencies[token] = frequencies.get(token, 0) + 1
        if want_prefixes:
            for length in range(1, len(token)):
                prefix = token[:length]
                frequencies[prefix] = frequencies.get(prefix, 0) + 1
    # Encode each (frequency, word) pair into one string so it can be stored
    # in ModelTerms.terms and diffed against the previous indexing run.
    current_terms = set("%s%s%s" % (count, TERM_DELIMITER, word)
                        for (word, count) in frequencies.iteritems())
    previous_terms = set(model_terms.terms)
    added_terms = current_terms - previous_terms
    removed_terms = previous_terms - current_terms
    all_field = Field(field.model_class_name, 'bts_all')
    for term in added_terms:
        # Post each addition under its own field and the synthetic bts_all
        # field; effective weight is frequency-scaled and capped at 10.
        (frequency, word) = term.split(TERM_DELIMITER)
        capped_weight = min(weight * int(frequency), 10)
        TermUpdate(field=field, weight=capped_weight, model=model,
                   term_string=word).put()
        TermUpdate(field=all_field, weight=capped_weight, model=model,
                   term_string=word).put()
    for term in removed_terms:
        # Guard against stored terms that lack the frequency encoding.
        if TERM_DELIMITER in term:
            (frequency, word) = term.split(TERM_DELIMITER)
            capped_weight = min(weight * int(frequency), 10)
            TermUpdate(remove=True, field=field, weight=capped_weight,
                       model=model, term_string=word).put()
            TermUpdate(remove=True, field=all_field, weight=capped_weight,
                       model=model, term_string=word).put()
    model_terms.terms = list(current_terms)
    model_terms.put()
    return added_terms


def index_plain(model, weight, field, value, model_terms):
    """Indexes the field as a plain field (no text processing).

    The original data type will be maintained if possible.  Currently,
    boolean, integer, date, datetime and reference (db.Model) properties are
    supported.

    This method does not actually update the index but simply posts TermUpdates
    which can later be merged into the index. Returns the list of string
    representations of newly posted values.
    """

    if isinstance(value, list):
        values = value
    else:
        values = [value]
    added_strings = []    # values newly posted this run (returned to caller)
    all_strings = []      # every current value, recorded on model_terms
    for item in values:
        if not item:
            continue
        if isinstance(item, db.Model):
            value_string = unicode(item.key())
        else:
            value_string = unicode(item)
        if value_string not in model_terms.terms:
            if isinstance(item, (db.Model, db.Key)):
                TermUpdate(field=field, weight=weight, model=model,
                           term_reference=item).put()
            elif isinstance(item, bool):
                # bool must be tested before int since bool subclasses int.
                TermUpdate(field=field, weight=weight, model=model,
                           term_bool=item).put()
            elif isinstance(item, int):
                TermUpdate(field=field, weight=weight, model=model,
                           term_integer=item).put()
            elif isinstance(item, datetime):
                # Fix: datetime must be tested BEFORE date (datetime
                # subclasses date); previously datetimes matched the date
                # branch and were stored as term_date, losing the time part.
                TermUpdate(field=field, weight=weight, model=model,
                           term_datetime=item).put()
            elif isinstance(item, date):
                TermUpdate(field=field, weight=weight, model=model,
                           term_date=item).put()
            else:
                TermUpdate(field=field, weight=weight, model=model,
                           term_string=unicode(item)).put()
            added_strings.append(value_string)
        # Fix: record ALL current values, not just newly posted ones.
        # Previously, already-indexed values were dropped from
        # model_terms.terms, so the next run re-posted redundant TermUpdates.
        all_strings.append(value_string)
    model_terms.terms = all_strings
    model_terms.put()
    return added_strings


def noop_stop_condition(current_term_value):
    """Default stop condition for merge_updates: never stop, regardless of
    the current term value."""
    return False


def request_merge_updates(term_type, **kwargs):
    """Requests that updates for the specified term_type (e.g. term_string) be
    merged into the index asynchronously.

    Any extra keyword arguments are forwarded as task parameters so the
    merge handler can reconstruct its original call.
    """

    merge_params = {'term_type': term_type}
    merge_params.update(kwargs)
    taskqueue.Task(url='/bts/merge-updates', params=merge_params).add(
            queue_name='merge-updates')


def merge_updates(term_type, stop_condition=noop_stop_condition,
                  additional_filters=[], **kwargs):
    """Merge TermUpdates of the specified term_type into the index by writing
    out the necessary TermHits objects. When a term is added to the index, the
    logic is roughly as follows:
    
        If adding a term:
            If this term already appears in the index for another model
                Add the new model's key to the existing TermHits.
        
            If this term does not already appear in the index
                Add a new TermHits for this term
            
        If removing a term:
            Remove the model's key from the existing TermHits object
            
    If there are multiple pending TermUpdates for the same field and the same
    value, this function will batch the updates together to improve performance.
    
    This function limits its runtime to about 15 seconds to avoid running into
    App Engine timeouts. If the 15 second limit is hit, this function will
    re-queue itself for subsequent processing. This continues until all updates
    are merged.
    
    Keyword arguments:
    
    term_type
        The type of term ('term_string', 'term_reference', 'term_integer',
        'term_bool', 'term_date' or 'term_datetime')
        
    stop_condition
        A function that takes the value of the last merged term and returns
        True if merging should stop or False if it should continue. Defaults
        to a noop_stop_condition that allows continuation ad-infinitum.
        
    additional_filters
        Additional filters to apply to the query for TermUpdates that can be
        used to limit the scope of what is merged. Each filter is a tuple of
        (expression, value) just as would be specified to the filter method
        on a db Query.
    """
    # NOTE(review): additional_filters uses a mutable default list; it is only
    # iterated here, never mutated, so this is safe in practice but fragile.

    logging.debug("Merging index updates")
    start_time = datetime.now()
    were_updates_processed = False

    # Merge updates one field at a time: peek at the next unprocessed
    # TermUpdate to learn which field to work on.
    # NOTE(review): the loop variable 'filter' shadows the builtin of the
    # same name (harmless here, since the builtin is not used in this scope).

    field_query = TermUpdate.all().filter('dt_processed =', None)
    field_query.filter('%s !=' % term_type, None)
    for filter in additional_filters:
        field_query.filter(*filter)
    field_query.order(term_type)
    while True:

        # Limit our run to around 15 seconds

        if datetime.now() - start_time > MAX_RUNTIME:
            break
        field_update = field_query.get()
        if not field_update or stop_condition(getattr(field_update,
                term_type)):
            break
        field = field_update.field
        logging.debug("Merging index updates for field %s" % field)
        # Accumulators for the current term value: model keys to add/remove,
        # grouped by weight, plus the TermUpdates consumed so far.
        added_keys = dict()
        removed_keys = dict()
        processed_updates = []
        prior_update = None

        # Find the next 100 unprocessed TermUpdates for this field

        query = TermUpdate.all().filter('dt_processed =', None)
        query.filter('%s !=' % term_type, None)
        query.filter('field =', str(field))
        for filter in additional_filters:
            query.filter(*filter)
        query.order(term_type)
        updates = query.fetch(100)
        number_of_updates = len(updates)

        # For each TermUpdate, accumulate the keys. The loop intentionally
        # runs one index past the end of the batch so the final term group
        # is flushed (update is None on that last iteration).

        for i in range(0, number_of_updates + 1):

            # Limit our run to around 15 seconds

            if datetime.now() - start_time > MAX_RUNTIME:
                break
            update = None
            if i < number_of_updates:
                update = updates[i]
            current_term_value = None
            prior_term_value = None
            if update:
                current_term_value = getattr(update, term_type)
            if prior_update:
                prior_term_value = getattr(prior_update, term_type)

            # Once we're on a new term (or the end of our run), write the
            # accumulated updates into the index as TermHits.

            if i > 0 and (i == number_of_updates or prior_update and
                          current_term_value != prior_term_value or
                          stop_condition(current_term_value)):
                updated_hits = set()
                were_updates_processed = True
                existing_hits_query = TermHits.all()
                existing_hits_query.filter('%s =' % term_type,
                        prior_term_value)
                existing_hits_query.filter('field =', str(field))
                existing_hits_query.order('-dt_updated')
                existing_hits = existing_hits_query.fetch(1000)

                for existing_hit in existing_hits:
                    # Add keys to existing TermHits where possible, but only
                    # if the combined key list stays under the 3000 cap.
                    keys_to_add = added_keys.get(existing_hit.weight,
                            None)
                    if keys_to_add and len(keys_to_add) + len(existing_hit.model_keys) < \
                        3000:
                        keys_to_add.update(set(existing_hit.model_keys))
                        existing_hit.model_keys = list(keys_to_add)
                        updated_hits.add(existing_hit)
                        added_keys.pop(existing_hit.weight)

                for existing_hit in existing_hits:
                    # Remove keys from existing TermHits where necessary
                    keys_to_remove = removed_keys.get(existing_hit.weight,
                            None)
                    if keys_to_remove:
                        existing_hit.model_keys = list(set(existing_hit.model_keys) -
                                keys_to_remove)
                        updated_hits.add(existing_hit)

                for (weight, model_keys) in added_keys.iteritems():
                    # Write new TermHits for weights that could not be folded
                    # into an existing hit; drop keys that were also removed.
                    model_keys = model_keys - removed_keys.get(weight,
                            set())
                    hits_params = dict(field=prior_update.field, weight=
                            weight, model_keys=list(model_keys))
                    hits_params[str(term_type)] = prior_term_value
                    updated_hits.add(TermHits(**hits_params))

                # Flush the consumed TermUpdates (now stamped dt_processed)
                # to the datastore.

                db.put(processed_updates)

                # Persist changed hits; delete hits left with no keys.

                added_or_updated_hits = []
                deleted_hits = []
                for hit in updated_hits:
                    if len(hit.model_keys) == 0:
                        if hit.is_saved():
                            deleted_hits.append(hit)
                    else:
                        added_or_updated_hits.append(hit)
                db.put(added_or_updated_hits)
                db.delete(deleted_hits)
                # Reset accumulator variables for the next term group.
                added_keys = dict()
                removed_keys = dict()
                processed_updates = []

            # If we're at the end of our run, stop processing

            if i == number_of_updates or stop_condition(current_term_value):
                break
            # Accumulate this update's model key under its weight, in either
            # the removed or added bucket.
            accumulator = removed_keys if update.remove else added_keys
            keys = accumulator.get(update.weight, set())
            try:
                keys.add(update.model.key())
            except:

                # Exception may be thrown if referenced model has been deleted
                # Just ignore it

                pass
            accumulator[update.weight] = keys
            prior_update = update
            update.dt_processed = datetime.now()
            processed_updates.append(update)
    if were_updates_processed:

        # Work may remain (we stop on the time limit and fetch in batches of
        # 100), so re-queue this merge operation for further processing.

        request_merge_updates(term_type, **kwargs)


def merge_string_updates(start_letter, end_letter):
    """Merge updates for string (full-text) terms.

    This is basically the same as merge_updates except that the merge is
    restricted to words beginning with a letter in the specified range
    (inclusive on both start_letter and end_letter).
    """

    def stop_condition(current_term_value):
        # Stop once terms sort past end_letter; never stop when end_letter
        # is empty/None.
        return end_letter and current_term_value > end_letter

    range_filters = [('term_string >=', start_letter)]
    return merge_updates('term_string', stop_condition, range_filters,
                         start_letter=start_letter, end_letter=end_letter)


def add_to_index(**kwargs):
    """This is a handler for a Django signal. It can be used to index objects
    whenever they're updated by connecting to the post_save signal"""

    request_indexing(kwargs['instance'])


def remove_from_index(**kwargs):
    """This is a handler for a Django signal. It can be used to index objects
    whenever they're deleted by connecting to the post_delete signal"""

    request_removal(kwargs['instance'])


