#!/usr/bin/python
# -*- coding: utf-8 -*-

import operator
from datetime import date, datetime
from google.appengine.ext import db
from bts.core import *
from bts.models import *


class query(object):
    """A query against the bts index.  The query behaves similarly to a
    datastore Query, with the following differences:
    
    1. bts.search.query supports a filter_fulltext method for full-text search
    
    2. bts.search.query supports a '~=' operator that implements full-text
    matching semantics.
    
    3. The fetch() and count() methods have no upper bound on the limit and
    offset parameters.
    
    4. For performance, once either fetch() or count() have been called, the
    results are cached.  To pick up the latest changes, you'll need to construct
    a new query or add a filter to the existing query.
    
    5. bts.search.query also provides a facet() method that can be used to
    obtain the unique values for a particular field from the index. The results
    are limited by whatever filters have been set, making it
    useful for faceted search.
    
    6. bts.search.query does not currently support keys_only queries.
    
    7. bts.search.query has no run method and is not itself iterable
    """

    # Sanity cap for fetch(); chosen large enough to be effectively unbounded.
    _MAX_FETCH = 3000000

    def __init__(self, model_class):
        """Construct the query.

        Keyword arguments:

        model_class
            The type of model for which this query will search
        """
        self.model_class = model_class
        self.model_class_name = model_class.__name__
        # Map field name -> descriptor dict for quick lookup later.
        self.field_descriptors = dict()
        for descriptor in self.model_class.bts_fields:
            self.field_descriptors[descriptor['field']] = descriptor
        self.offset = 0
        self.limit = 10
        self.filters = []
        # True once __prepare_fetch has run; fetch/count/facet reuse its
        # cached results until a new filter invalidates them.
        self.prepared = False

    def filter_fulltext(self, querystring, field="bts_all", and_search=False):
        """Filter as full-text (meaning the querystring is tokenized and
        matched against the field word-by-word).

        Keyword parameters:

        querystring
            The text against which to query. Will be tokenized.

        and_search
            True if search requires an exact match (all words), False if search
            should look for matches to any of the words in the querystring.
            Defaults to False.

        field
            The field against which to search.  Defaults to "bts_all" which
            will search against all fields.
        """
        words = tokenize(querystring)
        # A single word is always an exact match; '~=' marks optional (OR)
        # terms that __prepare_fetch treats as non-required.
        # (Renamed from `operator`, which shadowed the imported module.)
        if and_search or len(words) == 1:
            op = "="
        else:
            op = "~="
        for word in words:
            self.filter("%s %s" % (field, op), word)

    def filter(self, condition, value):
        """Add a filtering condition (just like on a regular datastore Query).

        Returns self, so calls can be chained.  Adding a filter invalidates
        any cached results from a previous fetch()/count()/facet().
        """
        self.filters.append((condition, value))
        self.prepared = False
        return self

    def fetch(self, limit=10, offset=0):
        """Fetch the results of this query.

        Once called, the results are cached and subsequent calls to fetch(),
        count() and facet() will reflect the same query results.  However,
        each time that fetch() is called, the latest actual entities are fetched
        from the datastore.

        Raises ValueError (a subclass of Exception, so existing callers are
        unaffected) when limit or offset exceeds the supported maximum.
        """
        if limit > self._MAX_FETCH:
            raise ValueError("Please specify a limit of 3000000 items or fewer")
        if offset > self._MAX_FETCH:
            raise ValueError("Please specify an offset of 3000000 or smaller")
        if not self.prepared:
            self.__prepare_fetch()
        if offset >= self.hit_count:
            return []
        keys = self.keys_by_score[offset:min(offset + limit, self.hit_count)]
        # db.get returns None for entities deleted since the index was
        # written; drop those holes from the result list.
        final_result = []
        for item in db.get(keys):
            if item:
                final_result.append(item)
        return final_result

    def count(self):
        """Count the matches to this query.

        Once called, the results are cached and subsequent calls to fetch(),
        count() and facet() will reflect the same query results.
        """
        if not self.prepared:
            self.__prepare_fetch()
        return self.hit_count

    def facet(self, field_name):
        """Within the scope of results matching this query, this function
        returns a dictionary with the distinct values for the specified field
        and their corresponding frequencies within the search results.
        """
        if not self.prepared:
            self.__prepare_fetch()
        all_keys = set(self.keys_by_score)
        values = dict()
        hits = TermHits.all().filter("field =", str(Field(self.model_class_name,
                field_name))).fetch(1000)
        for hit in hits:
            # Pick whichever typed term column is populated.  NOTE: falsy
            # values (0, False, "") are treated as unset, matching the
            # original truthiness-based dispatch.
            term = (hit.term_string or hit.term_reference or hit.term_integer
                    or hit.term_bool or hit.term_date or hit.term_datetime)
            if not term:
                # Previously a hit with no populated term reused a stale
                # `term` from the prior iteration (or raised NameError on
                # the first); skip such hits instead.
                continue
            count = values.get(term, 0)
            count += len(all_keys.intersection(hit.model_keys))
            values[term] = count
        # Keep only values that actually occur within this query's results.
        final_values = dict()
        for (value, count) in values.items():
            if count > 0:
                final_values[value] = count
        return final_values

    def _term_type(self, op, value):
        """Map a filter value onto the TermHits column it is indexed under.

        Returns a (term_type, value) pair; db.Model instances (and lists of
        them, for 'in' filters) are normalized to their keys.
        """
        # TODO: check attribute instead of value
        # For 'in' filters the value is a list; sniff its first element.
        # Guard against an empty list (previously an IndexError).
        test_value = value
        if op == 'in' and value:
            test_value = value[0]
        if isinstance(test_value, db.Model):
            if isinstance(value, list):
                value = [item.key() for item in value]
            else:
                value = value.key()
            return ('term_reference', value)
        if isinstance(test_value, db.Key):
            return ('term_reference', value)
        # bool before int: bool is a subclass of int.
        if isinstance(test_value, bool):
            return ('term_bool', value)
        if isinstance(test_value, int):
            return ('term_integer', value)
        # NOTE(review): datetime is a subclass of date, so datetimes are
        # classified as 'term_date' and the 'term_datetime' branch below is
        # unreachable.  Left as-is because the index writer presumably
        # classifies values in the same order -- confirm before reordering.
        if isinstance(test_value, date):
            return ('term_date', value)
        if isinstance(test_value, datetime):
            return ('term_datetime', value)
        return ('term_string', value)

    def __prepare_fetch(self):
        """Does the work of querying the index. Saves the matched keys in
        self.keys_by_score and the hit count in self.hit_count.
        """
        if not self.filters:
            # No filters: every indexed key matches.  (The original extended
            # the result once per key inside a nested loop, duplicating each
            # row's key list len(keys) times; extend once per row instead.)
            self.keys_by_score = []
            for term_hits in TermHits.all().filter("field =", str(Field(self.model_class_name,
                    "bts_all"))).fetch(1000):
                self.keys_by_score.extend(term_hits.model_keys)
        else:
            model_key_scores = dict()
            required_keys = list()

            # Collect required filters

            for (condition, value) in self.filters:
                (field_name, op) = condition.split(' ')
                # '~=' terms are optional (OR semantics); everything else
                # must match (AND semantics).
                required = (op != "~=")
                if op == "~=":
                    op = "="
                (term_type, value) = self._term_type(op, value)
                all_keys = set()
                for term_hits in TermHits.all().filter("field =", str(Field(self.model_class_name,
                        field_name))).filter("%s %s" % (term_type, op),
                        value).fetch(1000):
                    # Accumulate a relevance score per key across all filters.
                    for model_key in term_hits.model_keys:
                        model_key_scores[model_key] = model_key_scores.get(model_key,
                                0) + term_hits.weight
                    if required:
                        all_keys.update(term_hits.model_keys)
                if required:
                    required_keys.append(all_keys)
            # Intersect the key sets of all required filters and drop any
            # scored key that is not in every required set.
            if required_keys:
                allowed_keys = required_keys[0]
                for keys in required_keys[1:]:
                    allowed_keys = allowed_keys.intersection(keys)
                # Iterate a snapshot so we can pop while iterating.
                for key in list(model_key_scores.keys()):
                    if key not in allowed_keys:
                        model_key_scores.pop(key)
            # Order keys by descending score.
            self.keys_by_score = []
            for (key, score) in sorted(model_key_scores.items(), key=
                    operator.itemgetter(1), reverse=True):
                self.keys_by_score.append(key)
        self.hit_count = len(self.keys_by_score)
        self.prepared = True


