from google.appengine.ext import db
from google.appengine.api.users import User
from app.helpers.queries import Query
from app.helpers.simplify import simplify
from datasets import DataSet
from valuesets import ValueSet
from fieldsets import FieldSet
from tags import Tag
from versions import Version
import logging

# Each batch must be < 1MB (datastore limit per put() call)
# Batch size should be estimated number of records to slip under that
_COMMIT_BATCH_SIZE = 100 # Assumes average valueset < 10kb

class ValueSetInterface(object):
  """ An encapsulation of and interface for batch operations on ValueSets
  
  Writes are staged in memory (unsaved ValueSets, pending Tag count
  deltas, pending FieldSet count deltas) and flushed to the datastore
  in batches by commit().
  """
  
  def __init__(self, user, dataset):
    """ Args:
      user : google.appengine.api.users.User performing the operations
      dataset : DataSet that all operations are scoped to
    """
    assert isinstance(user, User)
    self.user = user
    
    assert isinstance(dataset, DataSet)
    self.dataset = dataset
    
    # Cached FieldSet entities by key
    self._fieldsets = {}
    
    # Pending count increments by fieldset key, applied at commit()
    self._fieldset_counts = {}
    
    # Dict from valueset key to unsaved ValueSet object
    self._valuesets = {}
    
    # Pending Tag objects with adjusted counts, applied at commit()
    self._tags = {}
    
    # Batch size for datastore queries (see query())
    self.batch_size = 100
  
  def update_by_primary_key(self, primary_key=None, *value_tuples):
    """ Update a row of data given a primary key
    
    Args:
      primary_key : The key of the primary valueset for this person.
        If None, falls back to lookup-based insert_or_update_row.
      *value_tuples : A list of (fieldset_key, values) tuples
        Values should take the form of a dict
    
    Returns:
      A list of ValueSets inserted or modified
    
    """
    if not primary_key:
      return self.insert_or_update_row(*value_tuples)
    
    primary = ValueSet.get(primary_key)
    assert primary
    assert primary.dataset_key == self.dataset.key()
    assert primary.is_primary
    
    # Typecast to the model class declared by the primary's fieldset
    model = self.get_model(primary.fieldset_key)
    primary = model.from_valueset(primary)
    
    return self.insert_or_update_secondary(primary, *value_tuples)
  
  def insert_or_update_row(self, *value_tuples):
    """ Insert or update a row of data for a given person
    
    Args:
      *value_tuples : A list of (fieldset_key, values) tuples
        Values should take the form of a dict. The first item
        in the list should correspond to the primary valueset
    
    Returns:
      A list of ValueSets inserted or modified (primary first)
    
    """
    # Establish the primary valueset
    primary_fieldset_key, primary_values = value_tuples[0]
    # Convert unicode keys to strings so they can be used as **kwargs
    primary_values = dict((str(key), value)
      for key, value in primary_values.items())
    
    primary = self._find_primary(**primary_values)
    if primary:
      self._update_primary(primary, **primary_values)
    else:
      primary = self._make_primary(primary_fieldset_key, **primary_values)
    
    self._valuesets[primary.key()] = primary
    
    value_tuples = value_tuples[1:]
    return [primary] + self.insert_or_update_secondary(primary, *value_tuples)
  
  def insert_or_update_secondary(self, primary, *value_tuples):
    """ Given a primary valueset, go create or update secondary ones
    
    Args:
      primary : The primary ValueSet for this person
      *value_tuples : A list of (fieldset_key, values) tuples
        Values should take the form of a dict
    
    Returns:
      A list of ValueSets inserted or modified
    
    """
    valuesets_touched = []
    
    # Make or update secondary fieldsets
    for fieldset_key, values in value_tuples:
      # A tuple addressed at the primary's own fieldset updates the
      # primary itself rather than creating a secondary
      update_primary = False
      secondary = None
      if str(fieldset_key) == str(primary.fieldset_key):
        update_primary = True
      else:
        secondary = self._find_secondary(primary, fieldset_key)
        
      # Convert unicode keys to strings so they can be used as **kwargs
      values = dict((str(key), value) for key, value in values.items())
      
      if update_primary:
        self._update_primary(primary, **values)
        secondary = primary
      elif secondary:
        self._update_secondary(secondary, **values)
      else:
        secondary = self._make_secondary(primary, fieldset_key, **values)
      self._valuesets[secondary.key()] = secondary
      valuesets_touched.append(secondary)
    
    # Return a list of valuesets inserted or modified
    return valuesets_touched
    
  def commit(self):
    """ Puts all the unsaved ValueSets (plus a Version of each) to the
    datastore, then applies the pending tag updates and fieldset count
    increments.
    
    ValueSets are put in batches of _COMMIT_BATCH_SIZE to stay under
    the datastore's per-call size limit.
    """
    commit_list = []
    version_list = []
    count = 0
    
    for valueset in self._valuesets.itervalues():
      commit_list.append(valueset)
      version_list.append(Version.make(valueset))
      count += 1
      if count >= _COMMIT_BATCH_SIZE:
        db.put(commit_list)
        db.put(version_list)
        
        count = 0
        commit_list = []
        version_list = []
    
    # Commit remainders
    if commit_list:
      db.put(commit_list)
      db.put(version_list)
    
    self._valuesets = {}
    
    for tag in self._tags.itervalues():
      tag.update() # TODO: Should do this in batches somehow
    self._tags = {}
    
    for fieldset_key, count in self._fieldset_counts.items():
      FieldSet.incr_count(fieldset_key, count)
    self._fieldset_counts = {}
      
  def get_fieldset(self, fieldset_key):
    """ Fetch a FieldSet by key, memoized in self._fieldsets """
    if fieldset_key not in self._fieldsets:
      self._fieldsets[fieldset_key] = FieldSet.get(fieldset_key)
    return self._fieldsets[fieldset_key]
  
  def get_model(self, fieldset_key):
    """ Return the model class declared by the given FieldSet """
    return self.get_fieldset(fieldset_key).model
  
  def lookup_primary(self, *values):
    """ Finds all primary ValueSets with the given lookups, typecasts them 
    
    Args:
      *values: List of values to search on
    Returns:
      Typecasted primary ValueSets
    
    """
    # Constrain to primary valuesets in this dataset
    query = Query(ValueSet).filter(is_primary=True,
      dataset_key=self.dataset.key())
    
    # Normalize each value into a lookup token; iterable values are
    # sorted then joined so element order does not affect the lookup
    lookups = []
    for value in values:
      simplified = simplify(value)
      if hasattr(simplified, '__iter__'):
        simplified.sort()
        lookups.append(''.join(simplified))
      else:
        lookups.append(simplified)
    
    query.filter('lookups IN ', lookups)
    
    # Fetch query and typecast
    return [self.get_model(v.fieldset_key).from_valueset(v) for v in query]
  
  def _incr_tag_counts(self, new_tags):
    """ Merge newly observed tags into the pending tag increments """
    for tag_name, tag in new_tags.items():
      if tag_name in self._tags:
        self._tags[tag_name].count += 1
      else:
        self._tags[tag_name] = tag
  
  def _incr_fieldset_count(self, fieldset_key):
    """ Record a pending +1 on the given FieldSet's record count """
    self._fieldset_counts[fieldset_key] = \
      self._fieldset_counts.get(fieldset_key, 0) + 1
  
  def _make_primary(self, fieldset_key, **values):
    """ Given a fieldset key and values, makes a primary ValueSet """
    model = self.get_model(fieldset_key)
    primary = model.make_primary(**values)
    self._incr_tag_counts(primary.get_tags())
    self._incr_fieldset_count(fieldset_key)
    return primary
  
  def _find_primary(self, **values):
    """ Find the primary valueset for a given set of values, None otherwise 
    
    Args:
      **values: Keyword from field att_name to value
    
    Returns:
      A typecasted primary ValueSet if one matches, None otherwise
      
    """
    # Fetch query and evaluate more carefully in Python
    for valueset in self.lookup_primary(*values.values()):
      # Check for any differences; a field only conflicts when both
      # sides hold a truthy value and those values differ
      match = True
      for key, query_value in values.items():
        valueset_value = getattr(valueset, key, None)
        if valueset_value and query_value and valueset_value != query_value:
          match = False
          break
      
      if match: return valueset
      
  def _update_primary(self, primary, **values):
    """ Given a primary valueset, updates it with the given values and
    re-links all of its secondaries to the updated primary """
    self._update_valueset(primary, **values)
    primary.set_primary()
    for valueset in Query(ValueSet).filter(primary_key=primary.key()):
      if valueset.key() == primary.key(): continue
      # Prefer any staged (unsaved) copy of the secondary over the
      # freshly fetched one so pending edits are not lost
      valueset = self._valuesets.setdefault(valueset.key(), valueset)
      valueset.set_secondary(primary)
  
  def _make_secondary(self, primary, fieldset_key, **values):
    """ Given a primary, fieldset_key, and values, makes secondary ValueSet """
    model = self.get_model(fieldset_key)
    secondary = model.make_secondary(primary, **values)
    # Propagate the primary's related_fieldsets to sibling secondaries
    for valueset in Query(ValueSet).filter(primary_key=primary.key()):
      if valueset.key() == primary.key(): continue
      if valueset.key() == secondary.key(): continue
      valueset = self._valuesets.setdefault(valueset.key(), valueset)
      valueset.related_fieldsets = primary.related_fieldsets
    
    self._incr_tag_counts(secondary.get_tags())
    self._incr_fieldset_count(fieldset_key)
    return secondary
  
  def _find_secondary(self, primary, fieldset_key):
    """ Finds the secondary ValueSet for a given primary and fieldset_key,
    typecasted; None if it does not exist """
    key_name = ValueSet.get_key_name(fieldset_key, primary.key())
    key = db.Key.from_path('ValueSet',key_name)
    # Prefer a staged (unsaved) copy over a datastore fetch
    if key in self._valuesets:
      valueset = self._valuesets[key]
    else:
      valueset = ValueSet.get(key)
    if valueset:
      model = self.get_model(fieldset_key)
      return model.from_valueset(valueset)
  
  def _update_secondary(self, secondary, **values):
    """ Given a secondary valueset, updates it with the given values """
    self._update_valueset(secondary, **values)
  
  def _update_valueset(self, valueset, **values):
    """ Apply values to a valueset and stage the resulting tag count
    deltas (removed tags decrement, newly added tags increment) """
    old_tags = valueset.get_tags()
    valueset.update(**values)
    new_tags = valueset.get_tags()
    
    # Tags present before but not after the update lose a count
    for tag_name in old_tags.keys():
      if not new_tags.pop(tag_name, None):
        if tag_name in self._tags:
          self._tags[tag_name].count -= 1
        else:
          self._tags[tag_name] = old_tags[tag_name]
          self._tags[tag_name].count = -1
    
    # Whatever survives the pops above is newly added and gains a count
    self._incr_tag_counts(new_tags)
  
  def query(self, include_fieldset_keys, tag_keys=None,
            filter_fieldset_keys=None):
    """ Generator that yields ValueSets matching criteria, sorted on agg_sort
        
    Args:
      include_fieldset_keys : Keys of FieldSets to include in query results
        if they exist
      tag_keys : Tags that results must have (if they have that fieldset)
      filter_fieldset_keys : Keys of FieldSets the results must have
    Returns:
      A generator yielding lists of related ValueSets matching tags; each
      list has one slot per include_fieldset_keys entry, None where the
      person has no ValueSet for that fieldset
    
    """
    # Guard against the mutable-default-argument pitfall: defaults are
    # None sentinels, normalized to fresh lists per call
    tag_keys = tag_keys or []
    filter_fieldset_keys = filter_fieldset_keys or []
    batch_size = self.batch_size
    
    # Prefetch all tags in one go
    tags = db.get(tag_keys)
    # Sorted list of tags by least likely to most
    # Filter on least likely first to speed things up
    tags.sort(key=(lambda t : t.count))
    
    # Get relevant fieldsets for filtering
    relevant_fieldset_keys = include_fieldset_keys + [tag.fieldset_key for tag in tags]
    relevant_fieldset_keys = set(relevant_fieldset_keys)
    
    # This forces any tag usage to require that fieldset to exist
    # filter_fieldset_keys += [tag.fieldset_key for tag in tags]
    
    # Sort filter fieldset keys from least likely to most
    filter_fieldsets = db.get(filter_fieldset_keys)
    filter_fieldsets.sort(key=(lambda f : f.count))
    
    # Prefetch all include fieldsets
    include_fieldsets = db.get(include_fieldset_keys)
    
    # Cache fieldsets
    for fieldset in (include_fieldsets + filter_fieldsets):
      self._fieldsets[fieldset.key()] = fieldset
    
    offset = '' # Offset allows paging
    current = {} # Dict from fieldset keys to valuesets for current_primary_key
    current_primary_key = None # Current "person" we're getting data for
    reject_current = False # If current primary key does not match, set to True
    
    while True:
      # Fetch from db based on batch size; only the least-likely filter
      # fieldset is applied in the datastore query, the rest are
      # verified in Python below
      query = Query(ValueSet)
      query.filter(dataset_key=self.dataset.key())
      for t in filter_fieldsets[:1]:
        query.filter(related_fieldsets = t.key())
      query.filter('unique_agg_sort > ', offset)
      query.order('unique_agg_sort')
      fetched = query.fetch(batch_size)
      
      for valueset in fetched:
        
        # Change in primary key means data complete
        if (valueset.primary_key != current_primary_key):
          # No filter conflicts, yield
          if current and not reject_current:
            yield [current[key] if key in current else None
              for key in include_fieldset_keys]
          
          # Now reset for next primary key
          current = {}
          current_primary_key = valueset.primary_key
          reject_current = False
        
        # Previous filter mismatch -- ignore this valueset
        if reject_current: continue
        
        # Make sure it has all the related fieldsets we need
        for fieldset in filter_fieldsets:
          if not fieldset.key() in valueset.related_fieldsets:
            reject_current = True
            break
        if reject_current: continue
        
        # If not a relevant fieldset, ignore
        if valueset.fieldset_key not in relevant_fieldset_keys: continue
        
        # Already a ValueSet for this fieldset in dict
        # Must be a paging mismatch -- ignore
        if valueset.fieldset_key in current:
          continue
        
        # Cast with appropriate properties
        valueset = self.get_model(valueset.fieldset_key).from_valueset(valueset)
        valueset_tag_names = set(valueset.get_tags().keys())
        
        # Check tags
        for tag in tags:
          # Same fieldset implies we must check for existence of tag
          if tag.fieldset_key == valueset.fieldset_key:
            if not tag.name in valueset_tag_names: # Filter fail
              reject_current = True
              break
        # A rejected person's data is never yielded; skip staging it
        if reject_current: continue
        
        # Add to valueset if selected
        if valueset.fieldset_key in include_fieldset_keys:
          current[valueset.fieldset_key] = valueset
      
      if len(fetched) < batch_size:
        # Yield last item in list
        if current and not reject_current:
          yield [current[key] if key in current else None
            for key in include_fieldset_keys]
        return # We're done!
      
      else: # New offset for next batch
        offset = valueset.unique_agg_sort
