import fix_path

from google.appengine.ext import db, deferred
from google.appengine.runtime import DeadlineExceededError
import logging
import sys

class Mapper(object):
    """Iterates over every entity of a datastore kind in batches, resuming
    via the deferred task queue when a request deadline (or any error) hits.

    Subclasses set ``KIND`` to a model class and override ``map()`` (and
    optionally ``finish()``).  Drive it with ``run()``; each pass processes
    up to ``max_fetch`` keys, flushing puts/deletes every ``batch_size``.
    """

    # Subclasses should replace this with a model class (eg, model.Person).
    KIND = None
    # When True, every batch flush runs inside db.run_in_transaction().
    runintransaction = False
    # When True, map() is skipped entirely and fetched keys are bulk-deleted.
    fastdelete = False

    def __init__(self):
        self.to_put = []        # entities queued for db.put()
        self.to_delete = []     # entities/keys queued for db.delete()
        self.start_key = None   # last flushed key; resume point for the query
        self.batch_size = 0
        self.max_fetch = 1000   # safe default if _continue() runs before run()
        self.delay = 30         # seconds between deferred continuations/retries

    def map(self, key):
        """Updates a single entity.

        Implementers should return a tuple containing two iterables
        (to_update, to_delete).
        """
        return ([], [])

    def finish(self):
        """Called when the mapper has finished, to allow for any final work."""
        self.start_key = None

    def get_query(self):
        """Returns a keys-only query over KIND with resume filters applied.

        In fastdelete mode no ordering or filter is applied: matching
        entities are deleted as we go, so each pass refetches from the start.
        """
        q = self.KIND.all(keys_only=True)
        if not self.fastdelete:
            q.order("__key__")
            # If we're resuming, pick up where we left off last time.
            if self.start_key:
                q.filter("__key__ >", self.start_key)
        return q

    def run(self, batch_size=100, max_fetch=1000):
        """Starts the mapper running.

        Args:
            batch_size: pending writes/deletes flushed per datastore batch.
            max_fetch: maximum number of keys fetched per _continue() pass.
        """
        logging.debug("run - batch_size = %d", batch_size)
        self.start_key = None
        self.batch_size = batch_size
        self.max_fetch = max_fetch
        self._continue()

    def _batch_write(self, key=None):
        """Flushes pending puts/deletes, then records `key` as the resume point."""
        if key and key.has_id_or_name():
            logging.debug("batch_write: last key %s", key.id_or_name())
        else:
            logging.debug("batch_write: last key %s", key)
        if self.to_delete:
            logging.debug("batch_write: to_delete %d", len(self.to_delete))
            db.delete(self.to_delete)
            self.to_delete = []
        if self.to_put:
            logging.debug("batch_write: to_put %d", len(self.to_put))
            db.put(self.to_put)
            self.to_put = []
        self.start_key = key
        if self.start_key and self.start_key.has_id_or_name():
            logging.debug("batch_write: done, next first key %s",
                          self.start_key.id_or_name())
        else:
            logging.debug("batch_write: done, next first key %s", self.start_key)

    def _continue(self):
        """Processes up to max_fetch keys, then either defers another pass or,
        when the query came back empty, calls finish()."""
        if self.start_key and self.start_key.has_id_or_name():
            logging.debug("continue: start_key = %s, batch_size = %d",
                          self.start_key.id_or_name(), self.batch_size)
        else:
            logging.debug("continue: start_key = %s, batch_size = %d",
                          self.start_key, self.batch_size)
        is_empty = True
        try:
            # Flush anything left over from a previous, interrupted pass.
            if self.to_put or self.to_delete:
                if self.runintransaction:
                    db.run_in_transaction(self._batch_write, self.start_key)
                else:
                    self._batch_write(self.start_key)
            q = self.get_query()
            key = None
            if self.fastdelete:
                # Bulk delete: skip map() and queue one batch of raw keys.
                self.to_delete = q.fetch(limit=self.batch_size)
                if self.to_delete:
                    # BUGFIX: previously is_empty stayed True here, so the
                    # mapper finished after deleting only the first batch
                    # even when more entities remained.
                    is_empty = False
            else:
                # Keep updating records until we run out of keys (or time).
                for i, key in enumerate(q.fetch(limit=self.max_fetch)):
                    is_empty = False
                    map_updates, map_deletes = self.map(key)
                    self.to_put.extend(map_updates)
                    self.to_delete.extend(map_deletes)
                    # Flush updates and deletes in batches as we go.
                    if ((i + 1) % self.batch_size == 0) or \
                            ((len(self.to_put) + len(self.to_delete)) >= self.batch_size):
                        if self.runintransaction:
                            db.run_in_transaction(self._batch_write, key)
                        else:
                            self._batch_write(key)
            # Flush whatever remains from the final partial batch.
            if self.to_put or self.to_delete:
                if self.runintransaction:
                    db.run_in_transaction(self._batch_write, key)
                else:
                    self._batch_write(key)
            elif key:
                self.start_key = key
        except (DeadlineExceededError, Exception):
            # DeadlineExceededError derives from BaseException in the GAE
            # runtime, so it must be named explicitly alongside Exception.
            # Any failure is retried from start_key via a new deferred task;
            # logging.exception records the full traceback (the old bare
            # except hid it behind a mangled sys.exc_info() debug line).
            logging.exception("continue: error, deferring retry")
            deferred.defer(self._continue, _countdown=self.delay)
            return
        if not is_empty:
            # The datastore may still hold more entities; queue a new task
            # to pick up where we left off.
            logging.debug("continue: datastore possibly not empty, restarting.")
            deferred.defer(self._continue, _countdown=self.delay)
            return
        self.finish()