from logging import getLogger

from datetime import datetime

from zope.interface.exceptions import DoesNotImplement

from twisted.python import compat

from meercat.harvester.base.interfaces import IHarvester, IQueriableHarvester
from meercat.storage.interfaces import IStorage
from meercat.job import IncrementalJob, ScheduledJob, IncrementalDailyDowntimeJob



# Module-level logger shared by all harvester job classes in this module.
log = getLogger('meercat.harvester.job')

def incrementalJobCall(self, limit, offset):
    """ Store all instances returned from the harvester with the provided
    limit and offset.

    Expects ``self`` to provide ``_harvester`` (IQueriableHarvester) and
    ``_storage`` (IStorage); bound as ``__call__`` on the incremental
    harvest job classes below.

    Raises Exception with a context-annotated message when the harvest or
    the storage action fails. """
    log.info('%s.incrementalJobCall(limit=%s,offset=%s)' % (unicode(self),
                                                            str(limit),
                                                            str(offset)))
    try:
        self._storage.action(self._harvester.harvest(limit=limit,
                                                     offset=offset))
    except Exception as e:
        # Annotate the failure with the call context in a new variable
        # instead of rebinding e, then re-raise as a plain Exception so
        # existing callers that catch Exception keep working.
        msg = u'incrementalJobCall(%s, %s, %s) exception: %s' % (unicode(self),
                                                                 str(limit),
                                                                 str(offset),
                                                                 unicode(e))
        log.error(msg)
        raise Exception(msg)


class IncrementalHarvestJob(IncrementalJob):
    """ Incremental harvest against a harvester implementing the
    IQueriableHarvester interface.  Limit, offset and step are configurable,
    as are the maximum number of concurrent sets and the factory used to
    build deferreds from a function call and its arguments. """

    def __init__(self, harvester, storage, limit=None, offset=0, step=1000,
                 max_workers=None, deferredFactory=None):
        # Adapt the collaborators up front so a misconfigured harvester or
        # storage fails at construction time.
        self._harvester = IQueriableHarvester(harvester)
        self._storage   = IStorage(storage)

        if limit is None:
            # Lazy default: everything the harvester reports past the
            # offset, counted when the job actually runs.
            limit = lambda: self._harvester.count() - offset

        super(IncrementalHarvestJob, self).__init__(
            f=self,
            limit=limit,
            offset=offset,
            step=step,
            max_workers=max_workers,
            deferredFactory=deferredFactory)

    __call__ = incrementalJobCall
        

class IncrementalDailyDowntimeHarvestJob(IncrementalDailyDowntimeJob):
    """ Variant of IncrementalHarvestJob that additionally honours a daily
    downtime window for the harvester or the storage. """

    def __init__(self, harvester, storage, downtime_begin, downtime_duration,
                 limit=None, offset=0, step=1000, max_workers=None,
                 deferredFactory=None):
        """ Adapt the harvester and storage and hand the downtime window on
        to the IncrementalDailyDowntimeJob base class. """
        self._harvester = IQueriableHarvester(harvester)
        self._storage   = IStorage(storage)

        if limit is None:
            # Lazy default: remaining instances past the offset, counted at
            # run time rather than construction time.
            limit = lambda: self._harvester.count() - offset

        super(IncrementalDailyDowntimeHarvestJob, self).__init__(
            f=self,
            downtime_begin=downtime_begin,
            downtime_duration=downtime_duration,
            limit=limit,
            offset=offset,
            step=step,
            max_workers=max_workers,
            deferredFactory=deferredFactory)

    __call__ = incrementalJobCall

class ScheduledFullHarvestJob(ScheduledJob):
    """ The ScheduledFullHarvestJob runs scheduled harvests against a harvester
    that implements the IHarvester interface. It runs a set difference against
    the instances already stored for the source and removes the ones that are
    not in the current harvest. This is useful for harvester sources that do
    not provide the ability to query for changes. """
    
    def __init__(self, harvester, storage, schedule, **kwargs):
        self._harvester = IHarvester(harvester)
        self._storage   = IStorage(storage)
        # Rolling history of (finish time, harvested, deactivated) tuples.
        self._runs = []
        # Rolling history of run start times.
        self._called = []
        # True while a run is in flight; guards against overlapping runs.
        self._active = False
        super(ScheduledFullHarvestJob,self).__init__(schedule,self,**kwargs)
    
    def __call__(self,*args,**kwargs):
        """ The order of operations for scheduled full updates is:
        1. Get a list of stored instances from the harvester source
        2. Harvest all instances from the harvester
        3. Store all harvested instances
        4. Remove all instances that were in the storage but not in the 
           harvested set.

        Returns a (harvestCount, deactivateCount) tuple. """
        log.debug('%s(*%s, **%s)' % (str(self),str(args),str(kwargs)))
        
        stored = compat.frozenset(self._storage.getInstances(source=self._harvester.getSource()))
        
        harvested = compat.frozenset(self._harvester.harvest()) 
        
        harvestCount = 0
        deactivateCount = 0
        
        for instance in harvested:
            log.debug('%s storing %s' % (str(self), str(instance)))
            try:
                self._storage.action(instance)
                harvestCount += 1
            except DoesNotImplement:
                # A broken adapter is fatal for the job; bare raise keeps
                # the original traceback intact (raise e would discard it).
                self._active = False
                raise
            except Exception as e:
                # Per-instance failures are logged and skipped so one bad
                # instance does not abort the whole harvest.
                log.error(str(e))
        
        for instance in stored.difference(harvested):
            log.debug('%s deactivating %s' % (str(self), str(instance)))
            try:
                instance.deactivate()
                self._storage.action(instance)
                deactivateCount += 1
            except DoesNotImplement:
                self._active = False
                raise
            except Exception as e:
                log.error(str(e))
        
        return (harvestCount, deactivateCount,)
        
    
    def _generateDeferred(self, *args, **kwargs):
        """ Generate the deferred for one run, skipping the run entirely
        when the previous one is still active. """
        if self._active:
            log.info('%s already active' % (str(self),))
            return
        
        log.debug('%s not active, running now' % (str(self),))
        try:
            self._active = True
            self._called.append(datetime.now())
            # Bound the start-time history: trim back to the newest 32
            # entries once it grows past 256.
            if len(self._called) > 256:
                self._called = self._called[-32:]
            
            log.debug('%s generating deferred' % (str(self),))
            d = super(ScheduledFullHarvestJob, self)._generateDeferred(*args, **kwargs)
            log.debug('%s generated deferred' % (str(self),))
            d.addCallback(self.addStatistics)
            # Deactivate on both success and failure so the job can run again.
            d.addCallback(self._deactivate)
            d.addErrback(self._deactivate)
            return d
        except Exception as e:
            self._handleError(e)
            self._deactivate(e)
    
    def _deactivate(self, results):
        """ Clear the active flag, passing results/failure through so this
        can sit in a deferred callback chain. """
        if not self._active:
            log.warning('%s deactivating an inactive job' % (str(self),))
        else:
            self._active = False
        
        return results
    
    def addStatistics(self, results):
        """ Record (now, harvested, deactivated) from a finished run; the
        history is trimmed the same way as _called. """
        log.debug('%s.addStatistics()' % (str(self),))
        try:
            self._runs.append((datetime.now(),results[0], results[1],))
            if len(self._runs) > 256:
                self._runs = self._runs[-32:]
        except Exception as e:
            # Statistics are best-effort; never let bookkeeping break the
            # callback chain.
            log.error('Error adding statistics: %s' % (str(e),))
        
        return results
    
    def getStatus(self):
        """ Return a human-readable unicode summary of the job state and the
        recorded run statistics. """
        if self.getError():
            return u'Stopped: %s' % (str(self.getError()),)
        
        if len(self._called) > 0:
            last_start = self._called[-1]
        else:
            last_start = None

        if not self.isRunning():
            active = u'stopped'
        elif self._active:
            active = u'active'
            # The newest entry is the in-flight run; report the previous one.
            if len(self._called) > 1:
                last_start = self._called[-2]
        else:
            active = u'waiting'
        
        if len(self._runs):
            last_finish = self._runs[-1][0]
        else:
            last_finish = '...'
        
        return u'Currently %s. %d items harvested and %d items deactivated in the last %d runs (latest ' \
               'at %s-%s)' % (
                    active,
                    sum(map(lambda r: r[1],self._runs)),
                    sum(map(lambda r: r[2],self._runs)),
                    len(self._runs),
                    unicode(last_start),
                    unicode(last_finish),)

class ScheduledUpdateHarvestJob(ScheduledJob):
    """ The ScheduledUpdateHarvestJob runs scheduled incremental updates from 
    a harvester and stores the changes in the specified storage. The harvester
    must implement the IQueriableHarvester interface. """
    
    def __init__(self, harvester, storage, schedule, from_date=None, **kwargs):
        """ from_date defaults to the time of construction.  A None sentinel
        is used instead of datetime.now() in the signature because a default
        there is evaluated once at import time and would be shared by every
        job constructed afterwards.

        Raises ValueError when from_date is not a datetime. """
        self._harvester = IQueriableHarvester(harvester)
        self._storage   = IStorage(storage)
        
        if from_date is None:
            from_date = datetime.now()
        
        if not isinstance(from_date,datetime):
            raise ValueError('from_date must be a datetime.datetime object')
        
        # _called doubles as the "last harvested up to" watermark history.
        self._called = [from_date]
        self._runs = []
        # True while a run is in flight; guards against overlapping runs.
        self._active = False
        
        super(ScheduledUpdateHarvestJob,self).__init__(schedule,self,**kwargs)
    
    def __call__(self, from_date, *args, **kwargs):
        """ Store all instances that have changed since the last time this 
        function was called on the given harvester.  Returns the number of
        instances successfully stored. """
        log.debug('%s(%s)' % (str(self),str(from_date)))
        i = 0
        for instance in self._harvester.harvest(from_date=from_date):
            log.debug('Performing storage action on: %s' % (str(instance),))
            try:
                self._storage.action(instance)
                i += 1
            except DoesNotImplement:
                # A broken adapter is fatal for the job; bare raise keeps
                # the original traceback intact (raise e would discard it).
                self._active = False
                raise
            except Exception as e:
                # Per-instance failures are logged and skipped.
                log.error(str(e))
        return i
    
    def _generateDeferred(self, *args, **kwargs):
        """ Generate the deferred for one run, passing the previous start
        time as the incremental from_date; skips the run when the previous
        one is still active. """
        if self._active:
            log.debug('%s already active' % (str(self),))
            return
        
        log.debug('%s not active, running now' % (str(self),))
        self._active = True
        # The previous start time becomes this run's from_date watermark.
        last = self._called[-1]
        self._called.append(datetime.now())
        # Bound the history: trim back to the newest 32 once past 256.
        if len(self._called) > 256:
            self._called = self._called[-32:]
        
        log.debug('%s generating deferred' % (str(self),))
        d = super(ScheduledUpdateHarvestJob, self)._generateDeferred(from_date=last,
                                                                     *args,
                                                                     **kwargs)
        log.debug('%s generated deferred' % (str(self),))
        d.addCallback(self.addStatistics)
        # Deactivate on both success and failure so the job can run again.
        d.addCallback(self._deactivate)
        d.addErrback(self._deactivate)
        return d
    
    def _deactivate(self, results):
        """ Clear the active flag, passing results/failure through so this
        can sit in a deferred callback chain. """
        if not self._active:
            log.warning('%s deactivating an inactive job' % (str(self),))
        else:
            self._active = False
        
        return results
    
    def addStatistics(self, number):
        """ Record (now, stored-count) from a finished run; the history is
        trimmed the same way as _called. """
        log.debug('%s.addStatistics()' % (str(self),))
        try:
            self._runs.append((datetime.now(),number,))
            if len(self._runs) > 256:
                self._runs = self._runs[-32:]
        except Exception as e:
            # Statistics are best-effort; never let bookkeeping break the
            # callback chain.
            log.error('Error adding statistics: %s' % (str(e),))
        
        return number
        
    
    def getStatus(self):
        """ Return a human-readable unicode summary of the job state and the
        recorded run statistics. """
        if self.getError():
            return u'Stopped: %s' % (str(self.getError()),)
        
        last_start = self._called[-1]

        if not self.isRunning():
            active = u'stopped'
        elif self._active:
            active = u'active'
            # The newest entry is the in-flight run; report the previous one.
            if len(self._called) > 1:
                last_start = self._called[-2]
        else:
            active = u'waiting'
        
        if len(self._runs):
            last_finish = self._runs[-1][0]
        else:
            last_finish = '...'
        
        return u'Currently %s. %d items harvested in the last %d runs (latest ' \
               'at %s-%s)' % (
                    active,
                    sum(map(lambda r: r[1],self._runs)),
                    len(self._runs),
                    unicode(last_start),
                    unicode(last_finish),)
