from django.core.management.base import BaseCommand
import logging
import shutil
import tempfile
import traceback
import uuid
from optparse import make_option
from django.db import transaction
from zope.dottedname.resolve import resolve

from cultureday.management.storage import CompressedPickleStorage
from cultureday.management.storage import PickleStorage

# Transaction-control modes used by MultiItemJob: commit after every
# processed item, or commit once for the entire run.
COMMIT_EACH = 0
COMMIT_ALL = 1

def sequential_batch_ids(last=None):
    """Produce monotonically increasing numeric batch ids as strings.

    Returns '1' when no previous id exists, otherwise the successor of
    *last* (a string holding an integer).
    """
    next_id = 1 if last is None else int(last) + 1
    return str(next_id)
    
def uuid_batch_ids(last=None):
    """Produce a fresh random batch id.

    *last* is accepted for interface compatibility with
    sequential_batch_ids but is ignored.
    """
    return str(uuid.uuid4())

class LogWrapper(object):
    """Wraps a logger so every message is prefixed with the job's name."""

    def __init__(self, job_name, logger):
        self.job_name = job_name
        self.logger = logger

    def _log(self, method, message):
        # Look up the named level method on the wrapped logger and call it
        # with the job-name prefix applied.
        emit = getattr(self.logger, method)
        emit('%s: %s' % (self.job_name, message))

    def info(self, message):
        self._log('info', message)

    def error(self, message):
        self._log('error', message)

    def debug(self, message):
        self._log('debug', message)

    def trace(self, message):
        # 'trace' is not a stdlib logging level; the wrapped logger is
        # expected to provide it.
        self._log('trace', message)

    def critical(self, message):
        self._log('critical', message)
        
        
class AbortError(Exception):
    """ An AbortError can be raised by a job to terminate
        its running, regardless of the continue_on_error setting.
    """


class BaseJob(BaseCommand):
    """Common base for batch jobs: job-scoped logging plus lifecycle hooks."""

    # Name of the underlying logger that all jobs log through.
    logger_name = 'mofin.batch'

    def __init__(self):
        super(BaseJob, self).__init__()
        # A job is identified by the module that defines it.
        self.job_name = self.__class__.__module__
        raw_logger = logging.getLogger(self.logger_name)
        self.logger = LogWrapper(self.job_name, raw_logger)

    def pre_process(self, *args, **options):
        """Hook run before any work starts."""
        self.logger.info('Starting job')

    def post_process(self):
        """Hook run after all work is finished."""
        self.logger.info('Ending job')

    def process(self):
        """Concrete jobs must implement the actual work here."""
        raise NotImplementedError


class NoItemJob(BaseJob):
    """A job with no per-item work: runs the hooks and process() exactly once."""

    def handle(self, *args, **options):
        # Django management-command entry point: run each lifecycle step
        # in order.
        for step in (self.pre_process, self.process, self.post_process):
            step()

# Sentinel used to detect "no commit argument supplied" in MultiItemJob's
# constructor, since None is itself a meaningful commit value.
_marker = object()

class MultiItemJob(BaseJob):
    """A job that iterates over items() and calls process() on each one.

    Class attributes:
      commit            -- default transaction mode (COMMIT_EACH / COMMIT_ALL)
      continue_on_error -- when True, log per-item failures and keep going
      log_every         -- emit a progress line every N items
    """
    commit = COMMIT_ALL
    continue_on_error = False
    log_every = 100

    def __init__(self, commit=_marker):
        """Set up explicit transaction control.

        commit -- COMMIT_EACH to commit after every item, COMMIT_ALL to
                  commit the whole run at once, or None to leave transaction
                  control entirely to the caller. Defaults to the class-level
                  ``commit`` attribute; the _marker sentinel distinguishes
                  "not supplied" from an explicit None.
        """
        super(MultiItemJob, self).__init__()
        # If COMMIT_EACH is set, then we want to commit after each item is
        # processed, so wrap self.process in the commit decorator. Otherwise,
        # we want to commit the whole batch at once, so wrap the whole run
        # call in the commit decorator
        if commit is _marker:
            commit = self.commit

        # Set explicit transaction control.
        if commit == COMMIT_EACH:
            self.process = transaction.commit_on_success(self.process)
        elif commit == COMMIT_ALL:
            self._run = transaction.commit_on_success(self._run)
        elif commit is None:
            # Don't do anything - the caller doesn't want the
            # job to do transaction control.
            pass
        else:
            # Call form of raise works on both Python 2 and Python 3
            # (the original "raise E, msg" form is Python-2-only syntax).
            raise ValueError(u'Unknown commit value: %s' % commit)

    def items(self):
        """Return an iterable of items to process; subclasses must override."""
        raise NotImplementedError

    def process(self, item):
        """Process a single item; subclasses must override."""
        raise NotImplementedError

    def handle(self, *args, **options):
        # Django management-command entry point.
        self.pre_process(*args, **options)
        self._run(self.items())
        self.post_process()

    def _run(self, all_items):
        """Iterate *all_items*, logging progress and handling errors.

        AbortError and KeyboardInterrupt always terminate the run; any other
        exception terminates it only when continue_on_error is False.
        """
        if hasattr(all_items, '__len__'):
            total_items = len(all_items)
            self.logger.info('%s item(s) to process' % total_items)
        else:
            total_items = None
            self.logger.info('Unable to determine number of items to process')

        count = -1
        for item in all_items:
            count += 1
            if count % self.log_every == 0:
                if total_items is None:
                    self.logger.info('Processed %s items' % count)
                else:
                    pctage = (float(count) / total_items) * 100
                    self.logger.info('%.2f%%: Processed %s items' % (
                            pctage, count
                            )
                    )
            try:
                self.process(item)
            except AbortError as e:
                # "except ... as" is valid on Python 2.6+ and Python 3,
                # unlike the original "except AbortError, e" form.
                self.logger.error('Job raised an AbortError processing job item %s' % str(item))
                self.logger.error('Traceback:\n%s' % traceback.format_exc())
                self.logger.error(str(e))
                raise
            except KeyboardInterrupt:
                self.logger.error('Keyboard interrupt')
                raise
            except Exception:
                self.logger.error('Error processing job item %s' % str(item))
                self.logger.error('Traceback:\n%s' % traceback.format_exc())
                if not self.continue_on_error:
                    raise
                self.logger.error('Continuing')
        self.logger.info('Job finished, performing final commit...')
        
        
class KeyValuePair(object):
    """A (key, value) record that unpacks like a two-tuple."""

    def __init__(self, key, value):
        self.key = key
        self.value = value

    def __iter__(self):
        # Yield key first, then value, so ``k, v = pair`` works.
        return iter((self.key, self.value))
        
class MapJob(MultiItemJob):
    """A map / process / reduce style job built on MultiItemJob.

    The map phase chops the output of items() into stored batches, the
    process phase runs process() over selected batches and stores the
    results, and the reduce phase folds the stored results via reduce().
    """

    option_list = MultiItemJob.option_list + (
        make_option('-m', '--map', action='store_true', dest='map',
                    help='Run the map phase of the job'),
        make_option('-p', '--process', action='store_true', dest='process',
                    help='Run the process phase of the job'),
        make_option('-r', '--reduce', action='store_true', dest='reduce',
                    help='Run the reduce phase of the job'),
        make_option('-a', '--map-path', action='store', dest='map_path',
                    help='Filesystem path to store map results. Required if using a storage '\
                         'that persists to the filesystem, and if the --map or --process '\
                         'options are explicitly specified.'),
        make_option('-c', '--process-path', action='store', dest='process_path',
                    help='Filesystem path to store process results. Required if using a storage '\
                         'that persists to the filesystem, and if the --process or --reduce '\
                         'options are explicitly specified.'),
        make_option('-b', '--batches-to-process', action='store', dest='batches',
                    help='Batches to process. Valid specs are a,b,c,d, a..d or *'),
        make_option('-x', '--max-batches', action='store', dest='max_batches', type='int',
                    help='Maximum number of batches for the map phase to produce. Default '\
                         'is unlimited.'),
        make_option('-k', '--keep-temp-dirs', action='store_true', dest='keep_temp_dirs',
                    help='Do not remove any temporary working directories used'),
        make_option('-e', '--map-storage', action='store', dest='map_storage',
                    help='Storage to use for the map phase'),
        make_option('-g', '--process-storage', action='store', dest='process_storage',
                    help='Storage to use for the process phase')
    )

    # Storage class to use for the map phase by default. PickleStorage is (usually) faster
    # to write but slower to read than its compressed equivalent, so use that for the map
    # phase
    map_storage = PickleStorage

    # Storage class to use for the process stage. Use a compressed pickle storage by default,
    # as that'll be relatively quick for the reduce() phase to read.
    process_storage = CompressedPickleStorage

    # The number of individual results from the items() call that will
    # be placed into an individual batch for processing.
    map_batch_size = 1000

    # The path passed to the map storage for map output.
    map_path = None

    # The path where results from the process phase will be stored
    process_path = None

    # The number of results from individual process() calls that will
    # be placed into a results file. Smaller means more results files
    # and more I/O overhead, larger is more memory usage for each batch.
    process_batch_size = 100000

    # If set, then the map phase will only produce up to this many batches. Useful for
    # test runs.
    max_batches = None

    def reduce(self, results):
        """Fold the stored process-phase results; subclasses must override."""
        raise NotImplementedError

    def handle(self, *args, **options):
        """Run the requested phases (all three when none is specified).

        Temporary working directories are created for any unset paths and
        removed afterwards unless --keep-temp-dirs was given.
        """
        temp_dirs = []
        for opt in 'map_path', 'process_path':
            setattr(self, opt, options.get(opt))
            if getattr(self, opt) is None:
                setattr(self, opt, tempfile.mkdtemp())
                temp_dirs.append(getattr(self, opt))

        try:
            self.max_batches = options.get('max_batches')
            # The -b/--batches-to-process option is stored under its dest,
            # 'batches' (the previous 'batches_to_process' key was never
            # present, so the option was silently ignored).
            batch_spec = options.get('batches', '')
            batch_ids = []

            # If no specific stage was specified, do all of them
            do_map = options.get('map')
            do_process = options.get('process')
            do_reduce = options.get('reduce')
            if not (do_map or do_process or do_reduce):
                do_map = do_process = do_reduce = True

            # Process any storage overrides from the command-line
            for storage_type in 'map_storage', 'process_storage':
                if options.get(storage_type):
                    setattr(self, storage_type, resolve(options[storage_type]))

            if do_map:
                if batch_spec:
                    raise ValueError('Cannot specify batches when running a map operation.')
                batch_ids = self.map_all()

            if do_process:
                if batch_spec and not batch_ids:
                    batch_ids = self.map_storage(self.map_path).parse_batches(batch_spec)
                self.process_all(batch_ids)

            if do_reduce:
                # Combine the results
                results_storage = self.process_storage(self.process_path)
                self.reduce(results_storage.results())
                self.logger.info('Finishing reduce')
        finally:
            # Delete any temp directories
            for temp_dir in temp_dirs:
                if not options.get('keep_temp_dirs'):
                    try:
                        self.logger.info('Cleaning up %s' % temp_dir)
                        shutil.rmtree(temp_dir)
                    except Exception:
                        # Best-effort cleanup only; don't mask the real
                        # outcome (and don't swallow KeyboardInterrupt).
                        pass
                else:
                    self.logger.info('Keeping %s' % temp_dir)

    def map_all(self):
        """Map phase: write the output of items() into stored batches.

        Returns the set of batch ids produced.
        """
        self.logger.info('Starting map, batch size is %s items' % self.map_batch_size)
        self.logger.info('Using map storage %s' % getattr(self.map_storage, '__name__', 'unknown'))
        if self.max_batches:
            self.logger.info('Limited to %s batches' % self.max_batches)

        count = 0
        batch_count = 0
        batches = []
        storage = self.map_storage(self.map_path, get_next_batch_id=sequential_batch_ids)
        # Perform the map operation - taking the objects returned by the items()
        # call and chopping them up into batches for further processing.
        for item in self.items():
            if isinstance(item, KeyValuePair):
                storage.save(item.value, key=item.key)
            else:
                storage.save(item)

            count += 1
            if count % self.map_batch_size == 0:
                batch_count += 1
                self.logger.debug('Done batch %s' % (batch_count))
                batches.append(storage.rollover())
                if self.max_batches and batch_count >= self.max_batches:
                    break

        # Close the storage to obtain the last batch id.
        batches.append(storage.close())
        self.logger.info('Finishing map, %s items mapped into %s batches' % (count, batch_count))
        return set(batches)

    def process_all(self, batch_ids=None):
        """Process phase: run process() over the stored map batches.

        batch_ids -- iterable of batch ids to read, or None/empty for all
                     (interpretation is up to the map storage's results()).
        """
        self.logger.info('Starting process')
        self.logger.info('Using process storage %s' % getattr(self.process_storage, '__name__', 'unknown'))
        read_storage = self.map_storage(self.map_path)
        write_storage = self.process_storage(self.process_path, get_next_batch_id=uuid_batch_ids)

        # process specified batches
        self.pre_process()
        count = 0
        for item in read_storage.results(batch_ids):
            count += 1
            write_storage.save(self.process(item))
            if count % self.process_batch_size == 0:
                write_storage.rollover()
        write_storage.close()
        self.post_process()
        self.logger.info('Finishing process')

