'''
Implementations of how a process (aka workflow) can be executed.

@author: Philip van Oosten
'''

import threading
from Queue import Queue, Empty
import multiprocessing
import wolfit
import logging

error = wolfit.WolfitRunnerException

class SequentialRunner(wolfit.BaseRunner):
    '''
    Single-threaded simple implementation.

    Runs a process in the calling thread: start() drains a FIFO list of
    (process, task_id) pairs one at a time, appending any follow-up
    tasks back onto the same list.
    '''
    def __init__(self, *args, **kwargs):
        super(SequentialRunner, self).__init__(*args, **kwargs)
        # No concurrency here, so a plain flag and a plain list suffice.
        self.busy = False
        self.queue = []

    def start(self):
        '''Execute queued tasks until the queue drains or stop() is called.'''
        self.busy = True
        while self.busy and self.queue:
            process, task_id = self.queue.pop(0)
            for follow_up in self._perform_task(process, task_id):
                self.submit(process, follow_up)

    def stopBecause(self, reason):
        '''Abort the run by propagating *reason* to the caller of start().'''
        raise reason

    def stop(self):
        '''Make start() return after the task currently executing.'''
        self.busy = False

    def submit(self, process, task):
        '''Append a (process, task) pair to the work queue.'''
        self.queue.append((process, task))

    def join(self):
        '''No-op: start() only returns once all work is done.'''
        pass


#TODO: class AOPLayer(wolfit.BaseRunner):
#    '''
#    Instruments a process with debugging and profiling information, further
#    delegates to an actual runner.
#    '''

class ThreadPoolRunner(wolfit.BaseRunner):
    '''
    Runs all tasks concurrently if possible, on one computer.

    A fixed pool of daemon worker threads consumes (process, task_id)
    pairs from an internal queue; exceptions raised by tasks are
    collected in an error queue and re-raised from join().

    The thread Pool implementation is inspired by
    http://code.activestate.com/recipes/302746-simplest-useful-i-hope-thread-
    pool-example/
    
    TODO: reuse the same thread pool for multiple processes and runners
    '''

    # Seconds a worker blocks on the task queue before re-checking
    # whether the pool has been stopped.
    STOP_TIMEOUT = .2

    def __init__(self, number_of_worker_threads = 0, error_handler = None):
        '''
        :param number_of_worker_threads: pool size; 0 (the default)
            means cpu_count() + 1 threads.
        :param error_handler: one-argument callable invoked with each
            exception a task raises; defaults to self.stopBecause,
            which stops the pool and re-raises.
        '''
        super(ThreadPoolRunner, self).__init__()
        assert number_of_worker_threads >= 0
        self.number_of_worker_threads = number_of_worker_threads or multiprocessing.cpu_count() + 1
        self._incoming_tasks = Queue()
        self._error_queue = Queue() # Unlimited size!!!
        self.state = 'init'
        self._stop_lock = threading.Lock()          # guards self.state
        self._del_workers_lock = threading.Lock()   # guards deletion of self.worker_threads
        self._error_handler = error_handler or self.stopBecause

    def _create_worker_threads(self):
        '''Build (but do not start) the pool's daemon worker threads.'''
        worker_threads = []
        for i in range(self.number_of_worker_threads):
            worker_thread = threading.Thread(target = self._do_work, args = (self._error_handler,), name = 'worker-thread-{0}'.format(i + 1))
            # Daemon threads do not keep the interpreter alive if the
            # pool is never stopped explicitly.
            worker_thread.daemon = True
            worker_threads.append(worker_thread)
        return frozenset(worker_threads)


    def start(self):
        '''Create and start the worker threads; the pool accepts tasks.'''
        self.worker_threads = self._create_worker_threads()
        for thread in self.worker_threads:
            thread.start()
        self.state = 'running'

    def stopBecause(self, reason):
        '''
        Default error handler: stop the pool and re-raise *reason*.

        stop() joins the worker threads, so it must run on a separate
        thread -- a worker calling it directly would deadlock trying to
        join itself.
        '''
        stop_thread = threading.Thread(target = self.stop, name = 'Stop-thread')
        stop_thread.start()
        raise reason

    def stop(self):
        '''
        Makes sure no new tasks can be submitted, then waits for pending
        work and joins the worker threads.  Idempotent and thread-safe.
        '''
        with self._stop_lock:
            # Read-and-set atomically so exactly one caller performs
            # the shutdown work below (the old code checked the state
            # outside the lock, allowing two threads to run it).
            already_stopped = self.state == 'stopped'
            self.state = 'stopped'
        if not already_stopped:
            self._incoming_tasks.join()
            self._delete_worker_threads()

    def _delete_worker_threads(self):
        '''
        A double-checked lock to join and delete the worker threads.
        '''
        if hasattr(self, 'worker_threads'):
            with self._del_workers_lock:
                # Re-check: another thread may have deleted them while
                # we waited for the lock.
                if hasattr(self, 'worker_threads'):
                    for worker_thread in self.worker_threads:
                        logging.info('Joining {0}'.format(worker_thread.name))
                        worker_thread.join()
                    del self.worker_threads

    def is_stopped(self):
        '''Thread-safe test of whether stop() has been initiated.'''
        with self._stop_lock:
            return self.state == 'stopped'

    def _do_work(self, error_handler):
        '''
        Worker-thread main loop: pull tasks until the pool is stopped,
        then help drain whatever is left in the queue.
        '''
        while not self.is_stopped():
            try:
                process, active_task = self._incoming_tasks.get(timeout = self.STOP_TIMEOUT)
                if self.is_stopped():
                    # The pool stopped while we were blocked on get().
                    self._submitted_after_stop(process, active_task)
                    self._incoming_tasks.task_done()
                else:
                    self.__run_task(error_handler, process, active_task)
            except Empty:
                # Timeout expired; loop around and re-check is_stopped().
                pass
        self._empty_incoming_tasks_queue()

    def __run_task(self, error_handler, process, task_id):
        '''
        Perform one task and, depth-first, one chain of the tasks it
        activates; the remaining activated tasks are forked to the
        queue.  Any exception is recorded and passed to error_handler.
        '''
        try:
            newly_activated_tasks = self._perform_task(process, task_id)
            logging.info('Newly activated tasks: {0}'.format(newly_activated_tasks))
            while newly_activated_tasks:
                # Fork all tasks, except one to the queue
                for new_task_id in newly_activated_tasks[1:]:
                    self.submit(process, new_task_id)
                # Directly execute one new new_task_id, without adding it to the
                # queue.
                task_id = newly_activated_tasks[0]
                newly_activated_tasks = self._perform_task(process, task_id)
        except BaseException as e:
            self._error_queue.put((process, task_id, e))
            error_handler(e)
        finally:
            # Only after all the immediately executed tasks are performed,
            # task_done can be called on the incoming tasks queue, otherwise
            # the queue would be joined too early.
            self._incoming_tasks.task_done()

    def _empty_incoming_tasks_queue(self):
        '''Drain tasks left after stop(), recording an error for each.'''
        try:
            while True:
                process, task_id = self._incoming_tasks.get_nowait()
                self._submitted_after_stop(process, task_id)
                self._incoming_tasks.task_done()
        except Empty:
            pass

    def _submitted_after_stop(self, process, task_id):
        '''Record an error for a task seen after the pool was stopped.'''
        task = process.tasks[task_id]
        logging.error('tried executing task "{0}" after pool stopped.'.format(task.name))
        # Fixed: put one (process, task_id, error) tuple, matching what
        # _raise_errors unpacks.  The old code passed the exception as
        # Queue.put()'s 'block' argument and stored a bare task object,
        # which made _raise_errors fail with ValueError on unpacking.
        self._error_queue.put((process, task_id, error('Pool already stopped.')))

    def submit(self, process, task_id):
        '''Enqueue a task; raises if the pool is already stopped.'''
        if not self.is_stopped():
            self._incoming_tasks.put((process, task_id))
        else:
            raise error('Queue already stopped')

    def join(self):
        '''Block until all submitted work is done, then re-raise errors.'''
        self._incoming_tasks.join()
        self._raise_errors()

    def _raise_errors(self):
        '''
        Logs errors stored in the error queue and re-raises the first
        one encountered (raising aborts the drain loop, so later errors
        stay queued).
        
        TODO: generate more informative output, making use of the traceback module in the Python standard library.
        '''
        empty = False
        while not empty:
            try:
                process, task, caught_error = self._error_queue.get_nowait()
                self._error_queue.task_done()
                logging.error('An error occurred while performing task "{0}"'.format(process.tasks[task]))
                raise caught_error
            except Empty:
                empty = True

# TODO: class GridRunner(wolfit.BaseRunner):
#     '''
#     Runs all tasks on a grid.
#
#     TODO: find a decent grid implementation.
#     '''
