'''
Why "Wolfit"?

* "Wolf" is the inverse of "flow" and we are dealing with workflows here.
* Wolves travel and hunt together and computers can work together to execute a workflow.
* "Wolfit" contains W..F, which stands for WorkFlow
* "Wolfit" contains L..T, which refers to LT3, Language and Translation Technology Team, where Wolfit was developed.

Wolfit is strongly inspired by Longbow, an experimental WFE written in Java,
also written by Philip van Oosten.

We attempt to use the terminology used by the Workflow Design Patterns.

A process is created by a Chain of Responsibility:
* function or callable object that has only keyword-arguments and returns
either None or a dict-like object with named attributes.

Created on Aug 2, 2010

@author: Philip van Oosten
@license: GNU GPLv2
'''
import networkx as nx
import itertools

import logging
import threading
import textwrap

# Key under which an edge stores its kind, and the two possible kinds.
EDGE_TYPE = 'type'
EDGE_TYPE_DATA = 'data'          # edge transports named values between tasks
EDGE_TYPE_PRIORITY = 'priority'  # edge only imposes an execution order

# Attribute dict of a priority edge; 'style' is for graph rendering.
# Consistency fix: reuse EDGE_TYPE_PRIORITY instead of duplicating the literal.
PRIORITY_EDGE = {EDGE_TYPE: EDGE_TYPE_PRIORITY, 'style': 'dashed'}

# Keys of a data edge's attribute dict.
DATA_EDGE_FROM = 'from'                  # output names on the source task
DATA_EDGE_TO = 'to'                      # input names on the target task
DATA_EDGE_DATA = 'data'                  # values transported over the edge
DATA_EDGE_IS_FEEDBACK = 'is_feedback'    # True for edges leaving a feedback task
DATA_EDGE_FEEDBACK_DATA = 'fb_available' # True once feedback data was written
# Template for a data edge that carries no connections yet.
DATA_EDGE_EMPTY = {EDGE_TYPE:EDGE_TYPE_DATA, DATA_EDGE_FROM:(), DATA_EDGE_TO:(), DATA_EDGE_DATA:()}

class WolfitException(Exception):
    '''
    Root of the Wolfit exception hierarchy.

    Derives from Exception rather than BaseException: BaseException is
    reserved for interpreter exits (KeyboardInterrupt, SystemExit), and
    subclassing it would make Wolfit errors slip past generic
    "except Exception" handlers.
    '''

class WolfitRunnerException(WolfitException):
    'Raised by runners (see BaseRunner) when executing a process fails.'

class WolfitTaskException(WolfitException):
    'Raised by tasks, e.g. when a task implementation is incomplete.'

class WolfitProcessException(WolfitException):
    'Raised when a process is misconfigured or misused.'

class BaseRunner(object):
    '''
    Subclasses implement how a Wolfit workflow (a process) is executed.

    The runner keeps, per task id, the number of not-yet-satisfied incoming
    edges (the "indegree") together with a lock guarding it; a task becomes
    active (runnable) as soon as its recorded indegree reaches 0.
    Subclasses provide the actual scheduling by implementing start, stop,
    stopBecause, submit and join.

    NOTE(review): this class targets Python 2 (print statements, iteritems,
    izip, xrange) and the networkx 1.x API (in_edges_iter, successors_iter,
    out_edges_iter, attribute-dict add_edge) -- confirm the runtime
    environment before modernizing.
    '''

    def __call__(self, process, **input):
        '''
        Executes the given process with the keyword arguments as process
        input and returns the dict produced by the process output task.
        '''
        self.indegrees = []
        self.indegree_locks = []
        process.inputs_task.pass_input(dict(**input))
        tasks_to_submit = []
        n = process._network
        for task_id, task in enumerate(process.tasks): #@UnusedVariable
            indegree = n.in_degree(task_id, )
            self.indegrees.append(indegree)
            self.indegree_locks.append(threading.Lock())
            # A task without predecessors can run right away.
            if indegree == 0: tasks_to_submit.append((process, task_id))
        # In all feedback zones, decrease the indegree of all successors of the
        # corresponding feedback task with 1
        # (the feedback edge only carries data on a re-run of the loop, so it
        # must not block the first iteration).
        for feedback_task, feedback_zone in process.feedback_zones.iteritems():
            for task in n.successors(feedback_task):
                if task in feedback_zone:
                    self.indegrees[task] -= 1
        # NOTE(review): submit is invoked with (process, task_id) here, while
        # the abstract submit below declares a single parameter -- confirm the
        # signature subclasses are expected to implement.
        for process, task_id in tasks_to_submit:
            self.submit(process, task_id)
        self.start()
        self.join() # TODO: try not to wait in the runner itself, because that may be the reason for deadlock in inner processes
        output = process.outputs_task.fetch_output()
        return output

    def _perform_task(self, process, task_id):
        '''
        Runs a single task: gathers its input from incoming data edges,
        performs it, distributes the output over outgoing edges and
        returns the list of task ids that became active as a result.
        '''
        task = process.tasks[task_id]
        input = self.__gather_input_data(process, task_id)
        output = task._perform(input)
        activated_tasks = self.__finish_task(process, task_id, output)
        return activated_tasks

    def __gather_input_data(self, process, task_id, input=None):
        # Take data from feedback edges if any is available
        # Take all data from input edges if no value is yet given by feedback edges
        if not input:
            input = {}
            feedback_inputs = set()
            for edge in process._network.in_edges_iter(task_id, data=True):
                data_dict = edge[2]
                if data_dict[EDGE_TYPE] == EDGE_TYPE_DATA:
                    for input_name, value in itertools.izip(data_dict[DATA_EDGE_TO], 
                                      data_dict[DATA_EDGE_DATA]):
                        # A regular edge only supplies a value as long as no
                        # feedback edge already claimed the same input name.
                        if (not data_dict[DATA_EDGE_IS_FEEDBACK] 
                            and input_name not in feedback_inputs):
                            input[input_name] = value
                        # A feedback edge supplies a value only once feedback
                        # data is actually available; it then overrides any
                        # value a regular edge may have set.
                        elif (data_dict[DATA_EDGE_IS_FEEDBACK]
                              and data_dict[DATA_EDGE_FEEDBACK_DATA]):
                            feedback_inputs.add(input_name)
                            input[input_name] = value
        return input or {}

    def __finish_task(self, process, task_id, output):
        '''
        Called after a task was performed: decides which successor tasks
        become active (feedback handling included), writes the output onto
        the outgoing data edges, and returns the newly active task ids.
        '''
        rerun_feedback_loop = False
        # TODO: now, only one feedback loop can be present
        # TODO: nested feedback loops will (possibly) cause all hell to break loose.
        # If task_id is a feedback task, only make tasks active if
        # the feedback condition is fulfilled. On top of that, reset
        # the indegrees in the corresponding feedback zone.
        if task_id in process.feedback_zones:
            condition = process.tasks[task_id].condition
            proceed = output[condition] # input is redirected to output
            if proceed:
                # if must run feedback loop again:
                # prepare output so that input is taken from feedback, not default input
                # reset indegrees for tasks in feedback loop
                # make successors of feedback task active
                rerun_feedback_loop = True
                activated_tasks = self.__do_feedback_loop_again(process, task_id)
            else:
                # must not run feedback loop again. all tasks in the feedback
                # zone that have successors outside of it have not yet made those
                # successors active. That must be done now. First, decrease the
                # recorded indegree of those tasks, then
                activated_tasks = self.__done_with_feedback_loop(process, task_id)
        elif task_id in process.task_feedback_zones:
            # The finished task is in a feedback zone, but it is not a feedback
            # task itself.
            
            # The tasks that must be made active (and indegree decreased) are
            # only the tasks in the same feedback zone as this task. Executing
            # tasks outside the feedback zone would cause work that could have
            # to be redone.
#            self.__distribute_output_ ...
            activated_tasks = self.__continue_running_tasks_in_feedback_zone(process, task_id)
        else:
            # The task is not in a feedback zone. Decrease the recorded indegree
            # of all successors and make those with indegree 0 active.
            # This would always be the behaviour if there were no feedback loops.
            activated_tasks = self.__continue_outside_feedback_zone(process, task_id)
        self.__distribute_output_values(process, task_id, output, rerun_feedback_loop)
        return activated_tasks

    def __do_feedback_loop_again(self, process, feedback_task_id):
        '''
        All tasks in the feedback zone are performed already at least once.
        # reset indegrees for tasks in feedback loop
        # make successors of feedback task active
        '''
        print 'do feedback loop again'
        assert isinstance(process.tasks[feedback_task_id], _Feedback)
        n = process._network
        zone = process.feedback_zones[feedback_task_id]
        tasks_made_active = []
        for task_in_zone in zone:
            # TODO: encapsulate indegree handling, locking, etc.
            self.indegree_locks[task_in_zone].acquire()
            # NOTE(review): this assignment is dead -- it is overwritten by the
            # recount below -- and in_degree's second positional argument looks
            # suspect for the networkx API. Confirm and remove.
            self.indegrees[task_in_zone] = n.in_degree(task_in_zone, zone)
            indegree = 0
            # Recount only edges coming from inside the zone, excluding the
            # feedback task itself (its edge is already satisfied).
            for former, latter in n.in_edges_iter(task_in_zone):
                if former in zone and former != feedback_task_id:
                    indegree += 1
            self.indegrees[task_in_zone] = indegree
            if indegree == 0: tasks_made_active.append(task_in_zone)
            self.indegree_locks[task_in_zone].release()
        return tasks_made_active
    
    def __done_with_feedback_loop(self, process, feedback_task_id):
        '''
        The feedback condition is no longer fulfilled: activate, per task in
        the zone, its successors that lie outside of the zone.
        '''
        print 'done with feedback loop'
        # Indegree of all tasks in feedback zone will be 0
        # Find all tasks in zone with successors out of the zone. Finish those tasks.
        zone = process.feedback_zones[feedback_task_id]
        assert feedback_task_id in zone
        tasks_made_active = []
        for task_id in zone:
            self.indegree_locks[task_id].acquire()
            self.indegrees[task_id] = 0
            self.indegree_locks[task_id].release()
            # new_task is an (in-zone, successor) edge tuple; keep only
            # successors outside the zone.
            tasks_made_active.extend((new_task[1]
                                for new_task in process._network.out_edges_iter(task_id)
                                if new_task[1] not in zone))
        return tasks_made_active
    
    def __continue_running_tasks_in_feedback_zone(self, process, task_id):
        '''
        A non-feedback task inside a feedback zone finished: only its
        successors within the same zone(s) may become active.
        '''
        print 'continue running tasks in feedback zone'
        tasks_made_active = []
        for feedback_task in process.task_feedback_zones[task_id]:
            zone = process.feedback_zones[feedback_task]
            successors = [successor
                for successor in process._network.successors_iter(task_id)
                if successor in zone]
            # NOTE(review): the "in zone" test below is redundant -- the list
            # comprehension above already filtered on it.
            for successor in successors:
                if successor in zone:
                    indegree = self.__decrement_indegree(process, successor)
                    if indegree == 0:
                        tasks_made_active.append(successor)
        return tasks_made_active
    
    def __decrement_indegree(self, process, task_id):
        '''
        Thread-safely decrements the recorded indegree of the given task
        and returns the new value (never negative).
        '''
        self.indegree_locks[task_id].acquire()
        try:
            self.indegrees[task_id] -= 1
            assert self.indegrees[task_id] >= 0, 'Indegree for {0}: {1}'.format(process.tasks[task_id].name, self.indegrees[task_id])
            return self.indegrees[task_id]
        finally:
            self.indegree_locks[task_id].release()

    def __continue_outside_feedback_zone(self, process, task_id):
        '''
        Normal (non-feedback) completion: decrement every successor's
        indegree and return those that reached 0.
        '''
        print 'continue outside feedback zone'
        tasks_made_active = []
        for former, latter in process._network.out_edges_iter(task_id): #@UnusedVariable
            if self.__decrement_indegree(process, latter) == 0:
                tasks_made_active.append(latter)
        return tasks_made_active

    def __distribute_output_values(self, process, task_id, output, feedback = False):
        '''
        Copies the task's output values onto its outgoing data edges.
        Raises WolfitRunnerException when a connected output name is missing
        from the produced output.
        '''
        for former, latter, data_dict in process._network.out_edges_iter(task_id, data=True): #@UnusedVariable
            assert former == task_id
            if data_dict[EDGE_TYPE] == EDGE_TYPE_DATA:
                output_names, data = data_dict[DATA_EDGE_FROM], data_dict[DATA_EDGE_DATA]
                for i in xrange(len(output_names)):
                    output_name = output_names[i]
                    if not output_name in output:
                        task_name = process.tasks[task_id].name
                        exception_summary = '''
                        Was distributing output data for task "{1}"
                        > Problem: no value for output "{0}" found
                        > Therefore, can't assign value to {1}->{0}
                        '''.format(output_name, task_name)
                        raise WolfitRunnerException(textwrap.dedent(exception_summary))
                    # The edge's data list is mutated in place.
                    data[i] = output[output_name]
                if feedback:
                    # Mark the data as feedback data so __gather_input_data
                    # prefers it over the regular input values.
                    data_dict[DATA_EDGE_FEEDBACK_DATA] = True
                # Re-adds the edge with the updated attribute dict
                # (networkx 1.x positional attribute-dict API -- presumably
                # redundant since data_dict is mutated in place; confirm).
                process._network.add_edge(former, latter, data_dict)

    def __not_implemented(self):
        # Shared failure path for the abstract methods below.
        raise WolfitRunnerException('Not all methods are implemented in wolfit.runners.BaseRunner.')
    
    def start(self):
        '''
        Start running tasks.
        '''
        self.__not_implemented()
        
    def stopBecause(self):
        '''
        Stops because an exception occurred.
        '''
        self.__not_implemented()
        
    def stop(self):
        '''
        Disables this runner. Tasks that can not be stopped will still run till
        finished. Not all implementation guarantee that this actually has an
        effect.
        '''
        self.__not_implemented()
    
    def submit(self, task):
        '''
        Sends a task to the runner for execution.

        NOTE(review): __call__ above invokes this as submit(process, task_id)
        -- confirm the intended signature before implementing a subclass.
        '''
        self.__not_implemented()
        
    def join(self):
        '''
        Waits until all tasks have finished executing.
        '''
        self.__not_implemented()

class _TaskPort(object):
    'base class for task and process input and output'
    def __init__(self, owner):
        self.owner = owner
        self.names = set()
    def __del__(self):
        for name in self.names:
            del self[name]
        del self.owner
    def __contains__(self, name):
        return name in self.names
    def __delitem__(self, name):
        # TODO: All connections with the given input/output must be deleted.
        try:
            self.names.remove(name)
        except KeyError:
            raise

class _TaskOutput(_TaskPort):
    '''
    Output side of a task.

    Indexing the port with an output name registers that name and yields
    an (owner, name) pair that can serve as the source of a connection.
    '''
    def __getitem__(self, output_name):
        # Remember the name so the port knows this output exists.
        self.names.add(output_name)
        return self.owner, output_name

class _TaskInput(_TaskPort):
    '''
    Input side of a task.

    Assigning an (owner, output_name) pair -- as produced by _TaskOutput --
    to an input name wires a data connection in the owning process.
    '''
    def __setitem__(self, name, output):
        self.names.add(name)
        source_task, source_output = output
        # Connections may only be made between tasks of the same process.
        assert source_task._process == self.owner._process, \
            '{0} != {1}'.format(repr(source_task._process), repr(self.owner._process))
        self.owner._process._connect_through_data(
            source_task, source_output, name, self.owner)
class _TaskThroughput(_TaskInput, _TaskOutput):
    '''
    A port that acts as input and output at the same time (used by
    _Feedback, where data flows straight through the task).
    '''

class BaseTask(object):
    '''
    An abstract task.

    A task belongs to a process and is executed by a runner via _perform.
    Task factories can be registered in BaseTask.registry so that
    for_implementation can wrap arbitrary implementations in a task type.
    '''
    # Registered factories: callables taking (process, implementation) and
    # returning a task instance or None. Shared by all subclasses.
    registry = []

    # NOTE(review): including '__dict__' in __slots__ re-enables dynamic
    # attributes (e.g. the output/input ports assigned below land in the
    # instance __dict__), which forfeits the memory benefit of __slots__ --
    # confirm whether this is intentional.
    __slots__ = ('_process', '_input_mapping', '__dict__', '_implementation', 'name')
    
    def __init__(self, process, name=None):
        self._process = process
        # Fall back to a unique default name derived from object identity.
        self.name = name or object.__repr__(self)
        self.output = _TaskOutput(self)
        self.input = _TaskInput(self)

    def after(self, other_task):
        '''
        Indicate that this task must be performed after other_task.
        '''
        self._process._set_task_order(other_task, self)

    def before(self, other_task):
        '''
        Indicate that this task must be performed before other_task.
        '''
        other_task.after(self)

    def _perform(self, input):
        # Subclasses override this with the actual work of the task.
        raise WolfitTaskException('_perform not implemented')
    
    def runnable_copy(self):
        '''
        Returns self if it is thread-safe, else a deep or shallow copy, whatever
        is needed to be able to run multiple processes with the same task
        concurrently.
        '''
        return self
    
    @classmethod
    def for_implementation(cls, process, implementation):
        '''
        Searches the registry for a task type that can wrap the given
        implementation; returns the first successfully created task, or
        None if no registered factory accepts it.
        '''
        task = None
        for task_registration in BaseTask.registry:
            task = task_registration(process, implementation)
            if task is not None:
                break
        return task

class _Constants(BaseTask):
    '''
    Built-in task that holds the constant values of a process.

    Performing it ignores its input and simply emits the stored
    constants as output.
    '''
    def __init__(self, process):
        super(_Constants, self).__init__(process)
        self.name = 'Constants'
        # Mapping of constant name to value; filled via Process.add_constants.
        self.data = {}

    def _perform(self, input):
        return self.data

class _ProcessInput(BaseTask):
    '''
    Built-in task that injects the process input into the workflow.

    The runner stores the caller-supplied input via pass_input; performing
    the task then emits that stored dict as its output.
    '''
    __slots__ = ('data',)

    def __init__(self, *args, **kwargs):
        super(_ProcessInput, self).__init__(*args, **kwargs)
        self.name = 'Process Input'
        # An input task only produces data, so its input port is removed.
        del self.input

    def runnable_copy(self):
        fresh = _ProcessInput(None)
        fresh.data = {}
        return fresh

    def pass_input(self, input):
        # Remember the process input until the task is performed.
        self.data = input

    def _perform(self, input):
        return self.data
class _ProcessOutput(BaseTask):
    '''
    Built-in task that captures the final output of a process.

    After the process ran, fetch_output() returns the input dict that was
    passed to this task when it was performed.
    '''
    __slots__ = ('data', 'performed',)
    def __init__(self, *args, **kwargs):
        super(_ProcessOutput, self).__init__(*args, **kwargs)
        self.name = 'Process Output'
        # An output task only consumes data, so its output port is removed.
        del self.output
    def runnable_copy(self):
        wf_output = _ProcessOutput(None)
        wf_output.data = {}
        # Bug fix: reset the flag on the COPY, not on the template instance.
        # The original did `self.performed = False`, leaving the copy without
        # the attribute at all, so fetch_output raised AttributeError instead
        # of the intended assertion error.
        wf_output.performed = False
        return wf_output
    def fetch_output(self):
        '''
        Returns the captured process output; asserts the task actually ran.
        '''
        assert self.performed, 'The process output task was not performed.'
        return self.data if hasattr(self, 'data') else {}
    def _perform(self, input):
        self.performed = True
        self.data = input
        return input

class _Feedback(BaseTask):
    '''
    Task marking a feedback loop in a process.

    `condition` names the output that decides whether the loop must run
    again; the task itself passes its input straight through via a
    combined input/output port.
    '''
    def __init__(self, process, condition = None, **kwargs):
        super(_Feedback, self).__init__(process, **kwargs)
        assert isinstance(condition, basestring)
        self.condition = condition
        # Bug fix: the original used 'Feedback'.format(self.condition), a
        # format call with no placeholder that silently dropped the
        # condition from the name.
        self.name = 'Feedback({0})'.format(self.condition)
        # Data flows straight through: one port serves as input and output.
        self.input = self.output = _TaskThroughput(self)
    def _perform(self, input):
        # Pass-through: the input is redirected to the output unchanged.
        return input

class Process(object):
    '''
    A process consists of a number of tasks that can be executed in a certain order.

    Tasks are stored in self.tasks; a task's list index is its node id in
    the underlying networkx DiGraph (self._network). Edges are either
    priority edges (pure ordering) or data edges (named value transport).

    NOTE(review): this class uses the Python 2 and networkx 1.x APIs
    (iteritems, positional attribute dicts in add_edge, get_edge_data with
    a default) -- confirm the runtime environment before modernizing.
    '''

    __slots__ = ('_network', 'tasks', 'task_ids', 'inputs', 'outputs',
                 'constants','inputs_task', 'outputs_task', 'constants_task',
                 'runner', 'runner_init_kwargs', 'feedback_zones',
                 'task_feedback_zones')

    __name__ = 'wolfit_process'

    def __init__(self, runner=None):
        self._network = nx.DiGraph()
        self.tasks = []
        self.task_ids = {}
        # Every process carries three built-in tasks: input, output, constants.
        self.inputs_task = self.add_task(_ProcessInput(self))
        self.outputs_task = self.add_task(_ProcessOutput(self))
        self.constants_task = self.add_task(_Constants(self))
        # Their task ids (0, 1, 2), matching the add order above.
        self.inputs, self.outputs, self.constants = range(3)
        self.runner = runner

    def __call__(self, **input):
        '''
        Directly executes the process, without first compiling it.
        
        TODO: It must be possible for the same process to be run concurrently, with different input. Delegate calling and data to a separate class?
        '''
        if self.runner is None:
            raise WolfitProcessException('''
            You must set a runner before executing this process:
            >>> process.runner = runner
            You can find implementations in the wolfit.runners package.
            ''')
        elif not isinstance(self.runner, BaseRunner):
            raise WolfitProcessException('''
            The runner passed to a process must be a runner instance;
            its type must be a subtype of wolfit.runners.BaseRunner.
            ''')
        # NOTE(review): no runner is created here (it was set beforehand),
        # so this log message is misleading.
        logging.info('Created runner.')
        # The runner receives a thread-safe copy, never this instance itself.
        return self.runner(self.runnable_copy(), **input)
    
    def __contains__(self, item):
        return item in self.tasks

    def add_task(self, implementation):
        '''
        Adds a task to the process and returns it. `implementation` is
        either a BaseTask instance, used as-is, or any object for which a
        registered task type exists (see BaseTask.for_implementation).
        Raises WolfitProcessException for unsupported implementations.
        '''
        task = None
        if isinstance(implementation, BaseTask):
            task = implementation
        else:
            task = BaseTask.for_implementation(self, implementation)
        if task is None:
            raise WolfitProcessException('Type {0} is not supported as a owner implementation.'.format(type(implementation)))
        self.__add_task_to_process(task)
        return task

    def add_constants(self, **kwargs):
        # Merges the keyword arguments into the process constants.
        self.constants_task.data.update(kwargs)
    
    def add_feedback(self, condition = False):
        '''
        Adds a feedback task whose `condition` output decides whether the
        loop runs again.

        NOTE(review): the default condition=False trips the
        isinstance(condition, basestring) assertion in _Feedback.__init__;
        callers must always pass a string.
        '''
        feedback_task = _Feedback(self, condition = condition)
        self.add_task(feedback_task)
        return feedback_task

    def __add_task_to_process(self, task):
        # The task id is the index in self.tasks; the graph node carries the
        # task name as its label for rendering.
        task_id = len(self.tasks)
        self._network.add_node(task_id, label=task.name)
        self.tasks.append(task)
        self.task_ids[task] = task_id

    def add_sequence(self, *task_implementations):
        '''
        Implementation of the Sequence Pattern
        @see: http://www.workflowpatterns.com/patterns/control/basic/wcp1.php
        '''
        previous_task = None
        for implementation in task_implementations:
            current_task = self.add_task(implementation)
            if previous_task:
                previous_task.before(current_task)
            previous_task = current_task

    def _set_task_order(self, former_task, latter_task):
        # Adds a priority edge; arguments may be task objects or task ids.
        if not isinstance(former_task, int):
            former_task = self.task_ids[former_task]
        if not isinstance(latter_task, int):
            latter_task = self.task_ids[latter_task]
        self._network.add_edge(former_task, latter_task, PRIORITY_EDGE)

    def _connect_through_data(self, former_task, output_name, input_name, latter_task):
        '''        
        Implementation choice:
        (related to networkx)
        
        To allow multiple data connections between two tasks, there are two
        possibilities. The first is to let self._network be a MultiDiGraph. The
        problem there may be selecting the right connection from all
        connections between two tasks. An alternative is to keep a normal
        DiGraph, containing multiple data connections within the same edge,
        which is now the chosen implementation. Both implementations are
        equivalent, the one a little more low-level than the other.
        '''
        assert isinstance(former_task, BaseTask), repr(former_task)
        assert isinstance(latter_task, BaseTask), repr(latter_task)
        former_task_id = self.task_ids[former_task]
        latter_task_id = self.task_ids[latter_task]
        # Extend an existing data edge, or start from the empty template.
        connection = self._network.get_edge_data(former_task_id, latter_task_id, DATA_EDGE_EMPTY)
        # Connections leaving a feedback task are feedback connections.
        feedback = isinstance(former_task, _Feedback)
        new_edge_data = self.__data_edge_atts(connection, output_name, input_name, feedback)
        self._network.add_edge(former_task_id, latter_task_id, new_edge_data)

    def __data_edge_atts(self, connection, output_name, input_name, feedback):
        # Builds a fresh attribute dict for a data edge with the new
        # output->input pair appended; the data slots are reset to None.
        output_names, input_names = list(connection[DATA_EDGE_FROM]), list(connection[DATA_EDGE_TO])
        output_names.append(output_name)
        input_names.append(input_name)
        assert len(input_names) == len(output_names), 'Input names: {0}; output names: {1}'.format(input_names, output_names)
        return {EDGE_TYPE:EDGE_TYPE_DATA,
                DATA_EDGE_FROM:output_names,
                DATA_EDGE_TO:input_names,
                DATA_EDGE_DATA: [None] * len(input_names),
                DATA_EDGE_IS_FEEDBACK: feedback,
                DATA_EDGE_FEEDBACK_DATA: False,
                }

    def add_process_input(self, name):
        # Returns an (owner, name) pair usable as a connection source.
        return self.inputs_task.output[name]

    def add_process_output(self, process_output_name, output):
        # Wires the given task output to a named process output.
        self.outputs_task.input[process_output_name] = output
        
    def feedback_tasks(self):
        # Returns the task ids of all feedback tasks in the network.
        return [task_id for task_id in self._network
                if isinstance(self.tasks[task_id], _Feedback)]

    def runnable_copy(self):
        '''
        Copies everything that needs to be copied in order to run the process thread-safely.
        '''
        # Process() created three built-in tasks; discard them -- the copy
        # re-establishes all task ids by copying self.tasks in order.
        process_copy = Process()
        process_copy.tasks = []
        # the id of the tasks must be preserved
        for task in self.tasks:
            if not task:
                process_copy.tasks.append(None)
            else:
                task_copy = task.runnable_copy()
                # NOTE(review): every other reference uses the `_process`
                # attribute; this assignment to `process` creates a new
                # attribute instead, so task copies presumably keep pointing
                # at the original process -- confirm and fix to `_process`.
                task_copy.process = process_copy
                process_copy.tasks.append(task_copy)
        process_copy._network = self._network.copy()
        process_copy.constants = self.constants
        process_copy.inputs = self.inputs
        process_copy.outputs = self.outputs
        process_copy.inputs_task = process_copy.tasks[process_copy.inputs]
        process_copy.outputs_task = process_copy.tasks[process_copy.outputs]
        process_copy.constants_task = process_copy.tasks[process_copy.constants]
        process_copy._mark_feedback_loops()
        return process_copy

    def _mark_feedback_loops(self):
        '''
        Prepares a copy of a process with feedback loops to be run.

        Fills feedback_zones (feedback task id -> set of task ids on a cycle
        through it) and task_feedback_zones (task id -> list of feedback task
        ids whose zone contains it).

        NOTE(review): nx.dfs_successor does not exist in modern networkx
        (dfs_successors returns a dict there); this presumably relies on an
        old networkx version -- confirm before upgrading. Also note the
        variable named `predecessors` holds the forward DFS result.
        '''
        feedback_tasks = self.feedback_tasks()
        n = self._network
        fbt = self.feedback_zones = {}
        if feedback_tasks:
            for feedback_task in feedback_tasks:
                predecessors = set(nx.dfs_successor(n, feedback_task))
                successors = set(nx.dfs_successor(n, feedback_task, reverse_graph = True))
                # Bogus "effect": nx.dfs_precursor returns 
                # exactly the same as nx.dfs_successor.
                # The zone is the set of tasks lying on a cycle through the
                # feedback task: reachable from it AND able to reach it.
                fbt[feedback_task] = set.intersection(predecessors, successors)
        tfz = self.task_feedback_zones = {}
        for feedback_task, tasks in fbt.iteritems():
            for task in tasks:
                tfz[task] = tfz.get(task, [])
                tfz[task].append(feedback_task)

