'''
Created on Jul 30, 2013

@author: Cihat Basol
'''

import collections
import logging
import threading
import time

from dexen.common import events, task


logger = logging.getLogger(__name__)


class _TimeEventThread(threading.Thread):
    """Daemon thread that fires a task's time-based event.

    For a PeriodicTimeEvent the task is re-queued on the event manager
    every ``period`` seconds until stopped; for a OneShotTimeEvent it is
    queued once after ``after`` seconds (unless stopped first).
    """
    RUNNING = "RUNNING"
    STOPPED = "STOPPED"
    NOT_RUNNING = "NOT_RUNNING"

    def __init__(self, event_mgr, task):
        super(_TimeEventThread, self).__init__()
        self.daemon = True
        self.event_mgr = event_mgr
        self.task = task
        self.event = task.event
        self.task_name = self.task.name
        self._lock = threading.RLock()
        self.state = _TimeEventThread.NOT_RUNNING

    @property
    def is_running(self):
        with self._lock:
            return self.state == _TimeEventThread.RUNNING

    @property
    def is_periodic(self):
        return isinstance(self.event, events.PeriodicTimeEvent)

    @property
    def was_stopped(self):
        with self._lock:
            return self.state == _TimeEventThread.STOPPED

    def _one_shot(self, after):
        # Fire once after the delay, unless stop() was called meanwhile.
        # Note: stop() does not interrupt the sleep, it only suppresses
        # the final put_task().
        time.sleep(after)
        if self.is_running:
            self.event_mgr.put_task(self.task)

    def _periodic(self, period):
        # Re-queue the task every `period` seconds until stopped. A stop()
        # issued mid-sleep takes effect at the next wake-up.
        while True:
            time.sleep(period)
            if not self.is_running:
                break
            self.event_mgr.put_task(self.task)

    def run(self):
        with self._lock:
            # Bug fix: if stop() ran before this thread got scheduled, the
            # old code overwrote STOPPED with RUNNING and a periodic event
            # would loop forever. Honor a stop issued before run().
            if self.state == _TimeEventThread.STOPPED:
                return
            self.state = _TimeEventThread.RUNNING
        if self.is_periodic:
            self._periodic(self.event.period)
        else:
            assert isinstance(self.event, events.OneShotTimeEvent)
            self._one_shot(self.event.after)

    def stop(self):
        """Ask the thread to stop; takes effect at its next wake-up."""
        with self._lock:
            self.state = _TimeEventThread.STOPPED


class _TaskQueue(collections.deque):
    """FIFO queue of tasks backed by collections.deque."""

    def empty(self):
        """Return True when the queue holds no tasks."""
        # A deque is falsy when it has no elements; no need for the old
        # probe-self[0]-and-catch-IndexError dance.
        return not self


class _TaskInfo(object):
    """Book-keeping counters for a single registered event task."""

    def __init__(self, task_name):
        self.task_name = task_name
        self.registration_time = time.time()
        self.num_executions = 0
        self.total_execution_time = 0.0
        self.num_pending = 0
        self.num_scheduled = 0

    def on_execution_completed(self, execution_result):
        # Accumulate the data needed for the average execution time.
        self.num_executions += 1
        self.total_execution_time += execution_result.execution_time

    def on_execution_scheduled(self):
        # A pending task has been handed over to a worker.
        self.num_pending -= 1
        self.num_scheduled += 1

    def on_task_added(self):
        self.num_pending += 1

    def json(self):
        """Return a JSON-serializable summary of this task's statistics."""
        if self.num_executions:
            avg_time = self.total_execution_time / self.num_executions
        else:
            avg_time = "NONE"
        return {
            "task_name": self.task_name,
            "registration_time": self.registration_time,
            "num_executions": self.num_executions,
            "avg_execution_time": avg_time,
            "num_pending": self.num_pending,
            "num_scheduled": self.num_scheduled,
        }


class TaskManager(object):
    """Abstract base class for task managers.

    Subclasses override the hooks below; every default implementation is
    a no-op that returns None.
    """

    def __init__(self):
        """No shared state; present so subclasses can chain up."""

    def get_task(self):
        """Return the next task that is ready to run, if any."""

    def register_task(self, task):
        """Make *task* known to this manager."""

    def deregister_task(self, task_name):
        """Forget the task registered under *task_name*."""

    def on_execution_result_ready(self, exec_result):
        """Hook invoked when an execution result becomes available."""


class EventTaskManager(TaskManager):
    """Task manager driven by job and time events.

    Tasks are appended to a FIFO queue when their triggering event fires
    (job started/stopped, or a time-based event served by a dedicated
    _TimeEventThread) and are handed out via get_task().
    """
    JOB_STARTED = "JobStarted"
    JOB_STOPPED = "JobStopped"

    def __init__(self):
        super(EventTaskManager, self).__init__()
        self.queue = _TaskQueue()
        self.tasks = {}  # task_name : Task
        self.time_threads = {}  # task_name : _TimeEventThread
        self._lock = threading.RLock()
        self.state = EventTaskManager.JOB_STOPPED
        self.tasks_info = {}  # task_name : _TaskInfo

    def put_task(self, task):
        """Queue *task* for execution if it is still registered."""
        if self.is_task_registered(task.name):
            self.queue.append(task)
            self.tasks_info[task.name].on_task_added()

    def get_task(self):
        """Pop and return the next still-registered task, or None.

        Tasks deregistered while waiting in the queue are discarded here.
        """
        while not self.queue.empty():
            task = self.queue.popleft()
            if self.is_task_registered(task.name):
                return task
        return None

    def undo_get_task(self, task):
        """Put back a task obtained from get_task() that was not scheduled."""
        self.queue.appendleft(task)

    def has_task(self):
        """Return True if the queue is non-empty."""
        return not self.queue.empty()

    def is_task_registered(self, task_name):
        with self._lock:
            return task_name in self.tasks

    def register_task(self, task):
        """Register *task*; a time-based task gets its own timer thread,
        started immediately when the job is already running."""
        with self._lock:
            self.tasks[task.name] = task
            self.tasks_info[task.name] = _TaskInfo(task.name)
            if isinstance(task.event, events.TimeBasedEvent):
                thread = _TimeEventThread(self, task)
                self.time_threads[task.name] = thread
                if self.state == EventTaskManager.JOB_STARTED:
                    thread.start()

    def deregister_task(self, task_name):
        """Forget *task_name* and stop its timer thread, if any.

        Bug fix: unknown names are now a no-op instead of raising KeyError.
        """
        with self._lock:
            self.tasks.pop(task_name, None)
            self.tasks_info.pop(task_name, None)
            thread = self.time_threads.pop(task_name, None)
            if thread is not None:
                thread.stop()

    def on_job_start(self):
        """Queue JobStartedEvent tasks and (re)start all timer threads."""
        with self._lock:
            self.state = EventTaskManager.JOB_STARTED
            for task in self.tasks.values():
                if isinstance(task.event, events.JobStartedEvent):
                    self.put_task(task)
            for task_name, thread in self.time_threads.items():
                # Bug fix: threading.Thread.start() may only be called once
                # per thread object, so any thread that was stopped or has
                # already been started (its ident is set once it runs) must
                # be replaced with a fresh one before starting.
                if thread.was_stopped or thread.ident is not None:
                    thread = _TimeEventThread(self, self.tasks[task_name])
                    self.time_threads[task_name] = thread
                thread.start()

    def on_job_stop(self):
        """Queue JobStoppedEvent tasks and stop all timer threads."""
        with self._lock:
            self.state = EventTaskManager.JOB_STOPPED
            for task in self.tasks.values():
                if isinstance(task.event, events.JobStoppedEvent):
                    self.put_task(task)
            for thread in self.time_threads.values():
                thread.stop()

    def on_execution_scheduled(self, execution):
        self.tasks_info[execution.task_name].on_execution_scheduled()

    def on_execution_completed(self, scheduled_execution, execution_result):
        task_name = execution_result.task_name
        assert task_name in self.tasks_info
        self.tasks_info[task_name].on_execution_completed(execution_result)

    def on_execution_failed(self, execution):
        pass

    def json_info(self):
        """Return the per-task statistics as a list of JSON documents."""
        return [info.json() for info in self.tasks_info.values()]


class DataFlowTaskInfo(object):
    """Tracks the data ids and execution statistics of one dataflow task.

    ``pending_data`` holds ids waiting to be batched into an execution,
    ``scheduled_data`` holds ids currently being processed; the two sets
    are mutually exclusive.
    """

    def __init__(self, task):
        self.task = task
        self.num_executions = 0
        self.num_scheduled_executions = 0
        self.total_execution_time = 0.0
        # The two sets are mutually exclusive.
        self.pending_data = set()  # set of DataObjectId
        self.scheduled_data = set()  # set of DataObjectId

    def in_processing(self, data_id):
        """Return True if *data_id* is part of a scheduled execution."""
        return data_id in self.scheduled_data

    def add_to_pending(self, data_id):
        self.pending_data.add(data_id)

    def remove_from_pending(self, data_ids):
        # difference_update accepts any iterable; no need to build a set.
        self.pending_data.difference_update(data_ids)

    def remove_from_processing(self, data_ids):
        self.scheduled_data.difference_update(data_ids)

    def has_sufficient_data(self):
        """True when enough pending ids exist to fill the task's input."""
        return len(self.pending_data) >= self.task.input_size

    def create_task_with_data(self):
        """Move ``input_size`` pending ids to scheduled and return a task
        clone bound to them. The pick among pending ids is arbitrary."""
        data = list(self.pending_data)[:self.task.input_size]
        self.pending_data.difference_update(data)
        self.scheduled_data.update(data)
        return self.task.clone_with_data(data)

    def undo_create_task_with_data(self, data):
        """Reverse create_task_with_data() for a clone that was never
        scheduled: put *data* back into the pending pool."""
        self.pending_data.update(data)
        self.scheduled_data.difference_update(data)

    def on_execution_scheduled(self):
        self.num_scheduled_executions += 1

    def on_execution_completed(self, execution_result):
        self.num_executions += 1
        self.num_scheduled_executions -= 1
        # Bug fix: total_execution_time was initialized but never updated;
        # accumulate it the same way _TaskInfo.on_execution_completed does
        # (execution results carry an execution_time attribute).
        self.total_execution_time += execution_result.execution_time

    def on_execution_failed(self):
        self.num_scheduled_executions -= 1

    def __str__(self):
        return """
            task_name: %s
            num_executions: %d
            num_scheduled_executions: %d
            pending data: %s
            processing data: %s\n"""\
            %(self.task.name, self.num_executions,
              self.num_scheduled_executions, self.pending_data,
              self.scheduled_data)


class DataFlowTaskManager(TaskManager):
    """Task manager for the dataflow model.

    A task becomes runnable when enough data objects satisfying its
    condition have accumulated in its pending pool; completed or failed
    executions return their input ids to the pools that still match.
    """

    def __init__(self, job_data_mgr):
        super(DataFlowTaskManager, self).__init__()
        self.data_mgr = job_data_mgr
        self.tasks_info = {}  # task_name: DataFlowTaskInfo
        self.logger = logging.getLogger(__name__+"."+self.__class__.__name__)

    def get_task(self):
        """Return a task clone bound to pending data, or None when no
        registered task has accumulated sufficient data."""
        self.logger.debug("Trying to get a DataFlow task.")
        for task_info in self.tasks_info.values():
            if task_info.has_sufficient_data():
                self.logger.debug("returning the DataFlow task.")
                return task_info.create_task_with_data()
        self.logger.debug("There is not sufficient data to create a DataFlow task.")
        return None

    def undo_get_task(self, task):
        """Return the input data of an unscheduled task clone to its pool."""
        self.logger.debug("Couldn't schedule the DataFlow task so putting it back.")
        self.tasks_info[task.name].undo_create_task_with_data(task.input_data)

    def register_task(self, task):
        assert task.name not in self.tasks_info
        self.tasks_info[task.name] = DataFlowTaskInfo(task)

    def deregister_task(self, task_name):
        del self.tasks_info[task_name]

    def _add_to_pending_if_valid(self, task_info, ids):
        # Re-admit only the ids that still satisfy the task's condition
        # and are not currently part of a scheduled execution.
        task_condition = task_info.task.condition
        filtered_ids = self.data_mgr.filter_ids(ids, task_condition)
        self.logger.debug("filtered ids still valid for %s: %s",
                          task_info.task.name, filtered_ids)
        for data_id in filtered_ids:
            if not task_info.in_processing(data_id):
                task_info.add_to_pending(data_id)

    def on_execution_scheduled(self, execution):
        self.logger.info("on execution %s with task: %s scheduled",
                         execution.execution_id, execution.task.name)
        self.tasks_info[execution.task.name].on_execution_scheduled()

    def on_execution_completed(self, scheduled_execution, execution_result):
        """Account for a finished execution and redistribute its input and
        modified data ids to the pending pools that still match."""
        execution_id = execution_result.execution_id
        self.logger.info("on execution %s with task: %s completed",
                         execution_id, scheduled_execution.task.name)
        if scheduled_execution.task.is_dataflow_task:
            # input_data still holds the original ids sent for execution.
            input_ids = scheduled_execution.task.input_data
            task_info = self.tasks_info[execution_result.task_name]
            task_info.on_execution_completed(execution_result)
            task_info.remove_from_processing(input_ids)
            self._add_to_pending_if_valid(task_info, input_ids)

        modified_ids = self.data_mgr.get_ids_being_modified(execution_id)
        self.logger.debug("Modified ids by execution %s: %s", execution_id,
                          modified_ids)
        self.data_mgr.remove_execution_ids(modified_ids, execution_id)
        # Remove all the modified ids from all the pending queues, then put
        # them back only where the task condition is still satisfied.
        for task_info in self.tasks_info.values():
            task_info.remove_from_pending(modified_ids)
        for task_info in self.tasks_info.values():
            self._add_to_pending_if_valid(task_info, modified_ids)

        self.logger.debug("Listing all the tasks info.")
        for task_info in self.tasks_info.values():
            self.logger.info(task_info)

    def on_execution_failed(self, execution):
        """Roll back a failed execution's data changes and return its input
        ids to the pending pool where still valid."""
        self.logger.info("on execution %s with task: %s failed.",
                         execution.execution_id, execution.task.name)

        execution_id = execution.execution_id

        self.data_mgr.rollback(execution_id)

        if execution.task.is_dataflow_task:
            input_ids = execution.task.input_data
            # Bug fix: was self.tasks_info[execution.task_name]; this method
            # reads the name via execution.task.name everywhere else (see
            # the log call above), so use the same attribute here.
            task_info = self.tasks_info[execution.task.name]
            task_info.on_execution_failed()
            task_info.remove_from_processing(input_ids)
            self._add_to_pending_if_valid(task_info, input_ids)

    def json_info(self):
        # TODO(review): not implemented — returns None, unlike
        # EventTaskManager.json_info which returns a list of documents.
        pass




























