"""
Custom Pool class, allowing to process tasks in a queue and change dynamically
the number of CPUs to use.
"""
from numpy import arange
from gputools import *
from debugtools import *
import multiprocessing, threading, sys, os, time

# Detect the number of CPUs available on this machine. cpu_count() raises
# NotImplementedError on platforms where it cannot be determined; fall back
# to 0 in that case. (The previous bare 'except:' also hid unrelated errors.)
try:
    MAXCPU = multiprocessing.cpu_count()
except NotImplementedError:
    MAXCPU = 0




class Task(object):
    """A single unit of work: a callable plus the arguments to call it with.

    The task records its own lifecycle ('queued' -> 'processing' ->
    'finished'/'crashed'/'killed') in a private status string, exposed
    read-only through the ``status`` property.
    """
    def __init__(self, fun, *args, **kwds):
        self.fun = fun    # callable to evaluate
        self.args = args  # positional arguments for fun
        self.kwds = kwds  # keyword arguments for fun
        self.set_queued() # every new task starts out queued

    def _mark(self, status):
        # Single point of assignment for the status string.
        self._status = status

    def set_queued(self):
        """Mark the task as waiting in the queue."""
        self._mark('queued')

    def set_processing(self):
        """Mark the task as currently being evaluated."""
        self._mark('processing')

    def set_finished(self):
        """Mark the task as successfully evaluated."""
        self._mark('finished')

    def set_crashed(self):
        """Mark the task as having returned an Exception object."""
        self._mark('crashed')

    def set_killed(self):
        """Mark the task as cancelled before completion."""
        self._mark('killed')

    def get_status(self):
        """Return the current status string."""
        return self._status

    status = property(get_status)
    
def eval_task(task_info):
    """Evaluate a single Task on worker unit #j (runs in a pool process).

    The arguments are packed in a single tuple ``(task, j, unit_type)`` so
    this function can be passed directly to ``Pool.map_async``, which calls
    it with one item at a time.  (Unpacking the tuple in the signature, as
    the original did, is Python-2-only syntax — removed by PEP 3113.)

    task      -- a Task object (fun, args, kwds)
    j         -- index of the unit within the current batch
    unit_type -- 'CPU' or 'GPU'; for 'GPU', the process is bound to GPU #j
                 before evaluation and the CUDA context is released after.

    Returns whatever task.fun returns; by convention the mapped function
    returns (not raises) an Exception object if anything went wrong.
    """
    task, j, unit_type = task_info
    if unit_type == 'GPU':
        set_gpu_device(j)
    log.debug("Evaluating task on %s #%d" % (unit_type, j))
    result = task.fun(*task.args, **task.kwds)
    if unit_type == 'GPU':
        set_gpu_device(0)
        close_cuda() # ensures that the context specific to the process
                     # is closed at the process termination

    return result



class CustomPool(object):
    """
    Custom Pool class. Allows to evaluate asynchronously any number of tasks 
    over any number of CPUs/GPUs. The number of CPUs/GPUs can be changed dynamically.
    Tasks statuses can be retrieved at any time.
    
    Transparent support for both CPUs and GPUs.
    
    No exception handling here : the function to be mapped is assumed to handle
    exceptions, and return (not raise) an Exception object if any exception occured.
    """
    def __init__(self, units, type = 'CPU', close_pool = True):
        self.units = units # number of CPUs/GPUs
        self.type = type # 'CPU' or 'GPU'
        self.pool = None # multiprocessing.Pool, created lazily in _run()
        self.close_pool = close_pool # close pool when all tasks have finished?
        self.reset()
        
    def reset(self):
        """Clear the task queue and all processing state."""
        self.running = True
        self.tasks = []
        self.current_step = 0 # index of the first task of the next batch
        self.thread = None # background thread running _run()
        self.results = dict() # task index -> result (None while pending)

    def _run(self):
        """
        Worker loop: processes the task queue batch by batch.

        Runs in a background thread (see run_thread). Each iteration sends
        ``self.units`` tasks to the multiprocessing pool and blocks until the
        batch completes; if ``self.units`` changed since the previous batch,
        the pool is re-created with the new size.
        """
        prevunits = self.units
        
        if self.pool is None:
            log.debug("Creating multiprocessing pool with %d units" % self.units)
            self.pool = multiprocessing.Pool(self.units)
        
        i = self.current_step # current step, task index is i+j
        
        while (i<len(self.tasks)) and self.running:
            units = self.units
            # BUGFIX: compare by value ('!='), not identity ('is not') —
            # identity is only reliable for small cached ints.
            if prevunits != units:
                log.info("Changing the number of units from %d to %d" % (prevunits, units))
                self.pool.close()
                self.pool.join()
                self.pool = multiprocessing.Pool(units)
            # Launches tasks
            log.debug("Processing tasks #%d to #%d..." % (i, min(i+units-1, len(self.tasks)-1)))
            
            # Selects tasks to evaluate on the units
            local_tasks = self.tasks[i:i+units]
            
            # Sends the tasks to the pool of units
            async_results = self.pool.map_async(eval_task, 
                                                  [(local_tasks[j], j, self.type) for j in xrange(len(local_tasks))])
            for task in local_tasks:
                task.set_processing()

            # Retrieves the results (blocks until the whole batch is done)
            log.debug("Retrieving the results...")
            local_results = async_results.get()
            
            for j in xrange(len(local_results)):
                result = local_results[j]
                self.results[i+j] = result
                # By convention the mapped function returns (not raises)
                # an Exception object on failure.
                if isinstance(result, Exception):
                    self.tasks[i+j].set_crashed()
                else:
                    self.tasks[i+j].set_finished()
            log.debug("Tasks #%d to #%d finished" % (i, min(i+units-1, len(self.tasks)-1)))
                
            prevunits = units
            i += units
            self.current_step = i
        
        # Finished processing the queue
        if self.close_pool:
            self.pool.close()
            self.pool.join()
            self.pool = None
        log.debug("All tasks finished")
    
    def _create_tasks(self, fun, *argss, **kwdss):
        """
        Creates tasks from a function and a list of arguments.

        Task #i is Task(fun, argss[0][i], argss[1][i], ..., key=kwdss[key][i]).
        Task creation stops at the length of the shortest argument list.
        """
        tasks = []
        k = len(argss) # number of non-named arguments
        keys = kwdss.keys() # keyword arguments
        
        i = 0 # task index
        while True:
            try:
                args = [argss[l][i] for l in xrange(k)]
                kwds = dict([(key, kwdss[key][i]) for key in keys])
            except (IndexError, KeyError):
                # BUGFIX: only catch exhaustion of the argument lists; the
                # previous bare 'except:' also hid genuine errors.
                break
            tasks.append(Task(fun, *args, **kwds))
            i += 1
        
        return tasks
    
    def set_units(self, units):
        """Change the number of CPUs/GPUs; takes effect at the next batch."""
        self.units = units
    
    def add_tasks(self, fun, *argss, **kwdss):
        """Append new tasks (fun mapped over the argument lists) to the queue."""
        new_tasks = self._create_tasks(fun, *argss, **kwdss)
        log.debug("Adding %d tasks" % len(new_tasks))
        self.tasks.extend(new_tasks)
        n = len(self.results)
        # BUGFIX: create result placeholders for the NEW tasks only. The
        # previous version iterated n+arange(len(self.tasks)), which created
        # spurious entries past the end of the queue on every call after the
        # first one.
        for i in xrange(n, n + len(new_tasks)):
            self.results[i] = None
        # Launches the new tasks if the previous ones have stopped.
        if self.thread is not None and not self.thread.is_alive():
            self.run_thread()
    
    def run_thread(self):
        """Start (or restart) the background processing thread."""
        self.thread = threading.Thread(target=self._run)
        self.thread.start()
    
    def map(self, fun, *argss, **kwdss):
        """
        Evaluates fun on a list of arguments and keywords over a pool of CPUs.
        Same as Pool.map, except that the number of CPUs can be changed during
        the processing of the queue. Also, it is a non-blocking call (separate thread).
        
        Exception handling must be performed inside fun : if an Exception occurs,
        fun must return the Exception object.
        """
        self.add_tasks(fun, *argss, **kwdss)
        self.run_thread()
    
    def get_status(self):
        """
        Returns the status of each task. Non-blocking call.
        """
        return [task.status for task in self.tasks]
    
    def join(self):
        """Block until the background processing thread terminates."""
        if self.thread is not None:
            log.debug("joining CustomPool thread")
            self.thread.join()
        else:
            log.debug("tried to join CustomPool thread but it wasn't active")
    
    def get_results(self):
        """
        Returns the result. Blocking call.
        """
        self.join()
        return [self.results[i] for i in xrange(len(self.tasks))]
    
    def has_finished(self):
        """Return True if every task in the queue has status 'finished'."""
        # BUGFIX: compare strings with '==', not 'is' — identity of equal
        # string literals is a CPython interning detail, not a guarantee.
        return all(s == 'finished' for s in self.get_status())
    
    def close(self):
        """
        Waits for the current processings to terminate then terminates the job 
        processing.
        """
        self.running = False
        self.join()
        
        # Any task that never started is marked as killed.
        for i in xrange(len(self.tasks)):
            if (self.tasks[i].status == 'queued'):
                self.tasks[i].set_killed()
        
        if self.pool is not None:
            log.debug("closing pool of processes")
            self.pool.close()
            self.pool.join()
    
    def kill(self):
        """
        Kills immediately the job processing.
        """
        self.running = False
        
        if self.pool is not None:
            self.pool.terminate()
            self.pool.join()
            self.pool = None
        
        # BUGFIX: keep the attribute (set to None) instead of 'del
        # self.thread', which made any later join()/add_tasks() call raise
        # AttributeError.
        self.thread = None
        
        # Tasks not yet finished are marked as killed.
        for i in xrange(len(self.tasks)):
            if (self.tasks[i].status == 'queued' or self.tasks[i].status == 'processing'):
                self.tasks[i].set_killed()
