"""
Asynchronous Job Manager
"""
from cache import *
from gputools import *
from pool import *
import cPickle, multiprocessing, threading, os, os.path, time, hashlib, random, gc

class Job(object):
    """
    A deferred function call that can be evaluated in a worker process and
    persisted to disk as a pickle file named after the job's unique id.
    """
    # Directory where the job pickle files are stored.
    # NOTE(review): JOBDIR comes from a star-import (presumably `cache`) — confirm.
    jobdir = JOBDIR
    
    def __init__(self, function, *args, **kwds):
        """
        Constructor.
        
        *Arguments*
        
        `function`
          The function to evaluate, it is a native Python function. Function
          serialization should take care of ensuring that this function is correctly
          defined in the namespace.
        
        `*args, **kwds`
          The arguments of the function.
        """
        self.function = function
        self.args = args
        self.kwds = kwds
        self.result = None
        # Lifecycle: 'queued' -> 'processing' -> 'finished' or 'crashed'.
        self.status = 'queued'
    
    def compute_id(self):
        """
        Computes a unique identifier of the job.
        
        The id is the SHA1 digest of the function representation, the pickled
        arguments, the current process id, the current time and a random
        number, so that two jobs never collide even when created with
        identical arguments.
        """
        m = hashlib.sha1()
        pid = os.getpid() # current process id
        t = int(time.time()*10000) % 1000000 # time, sub-second resolution
        s = str(self.function) # function name
        s += cPickle.dumps(self.args, -1) # args dump
        s += cPickle.dumps(self.kwds, -1) # kwds dump (BUGFIX: comment wrongly said args)
        m.update(str(random.random())) # random salt
        m.update(str(pid))
        m.update(str(t))
        m.update(s)
        self._id = m.hexdigest()
        return self._id
    
    def get_id(self):
        """Returns the job identifier, computing it lazily on first access."""
        if not hasattr(self, '_id'):
            self.compute_id()
        return self._id
    id = property(get_id)
    
    def get_filename(self):
        """Returns the path of the pickle file associated with this job."""
        return os.path.join(Job.jobdir, self.id+'.pkl')
    filename = property(get_filename)
    
    def evaluate(self):
        """
        Evaluates the function on the given arguments.
        
        Any exception raised by the function is caught and stored as the
        result (status set to 'crashed') so that a worker process never dies
        because of a user function.
        """
        try:
            # TODO: function code pickling
            self.status = 'processing'
            self.result = self.function(*self.args, **self.kwds)
            self.status = 'finished'
        except Exception as inst:
            self.result = inst
            self.status = 'crashed'
        return self.result
    
    def record(self):
        """
        Records the job after evaluation on the disk.
        """
        if not os.path.exists(self.jobdir):
            log.debug("creating '%s' folder for code pickling" % self.jobdir)
            try:
                os.mkdir(self.jobdir)
            except OSError:
                # BUGFIX: another worker may create the folder between the
                # existence check and mkdir; ignore the race.
                pass
        log.debug("writing '%s'" % self.filename)
        # BUGFIX: close the handle even if pickling raises (try/finally keeps
        # Python 2 compatibility; also avoids shadowing the `file` builtin).
        f = open(self.filename, 'wb')
        try:
            cPickle.dump(self, f, -1)
        finally:
            f.close()

    @staticmethod
    def load(id):
        """
        Returns the Job object stored in the filesystem using its identifier,
        or None if the file is not found.
        """
        # Compute the path outside the try so the except clauses can always
        # reference it (BUGFIX: it was previously assigned inside the try).
        filename = os.path.join(Job.jobdir, id+'.pkl')
        try:
            log.debug("opening file '%s'" % filename)
            f = open(filename, 'rb')
            try:
                job = cPickle.load(f)
            finally:
                # BUGFIX: the handle was leaked when cPickle.load raised.
                f.close()
        except IOError:
            log.warn("file '%s' not found" % filename)
            job = None
        except EOFError:
            # The file exists but is probably still being written by the
            # worker process: wait a little and retry once.
            log.warn("EOF error with '%s', trying again..." % filename)
            time.sleep(.2)
            f = open(filename, 'rb')
            try:
                job = cPickle.load(f)
            finally:
                f.close()
        return job

    @staticmethod
    def erase(id):
        """
        Erases the Job object stored in the filesystem using its identifier.
        """
        filename = os.path.join(Job.jobdir, id+'.pkl')
        log.debug("erasing '%s'" % filename)
        os.remove(filename)

    @staticmethod
    def erase_all():
        """
        Erases all Job objects stored in the filesystem.
        """
        log.debug("erasing all files in '%s'" % Job.jobdir)
        # Plain loop instead of a side-effecting list comprehension.
        for filename in os.listdir(Job.jobdir):
            os.remove(os.path.join(Job.jobdir, filename))

def eval_job(job):
    """
    Evaluates a job and records the outcome on disk.

    Must be a module-level (global) function so it can be passed to
    CustomPool; exceptions raised by the user function are captured inside
    ``Job.evaluate`` itself.
    """
    outcome = job.evaluate()
    job.record()
    return outcome




class AsyncJobHandler(object):
    """
    A Handler object handling asynchronous job management, on the server side.

    Jobs are registered with :meth:`add_jobs`, evaluated in a worker pool via
    :meth:`submit`, and their statuses/results are read back from the job
    files written on disk by the workers.
    """
    def __init__(self):
        self.handlers = []
        # Resource counts dedicated to this handler; use set_cpu()/set_gpu()
        # to override (None means all available units).
        self.cpu = MAXCPU
        self.gpu = 0
        self.pool = None
        # BUGFIX: cpool was only created in submit(), so has_finished(),
        # close() and kill() raised AttributeError instead of taking their
        # intended `is not None` branch when called before any submission.
        self.cpool = None
        # Maps job id -> Job object for all jobs registered on this handler.
        self.jobs = {}
    
    def add_jobs(self, jobs):
        """
        Registers Job objects with this handler and returns their identifiers.
        """
        for job in jobs:
            self.jobs[job.id] = job
        return [job.id for job in jobs]
    
    def set_cpu(self, cpu = None):
        """Sets the number of CPUs to use; None means all available."""
        self.cpu = MAXCPU if cpu is None else cpu
        return True
        
    def set_gpu(self, gpu = None):
        """Sets the number of GPUs to use; None means all available."""
        self.gpu = MAXGPU if gpu is None else gpu
        return True
    
    def get_units(self, type):
        """
        Returns the number of units for the given resource type ('CPU' or
        'GPU'), or False for an unknown type.
        """
        if type == 'CPU':
            return self.cpu
        elif type == 'GPU':
            return self.gpu
        log.warn("type must be either CPU or GPU")
        return False
    
    def submit(self, jobs, type = 'CPU'):
        """
        Submits jobs for asynchronous evaluation in a new worker pool.
        
        *Arguments*
        
        `jobs`
          A list of Job objects.
        
        `type`
          Resource type, 'CPU' (default) or 'GPU'.
        
        Returns the list of job identifiers.
        """
        job_ids = self.add_jobs(jobs)
        self.cpool = CustomPool(self.get_units(type), type=type)
        # links the global Pool object to the CustomPool object
        # NOTE(review): `self.pools` is never defined in this class
        # (__init__ only sets `self.pool`) — it is presumably assigned
        # externally; confirm before relying on submit().
        self.cpool.pool = self.pools[type]
        
        pool_ids = self.cpool.submit_tasks(eval_job, jobs) # creates a new process
        # Pair each job with the pool task id it maps to.
        for job_id, pool_id in zip(job_ids, pool_ids):
            self.jobs[job_id].pool_id = pool_id
        
        return job_ids

    def get_pool_ids(self, job_ids):
        """
        Converts job ids (specific to AsyncJobHandler) to pool ids (specific
        to the CustomPool object).
        """
        return [self.jobs[id].pool_id for id in job_ids]
    
    def get_status(self, job_ids):
        """
        Returns the status of each job: read from the job file when it
        exists, from the in-memory Job object otherwise, or None when the
        job is unknown.
        """
        if job_ids is None:
            raise Exception("The job identifiers must be specified")
        statuss = []
        for id in job_ids:
            job = Job.load(id)
            if job is not None:
                log.debug("job file '%s' found" % id)
                status = job.status
            elif id in self.jobs:
                log.debug("job file '%s' not found" % id)
                status = self.jobs[id].status
            else:
                log.warn("job '%s' not found" % id)
                status = None
            statuss.append(status)
        return statuss
    
    def get_results(self, job_ids):
        """
        Returns the result of each job, waiting for the pool to finish when a
        job file has not been written yet. Results may be Exception objects
        when the job crashed.
        """
        if job_ids is None:
            raise Exception("Please specify job identifiers.")
        results = []
        for id in job_ids:
            job = Job.load(id)
            if job is not None:
                result = job.result
            else:
                # If the job file is missing, the job probably has not
                # finished yet: block on the pool and retry once.
                result = None
                log.debug("Tasks have not finished yet, waiting...")
                self.cpool.join()
                job = Job.load(id)
                if job is not None:
                    result = job.result
            results.append(result)
        return results
    
    def has_finished(self, job_ids):
        """
        Returns whether the given jobs have finished, or None if no pool has
        been created yet.
        """
        if self.cpool is not None:
            pool_ids = self.get_pool_ids(job_ids)
            return self.cpool.has_finished(pool_ids)
        else:
            log.warn("The specified job identifiers haven't been found")
            return None
    
    def close(self):
        """Closes the worker pool if one exists."""
        if self.cpool is not None:
            self.cpool.close()
        else:
            log.warn("The pool object has already been closed")
    
    def kill(self):
        """Kills the worker pool if one exists."""
        # TODO: jobids?
        if self.cpool is not None:
            self.cpool.kill()
        else:
            log.warn("The pool object has already been killed")
