"""
The Interface defines convenient functions (wrapper) to send jobs to the cloud.
"""
from asyncjobhandler import *
from pool import *
from rpc import *
from filetransfer import *
from gputools import *
from codepickle import *
from optimization import *
import math, random

# Public API of the module.
# NOTE: 'map' deliberately shadows the builtin of the same name for users
# doing 'from interface import *'.
__all__ = ['map', 'submit_jobs',
           'optimize', 'submit_optimization',
           'get_status', 'get_results']



class Interface(object):
    """
    Base class for the cloud interfaces: holds the RPC clients and splits the
    user keyword arguments into special '_'-prefixed variables and plain
    keyword arguments. Subclasses must set ``kwdss_original``,
    ``default_vars``, ``handler_class`` and ``handler_id`` before use.
    """
    clients = None        # RpcClients instance while connected, else None
    handler_id = None     # identifier of the handler on the servers
    handler_class = None  # handler class instantiated on the servers

    def extract(self):
        """
        Splits the dictionary ``self.kwdss_original`` into two dictionaries:
        ``self.vars`` with the predefined variables (missing ones filled with
        their defaults from ``self.default_vars``) and ``self.kwdss`` with all
        the other values. Also normalizes '_machines' into a sorted list,
        stored in ``self.machines``, with ``self.n`` its length.
        """
        original = self.kwdss_original
        remaining = original.copy()
        special = dict()
        for key in self.default_vars.keys():
            if key in original:
                special[key] = original[key]
                del remaining[key]
            else:
                special[key] = self.default_vars[key]
        machines = special['_machines']
        if not isinstance(machines, list):
            # A single machine may be given directly instead of a list.
            machines = [machines]
        # sorted() returns a new list so the caller's list is not mutated
        # (the previous in-place sort() altered the caller's argument).
        self.machines = sorted(machines)
        self.n = len(self.machines)
        self.kwdss = remaining
        self.vars = special

    def connect(self):
        """Opens the RPC connections to all the machines."""
        self.clients = RpcClients(self.machines,
                                  handler_id=self.handler_id,
                                  handler_class=self.handler_class)
        self.clients.connect()

    def close(self):
        """Closes the handlers on the servers."""
        self.clients.close()

    def disconnect(self):
        """Closes the RPC connections and forgets the clients."""
        self.clients.disconnect()
        self.clients = None

class JobInterface(Interface):
    """
    Interface to submit batches of independent jobs to distant machines and
    to retrieve their status and results.
    """
    def __init__(self, fun = None, *argss, **kwdss):
        """
        fun -- the function to evaluate (module-level; lambdas not supported).
        argss -- one sequence per positional argument; job i receives
                 ``argss[l][i]`` for each l.
        kwdss -- one sequence per keyword argument, plus the special
                 '_'-prefixed keywords listed in ``default_vars`` below.
        """
        self.fun = fun
        self.argss = argss
        self.kwdss_original = kwdss
        self.handler_class = AsyncJobHandler

        # Default values of the special '_'-prefixed keywords.
        self.default_vars = dict(   #_cpu=MAXCPU, 
                                    #_gpu=0, 
                                    _gpu_policy='no_gpu', 
                                    _type='CPU',
                                    _machines=[], 
                                    _port=None)
        
        self.extract()
        
    def submit(self):
        """
        Creates one Job per task (task i takes the i-th element of every
        argument sequence), splits the jobs evenly over the machines and
        submits them. Returns a dict {machine: jobids}. Requires at least one
        distant machine.
        """
        if self.fun.__name__ == '<lambda>':
            raise Exception("lambda functions are currently not supported")
        
        # The function must be picklable to be sent over RPC.
        fun = PicklableClass(self.fun)
        
        if self.n == 0:
            raise Exception("The 'submit' function can only be used with distant machines.")
        
        # Creates the jobs.
        jobs = []
        nargs = len(self.argss) # number of non-named arguments
        keys = self.kwdss.keys() # keyword arguments
        
        i = 0 # task index
        while True:
            # Stops at the length of the shortest argument sequence; only the
            # exhaustion exceptions are caught so real errors still propagate
            # (the previous bare 'except:' hid them).
            try:
                args = [self.argss[l][i] for l in xrange(nargs)]
                kwds = dict([(key, self.kwdss[key][i]) for key in keys])
            except (IndexError, KeyError):
                break
            # self.n > 0 is guaranteed above, so jobs always go through the
            # asyncjobhandler (the former Task/pool branch was unreachable).
            jobs.append(Job(fun, *args, **kwds))
            i += 1
        
        # number of jobs per machine (except the last one which has less)
        per_machine = int(math.ceil(float(len(jobs))/self.n))
        split_jobs = [jobs[per_machine*i:per_machine*(i+1)] for i in xrange(self.n)]
        
        # TODO: resource managing
        
        jobids = self.clients.submit(split_jobs)
        
        # Maps each machine to the ids of the jobs it received.
        jobids = dict([(self.machines[i], jobids[i]) for i in xrange(self.n)])
        return jobids
    
    def get_status(self, jobids):
        """
        jobids -- dict {machine: jobids} as returned by submit().
        Returns the flat list of the job statuses, in machine order.
        Raises if the RPC layer returned None.
        """
        status = self.clients.get_status([jobids[machine] for machine in self.machines])
        if status is None:
            raise Exception("There was an error while retrieving the results.")
        status_list = []
        for s in status:
            status_list.extend(s)
        return status_list
    
    def get_results(self, jobids):
        """
        jobids -- dict {machine: jobids} as returned by submit().
        Returns the flat list of the job results, in machine order (blocking).
        Raises if the RPC layer returned None.
        """
        results = self.clients.get_results([jobids[machine] for machine in self.machines])
        if results is None:
            raise Exception("There was an error while retrieving the results.")
        results_list = []
        for r in results:
            results_list.extend(r)
        return results_list
    
class OptimizationInterface(Interface):
    """
    Interface to a distributed optimization: wraps the RPC clients to run an
    optimization algorithm (e.g. PSO) on one or several distant machines.
    """
    # RpcClients instance while connected (None otherwise).
    clients = None
    
    def __init__(self, fitness_fun = None, optid = None, **kwdss):
        """
        fitness_fun -- the fitness function (module-level; lambdas are not
                       supported by submit()).
        optid -- identifier of an existing optimization to reconnect to; a new
                 random identifier is generated when None.
        kwdss -- the parameters to optimize, the algorithm-specific options
                 and the special '_'-prefixed keywords (see default_vars).
        """
        self.fitness_fun = fitness_fun
        self.kwdss_original = kwdss # parameters to be optimized and _-like keywords

        # Default values of the special '_'-prefixed keywords.
        self.default_vars = dict(   _cpu=MAXCPU, # TODO: resources managing instead of these keywords
                                    _gpu=0, 
                                    _gpu_policy='no_gpu', 
                                    _machines=[], 
                                    _port=None,
                                    _particles=10,
                                    _iterations=1,
                                    _type='CPU',
                                    _algorithm=PSO,
                                    _units=1)
        self.extract() # extracts vars from kwds using the default variables
        # The handler instantiated on the servers is the algorithm class itself.
        self.handler_class = self.vars['_algorithm']
        log.debug("handler class=%s" % str(self.handler_class))
        
        # optid is the identifier of the optimization, and also the clientid of any
        # RPC client connecting to the server : it is the key of the handler on the servers
        if optid is None:
            self.handler_id = "TEST"+str(random.randint(0,100000000)) # TODO: random identifier for each 
                                                 # optimization, more robust (problem with processes/UNIX here)
        else:
            self.handler_id = optid
    
    def extract_params(self):
        """
        Splits self.kwdss into the optimized parameters and the
        algorithm-specific options (completed with their default values).
        Returns the pair (params, opt_info).
        """
        # self.vars contains the _kwd keywords
        # self.kwdss contains all the other kwds, a part of them are algorithm-specific, the
        # others are the optimized parameters
        alg_params = self.vars['_algorithm'].default_values().keys() # algorithm-specific parameters
        params = dict([(k,v) for k,v in self.kwdss.iteritems() if k not in alg_params])
        opt_info = dict([(k,v) for k,v in self.kwdss.iteritems() if k in alg_params])
        # non-specified optimization parameters = set to default
        opt_info = self.vars['_algorithm'].default_values(**opt_info)
        return params, opt_info
    
    def submit(self):
        """
        Launches the optimization on the distant machines (non-blocking) and
        returns (handler_id, algorithm, machines), to be passed later to the
        module-level get_results(). Requires at least one distant machine.
        """
        if self.fitness_fun.__name__ == '<lambda>':
            raise Exception("lambda functions are currently not supported")
        
        # NOTE(review): this PicklableClass wrapper is never used below —
        # set_fitness() sends self.fitness_fun directly. Confirm whether the
        # wrapper should be sent instead (JobInterface.submit does use it).
        fitness_fun = PicklableClass(self.fitness_fun)
        
        if self.n == 0:
            raise Exception("The 'submit' function can only be used with distant machines.")
        
        # number of particles per machine (except the last one which has less)
        split_particles = [self.vars['_particles']//self.n for _ in xrange(self.n)]
        split_particles[-1] = self.vars['_particles']-sum(split_particles[:-1])
        
        # Sets the machines to be involved in the optimization
        self.clients.set_machine_id(self.machines)
        self.clients.set_common_handler_id(self.handler_id)
        self.clients.set_machines([self.machines]*self.n)
        
        params, opt_info = self.extract_params()
        
        # Every machine receives the same algorithm options (one copy each).
        opt_infos = dict([(k, [v]*self.n) for k,v in opt_info.iteritems()])
        
        # Adds the _-like kwds to opt_infos
        for k,v in self.vars.iteritems():
            opt_infos[k] = [v]*self.n
        # only _particles is machine-specific
        opt_infos['_particles'] = split_particles
        
        # Every machine receives the same optimized-parameter specification.
        paramss = dict([(k, [v]*self.n) for k,v in params.iteritems()])
        
        self.clients.set_fitness([self.fitness_fun]*self.n, **paramss)
        self.clients.set_opt_info(**opt_infos)
        
        self.clients.optimize()
        
        return self.handler_id, self.vars['_algorithm'], self.machines
    
    def get_status(self):
        """Returns the current status of the optimization on the machines."""
        return self.clients.get_status()
    
    def get_results(self):
        """
        Retrieves the per-machine results (blocking) and combines them on the
        client with the algorithm's finalize().
        """
        machine_results = self.clients.get_results()
        # retrieves the algorithm in order to call finalize on the client
        algorithm = self.clients.get_class()[0] 
        # machine_results[i] is self.results of machine i
        results = algorithm.finalize(machine_results)
        return results


def map(fun, *argss, **kwdss):
    """
    Evaluates ``fun`` over the given argument sequences and returns the list
    of results: locally with a process pool when no '_machines' keyword is
    given, otherwise by submitting jobs to the distant machines and waiting
    for the results. NOTE: deliberately shadows the builtin ``map``.
    """
    interface = JobInterface(fun, *argss, **kwdss)
    interface.connect()
    # try/finally so the pool and the connections are released even if the
    # execution or the result retrieval fails.
    try:
        if interface.n == 0:
            p = CustomPool(MAXCPU)#interface.vars['_cpu']
            try:
                # interface.kwdss is kwdss without the special parameters beginning with _
                results = p.map(fun, *argss, **interface.kwdss)
            finally:
                p.close()
        else:
            # TODO: select the number of units that can be used on the server, using get_resources etc
            jobids = interface.submit()
            results = interface.get_results(jobids)
    finally:
        interface.disconnect()
    return results

def submit_jobs(fun, *argss, **kwdss):
    """
    Submits jobs to the distant machines without waiting for their
    completion. Returns a dict {machine: jobids} to be passed later to
    get_status()/get_results(). Raises if no machine is specified.
    """
    interface = JobInterface(fun, *argss, **kwdss)
    interface.connect()
    # try/finally: the previous version leaked the open connections when the
    # n == 0 exception (or a submission error) was raised.
    try:
        if interface.n == 0:
            raise Exception("The 'submit' function must be used with at least one machine ('_machines_' keyword)")
        jobids = interface.submit()
    finally:
        interface.disconnect()
    return jobids




def optimize(fitness_fun, **kwdss):
    """
    Runs an optimization of ``fitness_fun`` and returns the finalized
    results: remotely when '_machines' is given (submit then wait),
    locally in this process otherwise.
    """
    interface = OptimizationInterface(fitness_fun, **kwdss)

    if interface.n > 0:
        # Remote run: submit the optimization, then block on the results.
        optid = submit_optimization(fitness_fun, **kwdss)
        return get_results(optid)

    # Local run: instantiate the algorithm directly.
    params, opt_info = interface.extract_params()
    algorithm = interface.vars['_algorithm']

    # The '_'-prefixed keywords are forwarded to the algorithm as well.
    for key, value in interface.vars.iteritems():
        opt_info[key] = value

    optimizer = algorithm()
    optimizer._local = True
    optimizer.set_fitness(fitness_fun, **params)
    optimizer.set_opt_info(**opt_info)
    optimizer.optimize()
    local_results = optimizer.get_results()
    return algorithm.finalize([local_results])

def submit_optimization(fitness_fun, **kwdss):
    """
    Submits an optimization to the distant machines without waiting for its
    completion. Returns (handler_id, algorithm, machines), to be passed later
    to get_status()/get_results().
    """
    interface = OptimizationInterface(fitness_fun, **kwdss)
    interface.connect()
    # try/finally: disconnect even when the submission fails, so the
    # connections are not leaked.
    try:
        r = interface.submit()
    finally:
        interface.disconnect()
    return r

def get_status(jobids):
    """
    Returns the status of previously submitted jobs or of a running
    optimization.
    jobids -- either the dict {machine: jobids} returned by submit_jobs(),
              or the tuple returned by submit_optimization().
    """
    # job or optimization?
    if type(jobids) is tuple and len(jobids) in (2, 3):
        # optimization: submit_optimization returns the 3-tuple
        # (optid, algorithm, machines) — the algorithm is not needed to poll
        # the status (the previous 2-tuple form is still accepted).
        if len(jobids) == 3:
            optid, _, machines = jobids
        else:
            optid, machines = jobids
        interface = OptimizationInterface(_machines=machines, optid=optid)
        interface.connect()
        try:
            status = interface.get_status()
        finally:
            interface.disconnect()
    else:
        interface = JobInterface(_machines = jobids.keys())
        interface.connect()
        try:
            status = interface.get_status(jobids)
        finally:
            interface.disconnect()
    return status

def get_results(jobids):
    """
    Waits for and returns the results of previously submitted jobs or of a
    running optimization.
    jobids -- either the dict {machine: jobids} returned by submit_jobs(),
              or the (optid, algorithm, machines) tuple returned by
              submit_optimization().
    """
    # job or optimization?
    if type(jobids) is tuple and len(jobids) == 3:
        # optimization
        optid, algo, machines = jobids
        interface = OptimizationInterface(_machines=machines, optid=optid, _algorithm=algo)
        interface.connect()
        # try/finally so the connections are released even on an RPC error.
        try:
            results = interface.get_results()
        finally:
            interface.disconnect()
    else:
        interface = JobInterface(_machines = jobids.keys())
        interface.connect()
        try:
            results = interface.get_results(jobids)
        finally:
            interface.disconnect()
    return results