'''
Resource manager for Playdoh:

The idea is that each computer has a fixed set of resources (CPUs and GPUs)
which can be assigned to different users. The computer's user can reserve
some of these for their own use, and let others use the rest. I think this
way of doing things makes it more stable and easy to manage overall - you
don't need to go around the lab asking people who has free CPUs to use, etc.
You also don't need to check if maybe someone else is already using the
resources on a given machine (not sure if this is still an issue with the
current version or not).

I imagine having a GUI that shows all the users you know about, their status
message (they can leave messages here to give an idea of what they are
planning to do, and how long it will take), how many resources they have free
and who the various resources are assigned to. There would also be a panel
for your own computer's resources - and you could change the number reserved
for yourself, kick other users off if you need more resources, etc.

Resources could be requested from another user in two ways, you could submit
a request for resources that only lasts as long as a job, and some other
class would take care of requesting and releasing the resources from the
ResourceManager. Or, you could use the GUI to request an allocation of
resources directly, and then those would be used for any jobs submitted (this
would be useful if you had several jobs you were planning to submit, possibly
in sequence).

Another class should also handle requesting resources across multiple machines.

Some changes would need to be made to Playdoh as a whole to get this idea
working:

* Resource management has to go through this ResourceManager class.
* The number of assigned resources can change at any moment, and this has
  to be reflected in the code - i.e. if you submit two jobs, you can't assume
  that there'll be the same number of processors available. This could be
  handled by exceptions though.
* Jobs can be killed in the middle, and this should be handled.
'''

import multiprocessing
import getpass
from multiprocessing.connection import Listener, Client
try:
    import pycuda
    import pycuda.autoinit
    from pycuda import driver as drv
    have_gpu = True
except:
    have_gpu = False

__all__ = ['ResourceManager']

class ResourceManager(object):
    """Tracks this machine's CPU/GPU resources and their per-user assignments.

    Resources are assigned to users by name, up to the totals detected at
    construction time.  A user's assignment cannot be released while that
    user has a running job (see ``start``/``end``).  All mutating methods
    return a status tuple rather than raising, so results can be shipped
    directly back over a network connection (see ``process``).
    """

    def __init__(self):
        # The local login name identifies this machine's own reservations
        # in the assignment table (used by reserve/unreserve).
        self.name = getpass.getuser()
        self.status = ''
        # NOTE(review): relies on the module-level pycuda detection
        # (`have_gpu`/`drv`) performed at import time.
        if have_gpu:
            self.num_gpu = drv.Device.count()
        else:
            self.num_gpu = 0
        self.num_cpu = multiprocessing.cpu_count()
        self.assigned_cpu = 0        # total CPUs currently assigned
        self.assigned_gpu = 0        # total GPUs currently assigned
        self.assignments = {}        # user name -> (cpu, gpu) assigned
        self.running = set()         # user names with a job in progress

    # Use reserve/unreserve to keep resources for yourself.

    def reserve(self, cpu, gpu):
        """Assign ``cpu``/``gpu`` resources to the local user.

        Returns the same ``(cpu, gpu, message)`` tuple as ``assign``.
        """
        return self.assign(self.name, cpu, gpu)

    def unreserve(self):
        """Release the local user's resources; same return as ``release``."""
        return self.release(self.name)

    # Assign/release CPU/GPU resources to users, up to the maximum available.

    def assign(self, name, cpu, gpu):
        """Assign up to ``cpu`` CPUs and ``gpu`` GPUs to user ``name``.

        The request is clamped to what is actually free, so the caller may
        receive fewer resources than asked for (possibly zero).  A user can
        hold at most one assignment at a time.

        Returns ``(granted_cpu, granted_gpu, message)``.
        """
        if name in self.assignments:
            return 0, 0, 'User already assigned resources'
        free_cpu, free_gpu = self.free()
        # Clamp the request to [0, free]; negative requests grant nothing.
        user_cpu = max(0, min(cpu, free_cpu))
        user_gpu = max(0, min(gpu, free_gpu))
        self.assigned_cpu += user_cpu
        self.assigned_gpu += user_gpu
        self.assignments[name] = (user_cpu, user_gpu)
        return user_cpu, user_gpu, 'Resources assigned'

    def release(self, name):
        """Return user ``name``'s resources to the free pool.

        Refused while the user has a running job.  Releasing a user with no
        assignment succeeds trivially.

        Returns ``(ok, message)``.
        """
        if name in self.assignments:
            if name in self.running:
                return False, 'Jobs running'
            else:
                cpu, gpu = self.assignments[name]
                self.assigned_cpu -= cpu
                self.assigned_gpu -= gpu
                del self.assignments[name]
                return True, 'Resources released'
        else:
            return True, 'No resources assigned'

    # Find out how many free CPUs/GPUs there are.

    def free(self):
        """Return ``(free_cpu, free_gpu)`` still available for assignment."""
        return self.num_cpu - self.assigned_cpu, self.num_gpu - self.assigned_gpu

    # Notify the resource manager that a job is currently running or has
    # ended, so requests to release resources can be blocked if necessary.

    def start(self, name):
        """Mark user ``name`` as running a job; returns ``(ok, message)``.

        Fails if the user has no assignment or already has a job running.
        """
        if name not in self.assignments:
            return False, 'No assigned resources'
        if name in self.running:
            return False, 'Job already running'
        self.running.add(name)
        return True, ''

    def end(self, name):
        """Mark user ``name``'s job as finished; returns ``(ok, message)``."""
        if name not in self.assignments:
            return False, 'No assigned resources'
        if name not in self.running:
            return False, 'No job running'
        self.running.discard(name)
        return True, ''

    # Process network messages (requests for resources, etc.).  This might
    # be better done in a more general network manager class.

    def process(self, msg_type, msg_data):
        """Dispatch a ``(msg_type, msg_data)`` network message.

        Unknown message types fall through and implicitly return ``None``.
        """
        if msg_type == 'name':
            return self.name
        elif msg_type == 'status':
            return self.status
        elif msg_type == 'resources':
            # msg_data: user name; (0, 0) for users with no assignment.
            return self.assignments.get(msg_data, (0, 0))
        elif msg_type == 'assign':
            name, cpu, gpu = msg_data
            return self.assign(name, cpu, gpu)
        elif msg_type == 'release':
            return self.release(msg_data)
        elif msg_type == 'free':
            return self.free()
        elif msg_type == 'assignments':
            return self.assignments

if __name__=='__main__':
    nn = NetworkNode()
    nn.status = 'Nothing much'
    nn.reserve(4, 1)
    
    cpu, gpu = nn.process('free', None)
    print cpu, gpu
    print nn.process('assign', ('bob', cpu, gpu))
    
    print
    print nn.start('bob')
    print nn.process('release', 'bob')
    
    print
    
    print nn.name
    print nn.status
    print nn.num_cpu, nn.num_gpu
    print nn.assigned_cpu, nn.assigned_gpu
    print nn.assignments
    print nn.running
