import Queue
import multiprocessing
from multiprocessing.connection import Listener, Client
import os
import sys
import time
import traceback

## Only needed for win_kill, which is presently not needed.
#if sys.platform == 'win32':
#    import win32api
#    import win32con


## Enabling threads instead of multi-processes via the dummy module on
## single-core systems.
##
## Enabling the following will require some special-case code for the dummy API,
## which is not 100% compatible with the multiprocessing API. In addition, you
## will have to practice much more thread-safety to use it.
#try:
#    if multiprocessing.cpu_count() == 1:
#        print 'multiproc: looks like a single core: using threading instead'
#        import multiprocessing.dummy as multiprocessing
#except NotImplementedError:
#    print 'multiproc: cannot get core count: assuming single, using threading instead'
#    import multiprocessing.dummy as multiprocessing


TCP_ADDRESS = 'localhost'
TCP_PORT = 20001


#def win_kill(pid):
#    hProc = None
#    try:
#        hProc = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pid)
#        win32api.TerminateProcess(hProc, 0)
#    except Exception:
#        return False
#    finally:
#        if hProc != None:
#            hProc.Close()
#    return True


"""
Concept: extending beyond two processes.

                           +--------+
+--------+ >-------------> | Child1 | >---+
| Master | <-------------< +--------+ <--+|
|        |                               ||
|        | >-------------> +--------+ >--+|
|        | <-------------< | Child2 | <---+
|        |                 |        |
|        |                 |        | >---+
|        |                 +--------+ >--+|
|        |                               ||
|        | >-------------> +--------+ >--+|
+--------+ <-------------< | Child3 | <---+
                           +--------+

# Runtime and QueueHandler subclasses per process.
class ModelRuntime(multiproc.Runtime):
    pass  # my runtime logic...
class ModelQueueHandler(multiproc.QueueHandler):
    pass  # my message logic...

class ViewRuntime(multiproc.Runtime):
    pass  # my runtime logic...
class ViewQueueHandler(multiproc.QueueHandler):
    pass  # my message logic...

class IORuntime(multiproc.Runtime):
    pass  # my runtime logic...
class IOQueueHandler(multiproc.QueueHandler):
    pass  # my message logic...

# Create the Runtimes.
model = ModelRuntime()
view = ViewRuntime()
io = IORuntime()

# Add the named queue handlers for model and view.
multiproc.add_queue_handler(ModelQueueHandler(model), 'view')
multiproc.add_queue_handler(ViewQueueHandler(view), 'model')
# Add the named queue handlers for io and view.
multiproc.add_queue_handler(IOQueueHandler(io), 'view')
multiproc.add_queue_handler(ViewQueueHandler(view), 'io')

# Connect the named queue handlers.
multiproc.connect_queues(model, view, 'model', 'view')  # two-way data
multiproc.connect_queues(io, view, 'io', 'view')        # two-way data

# Create the Master and fire it all up!
master = multiproc.Master(view, model, io)              # two-way control
master.run()    # start procs and monitor/control loop
"""


class Logger(object):
    """A shared logger class which uses a lock to ensure separation of output
    from multiple processes.
    """
    
    def __init__(self):
        # One lock, shared with the children when the Master forks them.
        self.lock = multiprocessing.Lock()
    
    def acquire(self, timeout=0):
        """Acquire the lock. Return True if successful, otherwise False.
        
        timeout is the number of seconds to keep trying before giving up;
        0 (the default) makes a single non-blocking attempt.
        
        Useful if you want to do your own printing, e.g. to print a block of
        text without the other process weaving text in between. Make sure to
        call Logger.release when finished.
        """
        # Bug fix: the lock's signature is acquire(block, timeout). The old
        # code passed timeout as the *block* flag, so timeout=0 happened to
        # work (non-blocking) but any nonzero timeout blocked forever.
        if timeout:
            return self.lock.acquire(True, timeout)
        return self.lock.acquire(False)
    
    def release(self):
        """Release the lock when done printing.
        """
        self.lock.release()
    
    def log(self, msg, timeout=0, flush=True):
        """Write a single message to stdout, optionally flushing afterwards.
        
        Return True if successful, else False.
        
        Note: this method does its own lock acquisition and release.
        """
        if self.acquire(timeout):
            print(msg)
            if flush:
                sys.stdout.flush()
            self.release()
            return True
        return False


def add_queue_handler(queue_handler, target_name):
    """Register queue_handler on its own Runtime under target_name.
    
    target_name is the key later used by Runtime.put and the connect_*
    functions to look the handler up.
    """
    queue_handler._runtime._queue_handlers[target_name] = queue_handler

def connect_queues(runtime1, runtime2, name1, name2, max_size=20):
    """Connect two Runtimes using Queues.
    
    Runtimes must be connected with Queues before the Master forks them.
    
    runtime1 and runtime2 are the collaborating Runtime objects.
    
    name1 and name2 are the dict keys that identify the respective
    QueueHandler objects.
    
    max_size is the maximum number of objects that can be placed on the
    queue. The queue will block an attempt to put if the queue contains this
    number of objects, until an object is consumed. Beware: making a queue
    too large or unlimited in size (0) can result in a cumulative backlog of
    messages if the consumer process cannot keep up with the producer.
    """
    handler1 = runtime1._queue_handlers[name1]
    handler2 = runtime2._queue_handlers[name2]
    forward = multiprocessing.Queue(max_size)
    backward = multiprocessing.Queue(max_size)
    # Cross-wire the two queues: one handler's inbound is the other's
    # outbound.
    handler1.set_queue(forward, backward)
    handler2.set_queue(backward, forward)
    for handler in (handler1, handler2):
        handler._get = _getq
        handler._put = _putq

def connect_pipes(runtime1, runtime2, name1, name2):
    """Connect two Runtimes using Pipes.
    
    Runtimes must be connected with Pipes before the Master forks them.
    
    Note: Pipes perform significantly better than Queues -- around 30%
    better. However, Pipes don't protect you from producing messages faster
    than they can be consumed, so care must be taken.
    """
    handler1 = runtime1._queue_handlers[name1]
    handler2 = runtime2._queue_handlers[name2]
    end1, end2 = multiprocessing.Pipe()
    # Each Pipe end is bidirectional, so the same connection object serves
    # as both the inbound and the outbound channel.
    handler1._in_queue = end1
    handler1._out_queue = end1
    handler2._in_queue = end2
    handler2._out_queue = end2
    for handler in (handler1, handler2):
        handler._get = _getp
        handler._put = _putp

def start_server(runtime, name, addr, port):
    """Create a listening socket for a Runtime and accept one connection.
    
    The recommended way to start the listening socket is in the overriding
    Runtime._post_init of the server process.
    """
    listener = Listener((addr, port), 'AF_INET')
    conn = listener.accept() if listener else None
    if conn:
        handler = runtime._queue_handlers[name]
        # The accepted connection is bidirectional: in and out share it.
        handler._in_queue = conn
        handler._out_queue = conn
        handler._get = _getp
        handler._put = _putp
    return listener

def start_client(runtime, name, addr, port, retries=15, delay=1):
    """Create a client socket for a Runtime.
    
    The recommended way to start the client socket is in the overriding
    Runtime._post_init of the client process.
    
    retries is the number of connection attempts to make; delay is the pause
    in seconds between failed attempts.
    
    Return True if the connection was established, else False.
    """
    conn = None
    for attempt in range(retries):
        try:
            # Bug fix: Client raises a socket error when the server is not
            # yet listening -- it never returns a falsy value -- so the old
            # "if conn: break" retry loop aborted on the first failure.
            conn = Client((addr, port), 'AF_INET')
            break
        except (IOError, OSError):
            time.sleep(delay)
    if conn:
        qh = runtime._queue_handlers[name]
        qh._in_queue = conn
        qh._out_queue = conn
        qh._get = _getp
        qh._put = _putp
        return True
    return False

class Runtime(object):
    """A multi-process compatible class which provides bi-directional
    communication via multiprocessing.Queues. Subclass this class to provide
    two classes: a model and a view.
    
    NOTE: Instances of this class must be picklable.
    
    NOTE: Do not call quit(), sys.exit(), or any of the many Python terminating
    functions or methods. The *only* proper way to exit the run loop is to set
    the class's "running" attribute to False. Also, if you kill one of the
    processes with an external command, the results are undetermined (but
    probably not what you want).
    """
    
    def __init__(self):
        # Shared Logger; assigned by the Master (via _set_logger) before the
        # process is forked.
        self._logger = None
        # Maps a peer name to its QueueHandler; populated by
        # add_queue_handler.
        self._queue_handlers = {}
        
        # Messages buffered locally while the shared log lock is held by
        # another process; flushed by the next successful log() call.
        self._log_buffer = []
    
    def _run(self):
        """The run loop. Do not call this explicitly. This is called
        automatically by the process setup code in multiproc.run.
        
        It is important to exit the class gracefully. The *only* proper way to
        do this is to set the class's "running" attribute to False.
        """
        try:
            self._post_init()
            self.running = True
            while self.running:
                self.update()
                # Drain and dispatch pending messages from every peer.
                for qh in self._queue_handlers.values():
                    qh._update()
        # NOTE(review): the bare except catches everything (including
        # SystemExit/KeyboardInterrupt), so the traceback is always dumped
        # and cleanup() below always runs.
        except:
            # Try briefly for the log lock so the traceback is not
            # interleaved with another process's output; print it regardless.
            mylock = self.log_acquire(1)
            print '----', self.__class__.__name__, 'traceback ----'
            traceback.print_exc()
            if mylock:
                self.log_release()
        self.cleanup()
        self.log('{0}: exiting'.format(self.__class__.__name__))
        # In case sockets are used, keep the socket open an extra second as a
        # courtesy to the other process, which would have to handle an IOError
        # if the socket is closed in the midst of consuming messages.
        time.sleep(1)
    
    def _post_init(self):
        """Subclass may override this to add initialization which should occur
        after the process is forked.
        """
        pass
    
    def update(self):
        """Called by this class's run loop, subclass should override this to
        drive the process's logic.
        """
        pass
    
    def cleanup(self):
        """Called by this class's run loop when it is finished running. Subclass
        should override this to perform process closure.
        """
        pass
    
    def put(self, name, obj):
        """Send obj to the peer registered under name.
        """
        self._queue_handlers[name].put(obj)
    
    def log(self, msg, timeout=0, flush=True):
        """Print nicely to stdout. The internal use of a lock avoids mixing
        output from the multiple processes.
        
        If the lock cannot be acquired, the message is buffered and emitted
        (oldest first) by a later successful call.
        
        NOTE(review): timeout is accepted but not forwarded to log_acquire,
        so the acquisition attempt is always non-blocking -- confirm intent.
        """
        self._log_buffer.append(msg)
        if self.log_acquire():
            for msg in self._log_buffer:
                print msg
            del self._log_buffer[:]
            if flush:
                sys.stdout.flush()
            self.log_release()
    
    def log_acquire(self, timeout=0):
        """Acquire the shared log lock. Return True on success, else False.
        """
        if self._logger.acquire(timeout):
            return True
        return False
    
    def log_release(self):
        """Release the shared log lock.
        """
        self._logger.release()


def _getq(self):
    """Poll for a message on the interprocess queue.
    """
    if not self._in_queue:
        return None
    try:
        obj = self._in_queue.get(False)
        if obj:
            self.msgs_in += 1
        return obj
    except Queue.Empty:
        pass
def _putq(self, obj):
        self._out_queue.put(obj)
def _getp(self):
    """Poll for a message on the interprocess pipe or socket.
    """
    try:
        if self._in_queue.poll():
            obj = self._in_queue.recv()
            if obj:
                self.msgs_in += 1
            return obj
    except IOError:
        self._runtime.running = False
def _putp(self, obj):
    try:
        self._out_queue.send(obj)
    except IOError:
        self._runtime.running = False

class QueueHandler(object):
    """Parses incoming messages and dispatches each one to a matching
    msg_* method, falling back to msg_default.
    """
    
    # Message container types treated as (command, data) pairs.
    _sequences = (tuple, list)
    
    def __init__(self, runtime):
        self._runtime = runtime
        self._in_queue = None
        self._out_queue = None
        self.msgs_in = 0
        # Transport functions; installed by connect_queues / connect_pipes /
        # start_server / start_client.
        self._get = None
        self._put = None
    
    @property
    def runtime(self):
        """The Runtime this handler belongs to."""
        return self._runtime
    
    def set_queue(self, in_queue, out_queue):
        """Attach the inbound and outbound transport endpoints."""
        self._in_queue = in_queue
        self._out_queue = out_queue
    
    def _update(self):
        """Consume and interpret messages until the transport is drained."""
        msg = self.get()
        while msg:
            self.interpret(msg)
            msg = self.get()
    
    def _clear(self):
        """Discard all pending inbound messages without interpreting them."""
        if not self._in_queue:
            return
        while self.get():
            pass
    
    def get(self):
        """Poll for a message on the interprocess queue.
        """
        return self._get(self)
    
    def put(self, obj):
        """Send a message to the peer process."""
        self._put(self, obj)
    
    def interpret(self, obj):
        """Interpret an object.
        
        Objects come in two forms: a bare string representing a command or
        event, or a tuple/list containing a command_string,data pair.
        
        The command_string is prepended with 'msg_'. If the handler instance
        has the attribute it is called, with or without data as the case may
        be.
        
        If command_string is not an attribute then the original object is
        passed to msg_default for default handling.
        """
        if isinstance(obj, self._sequences):
            if len(obj) == 2:
                method = getattr(self, 'msg_' + obj[0], None)
                if method is not None:
                    method(obj[1])
                else:
                    self.msg_default(obj)
            else:
                self.msg_default(obj)
        else:
            method = getattr(self, 'msg_' + obj, None)
            if method is not None:
                method()
            else:
                self.msg_default(obj)
    
    def msg_default(self, obj):
        """Handle messages that do not have a defined handler.
        """
        pass
    
    def msg_EXIT(self):
        """Acknowledge EXIT to the master and stop the runtime's run loop.
        """
        self.put(('MASTER', 'EXIT'))
        self._runtime.log('{0}: EXIT received'.format(self._runtime.__class__.__name__))
        self._runtime.running = False


def _set_logger(runtime, logger):
    runtime._logger = logger

class _MasterQueueHandler(QueueHandler):
    """Master-side QueueHandler: an EXIT message simply stops the Master's
    control loop instead of echoing an acknowledgement back to a peer.
    """
    
    def msg_EXIT(self):
        """Stop the Master's run loop."""
        self._runtime.running = False

def _make_proc(master, runtime, logger):
    """Wire a Runtime to the Master and wrap it in a daemon Process.
    
    The runtime object itself serves as the key naming the Master-side
    handler, while the child side always addresses its peer as 'MASTER'.
    
    Returns the (unstarted) multiprocessing.Process.
    """
    add_queue_handler(_MasterQueueHandler(master), runtime)
    add_queue_handler(QueueHandler(runtime), 'MASTER')
    connect_pipes(master, runtime, runtime, 'MASTER')
    # Fix: use the logger argument instead of reaching back into master;
    # the parameter was previously accepted but ignored.
    _set_logger(runtime, logger)
    p = multiprocessing.Process(target=runtime._run)
    p.daemon = True
    return p

class Master(object):
    
    def __init__(self, *runtimes, **kwargs):
        """Create a master process which subprocesses an arbitrary number of
        Runtime objects.
        
        runtimes is a varargs list of Runtime objects.
        
        kwargs:
            sleep=n     Where n is a float representing seconds that the master
                        loop should sleep between polling children for status.
        """
        self._sleep = kwargs.get('sleep', 0.0)
        self._procs = {}
        self._runtimes = {}
        self._queue_handlers = {}
        self._logger = Logger()
        
        for runtime in runtimes:
            p = _make_proc(self, runtime, self._logger)
            self._procs[runtime] = p
            self._runtimes[p] = runtime
    
    def run(self):
        self.running = True
        for p in self._runtimes:
            p.start()
            self._logger.log('Master: started ' + str(p.pid))
        while self.running:
            if self._sleep:
                time.sleep(self._sleep)
            self.update()
        self.cleanup()
        self._logger.log('Master: exiting')
    
    def update(self):
        if not self.all_alive():
            self.running = False
    
    def cleanup(self):
        """Override this if you don't want to kill processes.
        """
        # Note: Queues block when full. If the child process is blocked on
        # putting to a full Queue, we need to clear the Queue so that it can
        # continue processing and eventually get the EXIT message.
        alive = self.runtimes_alive()
        while alive:
            for r in self._procs:
                for qh in r._queue_handlers.values():
                    qh._clear()
                self._queue_handlers[r].put('EXIT')
            time.sleep(0.01)
            alive = self.runtimes_alive()
#        time.sleep(5)
#        for p in self._runtimes:
#            self.kill_proc(p)
    
    def runtimes_alive(self):
        return [r for r,p in self._procs.items() if p.is_alive()]
    
    def all_alive(self):
        for p in self._runtimes:
            if not p.is_alive():
                return False
        return True
    
#    def kill_proc(self, proc, sig=1):
#        """Commonly: HUP=1, INT=2, QUIT=3, TERM=15. Avoid KILL=9.
#        
#        sig does not apply to Windows.
#        """
#        if proc.is_alive():
#            self._logger.log('Master: killing pid ' + str(proc.pid))
#            if sys.platform == 'win32':
#                win_kill(proc.pid)
#            else:
#                os.kill(proc.pid, sig)
