#!/usr/bin/env python
"""
Simplified data pipeline code for AMQP.

Main feature is that it allows a single process to run many workers,
by using a simple event loop with a sleep statement.
"""
__rcsid__ = "$Id$"

# system
import logging
import os
import random
import signal
import socket
import time
# third-party
from amqplib import client_0_8 as amqp

# Module-level logger; a StreamHandler is attached here so the library
# logs even when the embedding application configures nothing.
g_log = logging.getLogger("dpipelib")
_handler = logging.StreamHandler()
_handler.setLevel(logging.NOTSET)  # handler passes everything; logger level filters
_handler.setFormatter(logging.Formatter("%(levelname)-6s %(name)s.%(funcName)-20s | %(message)s"))
g_log.addHandler(_handler)

# Common queue/exchange options: survive broker restart, never auto-delete.
DURABLE_OPTS = { 'durable':True, 'auto_delete':False }

def merge_dict(d1, d2):
    """Return a new dict with d1's entries, overridden by d2's.

    Neither input is modified.
    """
    merged = {}
    merged.update(d1)
    merged.update(d2)
    return merged

class Exchanges(object):
    """Exchange names and their declaration options.

    Both exchanges are direct-routed and durable.
    """
    # Control-plane exchange: check-in messages from workers/consumer.
    CONTROL = 'master'
    CONTROL_OPTS = merge_dict(DURABLE_OPTS, {'type' : 'direct'})
    # Data-plane exchange: per-stage work queues and the consumer queue.
    WORK = 'work'
    WORK_OPTS = merge_dict(DURABLE_OPTS, {'type' : 'direct'})
    
class Queues(object):
    """Queue names and options.

    NOTE(review): every *_OPTS attribute is the same DURABLE_OPTS dict
    object (aliased, not copied) — mutating one mutates all.
    """
    # Queue where workers and the consumer check in with the master.
    WAIT = "waiting"
    WAIT_OPTS = DURABLE_OPTS
    # Per-stage work queue name template; use WORK.format(stage=<int>).
    WORK = "S{stage}"
    WORK_OPTS = DURABLE_OPTS
    # Queue that the final pipeline stage publishes results to.
    CONSUMER = "consumer"
    CONSUMER_OPTS = DURABLE_OPTS

# Identifier of special stop msg; Consumer.shutdown publishes it and
# Workers exit when they receive it.
MSG_STOP = "X"

# Message fill character, should not occur in log part (msg_data splits
# the body on the first occurrence of this character)
FILL_CHR = '@'

# Get 'run' number from environment (a string; defaults to '1')
g_run = os.environ.get('DPIPE_RUN', '1')

def amqp_connect(**kw):
    """Open an AMQP connection and return the pair (conn, channel).

    All keyword arguments are forwarded to amqp.Connection().
    """
    connection = amqp.Connection(**kw)
    return (connection, connection.channel())

def work_log(src, proc, msg, stage, startend, ts):
    """Format and return one CSV row recording the start or end of work.

    Columns: run, source, process, message id, stage, B/E marker,
    timestamp (float seconds). The global g_run supplies the run column.
    """
    row = (g_run, src, proc, msg, stage, startend, ts)
    return "%s,%s,%s,%s,%d,%s,%f\n" % row

def msg_fill(buf, sz):
    """Pad buf on the right with FILL_CHR up to sz bytes.

    If buf is already sz bytes or longer it is returned unchanged.
    """
    return buf.ljust(sz, FILL_CHR)

def msg_data(buf):
    """Return the part of buf that precedes the fill characters.

    BUG FIX: when no fill character is present (possible, since
    msg_fill pads with zero bytes if the payload exactly fills the
    buffer), str.find returns -1 and the old buf[:-1] silently dropped
    the last character. Now the whole buffer is returned in that case.
    """
    pos = buf.find(FILL_CHR)
    return buf if pos < 0 else buf[:pos]

class NoMessages(Exception):
    """Signals that a receive wait timed out without any message arriving."""

class Component:
    """Base class for pipeline components (master, workers, consumer).

    Subclasses override run() and shutdown(); the defaults do nothing.
    """

    def __init__(self):
        """No shared state; present so subclasses can chain up."""
        pass

    def run(self):
        """Execute the component until it is finished. Default: no-op."""
        pass

    def shutdown(self):
        """Clean up when finished. Default: no-op."""
        pass
    
class Master(Component):
    """The master waits for everyone to be ready, queues up all the work,
    and exits.
    """
    def __init__(self, conn_kw, num_consumers=1, num_workers=1, num_tasks=1,
                 num_procs=1, message_size=1024):
        """Create master.

        Args:
          conn_kw - keyword args passed through to amqp.Connection()
          num_consumers - number of Consumer components expected to check in
          num_workers - number of Workers components expected to check in
          num_tasks - tasks per worker process
          num_procs - pseudo-processes per Workers component
          message_size - padded size, in bytes, of each work message body
        """
        Component.__init__(self)
        self._sz = message_size
        self._channel = amqp_connect(**conn_kw)[1]
        self._nc, self._nw, self._nt, self._np = num_consumers, num_workers, num_tasks, num_procs
        self._count = 0  # check-ins received so far (bumped by _arr)

    def run(self):
        """Run master until done.

        Declares exchanges/queues, waits for all check-ins on the WAIT
        queue, publishes every task to the stage-1 queue, then shuts down.
        """
        # init exchanges and queues
        self._channel.exchange_declare(Exchanges.CONTROL, **Exchanges.CONTROL_OPTS)
        self._channel.queue_declare(Queues.WAIT, **Queues.WAIT_OPTS)
        self._channel.queue_bind(queue=Queues.WAIT, exchange=Exchanges.CONTROL,
                                 routing_key=Queues.WAIT)
        self._channel.exchange_declare(Exchanges.WORK, **Exchanges.WORK_OPTS)
        stage1 = Queues.WORK.format(stage=1)
        self._channel.queue_declare(stage1, **Queues.WORK_OPTS)
        self._channel.queue_bind(queue=stage1, exchange=Exchanges.WORK,
                                 routing_key=stage1)
        # wait for workers and consumer to check in
        tag = self._channel.basic_consume(queue=Queues.WAIT, callback=self._arr,
                                          no_ack=False)
        waitfor = self._nc + self._nw * self._np
        g_log.info("waiting for {0:d} checkins".format(waitfor))
        while self._count < waitfor:
            self._channel.wait()
            # BUG FIX: 'remain' previously reused positional index {0},
            # so it logged the count instead of the remaining number.
            g_log.debug("got checkin. count={0:d} remain={1:d}".
                        format(self._count, waitfor - self._count))
        self._channel.basic_cancel(tag)
        # put all work on first queue
        total_tasks = self._nw * self._np * self._nt
        for i in xrange(total_tasks):
            ts = time.time()
            hdr = { 'stage' : 1 }
            m_id = str(i)
            g_log.debug("add msg {0} to stage1".format(m_id))
            # body carries the CSV work log, padded to a fixed size
            body = msg_fill(work_log('M', 0, m_id, 0, 'B', ts) +
                            work_log('M', 0, m_id, 0, 'E', ts),
                            self._sz)
            msg = amqp.Message(body=body, application_headers=hdr,
                               message_id=m_id, delivery_mode=2)  # 2 = persistent
            self._channel.basic_publish(msg, exchange=Exchanges.WORK,
                                        routing_key=stage1)
        self.shutdown()

    def shutdown(self):
        """Log shutdown; channel/connection cleanup is left to the caller."""
        g_log.info("producer shutdown")

    def _arr(self, msg):
        """Message arrived callback: count one check-in and ack it."""
        g_log.debug("one check-in")
        self._count += 1
        self._channel.basic_ack(msg.delivery_tag)

class Workers(Component):
    """Each instance of this class has some number of 'worker' processes.
    Each of these have their own channel (connection) and wait for
    messages from any one of the pipeline stage queues in the work exchange.
    When any one of the processes receives a special 'stop' message,
    they all exit.

    This is pseudo-concurrency. Each 'process' is just an entry in a list,
    so when they are all 'ready' for data they will sequentially
    pull off messages as fast as they can.
    """
    MAX_UNCOMMITTED = 10 # max num of uncommitted messages, per process

    def __init__(self, conn_kw, num_procs=10, num_stages=1,
                 sleep_time=1.0, sleep_sigma=0, tx=False, message_size=1024):
        """Create num_procs workers.

        Args:
          conn_kw - keyword args passed through to amqp.Connection()
          num_procs - number of pseudo-concurrent worker processes
          num_stages - number of pipeline stages (one work queue each)
          sleep_time - mean simulated work time, seconds
          sleep_sigma - std. deviation of simulated work time
          tx - if True, use AMQP transactions (tx_select/tx_commit)
          message_size - padded size, in bytes, of each message body
        """
        Component.__init__(self)

        # node identifier; defaults to <fqdn>_<pid>
        self._node = os.environ.get('DPIPE_NODE',
                                    "{0}_{1:d}".format(
                                        socket.getfqdn(), os.getpid()))
        self._np, self._ns = num_procs, num_stages
        self._sz = message_size
        # master (control) channel plus one work channel per process
        self._mchannel = amqp_connect(**conn_kw)[1]
        self._wchannels = [amqp_connect(**conn_kw)[1]
                           for _ in xrange(self._np)]
        self._wmsg = [[] for _ in xrange(self._np)]   # (ts, msg) in progress, per proc
        self._wtags = [[] for _ in xrange(self._np)]  # consumer tags, per proc
        self._sleep_sec = [random.gauss(sleep_time, sleep_sigma) for _ in xrange(self._np)]
        self._pause_sec = 0.1 # simulated concurrency, pause << work_time
        self._tx = tx
        if self._tx:
            self._uncommitted = [0] * self._np # uncom. messages, per channel
        self._sleep_start = [0] * self._np # ts when sleeping started, 0=not
        self._msg_seen = { } # track message id + stage, to reject dups
        self._done = False
        self._cur = None  # index of proc currently waiting in _recv_one
        # add timeout to avoid hangs when queue is empty
        signal.signal(signal.SIGALRM, self._interrupt)

    def run(self):
        """Run workers until done.

        Declares exchanges/queues, checks in once per process with the
        master, then loops sending finished work and receiving new work
        until a STOP message flips self._done.
        """
        # init exchanges and queues
        self._mchannel.exchange_declare(Exchanges.CONTROL,
                                        **Exchanges.CONTROL_OPTS)
        self._mchannel.queue_declare(Queues.WAIT, **Queues.WAIT_OPTS)
        self._mchannel.queue_bind(queue=Queues.WAIT,
                                  exchange=Exchanges.CONTROL,
                                  routing_key=Queues.WAIT)
        for p in xrange(self._np):
            channel = self._wchannels[p]
            if self._tx:
                channel.tx_select()
            channel.exchange_declare(Exchanges.WORK, **Exchanges.WORK_OPTS)
            # each process consumes from every stage queue
            for s in range(1, self._ns+1):
                stage = Queues.WORK.format(stage=s)
                channel.queue_declare(stage, **Queues.WORK_OPTS)
                channel.queue_bind(queue=stage, exchange=Exchanges.WORK,
                                   routing_key=stage)
                self._wtags[p].append(channel.basic_consume(
                    queue=stage, callback=self._arrived, no_ack=False))
            channel.queue_declare(Queues.CONSUMER, **Queues.CONSUMER_OPTS)
            channel.queue_bind(queue=Queues.CONSUMER,
                               exchange=Exchanges.WORK,
                               routing_key=Queues.CONSUMER)
        # check in with master, once per process
        g_log.info("begin: check in with master. n={0}".format(self._np))
        for p in xrange(self._np):
            self._mchannel.basic_publish(amqp.Message(body=""),
                                         exchange=Exchanges.CONTROL,
                                         routing_key=Queues.WAIT)
        g_log.info("end: check in with master. n={0}".format(self._np))
        # main loop: send/recv messages until stopped
        g_log.info("begin: main loop")
        while not self._done:
            g_log.debug("begin: main loop iter")
            # send messages
            self._send_messages()
            # get messages
            had_msg = self._recv_messages()
            g_log.debug("loop. had_msg={0}".format(had_msg))
            if had_msg:
                time.sleep(self._pause_sec)
            else:
                # idle: push out anything sitting in an open transaction
                self._flush_pending()
            g_log.debug("end: main loop iter")
        g_log.info("end: main loop")
        self.shutdown()
        g_log.info("shutdown complete")

    def _recv_messages(self):
        """Receive one message for each idle (non-sleeping) process.

        Returns False when a receive was interrupted or a STOP arrived.
        """
        had_msg = True
        for p in xrange(self._np):
            if self._sleep_start[p] == 0:  # 0 means proc is not 'working'
                had_msg = self._recv_one(p)
                if had_msg:
                    self._sleep_start[p] = time.time()
                else:
                    g_log.info("got stop signal, or interrupt")
                    break
        return had_msg

    def _send_messages(self, flush=False):
        """Finish (ack + republish) messages whose work timer expired.

        Args:
          flush - if True, finish every in-progress message regardless
                  of its timer (used at shutdown).
        """
        busy_ct, finished_ct, idle_ct = 0, 0, 0 # dbg counters
        for p in xrange(self._np):
            if self._wmsg[p]:
                # working on a message, check timer (or flush)
                if flush or \
                       (time.time() - self._sleep_start[p] >=
                        self._sleep_sec[p]):
                    # done with work
                    self._finished(p, self._wchannels[p], *self._wmsg[p])
                    self._sleep_start[p], self._wmsg[p] = 0, None
                    finished_ct += 1
                else:
                    busy_ct += 1
            else:
                idle_ct += 1
        if g_log.isEnabledFor(logging.DEBUG):
            g_log.debug("end send_message loop. " +
                        "tot={tot} busy={busy} fin={fin} idle={idle}".
                        format(tot=self._np, busy=busy_ct, fin=finished_ct,
                               idle=idle_ct))

    def shutdown(self):
        """Flush in-progress work, commit, and cancel all consumers."""
        g_log.info("worker shutdown")
        self._send_messages(True)
        for p in xrange(self._np):
            channel = self._wchannels[p]
            # commit once per channel (was once per tag: redundant)
            self._commit(p, force=True)
            for tag in self._wtags[p]:
                channel.basic_cancel(tag)
        self._done = True

    def _interrupt(self, signo, frame):
        """SIGALRM handler: abort a blocked channel.wait()."""
        raise NoMessages("no messages")

    def _recv_one(self, p):
        """Receive one message from broker, with a timeout.
        Return False if _done flag is set or timed out.
        """
        if self._done:
            return False
        self._cur = p  # tell _arrived which proc this message is for
        # wait the same amount as the loop time
        interrupted = False
        try:
            signal.alarm(1)
            self._wchannels[p].wait()
            signal.alarm(0)
        except NoMessages as err:
            interrupted = True
            g_log.warn("interrupted. reason='{msg}'".format(msg=err))
        return (not interrupted) and (not self._done)

    def _arrived(self, msg):
        """Message arrived callback.
        Set sleep start-time and save info for when it expires.
        STOP messages and duplicates are acked and otherwise ignored.
        """
        if msg.properties['message_id'] == MSG_STOP:
            g_log.info("worker {0:d} got STOP message".format(self._cur))
            self._wchannels[self._cur].basic_ack(msg.delivery_tag)
            self._done = True
        else:
            ts = time.time()
            key = self._mkey(msg)
            if g_log.isEnabledFor(logging.DEBUG):
                g_log.debug("msg rcved. tag={tag} key={key}".format(
                    tag=str(msg.delivery_tag), key=key))
            # 'key in dict' instead of deprecated dict.has_key()
            if key in self._msg_seen:
                g_log.warn("{cur:d} got already seen msg {key}".format(
                    cur=self._cur, key=key))
                # ack it
                self._wchannels[self._cur].basic_ack(msg.delivery_tag)
                # by not setting self._wmsg, we do nothing with this message
            else:
                g_log.debug("worker {cur:d} process msg {key}".format(
                    cur=self._cur, key=key))
                self._wmsg[self._cur] = (ts, msg) # save ts and message

    def _finished(self, p, channel, ts1, msg):
        """Done with message: ack it, append our log rows, and publish it
        to the next stage queue (or the consumer queue after last stage).

        Args:
          p - process index
          channel - that process's channel
          ts1 - timestamp when the message was received
          msg - the amqp message being finished
        """
        channel.basic_ack(msg.delivery_tag)
        if g_log.isEnabledFor(logging.DEBUG):
            key = self._mkey(msg)
            g_log.debug("msg acked. tag={tag} key={key}".format(
                tag=str(msg.delivery_tag), key=key))
        m_id = msg.properties['message_id']
        stage = msg.properties['application_headers']['stage']
        # mark this message as seen
        self._msg_seen[self._mkey(msg)] = True
        ts2 = time.time()
        body = msg_fill(msg_data(msg.body) +
                        work_log(self._node, p, m_id, stage, 'B', ts1) +
                        work_log(self._node, p, m_id, stage, 'E', ts2),
                        self._sz)
        if stage == self._ns:
            # publish to consumer queue
            msg = amqp.Message(body=body, message_id=m_id, delivery_mode=2)
            channel.basic_publish(msg, exchange=Exchanges.WORK,
                                  routing_key=Queues.CONSUMER)
        else:
            # publish to next worker queue
            stage += 1
            hdr = { 'stage' : stage }
            msg = amqp.Message(body=body, application_headers=hdr,
                               message_id=m_id, delivery_mode=2)
            stage_key = Queues.WORK.format(stage=stage)
            channel.basic_publish(msg, exchange=Exchanges.WORK, routing_key=stage_key)
        self._commit(p)

    def _commit(self, p, force=False):
        """Commit outstanding transactions, with some batching.

        Has no effect if self._tx is False.

        BUG FIX: the default was force=True, which committed on every
        call and made MAX_UNCOMMITTED dead code; batching requires the
        default to be False (all force-callers pass force=True anyway).

        Args:
          p - process
          force - ignore batching, commit NOW
        """
        if self._tx:
            self._uncommitted[p] += 1
            if force or (self._uncommitted[p] > self.MAX_UNCOMMITTED):
                self._wchannels[p].tx_commit()
                self._uncommitted[p] = 0

    def _flush_pending(self):
        """Flush all pending (finished, but not committed) messages.
        """
        if g_log.isEnabledFor(logging.DEBUG):
            g_log.debug("begin flush pending")
        for p in xrange(self._np):
            self._commit(p, force=True)
        if g_log.isEnabledFor(logging.DEBUG):
            g_log.debug("end flush pending")

    def _mkey(self, msg):
        """Unique key for a given message at a given stage."""
        return '#'.join((msg.properties['message_id'],
                         str(msg.properties['application_headers']['stage'])))

class Consumer(Component):
    """The consumer will wait for num_workers * num_tasks messages, writing
    the body (stats) of each to a file as it arrives, then shut down the workers
    by sending each one a special 'stop' message.
    """
    def __init__(self, conn_kw, ofile=None, num_workers=1, num_tasks=1, num_stages=1,
                 num_procs=1):
        """Create consumer.

        Args:
          conn_kw - keyword args passed through to amqp.Connection()
          ofile - output for CSV stats: a filename (str) or an object
                  with a write() method
          num_workers - number of Workers components
          num_tasks - tasks per worker process
          num_stages - number of pipeline stages
          num_procs - pseudo-processes per Workers component

        Raises:
          ValueError - if ofile is neither a str nor writable
        """
        Component.__init__(self)
        self._nw, self._nt, self._np = num_workers, num_tasks, num_procs
        self._last_stage = num_stages + 1  # stage number logged for consumer rows
        self._channel = amqp_connect(**conn_kw)[1]
        self._tag = None  # consumer tag, set in run()
        if hasattr(ofile, 'write'):
            self._ofile = ofile
        elif isinstance(ofile, str):
            self._ofile = open(ofile, 'w')
        else:
            raise ValueError("Bad parameter 'ofile', must be a string " +
                             "or have a write() method")
        # header row
        self._ofile.write("run,wid,pid,mid,stage,se,ts\n")

    def run(self):
        """Run consumer until done.

        Declares exchanges/queues, checks in with the master, then
        consumes exactly num_workers * num_tasks * num_procs messages
        before shutting the workers down.
        """
        # init exchanges and queues
        self._channel.exchange_declare(Exchanges.CONTROL, **Exchanges.CONTROL_OPTS)
        self._channel.queue_declare(Queues.WAIT, **Queues.WAIT_OPTS)
        self._channel.queue_bind(queue=Queues.WAIT, exchange=Exchanges.CONTROL,
                                  routing_key=Queues.WAIT)
        self._channel.exchange_declare(Exchanges.WORK, **Exchanges.WORK_OPTS)
        # consistency fix: use CONSUMER_OPTS for the consumer queue
        # (same dict object as WORK_OPTS today, so behavior is unchanged)
        self._channel.queue_declare(Queues.CONSUMER, **Queues.CONSUMER_OPTS)
        self._channel.queue_bind(queue=Queues.CONSUMER, exchange=Exchanges.WORK,
                                 routing_key=Queues.CONSUMER)
        # check in with master
        self._channel.basic_publish(amqp.Message(body=""), exchange=Exchanges.CONTROL,
                                    routing_key=Queues.WAIT)
        # main loop
        self._tag = self._channel.basic_consume(queue=Queues.CONSUMER, callback=self._arrived,
                                                no_ack=False)
        total_tasks = self._nw * self._nt * self._np
        i = 0
        while i < total_tasks:
            self._channel.wait()
            g_log.debug("got {0} messages".format(i + 1))
            i += 1
        # BUG FIX: after the loop i == total received; the old 'i+1'
        # overreported the message count by one.
        g_log.info("done. got {0} messages".format(i))
        self.shutdown()

    def shutdown(self):
        """Stop consuming and tell every Workers component to stop."""
        g_log.info("consumer shutdown")
        # stop listening
        self._channel.basic_cancel(self._tag)
        # stop all workers by sending a 'stop' message to stage1
        msg = amqp.Message(body="", message_id=MSG_STOP, delivery_mode=2)
        stage1 = Queues.WORK.format(stage=1)
        for i in xrange(self._nw):
            self._channel.basic_publish(msg, exchange=Exchanges.WORK,
                                        routing_key=stage1)

    def _arrived(self, msg):
        """Message arrived callback: write the stats body plus our own
        begin/end log rows, ack, and flush."""
        ts1 = time.time()
        self._ofile.write(msg_data(msg.body))
        self._channel.basic_ack(msg.delivery_tag)
        ts2 = time.time()
        m_id = msg.properties['message_id']
        self._ofile.write(work_log('C', 0, m_id, self._last_stage, 'B', ts1) +
                          work_log('C', 0, m_id, self._last_stage, 'E', ts2))
        self._ofile.flush()
