# pymq - The python based Message Queue
#
# Copyright(C) 2010, Dhruv Matani(dhuvbird@gmail.com)
#
# pymq is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# pymq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with pymq. If not, see
# <http://www.gnu.org/licenses/>.
#


from queue.http_evented import http_evented, call_if_not_none_and_callable
from identity_deque import identity_deque
from medusa import asyncore
import functools
import re
import urllib
import logging
from queue.master_handler import patNodeInfo


# Connection life-cycle states for the underlying http_evented channel.
# hasPendingResponses() treats STATE_CONNECTING as "work still pending";
# the other two states are not referenced in the code visible here.
STATE_CONNECTING   = 1
STATE_CONNECTED    = 2
STATE_DISCONNECTED = 3


def _give_up():
    """
    Default connection-failure handler: just announces that we are
    giving up. MessageQueue installs this as the onClose/onException
    callback for both handler types.
    """
    print "Giving up"


class OperationNotSupportedError(StandardError):
    """
    Raised when the caller asks a handler to perform an operation that
    the handler does not support (e.g. addDB through the clustered
    client).
    """
    pass


class Response(object):
    """
    A minimal HTTP response wrapper carrying the status code, the
    header dictionary and the raw body text.
    """
    def __init__(self, status, headers, body):
        self.status = status
        self.headers = headers
        self.body = body

    def getheader(self, header):
        """
        Return the value of 'header', or None if the header is absent.
        """
        # dict.get does the membership test and lookup in one step
        return self.headers.get(header)

    def getbody(self):
        """Return the raw response body."""
        return self.body

class Request(object):
    """
    This is the Request object which is synthesized when a request needs to
    be made to the pymq server
    """
    # HTTP method used on the wire for each request event. Looked up as
    # Request.method[r.event] when the request is actually sent.
    method = { "consumeMessage": "GET",
               "produceMessage": "POST",
               "loadQueue": "POST",
               "dumpQueue": "POST",
               "addQueue": "POST",
               "dropQueue": "POST",
               "addDB": "POST",
               "setState": "POST",
               # Stand-alone ACKs that could not be piggy-backed onto a
               # produce/consume request are sent on their own; they carry
               # everything in headers, like consumeMessage, so GET is
               # used. Without this entry sending one raised a KeyError.
               # TODO(review): confirm the server accepts GET on the
               # /queue/<name>/none/ resource.
               "ackMessage": "GET",
    }
    def __init__(self, event, url, requestID,
                 queueName = None, dbName = None,
                 postData = None, ackMessage = None,
                 autoACK = False,
                 callback = None, errback = None):
        """
        If queueName is specified, that will be assumed to be the
        target of this request, else dbName will be the target
        """
        self.event      = event
        self.url        = url
        self.queueName  = queueName
        self.dbName     = dbName
        self.postData   = postData
        self.requestID  = requestID
        self.ackMessage = ackMessage
        self.autoACK    = autoACK
        self.callback   = callback
        self.errback    = errback
        # 'target' is what event names are built from:
        # "success/<event>/<target>"
        if queueName is not None:
            self.target = queueName
        else:
            self.target = dbName

class single_node_handler_metadata(object):
    """
    Pairs a node name with its SingleNodeHandler. ClusterHandler keys
    its maps on these objects so a failed node's handler can be swapped
    in place.
    """
    def __init__(self, name, handler):
        self._update(name, handler)

    def _update(self, name, handler):
        """Set both attributes in one go."""
        self.name = name
        self.handler = handler

    def copy(self, rhs):
        """
        Overwrite every attribute of this instance with the attributes
        of 'rhs' (which must be of the same type).
        """
        assert(type(self) == type(rhs))
        if self != rhs:
            # Bulk-copy all of rhs's attributes onto self
            self.__dict__.update(rhs.__dict__)


class SingleNodeHandler(http_evented):
    """
    This class just handles requests to and responses from a
    single pymq instance
    """
    def __init__(self, host_and_port, owner,
                 onNodeConnected,
                 onConnectionClose,
                 onConnectionException):
        self.owner             = owner
        self.requestsToSend    = []
        self.sentRequests      = { }
        self._onNodeConnected       = onNodeConnected
        self._onConnectionClose     = onConnectionClose
        self._onConnectionException = onConnectionException
        self.numResponses = 0
        self.numRequests  = 0

        """
        The set of queue names handled by that node that this
        instance of the client is consuming messages from.

        The value indicates if they are in autoACK mode.
        """
        self.consumingQueues = { }

        http_evented.__init__(self, host_and_port,
                              onConnected = self.on_connected,
                              onClose     = self.on_close,
                              onException = self.on_exception)

        # !! This is a 100% PURE HACK !!
        # Always make sure that you override this AFTER constructing
        # the original object
        self._eventHandlers = identity_deque(self.on_response_received)

    """
    During reconnection, you may change the host/port of the
    end point if the system is running in clustered mode
    """

    def on_response_received(self, response):
        """
        Called when a response from the server is received
        """
        self.numResponses += 1
        logging.debug("on_response_received(%s)" % str(response))
        logging.debug("numResponses: %d" % self.numResponses)

        if response == None:
            # Don't do anything. The failure handler will be called
            # for these failed requests. This is because None is
            # returned only when there is a disconnection or other
            # irrecoverable failure
            return

        self.invokeEventHandler(response)
        # Also send any requests that the client may have generated
        self.sendRequestsToNode()
        self.owner.checkIdle()

    def on_connected(self):
        call_if_not_none_and_callable(self._onNodeConnected)
        self.owner.onEvent("connected")
        self.sendRequestsToNode()
        self.owner.checkIdle()


    def on_close(self):
        # Send a failure response for all pending and sent requests
        self._fail_all_queued_requests()
        call_if_not_none_and_callable(self._onConnectionClose)


    def on_exception(self):
        self._fail_all_queued_requests()
        call_if_not_none_and_callable(self._onConnectionException)


    def _fail_all_queued_requests(self):
        requestsToFail = self.requestsToSend
        requestsToFail.extend(self.sentRequests.values())
        for rtf in requestsToFail:
            response = Response(503, { "X-Sequence": rtf.requestID }, "")
            self.invokeEventHandler(response)
            # rtf.errback(response = response)
        self.requestsToSend =  []
        self.sentRequests.clear()


    def sendRequests(self):
        self.sendRequestsToNode()


    def sendRequestsToNode(self):
        """
        This method reads off all the requests in 'requestsToSend' and
        appropriately batches and sends them off to the node for further
        processing
        """
        # queueRequests = { "queueName": { "message": [ ], "ack": [ ] } }
        queueRequests = { }
        otherRequests = [ ]

        try:
            for r in self.requestsToSend:
                if r.event == "handleQueue":
                    response = Response(200, { "X-Sequence": str(r.requestID) }, "")
                    self.sentRequests[r.requestID] = r
                    self.invokeEventHandler(response)
                elif r.event == "ackMessage":
                    thisQueue = queueRequests.setdefault(r.queueName, {"message": [], "ack": []})
                    thisQueue["ack"].append(r)
                elif r.event in ["produceMessage", "consumeMessage"]:
                    thisQueue = queueRequests.setdefault(r.queueName, {"message": [], "ack": []})
                    thisQueue["message"].append(r)
                else:
                    otherRequests.append(r)

            for queueName in queueRequests:
                qReq = queueRequests[queueName]
                i = 0
                mReq = qReq["message"]
                for ackReq in qReq["ack"]:
                    if i < len(mReq):
                        mReq[i].ackMessage = ackReq.ackMessage
                        i += 1
                    else:
                        break
                qReq["ack"] = qReq["ack"][i:]
                otherRequests.extend(mReq)
                otherRequests.extend(qReq["ack"])

            requestsSent = 0
            for r in otherRequests:
                if r.event == "handleQueue":
                    continue
                headers = { }
                if r.ackMessage is not None:
                    headers["X-ACKMessage"] = r.ackMessage
                if r.autoACK == True:
                    headers['X-AutoACK'] = str(1)
                headers["X-Sequence"] = str(r.requestID)
                self.push_HTTP_request(Request.method[r.event], r.url, r.postData, headers)
                self.sentRequests[r.requestID] = r
                logging.debug("sendRequestsToNode: sending [event: %s] [url: %s] [headers: %s]" % \
                    (r.event, r.url, str(headers)))
                requestsSent += 1
                self.numRequests += 1

            if len(self.sentRequests) > 0:
                # We check len(self.sentRequests) because even if nothing was sent,
                # we still want to receive responses to requests we have made
                # earlier
                self.pop_response()

        except Exception, e:
            print "Exception (%s)" % str(e)
            print asyncore.compact_traceback()
        finally:
            self.requestsToSend = []
            logging.debug("sendRequestsToNode: numRequests: %d" % self.numRequests)

    def invokeEventHandler(self, response):
        try:
            type = "success"
            if response.status != 200:
                type = "failure"
            sequence = int(response.getheader("X-Sequence"))
            if not sequence:
                return

            if sequence in self.sentRequests:
                request = self.sentRequests[sequence]
                del self.sentRequests[sequence]
            else:
                # Try to search for it in self.requestsToSend
                request = filter(lambda x: x.requestID == sequence, self.requestsToSend)
                if request:
                    request = request[0]
                    self.requestsToSend = \
                        filter(lambda x: x.requestID != sequence, self.requestsToSend)
                else:
                    logging.debug(("invokeEventHandler: Could not find request ID: " +
                                  "%d in pending or sent requests. " +
                                  "This is a serious error") % sequence)
                    return

            if request.event in ["ackMessage"]:
                return

            # Invoke at-site event handlers
            try:
                cb = request.callback
                if response.status != 200:
                    cb = request.errback
                call_if_not_none_and_callable(cb, response=response)
            except Exception, e:
                print "Exception(%s) running at-site event handler" % str(e)
                print asyncore.compact_traceback()

            event = "%s/%s/%s" % (type, request.event, request.target)
            if request.event == "consumeMessage" and request.queueName in self.consumingQueues:
                # Re-send a consumeMessage request
                self.consumeMessage(queueName=request.queueName,
                                    autoACK=self.consumingQueues[request.queueName],
                                    callback=request.callback,
                                    errback=request.errback,
                                    makeCheck=False)
            self.owner.onEvent(event,
                               requestID=sequence,
                               response=response)
        except Exception, e:
            print "Exception (%s)" % str(e)
            print asyncore.compact_traceback()

    # Note: We are actually only interested in knowing if there are any pending
    # responses
    def hasPendingResponses(self):
        return self._connection_state == STATE_CONNECTING \
            or len(self.sentRequests) > 0


    def handleQueue(self, queueName,
                    callback=None, errback=None):
        """
        This function is called to tell the Node Handler that we are interested in
        interacting with the queue 'queueName'
        """
        request = Request("handleQueue", url = "",
                          queueName = queueName,
                          requestID = self.owner.nextRequestID(),
                          callback=callback, errback=errback)
        self.requestsToSend.append(request)
        return request.requestID


    def produceMessage(self, queueName, message,
                       callback=None, errback=None):
        """
        Produce a message 'message' and put it into the queue 'queueName'
        """
        request = Request("produceMessage",
                          url = "/queue/%s/produceMessage/" % queueName,
                          queueName = queueName,
                          postData = message,
                          requestID = self.owner.nextRequestID(),
                          callback = callback,
                          errback = errback)
        self.requestsToSend.append(request)
        return request.requestID


    def consumeMessage(self, queueName, autoACK=False,
                       callback=None, errback=None, makeCheck=True):
        """
        This call actually registers a request at the server telling it that
        we are interested in consuming a message from this queue
        """
        if makeCheck:
            assert(queueName not in self.consumingQueues)

        request = Request("consumeMessage",
                          url = "/queue/%s/consumeMessage/" % queueName,
                          queueName = queueName,
                          requestID = self.owner.nextRequestID(),
                          autoACK = autoACK,
                          callback = callback,
                          errback = errback)
        self.requestsToSend.append(request)
        return request.requestID


    def consumeMessages(self, queueName, enabled=True, autoACK=False,
                        callback=None, errback=None):
        """
        This call just turns ON/OFF message consumption for this queue handler.

        If enabled is set to True then the onConsume handlers will be invoked
        on receipt of a message. enabled=False will disable message consumption.

        If there is currently a pending request and hence a message that is to
        be delivered, then the cancellation will take place after that message
        is received.

        Setting autoACK to True acknowledges the message as soon as
        it is removed from the queue. You don't need to explicitly ACK it.
        """
        assert(type(enabled) == type(True))
        assert(type(autoACK) == type(True))

        print "consumeMessages([queue=%s], [enabled=%s], [autoACK=%s])" % (queueName, str(enabled), str(autoACK))
        if enabled:
            self.consumingQueues[queueName] = autoACK
            # We just unconditionally send a consume message for now
            return self.consumeMessage(queueName=queueName,
                                       autoACK=autoACK,
                                       callback=callback,
                                       errback=errback,
                                       makeCheck=False)
        else:
            if queueName in self.consumingQueues:
                del self.consumingQueues[queueName]
            return 0


    def ackMessage(self, queueName, messageID):
        request = Request("ackMessage",
                          url = "/queue/%s/none/" % queueName,
                          queueName = queueName,
                          ackMessage = str(messageID),
                          requestID = self.owner.nextRequestID())
        self.requestsToSend.append(request)


    def loadQueue(self, queueName, fileName,
                  callback=None, errback=None):
        """
        Load the file 'fileName' into the queue 'queueName'. This file should
        have been created (dumped) by the same pymq node that requested the load
        OR it could have been copied to this node after being generated elsewhere
        """
        request = Request("loadQueue",
                          url = "/_ah/%s/loadQueue/" % queueName,
                          queueName = queueName,
                          requestID = self.owner.nextRequestID(),
                          callback = callback,
                          errback = errback)
        request.postData = urllib.urlencode({ "fileName": fileName })
        self.requestsToSend.append(request)
        return request.requestID


    def dumpQueue(self, queueName, fileName,
                  callback=None, errback=None):
        """
        Dump the queue 'queueName' into the file 'fileName' on the server.
        The dumped file can be loaded up into another queue that has access
        to the file 'fileName'. Administrators can also freely copy this
        file to another node for loading into a queue on another node
        """
        request = Request("dumpQueue",
                          url = "/_ah/%s/dumpQueue/" % queueName,
                          queueName = queueName,
                          requestID = self.owner.nextRequestID(),
                          callback = callback,
                          errback = errback)
        request.postData = urllib.urlencode({ "fileName": fileName })
        self.requestsToSend.append(request)
        return request.requestID


    def addQueue(self, queueName,
                 callback=None, errback=None,
                 **kwargs):
        failFast
        request = Request("addQueue")
        request.url = "/_ah/%s/addQueue/" % queueName
        request.queueName = queueName
        request.requestID = self.owner.nextRequestID()
        self.requestsToSend.append(request)


    def dropQueue(self, queueName,
                  callback=None, errback=None):
        """
        Drop(delete) the queue named 'queueName'
        """
        request = Request("dropQueue",
                          url = "/_ah/%s/dropQueue/" % queueName,
                          queueName = queueName,
                          requestID = self.owner.nextRequestID(),
                          callback = callback,
                          errback = errback)
        self.requestsToSend.append(request)
        return request.requestID


    def addDB(self, dbName, callback=None, errback=None):
        """
        Add a new database named 'dbName' to the current pymq node
        """
        request = Request("addDB",
                          url = "/_ah/%s/addDB/" % dbName,
                          dbName = dbName,
                          requestID = self.owner.nextRequestID(),
                          callback = callback,
                          errback = errback)
        self.requestsToSend.append(request)
        return request.requestID



class ClusterHandler(http_evented):
    """
    This class handles client communication to and from the master
    node in a pymq cluster. It abstracts communication from the
    individual nodes in the cluster as far as the client (user) of
    this class is concerned.

    It also handles various node failure scenarios as far as pymq
    nodes are concerned and tries to re-discover replacements for
    the pymq nodes that have gone down
    """
    patNodeInfo = re.compile(r"^([^@]+)@([^:]+):([0-9]+)$")

    def __init__(self, host_and_port, owner,
                 onConnected,
                 onClose,
                 onException):
        self.owner             = owner
        self.requestsToSend    = []
        self.sentRequests      = { }
        self.pendingHandleQueueRequests = []

        self._onMasterConnected = onConnected

        # Both the structures below are maps which map a queue name
        # and node name respectively to an object of type
        # single_node_handler_metadata. This makes replacement of
        # failed nodes an O(1) operation
        self.handledQueues     = { }
        self.handledNodes      = { }
        http_evented.__init__(self, host_and_port,
                              self.on_self_connected,
                              onClose, onException)

        # !! This is a 100% PURE HACK !!
        # Always make sure that you override this AFTER constructing
        # the original object
        self._eventHandlers = identity_deque(self.on_response_received)


    def sendRequests(self):
        self.sendRequestsToMaster()


    def sendRequestsToMaster(self):
        """
        Send all pending requests to the master node
        """
        requestsSent = 0
        try:
            for r in self.requestsToSend:
                headers = { }
                headers["X-Sequence"] = str(r.requestID)
                self.push_HTTP_request("GET", r.url, r.postData, headers)
                self.sentRequests[r.requestID] = r
                print "sendRequestsToMaster: headers: " + str(headers)
                requestsSent += 1

            if len(self.sentRequests) > 0:
                self.pop_response()
        except Exception, e:
            print "Exception (%s)" % str(e)
            print asyncore.compact_traceback()
        finally:
            self.requestsToSend = []


    def invokeEventHandler(self, response):
        """
        Invoke the event handler for the specific event under question.
        Currently, only handleQueue is the event for which the cluster
        manager invokes handlers
        """
        try:
            type = "success"
            if response.status != 200:
                type = "failure"
            sequence = int(response.getheader("X-Sequence"))
            if not sequence:
                return

            print str(self.sentRequests.keys())
            request = self.sentRequests[sequence]
            del self.sentRequests[sequence]

            if request.event not in ["handleQueue", "replacementFor"]:
                print "Invalid request. Got %s" % request.event
                return

            # Invoke at-site event handlers
            try:
                cb = request.callback
                if response.status != 200:
                    cb = request.errback
                call_if_not_none_and_callable(cb, response=response)
            except Exception, e:
                print "Exception(%s) running at-site event handler" % str(e)
                print asyncore.compact_traceback()

            if request.event != "handleQueue":
                return

            event = "%s/%s/%s" % (type, request.event, request.target)
            self.owner.onEvent(event,
                               requestID=sequence,
                               response=response)
        except Exception, e:
            print "Exception (%s)" % str(e)
            print asyncore.compact_traceback()


    def checkIdle(self):
        """
        This will be called by all nodes that are being managed by
        this cluster manager. It should call the idle event handler
        for it's owner class
        """
        self.sendRequestsToMaster()
        self.owner.checkIdle()


    def hasPendingResponses(self):
        if len(self.sentRequests) > 0 or \
            self._connection_state == STATE_CONNECTING:
            return True

        for h in self.handledNodes.values():
            if h.handler.hasPendingResponses():
                return True
        return False


    def on_self_connected(self):
        """
        Called when the cluster handler is connected to the master node
        """
        call_if_not_none_and_callable(self._onMasterConnected)
        self.owner.onEvent("connected")
        self.checkIdle()


    def on_response_received(self, response):
        """
        Called when the master responds back to a request made by
        this cluster manager. Action needs to be taken depending
        upon the response received
        """
        sequence = int(response.getheader("X-Sequence"))
        if not sequence:
            # Something bad happened. We ALWAYS tag our requests
            # and expect a tagged response
            print "Something bad happened. We didn't get X-Sequence"
            return

        if sequence not in self.sentRequests:
            print "sequence number %d not found in sent requests"
            return

        request = self.sentRequests[sequence]

        if request.event not in ["handleQueue", "replacementFor"]:
            print "Invalid request event: %s" % request.event
            return

        m = patNodeInfo.match(response.getbody())
        if response.status != 200 or not m:
            # We failed to find the node for this queue or a replacement
            # for the node. Invoke event handlers if any
            if response.status == 200:
                response.status = 701
            self.invokeEventHandler(response)
            return

        nodeName = m.group(1)
        nodeHost = m.group(2)
        nodePort = int(m.group(3))

        if request.event == "handleQueue":
            # Check if the node is in the list of nodes that we are
            # handling
            if nodeName not in self.handledNodes:
                # Try to connect to that node

                # TODO: Same as below.
                eh = functools.partial(self.on_node_down, nodeName=nodeName)
                snh = SingleNodeHandler(host_and_port = (nodeHost, nodePort),
                                        owner = self,
                                        onNodeConnected = functools.partial(self.on_node_connected,
                                                                        nodeName=nodeName),
                                        onConnectionClose     = eh,
                                        onConnectionException = eh)

                self.handledNodes[nodeName] = \
                    single_node_handler_metadata(nodeName, snh)

                # Add the handleQueue request to the list of pending
                # handleQueue requests
                request.nodeName = nodeName
                self.pendingHandleQueueRequests.append((request, response))
            else:
                nodeMeta = self.handledNodes[nodeName]
                self.handledQueues[request.target] = nodeMeta
                print "Adding %s to handled queues" % request.target
                self.invokeEventHandler(response)

        elif request.event == "replacementFor":
            # Try to connect to the new node
            del self.sentRequests[sequence]
            oldNodeMeta = self.handledNodes[request.target]
            del self.handledNodes[request.target]

            # TODO: Subtle bug
            #
            # Let the originally failed node              => A
            #     the replacement node                    => B
            #     the replacement of the replacement node => C
            #
            # If B is down, then trying to connect to B will fail.
            # This will call the on_node_down() function, which will query
            # the /replacementFor/ resource from the master. Since the node
            # B will have been made active by the master by the time it sends
            # 'B' as a response to /replacementFor/ on node 'A', it will have
            # a replacement entry, which can be queried.
            #
            # However, if these things happen in quick succession and if
            # multiple clients are connected, some of them might go into
            # an indeterminate state.
            eh = functools.partial(self.on_node_down, nodeName=nodeName)
            snh = SingleNodeHandler(host_and_port = (nodeHost, nodePort),
                                    owner = self,
                                    onNodeConnected = functools.partial(self.on_node_connected,
                                                                    nodeName=nodeName),
                                    onConnectionClose     = eh,
                                    onConnectionException = eh)

            newNodeMeta = single_node_handler_metadata(nodeName, snh)
            oldNodeMeta.copy(newNodeMeta)
            self.handledNodes[nodeName] = oldNodeMeta

        self.checkIdle()


    def on_node_connected(self, nodeName):
        """
        When a node is connected, we go through all pending requests
        in self.pendingHandleQueueRequests and invoke the appropriate
        success handlers

        The connected event will be raised by the appropriate node
        handler itself
        """
        logging.debug("on_node_connected(%s)" % nodeName)
        nodeMeta = self.handledNodes[nodeName]
        for request, response in self.pendingHandleQueueRequests:
            try:
                if request.nodeName == nodeName:
                    print "Adding queue: %s" % request.target
                    self.handledQueues[request.target] = nodeMeta
                    self.invokeEventHandler(response)
            except Exception, e:
                print "Exception (%s) while invoking event handlers" % str(e)

        self.pendingHandleQueueRequests = \
            filter(lambda x: x[0].nodeName != nodeName,
                   self.pendingHandleQueueRequests)

    def on_node_down(self, nodeName):
        """
        oopsie!! A node went down. We try to find a replacement
        for the node that went down
        """
        logging.debug("on_node_down(%s)" % nodeName)
        request = Request(event = "replacementFor",
                          url = "/node/%s/replacementFor/" % nodeName,
                          requestID = self.nextRequestID(),
                          queueName = nodeName)
        self.requestsToSend.append(request)
        self.checkIdle()


    def nextRequestID(self):
        return self.owner.nextRequestID()


    def onEvent(self, event, **kwargs):
        return self.owner.onEvent(event, **kwargs)


    def handleQueue(self, queueName,
                    callback=None, errback=None):
        # Check if we are already handling the queue.
        if queueName in self.handledQueues:
            pass

        # Query the master node to find out the queues's location
        request = Request(event = "handleQueue",
                          url = "/queue/%s/findQueue/" % queueName,
                          requestID = self.owner.nextRequestID(),
                          queueName = queueName,
                          callback=callback,
                          errback=errback)
        self.requestsToSend.append(request)
        return request.requestID


    def produceMessage(self, queueName, message,
                       callback=None, errback=None):
        h = self.handledQueues[queueName].handler
        return h.produceMessage(queueName, message,
                                callback, errback)

    def consumeMessage(self, queueName, autoACK=False,
                       callback=None, errback=None):
        h = self.handledQueues[queueName].handler
        return h.consumeMessage(queueName, callback, errback)

    def consumeMessages(self, queueName, enabled=True, autoACK=False,
                        callback=None, errback=None):
        h = self.handledQueues[queueName].handler
        return h.consumeMessages(queueName, enabled, autoACK,
                                 callback, errback)

    def ackMessage(self, queueName, messageID):
        h = self.handledQueues[queueName].handler
        return h.ackMessage(queueName, messageID)

    def loadQueue(self, queueName, fileName,
                  callback=None, errback=None):
        h = self.handledQueues[queueName]
        return h.loadQueue(queueName, fileName,
                                      callback, errback)

    def dumpQueue(self, queueName, fileName,
                  callback=None, errback=None):
        h = self.handledQueues[queueName]
        return h.dumpQueue(queueName, fileName,
                                      callback, errback)

    def addQueue(self, queueName,
                 callback=None, errback=None,
                 **kwargs):
        failFast
        return self.handler.addQueue(queueName,
                                     callback, errback,
                                     **kwargs)

    def dropQueue(self, queueName,
                  callback=None, errback=None):
        h = self.handledQueues[queueName]
        return h.dropQueue(queueName, callback, errback)

    def addDB(self, dbName,
              callback=None, errback=None):
        raise OperationNotSupportedError("The addDB operation is not supported by the clustered client")



class MessageQueue(object):
    """
    This is the class that the user will instantiate. It can be
    constructed either by passing:
    1. node   = "hostname:port" OR
    2. master = "hostname:port"
    This class accordingly constructs itself to handle a single
    pymq node OR a pymq cluster master
    """
    def __init__(self, master = None, node = None, timeout = 30.0):
        # A master address takes precedence over a node address.
        if master is not None:
            self.handler = ClusterHandler(master, self, None, _give_up, _give_up)
        else:
            self.handler = SingleNodeHandler(node, self, None, _give_up, _give_up)

        self.timeout = timeout        # asyncore.loop() timeout in seconds
        self.requestID = 1            # monotonically increasing request counter
        self.subscribers = []         # list of [compiled-regex, callable] pairs

    def nextRequestID(self):
        """Return the next unused request ID (post-incremented counter)."""
        self.requestID += 1
        return self.requestID - 1

    def subscribe(self, eventRE, subscriber):
        """
        Register 'subscriber' to be invoked for every event whose name
        matches the regular expression 'eventRE'. Returns self so that
        calls can be chained.
        """
        self.subscribers.append( [ re.compile(eventRE), subscriber ] )
        return self

    def unsubscribe(self, subscriber, eventRE = None):
        """
        Remove 'subscriber' from the subscription list. When 'eventRE'
        is given, only the subscription registered under that exact
        pattern string is removed; otherwise every subscription of
        'subscriber' is removed. Returns self for chaining.
        """
        if eventRE is not None:
            self.subscribers = \
                [s for s in self.subscribers
                 if not (s[0].pattern == eventRE and s[1] == subscriber)]
        else:
            self.subscribers = \
                [s for s in self.subscribers
                 if not (s[1] == subscriber)]
        return self

    def onEvent(self, event, **kwargs):
        """Deliver 'event' to every subscriber whose pattern matches it."""
        for s in self.subscribers:
            if s[0].match(event):
                s[1](event, **kwargs)

    def checkIdle(self):
        """
        Fire the "idle" event; while the handler has no pending
        responses keep firing "idle" (really=True) and flushing queued
        requests, so idle subscribers get a chance to generate work.
        """
        self.onEvent("idle", really=False)
        while not self.handler.hasPendingResponses():
            self.onEvent("idle", really=True)
            self.handler.sendRequests()

    def start(self):
        """Run the asyncore event loop forever, firing idle events."""
        while True:
            asyncore.loop(timeout = self.timeout)
            # Trigger the "idle" event now
            self.checkIdle()

    def handleQueue(self, queueName,
                    callback=None, errback=None):
        """
        Notify pymq that you are interested in performing operations on
        the queue with name 'queueName'. The success or error callback
        will be invoked appropriately on success/failure of this operation.
        """
        return self.handler.handleQueue(queueName, callback, errback)

    def produceMessage(self, queueName, message,
                       callback=None, errback=None):
        """
        Produce a message 'message' and put it into the queue 'queueName'
        """
        return self.handler.produceMessage(queueName, message,
                                           callback, errback)

    def consumeMessage(self, queueName, autoACK=False,
                       callback=None, errback=None):
        """
        Consume a single message from the queue 'queueName'.
        This call actually registers a request at the server telling it that
        we are interested in consuming a single message from this queue.

        NOTE(review): autoACK is accepted but not forwarded to the
        handler -- confirm the handler-side default matches.

        Note: DO NOT EVER call consumeMessage if you have already called
        consumeMessages(enabled=True) for the queue that you want to consume
        messages from. Call consumeMessage ONLY if you are not already
        consuming messages via a consumeMessages() call.
        """
        return self.handler.consumeMessage(queueName,
                                           callback, errback)

    def consumeMessages(self, queueName, enabled=True, autoACK=False,
                        callback=None, errback=None):
        """
        This call just turns ON/OFF message consumption for this queue handler.

        If enabled is set to True then the onConsume handlers will be invoked
        on receipt of a message. enabled=False will disable message consumption.

        If there is currently a pending request and hence a message that is to
        be delivered, then the cancellation will take place after that message
        is received.

        Setting autoACK to True acknowledges the message as soon as
        it is removed from the queue. You don't need to explicitly ACK it.
        """
        self.handler.consumeMessages(queueName, enabled, autoACK,
                                     callback, errback)

    def ackMessage(self, queueName, messageID):
        """
        Acknowledge message with ID 'messageID'. There is no callback associated
        with this operation. The client must assume it to succeed or be prepared
        to process the same message (message with the same ID) again
        """
        self.handler.ackMessage(queueName, messageID)

    def loadQueue(self, queueName, fileName,
                  callback=None, errback=None):
        """
        Load the file 'fileName' into the queue 'queueName'. This file should
        have been created (dumped) by the same pymq node that requested the load
        OR it could have been copied to this node after being generated elsewhere.

        Generalized (backward-compatible): optional callback/errback are
        now forwarded to the underlying handler, matching its signature.
        """
        return self.handler.loadQueue(queueName, fileName,
                                      callback, errback)

    def dumpQueue(self, queueName, fileName,
                  callback=None, errback=None):
        """
        Dump the queue 'queueName' into the file 'fileName' on the server.
        The dumped file can be loaded up into another queue that has access
        to the file 'fileName'. Administrators can also freely copy this
        file to another node for loading into a queue on another node.

        Generalized (backward-compatible): optional callback/errback are
        now forwarded to the underlying handler, matching its signature.
        """
        return self.handler.dumpQueue(queueName, fileName,
                                      callback, errback)

    def addQueue(self, queueName, **kwargs):
        """
        Create a new queue named 'queueName'; extra keyword arguments
        are forwarded to the underlying handler unchanged.
        """
        return self.handler.addQueue(queueName, **kwargs)

    def dropQueue(self, queueName,
                  callback=None, errback=None):
        """
        Drop(delete) the queue named 'queueName'.

        Generalized (backward-compatible): optional callback/errback are
        now forwarded to the underlying handler, matching its signature.
        """
        return self.handler.dropQueue(queueName, callback, errback)

    def addDB(self, queueName):
        """
        Add a new database named 'queueName' to the current pymq node.
        (Doc fix: the original docstring referred to 'dbName' but the
        parameter is named 'queueName'; the value is the database name.)
        Not supported when connected through a cluster master.
        """
        return self.handler.addDB(queueName)


