#!/usr/bin/env python

"""
An orchestrator is invoked by a release manager to start a replication.
"""

import cvmfs_plugin
from replica_common import StampedVersion, ReplicationCommand
import threading
import pickle

class Orchestrator(cvmfs_plugin.Plugin):

    def __init__(self, target):
        cvmfs_plugin.Plugin.__init__(self, target)

        self.subscribe("REPLICA.CLIENT_REPLY");

        self.seq = 0
        self.version_queue = []
        self.replies = [] # tracking replies from clients
        self.pub_lock = threading.Lock()
        self.replica_reply_cv = threading.Condition(self.view_lock)

    def publish(self, *versions):
        """
        Publish a new snapshot version.
        """
        r = False
        if self.view.primary != self.broker_id:
            # non-primary nodes cannot perform this action
            print "i am not the primary. aborted"
            return False
        # put this new version to the queue
        # use a lock here to ensure version transition happens one by one
        self.pub_lock.acquire()
        for v in versions:
            print 'enqueuing new snapshot version %s' % v
            self.version_queue.append(v)
        self.pub_lock.release()

        # assign a sequence number to the version
        self.replica_reply_cv.acquire()
        self.active_view = self.view
        self.seq += 1
        replica_cmd = ReplicationCommand(self.seq)
        for i in range(0, len(self.version_queue)):
            version = self.version_queue[i]
            stamped = StampedVersion([self.viewstamp[0], self.viewstamp[1]],
                    version)
            print 'stamping version %s as [%d, %d]' % (version,\
                    stamped.viewstamp[0], stamped.viewstamp[1])
            replica_cmd.versions.append(stamped)
            self.viewstamp[1] += 1
            del self.version_queue[i]

        # inform each mirror in the view of the new version
        rep_cmd_str = pickle.dumps(replica_cmd)
        del self.replies[:]
        for cohort in self.view.cohorts:
            self.unicast(cohort, "REPLICA.NEWVERSION", rep_cmd_str)
        # wait until we have all replies or a timeout
        self.replica_reply_cv.wait(10 * len(self.active_view.cohorts) * \
                len(replica_cmd.versions))
        if len(self.replies) == len(self.active_view.cohorts):
            all_ok = True
            for reply in self.replies:
                if not reply.ok:
                    # TODO should start paxos to remove reply.src
                    print "removing %s's membership" % reply.src
                    all_ok = False
            if all_ok:
                r = True
        else:
            print 'not all cohorts replied ok for replication'
            r = False

        # do the replication locally
        print 'doing local replication'
        self.replica_reply_cv.release()
        return r

    def handle_msg(self, src, subject, body):
        self.replica_reply_cv.acquire()
        if subject == 'REPLICA.CLIENT_REPLY':
            reply = pickle.loads(body)
            if reply.seq == self.seq:
                reply.src = src
                if reply.ok:
                    print 'rep_ok from %s (seq %d)' % (src, self.seq)
                else:
                    print '%s is probably in viewchange (his vid: %d)' % \
                        (src, reply.vid)
            self.replies.append(reply)
            if len(self.replies) == len(self.active_view.cohorts):
                self.replica_reply_cv.notify()
            else:
                print 'replication reply of seq %d ignored' % reply.seq
        self.replica_reply_cv.release()

    def view_change(self):
        # the view_lock is already held during invocation of this function
        # re-assign the vid of the viewstamp and reset the seq counter
        print 'view on orchestrator has been changed to %d' % self.view.vid
        self.viewstamp = [self.view.vid, 0]

if __name__ == '__main__':
    import sys
    # fail with a usage message instead of an IndexError traceback
    if len(sys.argv) < 2:
        sys.exit('usage: %s <target>' % sys.argv[0])
    orchestrator = Orchestrator(int(sys.argv[1]))
    # simple interactive shell: 'pub <v1> [v2 ...]' publishes versions,
    # 'quit'/'exit' shuts the orchestrator down
    while True:
        cmd = raw_input('> ')
        if cmd == 'quit' or cmd == 'exit':
            orchestrator.close()
            break
        ca = cmd.split()
        if len(ca) > 0 and ca[0] == 'pub':
            # publish the remaining tokens as snapshot versions
            orchestrator.publish(*ca[1:])
