from __future__ import with_statement

from crv.model import DBSession, metadata
from crv.model.crvmodel import *
from crv.tools.clustermgr import ClusterMgr
from crv.tools.rolemgr import RoleMgr
from crv.tools.vmmgr import VMMgr
import sys, traceback,time
import logging
log = logging.getLogger(__name__)

def checkAll2():
    """Run checkAll() forever as a background polling loop.

    Each pass delegates to checkAll(), which returns immediately if a
    previous pass is still in flight.
    """
    log.debug("checkAll2 Called")
    while True:
        checkAll()
        # Brief pause between passes; the original tight loop spun at 100%
        # CPU, re-querying the database back to back with no delay.
        time.sleep(1)


#Re-entrancy guard: True while a doCheckAll() pass is running, so that
#overlapping calls to checkAll() return immediately.
checking=False
#NOTE(review): appears unused anywhere in this file -- possibly leftover
#state; confirm against other modules before removing.
previousRunUpdate=-1

#Timestamp of the last VM-timeout sweep; doCheckAll() waits until
#CHECK_VMS_TIMEOUT_TIME seconds have elapsed before sweeping again.
lastCheckTime=time.time()

def checkAll():
    """Run a single doCheckAll() pass, guarded against re-entry.

    If a previous pass is still executing (module-level ``checking`` flag),
    returns immediately.  Any exception raised by doCheckAll() is logged
    with its full traceback and swallowed so the polling loop keeps going.
    """
    global checking

    if checking:
        log.info("checkAll checking already, return")
        return

    checking=True

    try:
        doCheckAll()
    except Exception:
        log.error("*** ERRORERROR ***")
        #log the traceback one line per log record; format_exc() replaces
        #the manual sys.exc_info()/format_exception dance
        for line in traceback.format_exc().splitlines():
            log.error(line)
    finally:
        #always release the guard, success or failure (the original also
        #reset it inside the except block, which was redundant -- finally
        #runs in both cases)
        checking=False

#Polling / timeout tuning knobs.  All values are seconds, except
#MAX_VMS_STARTING which is a count.
CHECK_MESSAGES_TIME = 10         #cloudcrv and entities poll their messages this often
CHECK_ROLES_TIME = 10            #vms re-check whether their roles may start this often
STARTING_VMS_TIMEOUT_TIME = 180  #a vm stuck in its start stage this long has timed out
CHECK_VMS_TIMEOUT_TIME = 60      #cloudcrv sweeps for timed-out vms this often
STARTED_VMS_TIMEOUT_TIME = 60    #a started vm that hasn't refreshed its active time in this long has timed out
MAX_VMS_STARTING = 20            #maximum number of vms allowed to be starting concurrently

def doCheckAll():
    """One reconciliation pass of the controller.

    Loads every Cluster/VM/Role, sweeps for timed-out VMs, drains the
    Message queue, advances cluster lifecycle state, and commits the
    transaction.  Called repeatedly (with a re-entrancy guard) from
    checkAll().
    """

    global lastCheckTime;
    #NOTE(review): only lastCheckTime is rebound in this function; the
    #remaining global declarations are read-only constants and would
    #resolve without them.
    global CHECK_MESSAGES_TIME;
    global CHECK_ROLES_TIME;
    global STARTING_VMS_TIMEOUT_TIME;
    global CHECK_VMS_TIMEOUT_TIME;
    global STARTED_VMS_TIMEOUT_TIME;
    global MAX_VMS_STARTING;

    #visual separator between passes in the log
    log.info("#"*256);
    log.info("checkAll Called");

    #snapshot all entities once per pass; the nested helpers below close
    #over these lists
    allClusters = DBSession.query(Cluster).all();
    log.debug("checkAll::clusters=%s"%str(allClusters));
    allVMs = DBSession.query(VM).all();
    log.debug("checkAll::vms=%s"%str(allVMs));
    allRoles = DBSession.query(Role).all();
    log.debug("checkAll::roles=%s"%str(allRoles));

    rolesToRemove = []; #keeps track of roles that need to be removed from the database (from scaling down)
    vmsToRemove = []; #keeps track of vms that need to be removed from the database (from scaling down)

    #Lookup helpers over the pre-fetched entity lists.  Each returns a dict
    #keyed by primary key; passing an id narrows the result to that single
    #entity (empty dict if the id is unknown).
    def getRoles(roleid=None):
        if roleid is None:
            wanted = allRoles
        else:
            wanted = [r for r in allRoles if r.Role_ID == roleid]
        return dict((r.Role_ID, r) for r in wanted)

    def getVMs(vmid=None):
        if vmid is None:
            wanted = allVMs
        else:
            wanted = [v for v in allVMs if v.VM_ID == vmid]
        return dict((v.VM_ID, v) for v in wanted)

    def getClusters(clusterid=None):
        if clusterid is None:
            wanted = allClusters
        else:
            wanted = [c for c in allClusters if c.Cluster_ID == clusterid]
        return dict((c.Cluster_ID, c) for c in wanted)

    #A role may start once every role it depends on has reached at least the
    #STARTED state.  Vacuously True when dependOn is empty.
    def dependOnsSetUp(role):
        ready = (Role.SC.SC_STARTED, Role.SC.SC_UPDATING, Role.SC.SC_UPDATED)
        return all(dep.status in ready for dep in role.dependOn)

    #A role may update once every dependOn is UPDATED and none of them is
    #flagged to update again.  Vacuously True when dependOn is empty.
    def dependOnsUpdated(role):
        for dep in role.dependOn:
            if dep.status != Role.SC.SC_UPDATED:
                return False
            if dep.getAttr("updateFlag") == "True":
                return False
        return True

    #True when every role in the cluster has at least started.
    def allRolesSetUp(cluster):
        started = (Role.SC.SC_STARTED, Role.SC.SC_UPDATING, Role.SC.SC_UPDATED)
        return all(member.status in started for member in cluster.role)

    #True when every role in the cluster is UPDATED with no pending
    #re-update flag -- i.e. the cluster deployment is finished.
    def allRolesUpdated(cluster):
        for member in cluster.role:
            if member.status != Role.SC.SC_UPDATED:
                return False
            if member.getAttr("updateFlag") == "True":
                return False
        return True

    #True when every role on the vm is back to INITIALIZED.
    def vmRolesInitialized(vm):
        return all(member.status == Role.SC.SC_INITIALIZED for member in vm.role)

    #Worker roles are identified by their "rWorker" name prefix.
    def isWorker(role):
        rname = role.getAttr("name")
        return rname.startswith("rWorker")

    #Autoroles are identified by their "autorole" name prefix.
    #TODO: name-based detection is fragile; confirm there is no better marker.
    def isAutorole(role):
        rname = role.getAttr("name")
        return rname.startswith("autorole")

    #Return the first worker role found in cluster, or None if the cluster
    #has no workers.  Used as a template when scaling (see doScaling).
    #***this probably needs to be changed***
    def getWorkerRole(cluster):
        for irole in cluster.role:
            #fixed: original line ended with ';' instead of ':' -- a SyntaxError
            if isWorker(irole):
                return irole
        return None  #explicit: callers compare the result against None

    #Workers carry a numeric suffix ("rWorker%03d").  When scaling down we
    #drop the highest-numbered ones: every worker whose number is
    #>= finalWorkerCount is selected for removal.
    def getWorkersToRemove(cluster, finalWorkerCount):
        doomed = []
        for candidate in cluster.role:
            cname = candidate.getAttr("name")
            #slice off the "rWorker" prefix (7 chars) to get the number
            if cname.startswith("rWorker") and int(cname[7:]) >= finalWorkerCount:
                doomed.append(candidate)
        return doomed

    #do scaling
    #if numExtraWorkers is negative, then scale down
    def doScaling(cluster, numExtraWorkers):
        """Scale *cluster* by numExtraWorkers workers.

        Positive: clone the existing worker role/vm that many times, wire up
        dependencies, persist the new entities, and start whatever can start.
        Negative: stop (and later delete) the highest-numbered workers.
        No-op if the cluster has no worker role to use as a template.
        """
        log.info("doScaling called::change in number of workers=%s" % (numExtraWorkers));
        workerRole = getWorkerRole(cluster); #some worker role properties are used for the new workers
        if workerRole != None: #make sure a worker was found
            if numExtraWorkers > 0: #scale up
                cluster.status = Cluster.SC.SC_STARTING;
                currentNumWorkers = int(cluster.getAttr("numWorkers"));
                #keep track of roles on the cluster before scaling for later
                preScaleRoles = [];
                for irole in cluster.role:
                    preScaleRoles.append(irole);
                extraRoles = []; #keeps track of the extra roles that are created
                extraVMs = []; #keeps track of the extra vms that are created
                #new workers are numbered consecutively after the existing ones
                for i in range(currentNumWorkers, currentNumWorkers+numExtraWorkers):
                    tempWorkerVM = workerRole.vm.rp.newVM("vmWorker%03d"%i);
                    tempWorker = cluster.addRole(name="rWorker%03d"%i, roledef=workerRole.roleDef, vm=tempWorkerVM, enabled=True);
                    tempWorker.setAttr("CondorPassword", "abcdefg");
                    extraRoles.append(tempWorker);
                    extraRoles += tempWorker.dependOnLocal;
                    extraVMs.append(tempWorkerVM);
                buildRoleDep_scale(preScaleRoles, extraRoles);
                #add new workers and vms to database
                for extraRole in extraRoles:
                    DBSession.add(extraRole);
                for extraVM in extraVMs:
                    DBSession.add(extraVM);
                cluster.setAttr("numWorkers", currentNumWorkers + numExtraWorkers);
                #start the roles that have their dependencies done if their vms are up; if vms aren't up, start the vms
                for extraRole in extraRoles:
                    if dependOnsSetUp(extraRole):
                        if extraRole.vm.status == VM.SC.SC_INITIALIZED: #role's VM hasn't started yet
                            startVM(extraRole.vm);
                        elif extraRole.vm.status == VM.SC.SC_STARTED: #VM is set up
                            setupRole(extraRole);
            elif numExtraWorkers < 0: #scale down the cluster by removing workers
                finalWorkerCount = int(cluster.getAttr("numWorkers")) + numExtraWorkers;
                if finalWorkerCount > 0: #the website should already guarantee this is True; use this check to be safe
#                    cluster.status = Cluster.SC.SC_STARTING;
                    cluster.setAttr("numWorkers", finalWorkerCount);
                    workersToRemove = getWorkersToRemove(cluster, finalWorkerCount);
                    log.debug("Stopping workers %s"%([wRole.getAttr("name") for wRole in workersToRemove]));
                    #***stop the workers here***
                    for irole in workersToRemove:
                        stopRoleAndAutoroles(irole);
                    #NOTE(review): numWorkers was already set a few lines above;
                    #this repeated setAttr looks redundant -- confirm before removing.
                    cluster.setAttr("numWorkers", finalWorkerCount);

    #Kick off a cluster: mark it STARTING and power on the vm of every role
    #that has no dependencies and whose vm is still INITIALIZED.  Dependent
    #roles get their vms started later, as their dependencies finish.
    def startCluster(cluster):
        cluster.status = Cluster.SC.SC_STARTING
        for member in cluster.role:
            if member.dependOn:
                continue  #has dependencies; its vm starts later
            if member.vm.status == VM.SC.SC_INITIALIZED:
                startVM(member.vm)

    #***this is probably not complete
    #Stop a cluster by stopping each of its roles.  The vms are powered off
    #later, as "roleStopped" messages arrive and each vm's roles all return
    #to INITIALIZED (handled in the roleStopped message branch).
    def stopCluster(cluster):
        cluster.status = Cluster.SC.SC_STOPPING;
        for irole in cluster.role:
            stopRole(irole);
        #removed: a dead (no-op) triple-quoted block that powered off the
        #vms and reset role attrs inline

    #let a resource pool know that it needs to start a VM
    def startVM(vm):
        """Power on *vm* via its resource pool, honouring the global cap.

        If MAX_VMS_STARTING vms are already STARTING, the vm is parked in
        WAITING_TO_START instead; its lastActiveTime records when it began
        waiting, so the longest-waiting vm can be started first later.
        """
        numVMsStarting = 0;
        for ivm in getVMs().values():
            if ivm.status == VM.SC.SC_STARTING:
                numVMsStarting += 1;
        if numVMsStarting >= MAX_VMS_STARTING:
            vm.status = VM.SC.SC_WAITING_TO_START;
            vm.setAttr("lastActiveTime", time.time());
        else:
            #make client
            (clientuuid,clientfile)=vm.rp.getMaker("client").makeClient(vm)
            identifier=vm.rp.getMaker("vm").powerOn(clientfile)
            vm.rp.getMaker("client").expireClient(clientuuid)
            #NOTE(review): powerOn signals failure with the literal string
            #"Error" -- fragile sentinel; confirm against the vm maker API
            if identifier!="Error":
                vm.setAttr("identifier",identifier)
                #reset role information for this vm just in case the vm crashed and some flags weren't reset
                for irole in vm.role:
                    resetRoleAttr(irole);
                log.info("VM %s is starting to set up on RP %s." % (vm.getAttr("name"), vm.rp.getAttr("name")));
                vm.status = VM.SC.SC_STARTING;
                vm.setAttr("lastActiveTime", time.time());
            else:
                #if a rp can't start a vm, then don't start
                log.info("RP %s cannot start VM %s." % (vm.rp.getAttr("name"), vm.getAttr("name")));

    #try to stop a vm
    #***can all vms stop at once, or is there a maximum number of vms that can be stopping at the same time?***
    #***should a vm have a status of STOPPING? controller won't know when it INITIALIZED again; just go straight to INITIALIZED
    def stopVM(vm):
        #power the vm off via its resource pool and mark it INITIALIZED
        #immediately (there is no intermediate STOPPING state -- see notes above)
        vm.rp.getMaker("vm").powerOff(vm.getAttr("identifier"))
        vm.status = VM.SC.SC_INITIALIZED;

    #Build the setup profile for a role and flag its vm to apply it.  The
    #role is marked STARTING; if profile generation failed (the role's
    #"puppetcatalog" attr is the string "None"), doApply is set to "none"
    #so the vm does nothing.
    def setupRole(role):
        role.status = Role.SC.SC_STARTING
        log.info("Attempting to create profile for the setup of Role %s..." % (role.getAttr("name")))
        role.vm.rp.getMaker("profile").provideProfile(role)
        catalog_missing = role.getAttr("puppetcatalog") == "None"
        role.setAttr("doApply", "none" if catalog_missing else "setup")

    #Build the update profile for a role and flag its vm to apply it.  The
    #role is marked UPDATING; on success updateFlag is also set to "True".
    #As in setupRole, a missing puppet catalog sets doApply to "none".
    def updateRole(role):
        role.status = Role.SC.SC_UPDATING
        log.info("Attempting to create profile for the update of Role %s..." % (role.getAttr("name")))
        role.vm.rp.getMaker("profile").provideProfile(role)
        if role.getAttr("puppetcatalog") == "None":
            role.setAttr("doApply", "none")
        else:
            role.setAttr("doApply", "update")
            role.setAttr("updateFlag", "True")

    #Flag a role for teardown: clear its flags, mark it STOPPING, and build
    #the reverse profile for the vm to apply.
    #(*** function is not complete ***)
    def stopRole(role):
        resetRoleFlags(role)
        role.status = Role.SC.SC_STOPPING
        role.vm.rp.getMaker("profile").provideReverseProfile(role)
        #a missing puppet catalog means profile generation failed
        catalog_ok = role.getAttr("puppetcatalog") != "None"
        role.setAttr("doApply", "stop" if catalog_ok else "none")

    #Stop a role together with any autoroles on the same vm that are
    #directly linked to it (either direction of the dependency edge).
    #***this function should not work; it needs to be finished***
    def stopRoleAndAutoroles(role):
        neighbours = role.dependBy + role.dependOn
        for neighbour in neighbours:
            if isAutorole(neighbour) and neighbour.vm.VM_ID == role.vm.VM_ID:
                stopRole(neighbour)
        stopRole(role)

    #Clear the per-role work flags (doApply / updateFlag).
    def resetRoleFlags(role):
        role.setAttr("doApply", "none")
        role.setAttr("updateFlag", "False")

    #Return a role to its pristine INITIALIZED state with cleared flags.
    def resetRoleAttr(role):
        role.status = Role.SC.SC_INITIALIZED
        resetRoleFlags(role)

    #reset status of dependBy to INITIALIZED and reset their flags
    #Recursive: if a dependBy had itself started, its own dependBys may have
    #started too and must also be torn down.  Note resetRoleAttr is applied
    #to EVERY direct dependBy, even ones that never started (a no-op for an
    #already-INITIALIZED role).
    def resetDependBys(role):
        for idepby in role.dependBy:
            if idepby.status in (Role.SC.SC_STARTED, Role.SC.SC_UPDATING, Role.SC.SC_UPDATED):
                resetDependBys(idepby); #some dependBy of this depby could have started, so those need to be stopped also
            resetRoleAttr(idepby);

    #set dependBy to have a status of STARTED
    #call this when setting a role status to SC.SC_STARTED; no dependBy of a STARTED role can be UPDATING or UPDATED
    #Demotes any UPDATING/UPDATED dependBy back to STARTED (recursively) and
    #clears its flags: this role will update again, so they must re-update
    #after it does.
    def setDependBysToStarted(role):
        for idepby in role.dependBy:
            if idepby.status in (Role.SC.SC_STARTED, Role.SC.SC_UPDATING, Role.SC.SC_UPDATED):
                resetRoleFlags(idepby); #the role specified by role needs to update, so reset flags so depby can't update yet
                if idepby.status in (Role.SC.SC_UPDATING, Role.SC.SC_UPDATED):
                    idepby.status = Role.SC.SC_STARTED;
                    setDependBysToStarted(idepby); #some dependBy could be updating/updated, so need to set them to started also

    #in a cycle, if a role's profile finished running (setup, update, stop), nothing new was started, and no errors occurred, controller can check if everything is set up or if everything is all finished
    #the following 3 sets hold the clusters in which a role finished applying, something was started, and an error occurred, respectively
    #(each set holds Cluster_ID values)
    roleFinished, somethingNewStarted, errorOccurred = set(), set(), set();

    #check for vm timeouts after controller has done CHECK_VMS_TIMEOUT_TIME seconds of waiting
    if (time.time()-lastCheckTime) > CHECK_VMS_TIMEOUT_TIME:
        log.info("Checking if any VMs timed out.");
        vms = getVMs();
        #check if any starting vms have timed out
        for vm in vms.values():
            if vm.status == VM.SC.SC_STARTING and (time.time() - float(vm.getAttr("lastActiveTime"))) > STARTING_VMS_TIMEOUT_TIME:
                log.info("ERROR: VM %s timed out during its start stage." % vm.getAttr("name"));
                #power-cycle the hung vm: off, record the error, then retry
                stopVM(vm);
                errorOccurred.add(vm.role[0].cluster[0].Cluster_ID);
                startVM(vm);
        #check for vms that timed out while starting roles
        #vm sets its active time when it is on
        #controller checks the last time the vm set its active time; if it set its active time too long ago, then it is considered timed out
        #if a vm crashes, it will not set its active time anymore, and the controller will see this after a while
        for vm in vms.values():
            if vm.status == VM.SC.SC_STARTED and (time.time() - float(vm.getAttr("lastActiveTime"))) > STARTED_VMS_TIMEOUT_TIME:
                #if the vm is STARTED, but the active time was set too long ago, then vm timed out
                log.info("ERROR: VM %s has timed out after it started." % (vm.getAttr("name")));
                stopVM(vm);
                #reset role information for roles on the vm
                #stop roles on the vm and all of their dependBy; if a role started, then some of its dependBy could have started, so those need to be stopped so they can be started again later
                vmCluster = vm.role[0].cluster[0]; #get the cluster so it can be used later
                if vmCluster.status in (Cluster.SC.SC_UPDATING, Cluster.SC.SC_UPDATED):
                    #cluster will have to start more roles, so it has to return the setup stage if it was in the update stage
                    vmCluster.status = Cluster.SC.SC_STARTING;
                for irole in vm.role:
                    if irole.status in (Role.SC.SC_STARTING, Role.SC.SC_UPDATING, Role.SC.SC_STARTED, Role.SC.SC_UPDATED):
                        if irole.status == Role.SC.SC_STARTING:
                            log.info("The setup process of %s stopped because VM %s crashed." % (irole.getAttr("name"), vm.getAttr("name")));
                        elif irole.status == Role.SC.SC_UPDATING:
                            log.info("The update process of %s stopped because VM %s crashed." % (irole.getAttr("name"), vm.getAttr("name")));
                            resetDependBys(irole); #do this so dependBy can't update until crashed roles start up again
                        elif irole.status in (Role.SC.SC_STARTED, Role.SC.SC_UPDATED):
                            log.info("Role %s was killed because VM %s crashed." % (irole.getAttr("name"), vm.getAttr("name")));
                            resetDependBys(irole); #do this so dependBy can't update until crashed roles start up again
                    resetRoleAttr(irole);
                startVM(vm); #restart the vm
                errorOccurred.add(vmCluster.Cluster_ID); #error occurred on the vm's cluster
        lastCheckTime = time.time();

    #check messages
    #process each queued message; the handlers below mutate entity state and
    #record per-cluster progress in roleFinished/somethingNewStarted/errorOccurred
    allMessages = DBSession.query(Message).all();
    log.debug("checkAll::Messages=%s"%str(allMessages));
    for msg in allMessages:

        if msg.message == "vmIsUp": #vm successfully set up
            vm = getVMs(vmid=msg.messageAbout)[msg.messageAbout];
            log.info("VM %s is up on RP %s." % (vm.getAttr("name"), vm.rp.getAttr("name")));
            vm.status = VM.SC.SC_STARTED;
            vmCluster = vm.role[0].cluster[0];
            #start the roles whose dependOn are done
            for irole in vm.role:
                if dependOnsSetUp(irole) and irole.getAttr("doApply") == "none": #make sure the script hasn't already been created
                    setupRole(irole);
                    log.info("Script is ready for setup of Role %s" % (irole.getAttr("name")));
                    somethingNewStarted.add(vmCluster.Cluster_ID); #something new started on this role's cluster
            #see if any vms were waiting and start one the one waiting the longest
            #lastActiveTime was stamped when a vm entered WAITING_TO_START, so
            #the smallest value identifies the longest-waiting vm
            vmWaitingLongest = None;
            earliestWaitingTime = time.time();
            for ivm in getVMs().values():
                if ivm.status == VM.SC.SC_WAITING_TO_START:
                    if float(ivm.getAttr("lastActiveTime")) < earliestWaitingTime:
                        vmWaitingLongest = ivm;
                        earliestWaitingTime = float(ivm.getAttr("lastActiveTime"));
            if vmWaitingLongest != None:
                startVM(vmWaitingLongest);

        elif msg.message == "roleIsUp": #role successfully set up; name of this role is in msg.messageAbout
            currentRole = getRoles(roleid=msg.messageAbout)[msg.messageAbout];
            #only meaningful if the role was mid-setup or mid-update
            if currentRole.status in (Role.SC.SC_STARTING, Role.SC.SC_UPDATING):
                roleFinished.add(currentRole.cluster[0].Cluster_ID);
                log.info("Role %s is up on VM %s." % (currentRole.getAttr("name"), currentRole.vm.getAttr("name")));
                if currentRole.status == Role.SC.SC_STARTING:
                    #role just finished its setup
                    currentRole.status = Role.SC.SC_STARTED;
                    #find more roles that can start setup
                    for idepby in currentRole.dependBy:
                        if idepby.status == Role.SC.SC_INITIALIZED:
                            if dependOnsSetUp(idepby):
                                if idepby.vm.status == VM.SC.SC_INITIALIZED: #role's VM hasn't started yet, so start it
                                    startVM(idepby.vm);
                                    somethingNewStarted.add(idepby.cluster[0].Cluster_ID);
                                elif idepby.vm.status == VM.SC.SC_STARTED: #VM is already set up, so start the role
                                    setupRole(idepby);
                                    somethingNewStarted.add(idepby.cluster[0].Cluster_ID);
                    #set dependBy/dependOn to STARTED if they have updated already but will need to update again
                    setDependBysToStarted(currentRole); #all updated dependBy will have to update again some time after this role updates
                    if currentRole.getAttr("CustomAttrChanged") == "True": #if role's attributes changed, then updated dependOn need to update again later
                        currentRole.setAttr("CustomAttrChanged", "False");
                        for idepon in currentRole.dependOn:
                            if idepon.status == Role.SC.SC_UPDATING:
                                #a depon can be updating during the setup stage if an error occurs during the update stage
                                idepon.setAttr("updateFlag", "True"); #make depon update again after it finishes updating
                                setDependBysToStarted(idepon); #dependBy of depon will have to update again since depon will update
                            elif idepon.status == Role.SC.SC_UPDATED:
                                idepon.status = Role.SC.SC_STARTED; #do this so depon must update again
                                resetRoleFlags(idepon); #depon's update script will be created again when cluster moves into update stage
                                setDependBysToStarted(idepon); #dependBy of depon will have to update again since depon will update
                elif currentRole.status == Role.SC.SC_UPDATING:
                    if currentRole.getAttr("doApply") == "none": #make sure role isn't waiting to do another update
                        #role just finished updating
                        currentRole.status = Role.SC.SC_UPDATED;
                        if currentRole.cluster[0].status == Cluster.SC.SC_UPDATING: #make sure cluster is in update stage to do more updates
                            #need to update again if properties were changed during its update
                            if currentRole.getAttr("updateFlag") == "True":
                                if dependOnsUpdated(currentRole): #make sure dependOn are updated before this is updated
                                    updateRole(currentRole);
                                    log.info("Script is ready for update of Role %s." % (idepby.getAttr("name")));
                                    somethingNewStarted.add(currentRole.cluster[0].Cluster_ID);
                            else:
                                #find roles that need to be updated
                                #if role's attributes changed, then updated dependOn need to update again
                                if currentRole.getAttr("CustomAttrChanged") == "True":
                                    currentRole.setAttr("CustomAttrChanged", "False");
                                    for idepon in currentRole.dependOn:
                                        if idepon.status == Role.SC.SC_UPDATING:
                                            idepon.setAttr("updateFlag", "True"); #make depon update again after it finishes updating
                                            setDependBysToStarted(idepon); #dependBy will have to update again
                                        elif idepon.status == Role.SC.SC_UPDATED and dependOnsUpdated(idepon):
                                            #make depon update again as long as its dependOn are updated
                                            setDependBysToStarted(idepon); #dependBy will have to update again
                                            updateRole(idepon);
                                            somethingNewStarted.add(idepon.cluster[0].Cluster_ID);
                                #always update dependBy when a role updates
                                for idepby in currentRole.dependBy:
                                    #if depby was updating, it will need to do another update later
                                    if idepby.status == Role.SC.SC_UPDATING:
                                        idepby.setAttr("updateFlag", "True"); #make depon update again after it finishes updating
                                        somethingNewStarted.add(idepby.cluster[0].Cluster_ID); #something new will start later
                                    elif idepby.status in (Role.SC.SC_STARTED, Role.SC.SC_UPDATED) and dependOnsUpdated(idepby):
                                        #update depby as long as its dependOn are updated
                                        updateRole(idepby);
                                        somethingNewStarted.add(idepby.cluster[0].Cluster_ID);

        elif msg.message == "roleApplyError": #role setup error or update error
            currentRole = getRoles(roleid=msg.messageAbout)[msg.messageAbout];
            log.info("ERROR: An error occurred while applying Role %s." % currentRole.getAttr("name"));
            resetRoleAttr(currentRole);
            resetDependBys(currentRole); #set statuses of dependBy to INITIALIZED so they don't keep working
            #fixed: originally assigned to the bare name `cluster`, which is
            #unbound unless a later message branch ran on a previous iteration;
            #the intent is to push THIS role's cluster back to the setup stage
            currentRole.cluster[0].status = Cluster.SC.SC_STARTING;
            setupRole(currentRole); #restart currentRole
            errorOccurred.add(currentRole.cluster[0].Cluster_ID);

        #***this probably isn't correct***
        #***how does the controller know if the role should be deleted from the database also***
        elif msg.message == "roleStopped": #role was successfully stopped
            currentRole = getRoles(roleid=msg.messageAbout)[msg.messageAbout];
            log.info("Role %s was stopped on VM %s" % (currentRole.getAttr("name"), currentRole.vm.getAttr("name")));
            if currentRole.cluster[0].status == Cluster.SC.SC_STOPPING: #whole cluster is stopping
                currentRole.status = Role.SC.SC_INITIALIZED;
                if vmRolesInitialized(currentRole.vm):
                    #if all the roles on a vm are initialized (they were all stopped), then the vm can be turned off
                    stopVM(currentRole.vm);
            elif currentRole.cluster[0].status == Cluster.SC.SC_STARTING: #cluster is scaling down
                currentRole.status = Role.SC.SC_INITIALIZED;
                #queue the role (and its vm once empty) for deletion after the
                #message loop finishes
                rolesToRemove.append(currentRole);
                if vmRolesInitialized(currentRole.vm):
                    stopVM(currentRole.vm);
                    vmsToRemove.append(currentRole.vm);
                roleFinished.add(currentRole.cluster[0].Cluster_ID);
            if currentRole.getAttr("CustomAttrChanged") == "True": #if role's attributes changed, then updated dependOn need to update again later
                currentRole.setAttr("CustomAttrChanged", "False");
                for idepon in currentRole.dependOn:
                    if idepon.status == Role.SC.SC_UPDATED:
                        idepon.status = Role.SC.SC_STARTED; #do this so depon must update again
                        resetRoleFlags(idepon); #depon's update script will be created again when cluster moves into update stage
                        setDependBysToStarted(idepon); #dependBy of depon will have to update again since depon will update

        elif msg.message == "roleStopError": #error when trying to stop a role
            currentRole = getRoles(roleid=msg.messageAbout)[msg.messageAbout];
            log.info("Role %s failed to stop on VM %s" % (currentRole.getAttr("name"), currentRole.vm.getAttr("name")));
            #reset the role and immediately retry the stop
            currentRole.status = Role.SC.SC_INITIALIZED;
            stopRole(currentRole);

        elif msg.message == "stopVM": #stop a vm
            vm = getVMs(vmid=msg.messageAbout)[msg.messageAbout];
            #***can the vm just be turned off to kill the roles, or do the roles need to be safely killed?***
            stopVM(vm);

        elif msg.message == 'startCluster': #start a cluster
            cluster = getClusters(clusterid=msg.messageAbout)[msg.messageAbout];
            startCluster(cluster);

        elif msg.message == "stopCluster": #stop a cluster
            cluster = getClusters(clusterid=msg.messageAbout)[msg.messageAbout];
            stopCluster(cluster);

        elif msg.message == "doScaling": #do scaling
            #messageInfo carries the change in worker count (negative = scale down)
            cluster = getClusters(clusterid=msg.messageAbout)[msg.messageAbout];
            doScaling(cluster, msg.messageInfo);
            somethingNewStarted.add(cluster.Cluster_ID);
            
    #delete messages from database because they have been processed
    for msg in allMessages:
        DBSession.delete(msg);

    #purge roles queued for removal by scale-down (see roleStopped handler)
    for irole in rolesToRemove:
        DBSession.delete(irole);

    #purge vms queued for removal by scale-down
    for ivm in vmsToRemove:
        DBSession.delete(ivm);

    #Advance cluster lifecycle: STARTING -> UPDATING once every role is set
    #up, and UPDATING -> UPDATED once every role has finished updating.
    #Only clusters where a role finished this pass, nothing new started, and
    #no error occurred are considered.
    for cluster in getClusters().values():
        #see if need to check if cluster finished its start/update stage
        clusterid = cluster.Cluster_ID;
        if clusterid in roleFinished and clusterid not in somethingNewStarted and clusterid not in errorOccurred:
            #if cluster is in the starting stage, check if it can move into the updating stage
            if cluster.status == Cluster.SC.SC_STARTING:
                if allRolesSetUp(cluster): #if all roles are set up, then move cluster in update stage
                    cluster.status = Cluster.SC.SC_UPDATING;
                    #update roles that have dependOn updated
                    #the first time around, this will update STARTED roles without dependOn
                    #if an error occurred in the update stage that caused the cluster to move back to the start stage, then some roles may already be updated; therefore, some roles without dependOn may not need to update again; also, it is possible that dependOnsUpdated returns True here for roles with dependOn
                    for irole in cluster.role:
                        #fixed: updateFlag holds the string "True"/"False", so the
                        #original bare truthiness test was always True ("False" is
                        #a non-empty string) and re-updated roles that didn't need
                        #it; compare against "True" as done elsewhere in this file
                        if (irole.status == Role.SC.SC_STARTED or irole.getAttr("updateFlag") == "True") and dependOnsUpdated(irole):
                            updateRole(irole);
            #if cluster is in the updating stage, check if whole cluster is done with everything
            elif cluster.status == Cluster.SC.SC_UPDATING:
                if allRolesUpdated(cluster):
                    #if all roles are updated then cluster is UPDATED (finished deployment)
                    cluster.status = Cluster.SC.SC_UPDATED;
                    log.info("Cluster %s has finished deploying its roles and VMs." % cluster.getAttr("name"));

    log.debug("Committing");
    log.debug("DIRTY=%s" % str(DBSession.dirty));
    #NOTE(review): `transaction` is not imported explicitly in this file;
    #presumably it arrives via the star import from crv.model.crvmodel --
    #confirm, otherwise this line raises NameError.
    transaction.commit();

def buildRoleDep(roles):
    """Rebuild the dependOn list of every role in *roles*.

    A role depends on every EARLIER role in the list whose roleDef appears
    in its own roleDef.dependOn, plus all of its local dependencies.
    """
    for idx, role in enumerate(roles):
        role.dependOn = []

        for earlier in roles[:idx]:
            if earlier.roleDef in role.roleDef.dependOn:  #role depends on earlier
                log.debug("buildRoleDep:"+ role.getAttr("name")+ " depends on "+ earlier.getAttr("name"))
                role.dependOn.append(earlier)

        for local_dep in role.dependOnLocal:
            role.dependOn.append(local_dep)
                
#Rebuild dependOn for each freshly-created role after a scale-up.
#Assumes no global dependency relationships exist BETWEEN the new roles
#themselves: a new role may depend globally only on pre-existing roles
#(oldRoles), plus its own local dependencies.
def buildRoleDep_scale(oldRoles, newRoles):
    for fresh in newRoles:
        deps = []
        for existing in oldRoles:
            if existing.roleDef in fresh.roleDef.dependOn:
                log.debug("buildRoleDep_scale:" + fresh.getAttr("name") + " depends on " + existing.getAttr("name"));
                deps.append(existing)
        deps.extend(fresh.dependOnLocal)
        fresh.dependOn = deps
    
