import re
import time
import Queue
import thread
import random
import threading
import string
import socket
import os

from xml.dom import minidom

from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.sql.expression import and_, or_

from ivic.core.Utils import GenUUID
from ivic.vsched.Model import *
from ivic.vsched.ContainerOp import get_webfarm_url, get_vsr_url, get_vmc_url, DummySOAPProxy
from SOAPpy import SOAPProxy
from ivic.vsched.Impl import InstanceGetter

from ivic.core import Logging, Config, project_path
logger = Logging.get_logger('ivic.vsched.Controller')

# Fallback status an op target falls back to when the SOAP operation driving
# the corresponding transient ("-ing") state fails (see do_soapinvoke_task).
failedTransTable = {
    'deploying': 'invalid',
    'starting': 'error',
    'stopping': 'error',
    'undeploying': 'error',
    'migrating': 'running',  # a failed migration leaves the VM on its source
    'scaling': 'error',
    'importing': 'invalid',
    'hibernating': 'error',
    'resuming': 'error',
    'snapshotting': 'error',
    'rolling-back': 'error',
    'running': 'running',
    'startpcaping': 'running',
    'stoppcaping': 'running',
    'cloning': 'invalid',
}

# Status an op target moves to when the transient ("-ing") operation
# completes successfully (see do_soapinvoke_task).
finishedTransTable = {
    'deploying': 'stopped',
    'starting': 'running',
    'stopping': 'stopped',
    'undeploying': 'invalid',
    'migrating': 'running',
    'running': 'running',
    'scaling': 'running',
    'importing': 'running',
    'hibernating': 'hibernated',
    'resuming': 'running',
    'snapshotting': 'running',
    'rolling-back': 'running',
    'startpcaping': 'running',
    'stoppcaping': 'running',
    'cloning': 'hibernated',
}

# Operations that may legally be requested from each stable instance state.
stateOps = {
    'invalid': ['deploy', 'import', 'clone'], 
    'stopped': ['start', 'undeploy'],
    'running': ['stop', 'migrate', 'scale_more', 'scale_less', 'hibernate', 'snapshot', 'rollback', 'startpcap', 'stoppcap'],
    'hibernated': ['resume','undeploy'],
    'error': ['undeploy', 'start', 'stop'],
}

# Transient status an op target enters while the given operation is running;
# keyed by job optype, looked up in do_soapinvoke_task.
opToStates = {
    'import': 'importing',
    'deploy': 'deploying',
    'start': 'starting',
    'stop': 'stopping',
    'undeploy': 'undeploying',
    'migrate': 'migrating',
    'scale_more': 'scaling',
    'scale_less': 'scaling',
    'hibernate': 'hibernating',
    'resume': 'resuming',
    'snapshot': 'snapshotting',
    'rollback': 'rolling-back',
    'startpcap': 'startpcaping',
    'stoppcap': 'stoppcaping',
    'clone': 'cloning',
}

# Symbolic names for the integer return codes of the vmc/vsr SOAP interfaces.
# Keys are str(code); 0 is success, negatives are specific failure causes.
resTransTable = {
    '0': 'success',
    '-1': 'saveFileErr',
    '-2': 'DownLoadcertErr',
    '-3': 'VSWConfNotExistErr',
    '-4': 'LibvirtOffErr',
    '-5': 'VPNConfNotExistErr',
    '-6': 'CreateNetXMLErr',
    '-7': 'IfconfigUPErr',
    '-8': 'VSwitchOffErr',
    '-9': 'IfconfigDownErr',
    '-10': 'OtherException',
    '-11': 'DestroyVSwitchErr',
    '-12': 'ParseConfErr',
    '-13': 'StartVSwitchErr',
    '-14': 'CreateVMErr',
    '-15': 'ShutdownVMErr',
    '-16': 'UUIDNullErr',
    '-17': 'DownloadRFSErr',
    '-18': 'CreateVMImgErr',
    '-19': 'VMImgNotExistErr',
    '-20': 'RemoveVMDirErr',
    '-21': 'VMCErr',
    '-22': 'DConnErr',
    '-23': 'DomainByUUIDNotFoundErr',
    '-24': 'MigFailErr',
    '-25': 'FileNotExistsErr',
    '-26': 'HibernateVMErr',
    '-27': 'ResumeVMErr',
    '-28': 'SnapShotVMErr',
    '-29': 'RollbackVMErr',
    '-30': 'CloneVMErr',
    '-31': 'StartPcapErr',
    '-32': 'StopPcapErr',
    '-33': 'DownloadDiskRefErr',
    '-34': 'DownloadMemRefErr',
    '-35': 'RegisterVSwitchErr',
}

class DummyObject:
    '''Stand-in op target for tasks whose content carries no op_obj;
    status assignments land on this throwaway object instead of a DB row.'''
    pass

class TaskRunner(threading.Thread):
    ''''TaskRunner thread class'''
    
    def __init__(self, id, jobDispatcher):
        threading.Thread.__init__(self)
        self.id = id
        self.task_pool = jobDispatcher.task_pool
        self.session = jobDispatcher.Session()
        self.jobDispatcher = jobDispatcher
        self.debug_soap = jobDispatcher.debug_soap

    def make_SOAPProxy(self, *args):
        if self.debug_soap:
            return DummySOAPProxy(*args)
        else:
            return SOAPProxy(*args)

    def do_soapinvoke_task(self, task):
        content = eval(task.content)
        if content['op_obj']:
            task.obj = InstanceGetter.by_key(content['op_obj'], self.session)
        else:
            task.obj = DummyObject()
        task.obj.status = opToStates[content['op_job']]
        self.session.flush()

        proxy = self.make_SOAPProxy(content['url'])
        invoke_op = getattr(proxy, content['op_name'])

        try:
            logger.debug('start to invoke vmc soap interface %s, the vmc url is %s', content['op_name'], content['url'])
            res = invoke_op(*content['op_args'])
            if not (res == None):
                if res < 0:
                    task.status = 'failed'
                    task.obj.status = failedTransTable[task.obj.status]
                    task.task_info = task.task_info + 'result: ' + resTransTable[str(res)] + ';'
                else:
                    task.status = 'finished'
                    task.obj.status = finishedTransTable[task.obj.status]
                    if content['op_name'] == 'registerVSwitch':
                        vswitch = self.session.query(Vswitch).filter(Vswitch.uuid == content['op_args'][0])[0]
                        vswitch.port = res
                    task.task_info = task.task_info + 'result: ' + resTransTable[str(0)] + ';'
            else:
                task.status = 'finished'
                task.obj.status = finishedTransTable[task.obj.status]
            self.session.flush()
        except:
            task.status = 'failed'
            task.obj.status = failedTransTable[task.obj.status]
            if content['op_name'] == 'migrateVM':
                task.obj.target_vmc_id = None
            self.session.flush()
            raise

    def __update_task_status(self, taskid):
        task = self.session.query(Task).get(taskid)
        if task.status != 'pending':
            logger.debug('error, task %d not in status pending', task.id)
            return 1
        task.status = 'scheduling'
        self.session.flush()
        return 0
        
    def do_task(self, taskid):
        i = 0
        while 1:        
            try:
                res = self.__update_task_status(taskid)
                break
            except Exception, e:
                self.session.clear()
                time.sleep(1)
                if i == 2:
                    logger.error('error executing task %d: %s %s', taskid, type(e), e)
                    logger.exception(e)
                    task.task_info = task.task_info + 'task exception: error executing task' + str(e) + ';'
                    task.status = 'failed'
                    self.session.flush()
                    break
                logger.debug('try to update task %d status again', taskid)
            i = i + 1
        
        if res:
           logger.debug('error, task %d not in status pending', taskid)
           return

        task = self.session.query(Task).get(taskid)

        adapter_name = 'do_%s_task' % task.task_type
        try:
            adapter = getattr(self, adapter_name)
        except AttributeError:
            logger.error('error, unknown task type %s', task.task_type)
            task.task_info = task.task_info + 'task exception: unknown task type ' + task.task_type + ';'
            task.status = 'failed'
            self.session.flush() 
            return
        
        try:
            adapter(task)
        except Exception, e:
            logger.error('error executing task %d: %s %s', task.id, type(e), e)
            logger.exception(e)
            task.task_info = task.task_info + 'task exception: error executing task' + str(e) + ';'
            task.status = 'failed'
            self.session.flush() 
            return
        
        self.session.flush()
    
    def run(self):
        logger.info('taskrunner[%d] started' % self.id)
        logger.debug('taskrunner[%d] waiting for task' % self.id)
        while self.jobDispatcher.running:
            try:
                taskid = self.task_pool.get(True, 1)
                task = self.session.query(Task).get(taskid)
                self.session.clear()
                logger.debug('taskrunner[%d] started task %d %s', self.id, task.id, task.title)
                self.do_task(taskid)
                logger.debug('taskrunner[%d] finished task %d %s', self.id, task.id, task.title)
                self.session.clear()
            except Queue.Empty:
                pass
            except Exception, e:
                logger.exception(e)
            
        logger.info('taskrunner[%d] stopped' % self.id)

# Object-kind key (as used in job descriptions) -> SQLAlchemy-mapped class.
jobKey2Mapper = {'vcluster': VirtualClusterInstance, 'vlab': VlabInstance, 'vmi':VirtualMachineInstance, 'snapshot':Snapshot}
# All operation types a job may carry (cf. opToStates / stateOps above).
jobOps = ['deploy', 'start', 'stop', 'undeploy', 'migrate', 'scale_more', 'scale_less', 'hibernate', 'snapshot', 'rollback', 'resume', 'startpcap', 'stoppcap', 'clone']

class TaskGenerator:
    
    def __get_vcluster_vswitch(self, vcluster):
        '''Return a query of all vswitches belonging to *vcluster*.'''
        owner_filter = Vswitch.virtual_cluster_instance_id == vcluster.id
        return self.s.query(Vswitch).filter(owner_filter)

    def __get_vmi_vswitch(self, vmi):
        '''Collect the vswitch rows referenced by the <vSwitchRef> elements
        in the VM instance's settings XML.'''
        doc = minidom.parseString(vmi.settings)
        refs = doc.getElementsByTagName('vSwitchRef')
        return [self.s.query(Vswitch).filter(Vswitch.uuid == str(ref.firstChild.data))[0]
                for ref in refs]

    def __get_vlab_vswitch(self, vlab):
        '''Return a query of all vswitches belonging to *vlab*.'''
        owner_filter = Vswitch.vlab_instance_id == vlab.id
        return self.s.query(Vswitch).filter(owner_filter)

    def __get_common_scale_vm(self, job):
        '''Select the worker VMs a common/vsaas scale job operates on;
        caches the selection on job.scale_vms and returns it.'''
        vcluster = job.obj
        # scale_more targets freshly added nodes (index beyond the current
        # worker count); scale_less targets nodes beyond the new target count.
        threshold = {'scale_more': vcluster.worknode_count,
                     'scale_less': job.count}[job.optype]
        in_cluster = VirtualMachineInstance.virtual_cluster_instance_id == vcluster.id
        above_threshold = VirtualMachineInstance.node_index > threshold
        job.scale_vms = self.s.query(VirtualMachineInstance).filter(and_(in_cluster, above_threshold))
        return job.scale_vms

    def __get_webfarm_scale_vm(self, job):
        '''Scale a webfarm cluster by moving VMs between the webfarm's
        standby and online pools via its SOAP controller, and return the
        VM instances the scale job operates on (cached on job.scale_vms).

        NOTE(review): assumes webfarm.standby/online hold space-separated
        VM id lists and webfarm.address is 'ip:port' -- confirm with Model.
        '''
        vcluster = job.obj
        webfarm = self.s.query(WebFarm).filter(WebFarm.virtual_cluster_instance_id == vcluster.id)[0]
        internal_ip, internal_port = webfarm.address.split(':')
        # The controller is reached through its dnat mapping (external side).
        webfarm_dnet = self.s.query(Dnet).filter(and_(Dnet.internal_ip == internal_ip, Dnet.internal_port == int(internal_port)))[0]
        url = get_webfarm_url(webfarm_dnet.external_ip, webfarm_dnet.external_port)
        proxy = SOAPProxy(url)
        standby_vm_ids = []
        online_vm_ids = []
        if webfarm.standby:
            standby_vm_ids = webfarm.standby.split(' ')
        if webfarm.online:
            online_vm_ids = webfarm.online.split(' ')
        if job.optype == 'scale_more':
            # Promote standby VMs to online until the requested count is met.
            vm_ips = []
            i = 0
            while i < job.count - vcluster.worknode_count:
                i = i + 1
                vm_id = standby_vm_ids[0]
                vm = self.s.query(VirtualMachineInstance).get(int(vm_id))
                doc = minidom.parseString(vm.settings).documentElement
                vm_ip = doc.getElementsByTagName("Address")[0].firstChild.data
                vm_ips.append(vm_ip)
                online_vm_ids.append(vm_id)
                standby_vm_ids.remove(vm_id)
            res = proxy.add_nodes(vm_ips)
            webfarm.online = ' '.join(online_vm_ids)
            # Freshly provisioned (still 'invalid') VMs refill the standby pool.
            for vm in self.s.query(VirtualMachineInstance).filter(and_(VirtualMachineInstance.virtual_cluster_instance_id == vcluster.id, VirtualMachineInstance.status == 'invalid')):
                standby_vm_ids.append(str(vm.id))
            webfarm.standby = ' '.join(standby_vm_ids)
            if res:
                webfarm.current_node += len(vm_ips)
            job.scale_vms = self.s.query(VirtualMachineInstance).filter(and_(VirtualMachineInstance.virtual_cluster_instance_id == vcluster.id, VirtualMachineInstance.status == 'invalid'))
        else:
            # scale_less: pick VMs from the standby list, then ask the webfarm
            # which node IPs it actually dropped and demote those to standby.
            vmis = []
            i = 0
            while i < vcluster.worknode_count - job.count:
                vmi = self.s.query(VirtualMachineInstance).get(int(standby_vm_ids[0]))
                vmis.append(vmi)
                standby_vm_ids.remove(str(vmi.id))
                i = i + 1
            vm_ips = proxy.remove_k(vcluster.worknode_count - job.count).split(' ')
            for vm in self.s.query(VirtualMachineInstance).filter(VirtualMachineInstance.virtual_cluster_instance_id == vcluster.id):
                doc = minidom.parseString(vm.settings).documentElement
                vm_ip = doc.getElementsByTagName("Address")[0].firstChild.data
                if vm_ip in vm_ips:
                    standby_vm_ids.append(str(vm.id))
                    online_vm_ids.remove(str(vm.id))
            webfarm.standby = ' '.join(standby_vm_ids)
            webfarm.online = ' '.join(online_vm_ids)
            webfarm.current_node -= len(vm_ips)
            job.scale_vms = vmis
        return job.scale_vms

    def __get_vcluster_vm(self, job):
        '''Return the VM instances a vcluster job operates on.

        For scale jobs the selection is delegated to a capability-specific
        helper and cached on job.scale_vms; any other optype targets every
        VM of the cluster.
        '''
        vcluster = job.obj
        vcluster_temp = self.s.query(VclusterTemp).get(vcluster.vcluster_temp_id)
        functions = {
            'common': self.__get_common_scale_vm,
            'vsaas': self.__get_common_scale_vm,
            'webfarm': self.__get_webfarm_scale_vm,
        }
        if job.optype in ['scale_more', 'scale_less']:
            # Reuse the cached selection so repeated calls within one job
            # see the same VM set.
            if job.scale_vms is not None:  # BUGFIX/idiom: was `!= None`
                return job.scale_vms
            get_vm = functions[vcluster_temp.capabilities]
            return get_vm(job)
        else:
            return self.s.query(VirtualMachineInstance).filter(VirtualMachineInstance.virtual_cluster_instance_id == vcluster.id)
    
    def __get_vlab_vm(self, vlab):
        '''Return a query of all VM instances belonging to *vlab*.'''
        membership = VirtualMachineInstance.vlab_instance_id == vlab.id
        return self.s.query(VirtualMachineInstance).filter(membership)
    
    def __find_vmc(self, job):
        '''Pick a virtual machine container to host a VM for *job*.

        Chooses uniformly at random among online containers whose capability
        string contains 'vmc'. Raises IndexError when none is available.
        (Removed a large block of long-dead, commented-out placement logic.)
        '''
        vmcs = self.s.query(VirtualMachineContainer).filter(and_(VirtualMachineContainer.status == 'online', VirtualMachineContainer.capability.like('%vmc%'))).all()
        # random.choice replaces the manual randint index arithmetic, which
        # raised an opaque ValueError when the candidate list was empty.
        return random.choice(vmcs)

    def __find_vsr(self):
        '''Pick an online container with 'vswitch' capability at random.

        Raises IndexError when no vswitch-capable container is online.
        '''
        vsrs = self.s.query(VirtualMachineContainer).filter(VirtualMachineContainer.status == 'online').filter(VirtualMachineContainer.capability.like('%vswitch%')).order_by(VirtualMachineContainer.updated_at.desc()).all()
        # BUGFIX: replaced a stray debug `print vsrs` with proper logging.
        logger.debug('candidate vsr containers: %s', vsrs)
        return random.choice(vsrs)

    def __generate_dnet_cmd(self, vmc, vswitch):
        '''Build iptables DNAT add/remove command pairs for every unmapped
        dnet rule of *vswitch*, allocating external ports on *vmc*.

        Returns (start_cmds, stop_cmds).
        '''
        start_cmds = []
        stop_cmds = []
        # Ports already taken by the other vswitches gatewayed on this vmc.
        used_ports = []
        for other in self.s.query(Vswitch).filter(Vswitch.gateway_virtual_machine_container_id == vmc.id):
            if other.id == vswitch.id:
                continue
            for taken in self.s.query(Dnet).filter(Dnet.vswitch_id == other.id):
                used_ports.append(taken.external_port)
        for dnet in self.s.query(Dnet).filter(Dnet.vswitch_id == vswitch.id):
            if dnet.external_ip:
                continue  # already mapped
            # First free port in [60000, 61000].
            port = 60000
            while port <= 61000:
                if port not in used_ports:
                    dnet.external_port = port
                    break
                port = port + 1
            used_ports.append(port)
            dnet.external_ip = vmc.address
            rule_args = (dnet.protocol, dnet.external_ip, dnet.external_port,
                         dnet.internal_ip, dnet.internal_port)
            start_cmds.append('iptables -t nat -A PREROUTING -p %s -d %s --dport %d -j DNAT --to %s:%d' % rule_args)
            stop_cmds.append('iptables -t nat -D PREROUTING -p %s -d %s --dport %d -j DNAT --to %s:%d' % rule_args)
        start_cmds.append('iptables -F')
        return start_cmds, stop_cmds
		
    def __generate_dnet_params(self, vmc, vswitch):
        '''Allocate external ports on *vmc* for every unmapped Dnet rule of
        *vswitch* and return one parameter dict per VM for the confDnat /
        deployVSwitch SOAP calls.
        '''
        dnet_params = []
        old_ports = []
        # Collect ports already used by any vswitch gatewayed on this vmc.
        for the_vswitch in self.s.query(Vswitch).filter(Vswitch.gateway_virtual_machine_container_id == vmc.id):
            #if the_vswitch.id == vswitch.id:
                #continue
            for the_dnet in self.s.query(Dnet).filter(Dnet.vswitch_id == the_vswitch.id):
                old_ports.append(the_dnet.external_port)
        # One parameter dict per distinct VM on the vswitch (group_by on the
        # VM id; 'vm' is really a representative Dnet row for that VM).
        for vm in self.s.query(Dnet).filter(Dnet.vswitch_id == vswitch.id).group_by(Dnet.virtual_machine_instance_id):
            dnet_param = {}
            internal_ports = []
            external_ports = []
            dnet_param['protocol'] = vm.protocol
            dnet_param['internal_ip'] = vm.internal_ip
            dnet_param['external_ip'] = vmc.address
            for dnet in self.s.query(Dnet).filter(Dnet.virtual_machine_instance_id == vm.virtual_machine_instance_id):
                if dnet.external_ip:
                    continue
                # First free port in [60000, 61000].
                i = 60000
                while i <= 61000:
                    if i not in old_ports:
                        dnet.external_port = i
                        break
                    i = i + 1
                # NOTE(review): if no port was free, i == 61001 is recorded
                # here and dnet.external_port stays unset -- worth confirming.
                old_ports.append(i)
                dnet.external_ip = vmc.address
                logger.debug("*********%s"%vmc.address)
                internal_ports.append(dnet.internal_port)
                external_ports.append(dnet.external_port)
            dnet_param['internal_ports'] = internal_ports
            dnet_param['external_ports'] = external_ports
            dnet_params.append(dnet_param)
        return dnet_params
        
    # Removed: a legacy __generate_vcluster_dnet implementation that was kept
    # here as a class-level triple-quoted string literal. It was never
    # executed (a bare string statement is a no-op), so it has been deleted
    # as dead code; recover it from version control if ever needed.

    def __generate_clone_task_xml(self, vm, job):
    	# Build the settings XML for a clone task: take the clone VM's settings,
    	# retag them with the snapshot uuid, and add DiskRef/MemRef elements
    	# pointing at the snapshot images on the container hosting the source VM.
    	snapshot = job.snapshot
    	vmi_dict = job.vmi_dict
    	doc = minidom.parseString(vm.settings).documentElement
    	doc.setAttribute('uuid', snapshot.uuid)
    	# Source VM this clone derives from (vmi_dict presumably maps
    	# clone uuid -> source uuid; confirm against the job producer).
    	s_vmi = self.s.query(VirtualMachineInstance).filter(VirtualMachineInstance.uuid == vmi_dict[vm.uuid])[0]
    	vmc = self.s.query(VirtualMachineContainer).get(s_vmi.virtual_machine_container_id)
    	disk_ref_xml = None
    	mem_ref_xml = None
    	vmtemp = self.s.query(VmTemp).get(vm.vm_temp_id)
    	if vmtemp.deploy_cowdir == None:
    	    # Local image layout on the source VM's container.
    	    disk_ref_xml = '<DiskRef>http://%s:%d/vmc/images/%s.img-%s</DiskRef>' % (vmc.address, vmc.port, s_vmi.uuid, snapshot.uuid)
    	    mem_ref_xml = '<MemRef>http://%s:%d/vmc/images/%s-saved-%s</MemRef>' % (vmc.address, vmc.port, s_vmi.uuid, snapshot.uuid)
    	else:
    	    # NFS copy-on-write layout; split('/')[2] implies deploy_cowdir
    	    # carries the NFS host as its third path component.
    	    url = vmtemp.deploy_cowdir.split('/')[2]
    	    disk_ref_xml = '<DiskRef>http://%s:%d/vmc/nfsmount/%s/_var_lib_ivic_www_vstore_nfscow/%s.img-%s</DiskRef>' % (vmc.address, vmc.port, url, s_vmi.uuid, snapshot.uuid)
    	    mem_ref_xml = '<MemRef>http://%s:%d/vmc/nfsmount/%s/_var_lib_ivic_www_vstore_nfscow/%s-saved-%s</MemRef>' % (vmc.address, vmc.port, url, s_vmi.uuid, snapshot.uuid)
    	disk_element = minidom.parseString(disk_ref_xml).documentElement
    	mem_element = minidom.parseString(mem_ref_xml).documentElement
    	doc.appendChild(disk_element)
    	doc.appendChild(mem_element)
    	return doc.toxml()
    
    def __generate_vm_task_content(self, vm, job):
        '''Build the serialized (repr'd dict) SOAP descriptor for a per-VM
        operation, lazily pinning the VM to a container on first use and
        shaping op_args per optype.'''
        if not vm.virtual_machine_container_id:
            vmc = self.__find_vmc(job)
            logger.debug('allocate vmc %s (id:%d) to vm %d', vmc.address, vmc.id, vm.id)
            vm.virtual_machine_container_id = vmc.id
        else:
            vmc = self.s.query(VirtualMachineContainer).get(vm.virtual_machine_container_id)
        content = {}
        content['url'] = get_vmc_url(vmc.address, vmc.port)
        content['op_job'] = job.optype
        content['op_name'] = job.optype + 'VM'
        # Kind of object the job targets, keyed by its mapper class.
        mapper = type(job.obj)
        keys = {
            VirtualClusterInstance:'vcluster',
            VlabInstance:'vlab',
            VirtualMachineInstance:'vmi',
        }
        if keys[mapper] == 'vcluster':
            vcluster = job.obj
            vcluster_temp = self.s.query(VclusterTemp).get(vcluster.vcluster_temp_id)
        if job.optype == 'deploy':
            content['op_args'] = [vm.uuid, vm.settings]
            if keys[mapper] == 'vcluster':
                # The webfarm master node ('mu') also receives the current
                # online node count.
                if vcluster_temp.capabilities == 'webfarm' and vm.hostname == 'mu':
                    webfarm = self.s.query(WebFarm).filter(WebFarm.virtual_cluster_instance_id == vcluster.id)[0]
                    content['op_args'].append(webfarm.current_node)
        elif job.optype == 'clone':
	        xml = self.__generate_clone_task_xml(vm, job)
	        content['op_args'] = [vm.uuid, xml]
        elif job.optype == 'migrate':
            vmcs = self.__get_vmi_migrate_vmcs(job)
            content['op_args'] = [vm.uuid, vmcs['dst_vmc'].address]
        elif job.optype in ['snapshot', 'rollback']:
            content['op_args'] = [vm.uuid, job.snapshot_uuid]
        elif job.optype == 'stop':
            stop_mode = 'destroy'
            if keys[mapper] == 'vcluster':
                # Windows XP VMs in vsaas clusters get a graceful shutdown
                # instead of a hard destroy.
                if vcluster_temp.capabilities == 'vsaas':
                    vm_temp = self.s.query(VmTemp).get(vm.vm_temp_id)
                    if vm_temp.os_type == 'windows-xp':
                        stop_mode = 'shutdown'
            content['op_args'] = [vm.uuid, stop_mode]
        else:
            content['op_args'] = [vm.uuid]

        content['op_obj'] = InstanceGetter.to_key(vm)
        return repr(content)

    def __get_vlab_related_vmcids(self, vswitch):
        '''Return the ids of every container hosting a VM wired to *vswitch*.'''
        vmi_ids = set(nic.virtual_machine_instance_id
                      for nic in self.s.query(Nic).filter(Nic.vswitch_id == vswitch.id))
        vmc_ids = set()
        for vmi_id in vmi_ids:
            vmi = self.s.query(VirtualMachineInstance).get(vmi_id)
            hosting = self.s.query(VirtualMachineContainer).filter(VirtualMachineContainer.id == vmi.virtual_machine_container_id)
            vmc_ids.update(vmc.id for vmc in hosting)
        return vmc_ids
    
    def __get_vcluster_related_vmcids(self, job, vswitch):
        '''Return the container ids relevant to a vcluster job on *vswitch*.

        Starts from all containers hosting the cluster's VMs, then narrows:
        scale_more keeps only containers that host exclusively new nodes;
        scale_less keeps only containers that host exclusively removed nodes.
        '''
        vmi_set = set()
        for vmi in self.s.query(VirtualMachineInstance).filter(VirtualMachineInstance.virtual_cluster_instance_id == vswitch.virtual_cluster_instance_id):
            vmi_set.add(vmi.id)
        vmc_set = set()
        for vmi_id in vmi_set:
            vmi = self.s.query(VirtualMachineInstance).get(vmi_id)
            for vmc in self.s.query(VirtualMachineContainer).filter(VirtualMachineContainer.id == vmi.virtual_machine_container_id):
                vmc_set.add(vmc.id)
        if job.optype == 'scale_more':
            # Drop containers that already host a pre-existing worker node
            # (node_index within the current worker count).
            oldvmi_set = set()
            for oldvmi in self.s.query(VirtualMachineInstance).filter(and_(VirtualMachineInstance.virtual_cluster_instance_id == vswitch.virtual_cluster_instance_id, VirtualMachineInstance.node_index <= job.obj.worknode_count)):
                oldvmi_set.add(oldvmi.id)
            for oldvmi_id in oldvmi_set:
                oldvmi = self.s.query(VirtualMachineInstance).get(oldvmi_id)
                for oldvmc in self.s.query(VirtualMachineContainer).filter(VirtualMachineContainer.id == oldvmi.virtual_machine_container_id):
                    if oldvmc.id in vmc_set:
                        vmc_set.remove(oldvmc.id)
        elif job.optype == 'scale_less':
            # Drop containers that still host a surviving node
            # (node_index within the new target count).
            temp_set = set()
            temp_set = vmc_set.copy()
            for vmcid in vmc_set:
                the_vmis = self.s.query(VirtualMachineInstance).filter(and_(and_(VirtualMachineInstance.virtual_machine_container_id == vmcid, VirtualMachineInstance.virtual_cluster_instance_id == vswitch.virtual_cluster_instance_id), VirtualMachineInstance.node_index <= job.count))
                for the_vmi in the_vmis:
                    if the_vmi.virtual_machine_container_id == vmcid:
                        temp_set.remove(vmcid)
                        break
            vmc_set = temp_set.copy()
        return vmc_set

    def __generate_nic_task_content(self, vswitch, vmc, vsr, job):
        '''Build the serialized SOAP descriptor for a vswitch-client
        (per-vmc) operation: deployVSwitch for deploy/import/clone jobs,
        confDnat for scale_more jobs.'''
        content = {}
        content['url'] = get_vmc_url(vmc.address, vmc.port)
        content['op_job'] = job.optype

        OpNameMapping = {
            'deploy'  : 'deployVSwitch',
            'import'  : 'deployVSwitch',
	    'clone' : 'deployVSwitch',
            'scale_more': 'confDnat',
        }

        content['op_name'] = OpNameMapping[job.optype]
        content['op_args'] = [vswitch.uuid, '%s:%d' % (vsr.address, vsr.port), vswitch.connect_type]

        if job.optype == 'scale_more':
            # confDnat takes only the vswitch uuid plus the dnat parameters.
            content['op_args'] = [vswitch.uuid]
            dnet_params = self.__generate_dnet_params(vmc, vswitch)
            content['op_args'].append(dnet_params)

        if job.optype in ['deploy', 'clone']:
            # First deployment of an internet-facing vswitch: elect this vmc
            # as its gateway and append ip/netmask plus dnat parameters.
            if vswitch.internet_access and vswitch.gateway_virtual_machine_container_id == None:
	        if vswitch.vlab_instance_id == None:
	            # vcluster case: only the vmc hosting the cluster's node 0
	            # becomes the gateway.
	            vmis = self.s.query(VirtualMachineInstance).filter(VirtualMachineInstance.virtual_machine_container_id == vmc.id)
	            for vmi in vmis:
	                if vmi.node_index == 0 and vmi.virtual_cluster_instance_id == vswitch.virtual_cluster_instance_id:
	                    content['op_args'].append(vswitch.ip)
	                    content['op_args'].append(vswitch.netmask)
	                    vswitch.gateway_virtual_machine_container_id = vmc.id
			    dnet_params = self.__generate_dnet_params(vmc, vswitch)
	                    content['op_args'].append(dnet_params)
	                    break
	        else:
	            # vlab case: this vmc becomes the gateway unconditionally.
	            content['op_args'].append(vswitch.ip)
	            content['op_args'].append(vswitch.netmask)
	            vswitch.gateway_virtual_machine_container_id = vmc.id
		    dnet_params = self.__generate_dnet_params(vmc, vswitch)
	            content['op_args'].append(dnet_params)

        content['op_obj'] = None
        return repr(content)

    def __generate_vswitch_task_content(self, vswitch, job):
        '''Build the serialized SOAP descriptor for a vswitch-server op.

        Lazily pins the vswitch to a vswitch-capable container (vsr) on
        first use, then maps the job optype onto the vsr SOAP method name.
        '''
        if vswitch.virtual_machine_container_id:
            vsr = self.s.query(VirtualMachineContainer).get(vswitch.virtual_machine_container_id)
        else:
            vsr = self.__find_vsr()
            logger.debug('allocate vsr %s (id:%d) to vswitch %d', vsr.address, vsr.id, vswitch.id)
            vswitch.virtual_machine_container_id = vsr.id
        op_names = {
            'deploy': 'registerVSwitch',
            'import': 'registerVSwitch',
            'clone': 'registerVSwitch',
            'start': 'startVSwitch',
            'resume': 'startVSwitch',
            'rollback': 'startVSwitch',
            'stop': 'stopVSwitch',
            'hibernate': 'stopVSwitch',
            'undeploy': 'deregisterVSwitch',
        }
        content = {}
        content['url'] = get_vsr_url(vsr.address, vsr.port)
        content['op_job'] = job.optype
        content['op_name'] = op_names[job.optype]
        if job.optype in ['deploy', 'clone']:
            # Registration passes connect type, optionally the owner's email
            # (openvpn cert generation), then the network address/mask.
            args = [vswitch.uuid, vswitch.connect_type]
            if vswitch.connect_type == 'openvpn':
                owner = self.s.query(User).get(job.user_id)
                args.append(owner.email)
            args.append(vswitch.ip)
            args.append(vswitch.netmask)
        else:
            args = [vswitch.uuid]
        content['op_args'] = args
        content['op_obj'] = InstanceGetter.to_key(vswitch)

        return repr(content)

    def __generate_vmc_vswitch_task_content(self, vswitch, vmc, job):
        '''Build the serialized SOAP descriptor for a <optype>VSwitch call
        addressed to a specific vmc.'''
        content = {
            'url': get_vmc_url(vmc.address, vmc.port),
            'op_job': job.optype,
            'op_name': job.optype + 'VSwitch',
            'op_args': [vswitch.uuid],
            'op_obj': InstanceGetter.to_key(vswitch),
        }
        return repr(content)

    def __new_task(self, job):
        '''Create, persist and return a fresh pending soapinvoke Task
        tied to *job*.'''
        new_task = Task()
        new_task.job_id = job.id
        new_task.uuid = GenUUID()
        new_task.status = 'pending'
        new_task.task_type = 'soapinvoke'
        new_task.task_info = ''
        self.s.save(new_task)
        logger.debug('saving %s', new_task)
        return new_task

    def __vcluster_taskids(self, job):
        optype = job.optype
        tasks = []
        last_task_id = -1
        if job.optype in ['scale_more', 'scale_less']:
            job.scale_vms = None

        for vm in self.__get_vcluster_vm(job):
            task = self.__new_task(job)
            task.description = task.title
            if optype == 'scale_more':
                job.optype = 'deploy'
            elif optype == 'scale_less':
                job.optype = 'stop'
            task.content = self.__generate_vm_task_content(vm, job)
            task.title = '%s vmi %d on vmc %d' % (job.optype, vm.id, vm.virtual_machine_container_id)
            '''
            if job.optype == 'deploy':
                vswitch = self.__get_vcluster_vswitch(job.obj)[0]
                dnets = self.__generate_vcluster_dnet(vm, vswitch)
            '''
            job.optype = optype
            self.s.flush() # generate id
            last_task_id = task.id
            tasks.append(task.id)

            if optype == 'scale_more' or optype == 'scale_less':
                task1 = self.__new_task(job)
                task1.description = task1.title
                if optype == 'scale_more':
                    job.optype = 'start'
                else:
                    job.optype = 'undeploy'
                task1.content = self.__generate_vm_task_content(vm, job)
                task1.title = '%s vmi %d on vmc %d' % (job.optype, vm.id, vm.virtual_machine_container_id)
                job.optype = optype
                task1.depend_task_id = task.id
                task1.status = 'waiting'
                self.s.flush() # generate id

                logger.debug('Task has dependency: id%d, type:%s, depend_task_id:%d', task1.id, task1.task_type, task1.status, task1.depend_task_id)
                
        for vswitch in self.__get_vcluster_vswitch(job.obj):

            if optype == 'stop' or optype == 'scale_less':
                for the_vmcid in self.__get_vcluster_related_vmcids(job, vswitch):
                    the_vmc = self.s.query(VirtualMachineContainer).get(the_vmcid)
                    t1 = self.__new_task(job)

                    t1.description = t1.title
                    job.optype = 'stop'
                    t1.content = self.__generate_vmc_vswitch_task_content(vswitch, the_vmc, job)
                    job.optype = optype
                    t1.title = '%s vswitch %d on vmc %d' % (job.optype, vswitch.id, the_vmcid)
                    t1.depend_task_id = last_task_id
                    t1.status = 'waiting'

                    self.s.flush()
                    logger.debug('Task has dependency: id:%d, type:%s, status:%s, depend_task_id:%d', t1.id, t1.task_type, t1.status, t1.depend_task_id)

            if not optype in ['scale_less', 'scale_more', 'snapshot']:
                task = self.__new_task(job)
                task.description = task.title
                task.content = self.__generate_vswitch_task_content(vswitch, job)
                task.title = '%s vswitch-server %d on vsr %d' % (job.optype, vswitch.id, vswitch.virtual_machine_container_id)
                self.s.flush() # generate id
                tasks.append(task.id)

            if not optype in ['deploy', 'scale_more', 'import', 'clone']:
                continue
            
            if vswitch.internet_access and optype == 'scale_more':
                vmc = self.s.query(VirtualMachineContainer).get(vswitch.gateway_virtual_machine_container_id)
                vsr = self.s.query(VirtualMachineContainer).get(vswitch.virtual_machine_container_id)
                t3 = self.__new_task(job)
                job.optype = 'scale_more'
                t3.title = 'add dnat to vswitch-client %d on vmc %d' % (vswitch.id, vmc.id)
                t3.description = t3.title
                t3.content = self.__generate_nic_task_content(vswitch, vmc, vsr, job)
                job.optype = optype
                self.s.flush()
                tasks.append(t3.id)

            for vmcid in self.__get_vcluster_related_vmcids(job, vswitch):
                vmc = self.s.query(VirtualMachineContainer).get(vmcid)
                vsr = self.s.query(VirtualMachineContainer).get(vswitch.virtual_machine_container_id)

                t2 = self.__new_task(job)
                job.optype = 'deploy'
                t2.title = '%s vswitch-client %d on vmc %d' % (job.optype, vswitch.id, vmc.id)
                t2.description = t2.title
                if optype == 'scale_more':
                    t2.content = self.__generate_nic_task_content(vswitch, vmc, vsr, job)
                    job.optype = optype
                    self.s.flush()
                    tasks.append(t2.id)
                else:
                    t2.depend_task_id = task.id
                    t2.content = self.__generate_nic_task_content(vswitch, vmc, vsr, job)
                    job.optype = optype
                    t2.status = 'waiting' #introduce new status for task that depending on other task
                    self.s.flush()
                    logger.debug('Task has dependency: id:%d, type:%s, status:%s, depend_task_id:%d', t2.id, t2.task_type, t2.status, t2.depend_task_id)
                    #should not do tasks.append(t2.id)
	        
		if optype == 'clone':
		    t3 = self.__new_task(job)
	            t3.title = 'start vswitch_client %d on vmc %d' % (vswitch.id, vmc.id)
	            t3.description = t3.title
	            job.optype = 'start'
	            t3.content = self.__generate_vmc_vswitch_task_content(vswitch, vmc, job)
	            job.optype = optype
	            t3.status = 'waiting'
	            t3.depend_task_id = t2.id
	            self.s.flush()
	            logger.debug('Task has dependency: id:%d, type:%s, status:%s, depend_task_id:%d', t3.id, t3.task_type, t3.status, t3.depend_task_id)
        job.optype = optype
        ##add by sishouyu
        vcluster_changes = []
        vcluster = job.obj
        if optype == 'deploy':
            vcluster_changes.append('deploy')
            vcluster_changes.append(vcluster.uuid)
        elif optype == 'undeploy':
            vcluster_changes.append('undeploy')
            vcluster_changes.append(vcluster.uuid)
        
        #added by yangry 2011/7/7    
        elif optype =='scale_more':
            vcluster_changes.append('scale_more')
            vcluster_changes.append(vcluster.uuid)
        elif optype =='scale_less':
            vcluster_changes.append('scale_less')
            vcluster_changes.append(vcluster.uuid)
            
        curl_changes = VclusterMonitor(self.s, vcluster_changes)
        
        if curl_changes.send_url != '':
            if curl_changes.uuid != '':
                curl_changes.start()   
                
        return tasks
                
    
    def __vlab_taskids(self, job):
        """Expand *job* (operating on a VlabInstance) into Task rows and
        return the ids of the tasks that are runnable immediately.

        Tasks created with status 'waiting' depend on another task via
        depend_task_id and are intentionally NOT returned: the dispatcher
        promotes them once their prerequisite reaches a terminal state.
        """
        tasks = []
        last_task_id = None
        # One task per VM in the vlab.
        for vm in self.__get_vlab_vm(job.obj):
            task = self.__new_task(job)
            task.title = '%s vmi %d' % (job.optype, vm.id)
            task.description = task.title
            task.content = self.__generate_vm_task_content(vm, job)
            self.s.flush() # generate id
            tasks.append(task.id)
            last_task_id = task.id

        # Snapshots only involve the VMs; no vswitch work needed.
        if job.optype == 'snapshot':
            return tasks

        for vswitch in self.__get_vlab_vswitch(job.obj):

            if job.optype == 'stop':
                # Stop the vswitch client on every related VMC once the last
                # VM task has completed.
                for the_vmcid in self.__get_vlab_related_vmcids(vswitch):
                    the_vmc = self.s.query(VirtualMachineContainer).get(the_vmcid)
                    t1 = self.__new_task(job)
                    t1.content = self.__generate_vmc_vswitch_task_content(vswitch, the_vmc, job)
                    t1.title = '%s vswitch %d on vmc %d' % (job.optype, vswitch.id, the_vmcid)
                    # BUG FIX: description used to be copied from title *before*
                    # the title above was assigned, so it kept the default title
                    # from __new_task instead of this task-specific one (the
                    # other tasks in this method assign title first).
                    t1.description = t1.title
                    t1.depend_task_id = last_task_id
                    t1.status = 'waiting'
                    self.s.flush()
                    logger.debug('Task has dependency: id:%d, type:%s, status:%s, depend_task_id:%d', t1.id, t1.task_type, t1.status, t1.depend_task_id)

            # Server-side vswitch task; runnable immediately.
            task = self.__new_task(job)
            task.title = '%s vswitch %d' % (job.optype, vswitch.id)
            task.description = task.title
            task.content = self.__generate_vswitch_task_content(vswitch, job)
            self.s.flush() # generate id
            tasks.append(task.id)

            if not job.optype in ['deploy', 'import', 'clone']:
                continue

            # Client-side vswitch tasks, one per related VMC, each waiting on
            # the server-side task created above.
            for vmcid in self.__get_vlab_related_vmcids(vswitch):
                vmc = self.s.query(VirtualMachineContainer).get(vmcid)
                vsr = self.s.query(VirtualMachineContainer).get(vswitch.virtual_machine_container_id)

                t2 = self.__new_task(job)
                t2.title = '%s vswitch %d on %s' % (job.optype, vswitch.id, vmc.address)
                t2.description = t2.title
                t2.depend_task_id = task.id
                t2.content = self.__generate_nic_task_content(vswitch, vmc, vsr, job)
                t2.status = 'waiting' #introduce new status for task that depending on other task

                self.s.flush()
                logger.debug('Task has dependency: id:%d, type:%s, status:%s, depend_task_id:%d', t2.id, t2.task_type, t2.status, t2.depend_task_id)
                #should not do tasks.append(t2.id)

                if job.optype == 'clone':
                    # Cloning also starts the cloned vswitch client; flip
                    # optype temporarily so the content generator emits a
                    # 'start' operation, then restore it.
                    t3 = self.__new_task(job)
                    t3.title = 'start vswitch_client %d on vmc %d' % (vswitch.id, vmc.id)
                    t3.description = t3.title
                    job.optype = 'start'
                    t3.content = self.__generate_vmc_vswitch_task_content(vswitch, vmc, job)
                    job.optype = 'clone'
                    t3.status = 'waiting'
                    t3.depend_task_id = t2.id
                    self.s.flush()
                    logger.debug('Task has dependency: id:%d, type:%s, status:%s, depend_task_id:%d', t3.id, t3.task_type, t3.status, t3.depend_task_id)
        return tasks

    def __vmi_taskids(self, job):
        """Return the task ids needed to run *job* against a single VMI.

        Migration is delegated to its own multi-step builder; every other
        operation becomes exactly one task targeting the VMI's container.
        """
        if job.optype == 'migrate':
            return self.__vmi_migrate_taskids(job)

        task = self.__new_task(job)
        task.description = '%s on vmi %d' % (job.optype, job.obj.id)
        task.title = task.description

        vmc = self.s.query(VirtualMachineContainer).get(job.obj.virtual_machine_container_id)
        payload = {}
        payload['url'] = get_vmc_url(vmc.address, vmc.port)
        payload['op_job'] = job.optype
        payload['op_name'] = job.optype + 'VM'
        if job.optype in ('startpcap', 'stoppcap'):
            # capture_nics is stored as '[nic1,nic2,...]': strip the brackets
            # and split into a list.
            nic_csv = job.obj.capture_nics.strip()[1:-1]
            payload['op_args'] = [job.obj.uuid, nic_csv.split(',')]
            if job.optype == 'startpcap':
                payload['op_args'].append(job.obj.capture_expr)
        else:
            payload['op_args'] = [job.obj.uuid]
        payload['op_obj'] = InstanceGetter.to_key(job.obj)
        task.content = repr(payload)

        self.s.flush()
        return [task.id]
    

    def __get_vmi_migrate_vmcs(self, job):
        """Look up and return the source and destination VMCs of a
        migration job as a dict with keys 'src_vmc' and 'dst_vmc'."""
        fetch = self.s.query(VirtualMachineContainer).get
        return {
            'src_vmc': fetch(job.obj.virtual_machine_container_id),
            'dst_vmc': fetch(job.obj.target_vmc_id),
        }

    def __vmi_migrate_taskids(self, job):
        """Build the dependency chain of tasks that migrates a VMI to its
        target VMC.

        Only the first task id is returned; all later tasks are created
        with status 'waiting' and chained through depend_task_id, so the
        dispatcher releases each one as its predecessor terminates.
        """
        vmc_pair = self.__get_vmi_migrate_vmcs(job)
        dst = vmc_pair['dst_vmc']

        # Step 1: push the VM definition onto the destination VMC.
        info_task = self.__new_task(job)
        info_task.title = 'deploy info of vmi %d' % (job.obj.id)
        info_task.description = info_task.title
        info_task.content = repr({
            'url': get_vmc_url(dst.address, dst.port),
            'op_job': 'deploy',
            'op_name': 'deployVMInfo',
            'op_args': [job.obj.uuid, job.obj.settings],
            'op_obj': InstanceGetter.to_key(job.obj),
        })
        self.s.flush()
        prev_id = info_task.id
        chain = [info_task.id]

        # Step 2: deploy and start the client side of every vswitch the VM
        # uses on the destination VMC, each step waiting on the previous.
        for vswitch in self.__get_vmi_vswitch(job.obj):
            vsr = self.s.query(VirtualMachineContainer).get(vswitch.virtual_machine_container_id)

            job.optype = 'deploy'
            deploy_task = self.__new_task(job)
            deploy_task.title = '%s vswitch-client %d on vmc %d' % (job.optype, vswitch.id, dst.id)
            deploy_task.description = deploy_task.title
            deploy_task.depend_task_id = prev_id
            deploy_task.content = self.__generate_nic_task_content(vswitch, dst, vsr, job)
            deploy_task.status = 'waiting'
            self.s.flush()
            prev_id = deploy_task.id

            job.optype = 'start'
            start_task = self.__new_task(job)
            start_task.title = '%s vswitch-client %d on vmc %d' % (job.optype, vswitch.id, dst.id)
            start_task.description = start_task.title
            start_task.depend_task_id = prev_id
            start_task.content = self.__generate_vmc_vswitch_task_content(vswitch, dst, job)
            start_task.status = 'waiting'
            self.s.flush()
            prev_id = start_task.id

        # Step 3: the migration itself, last in the chain.
        job.optype = 'migrate'
        migrate_task = self.__new_task(job)
        # NOTE(review): description is copied before title is assigned below,
        # so it keeps whatever title __new_task set — preserved as-is.
        migrate_task.description = migrate_task.title
        migrate_task.depend_task_id = prev_id
        migrate_task.content = self.__generate_vm_task_content(job.obj, job)
        migrate_task.title = job.title
        migrate_task.status = 'waiting'
        self.s.flush()

        return chain
    
    '''
    def __vmi_pcap_taskids(self, job):
	tasks = []
	task = self.__new_task(job)
	task.description = '%s on vmi %d' % (job.optype, job.obj.id)
	task.title = task.description
	content = {}
	vmc = self.s.query(VirtualMachineContainer).get(job.obj.virtual_machine_container_id)
	content['url'] = get_vmc_url(vmc.address, vmc.port)
	content['op_job'] = job.optype
	content['op_name'] = job.optype + 'VM'
	nics = job.obj.capture_nics.strip()
	nics = nics[1:-1]
	content['op_args'] = [job.obj.uuid, string.split(nics, ',')]
	if job.optype == 'startpcap':
	    content['op_args'].append(job.obj.capture_expr)
	content['op_obj'] = InstanceGetter.to_key(job.obj)
	task.content = repr(content)
	self.s.flush()
	tasks.append(task.id)
	return tasks
    '''

    def __delete_vcluster(self, vcluster):
        """Remove a vcluster and all dependent rows (VMIs with their dnets,
        vswitches, snapshots), then flush the session once."""
        machines = self.s.query(VirtualMachineInstance).filter(
            VirtualMachineInstance.virtual_cluster_instance_id == vcluster.id)
        for machine in machines:
            routes = self.s.query(Dnet).filter(Dnet.virtual_machine_instance_id == machine.id)
            for route in routes:
                self.s.delete(route)
            self.s.delete(machine)
        for switch in self.__get_vcluster_vswitch(vcluster):
            self.s.delete(switch)
        snaps = self.s.query(Snapshot).filter(Snapshot.virtual_cluster_instance_id == vcluster.id)
        for snap in snaps:
            self.s.delete(snap)
        self.s.delete(vcluster)
        self.s.flush()

    def __delete_vlab(self, vlab):
        """Remove a vlab and all dependent rows (VMIs with their dnets,
        vswitches with their nic, snapshots), then flush the session."""
        for machine in self.__get_vlab_vm(vlab):
            routes = self.s.query(Dnet).filter(Dnet.virtual_machine_instance_id == machine.id)
            for route in routes:
                self.s.delete(route)
            self.s.delete(machine)
        for switch in self.__get_vlab_vswitch(vlab):
            # NOTE(review): only the first Nic row is removed (as in the
            # original logic) and an IndexError is raised if none exists —
            # assumes every vlab vswitch has exactly one nic; confirm.
            first_nic = self.s.query(Nic).filter(Nic.vswitch_id == switch.id)[0]
            self.s.delete(first_nic)
            self.s.delete(switch)
        snaps = self.s.query(Snapshot).filter(Snapshot.vlab_instance_id == vlab.id)
        for snap in snaps:
            self.s.delete(snap)
        self.s.delete(vlab)
        self.s.flush()

    def delete_vmis(self, vcluster, count, session):
        """Delete surplus VMI rows (and their dnets) of *vcluster*.

        Non-webfarm clusters: drop every VMI whose node_index exceeds
        *count*.  Webfarm clusters: keep the VMIs listed in the webfarm's
        standby/online sets plus the 'mu' host, drop the rest.
        """
        self.s = session

        def _purge(machine):
            # Delete the VMI together with all of its dnet rows.
            for route in self.s.query(Dnet).filter(Dnet.virtual_machine_instance_id == machine.id):
                self.s.delete(route)
            self.s.delete(machine)

        template = self.s.query(VclusterTemp).get(vcluster.vcluster_temp_id)
        members = self.s.query(VirtualMachineInstance).filter(
            VirtualMachineInstance.virtual_cluster_instance_id == vcluster.id)
        if template.capabilities != 'webfarm':
            for machine in members:
                if machine.node_index > count:
                    _purge(machine)
        else:
            webfarm = self.s.query(WebFarm).filter(WebFarm.virtual_cluster_instance_id == vcluster.id)[0]
            keep_ids = set(webfarm.standby.split(' ') + webfarm.online.split(' '))
            for machine in members:
                if str(machine.id) in keep_ids or machine.hostname == 'mu':
                    continue
                _purge(machine)
        self.s.flush()
    
    def taskids(self, job, session):
        """Generate the Task rows for *job* and return the runnable ids.

        Dispatches on the concrete type of job.obj (vcluster, vlab or VMI).
        """
        self.s = session
        dispatch = {
            VirtualClusterInstance: self.__vcluster_taskids,
            VlabInstance: self.__vlab_taskids,
            VirtualMachineInstance: self.__vmi_taskids,
        }
        return dispatch[type(job.obj)](job)

    def deleterecords(self, obj, session):
        """Delete *obj* (a vcluster or vlab) and every dependent DB row,
        dispatching on its concrete type."""
        self.s = session
        handlers = {
            VirtualClusterInstance: self.__delete_vcluster,
            VlabInstance: self.__delete_vlab,
        }
        handlers[type(obj)](obj)

# Module-level singleton used by JobDispatcher to expand jobs into tasks
# and to clean up database records.
taskgen = TaskGenerator()

def jobExceptionHandler(fun):
    '''Decorator for job-processing methods of JobDispatcher.

    Any exception raised by the wrapped callable is logged, appended to
    job.job_info, and swallowed after marking the job 'failed'.  Expects
    the call shape ``fun(dispatcher, job, ...)``: args[0] must expose a
    ``session`` attribute and args[1] is the Job row.
    '''
    import functools

    # functools.wraps preserves the wrapped function's name/docstring,
    # which the original decorator lost.
    @functools.wraps(fun)
    def __wrapper(*args, **kwargs):
        try:
            return fun(*args, **kwargs)
        except Exception as e:
            # NOTE(review): e.message is Python-2 specific; kept for
            # behavioural compatibility with the rest of this module.
            logger.debug('jobExceptionHandler caught exception %s: %s', type(e), e.message)
            logger.exception(e)
            logger.debug('jobExceptionHandler set job.status to `failed` and ignores the exception')
            job = args[1]
            if not job.job_info :
                job.job_info = ''
            job.job_info = job.job_info + 'job exception: ' + e.message + ';'
            job.status = 'failed'
            session = args[0].session
            session.flush()
    return __wrapper

class JobDispatcher(threading.Thread):
    '''JobDispatcher thread class.

    Main scheduling loop of vsched: polls the Job table, expands pending
    jobs into Task rows (via the module-level ``taskgen``), hands runnable
    task ids to a pool of TaskRunner worker threads through ``task_pool``,
    promotes dependent ('waiting') tasks once their prerequisite ends, and
    finally marks jobs finished or failed, applying the module-level
    finishedTransTable / failedTransTable state transitions.
    '''
    
    def __init__(self, Session, thread_num = 1, debug_soap = False, task_timeout = 10, exit_when_no_job = False):
        # Session: SQLAlchemy session factory; the dispatcher's own session
        # is created lazily in run().  task_timeout is in minutes.
        # exit_when_no_job: lets batch/test runs stop once nothing is active.
        threading.Thread.__init__(self)
        self.Session = Session
        self.task_pool = Queue.Queue(0) # unbounded queue of task ids consumed by TaskRunners
        self.running = True
        self.runners = []
        self.exit_when_no_job = exit_when_no_job
        self.debug_soap = debug_soap

        self.task_timeout = task_timeout
        # Global socket timeout so remote SOAP calls cannot block a runner
        # much longer than the task timeout itself.
        socket.setdefaulttimeout(60 * task_timeout)

        # Worker threads start immediately and block on task_pool.
        for id in range(thread_num):
            r = TaskRunner(id, self)
            r.start()
            self.runners.append(r)

    def stop(self):
        # Ask do_loop() to exit after its current iteration.
        self.running = False
    
    # Patterns used to parse job.content fragments such as
    # 'id:12', 'newcount:3' and 'snapshot:<uuid>'.
    __objid_fmt0 = re.compile('id:([0-9]+)')
    __objid_fmt1 = re.compile('newcount:([0-9]+)')
    __snapshot_fmt = re.compile('snapshot:(.*)')

    def __change_state(self, instance, optype):
        # Reject operations that are not legal in the object's current
        # state, then move it into the matching transient state.
        if (not instance.status in stateOps) or (not optype in stateOps[instance.status]):
            raise LookupError, 'vCluster/vLab/vmi not in expected state'

        instance.status = opToStates[optype]
        self.session.flush()
    
    def __check_job_obj(self, job):
        '''Parse job.content, resolve the target object and normalise the
        operation, then transition the object into its transient state.

        Side effects: sets job.obj and job.optype (plus job.count for
        scale jobs, job.snapshot_uuid/job.snapshot for snapshot-style jobs,
        and clones the instance for 'clone').  Raises LookupError on
        malformed content or an illegal state transition.
        '''
        vtype, optype = job.job_type.split('_')
        if optype == 'scale':
            # content: 'id:<objid>,newcount:<n>'
            content0, content1 = job.content.split(',')
            try:
                mat0 = re.match(self.__objid_fmt0, content0)
                mat1 = re.match(self.__objid_fmt1, content1)
                objid = int(mat0.groups()[0])
                count = int(mat1.groups()[0])
            except (AttributeError, TypeError, ValueError):
                raise LookupError, 'job.content fomat is wrong'
        elif optype in ['snapshot', 'rollback', 'clone']:
            # content: 'id:<objid>,snapshot:<uuid>'
            tokens = job.content.split(',')
            try:
                mat0 = re.match(self.__objid_fmt0, tokens[0])
                mat1 = re.match(self.__snapshot_fmt, tokens[1])
                objid = int(mat0.groups()[0])
                job.snapshot_uuid = mat1.groups()[0]
            except:
                raise LookupError, 'job.content fomat is wrong'
        else:
            # content: 'id:<objid>'
            try:
                mat = re.match(self.__objid_fmt0, job.content)
                objid = int(mat.groups()[0])
            except (AttributeError, TypeError, ValueError):
                raise LookupError, 'object id not found in job.content'

        mapper = jobKey2Mapper[vtype]
        obj = self.session.query(mapper).get(objid)
        if not obj:
            raise LookupError, 'referenced object (id:%d) not found in database' % objid

        # 'scale' is resolved into a direction relative to the current size.
        if optype == 'scale':
            if count > obj.worknode_count:
                optype = 'scale_more'
            else:
                optype = 'scale_less'

        if optype not in jobOps:
            raise LookupError, 'job_type not supported: %s' % optype

        job.obj = obj
        job.optype = optype
        if optype == 'scale_more' or optype == 'scale_less':
            job.count = count
	if job.optype == 'clone':
	    snapshot = self.session.query(Snapshot).filter(Snapshot.uuid == job.snapshot_uuid)[0]
	    job.obj,job.vmi_dict = clone_instance(job.obj, self.session, job.user_id, job.id)
	    job.content = 'id:%d,snapshot:%s' % (job.obj.id, snapshot.uuid)
	    job.snapshot = snapshot
	    self.session.flush()
        self.__change_state(job.obj, job.optype)

    def __get_pending_jobs(self):
        # Jobs newly inserted and not yet expanded into tasks.
        return self.session.query(Job).filter(Job.status == 'pending')

    def __is_no_scheduling_jobs(self):
        # True when no job is currently being executed.
        return self.session.query(Job).filter(Job.status == 'scheduling').count() == 0
    
    def __is_finished_job(self, job):
        # A job is done once none of its tasks is still in flight.
        condition = and_(Task.job_id == job.id, or_(Task.status == 'scheduling', Task.status == 'pending', Task.status == 'waiting'))
        return self.session.query(Task).filter(condition).count() == 0
    
    def __get_finished_jobs(self):
        scheduling_jobs = self.session.query(Job).filter(Job.status == 'scheduling')
        return filter(self.__is_finished_job, scheduling_jobs)

    def __is_depending_task_finished(self, task):
        # Note: a task whose prerequisite *failed* is also released.
        depending_task = self.session.query(Task).get(task.depend_task_id)
        return depending_task and depending_task.status in ['failed', 'finished']

    def __get_waiting_tasks(self):
         # 'waiting' tasks whose prerequisite has reached a terminal state.
         depending_tasks = self.session.query(Task).filter(Task.status == 'waiting').filter(Task.depend_task_id > 0)
         return filter(self.__is_depending_task_finished, depending_tasks)
    
    def __is_job_success(self, job):
        # Success means not a single task of the job failed.
        return self.session.query(Task).filter(Task.status == 'failed').filter(Task.job_id == job.id).count() == 0
    
    def set_scheduling(self, job):
        # Mark the job as being executed.
        job.status = 'scheduling'
        self.session.flush()

    def __get_job_obj(self, job):
        '''Parse job.content and return the referenced object (or None).

        NOTE(review): duplicates the parsing of __check_job_obj but without
        its side effects; keep the two in sync.
        '''
        vtype, optype = job.job_type.split('_')
        if optype == 'scale':
            content0, content1 = job.content.split(',')
            try:
                mat0 = re.match(self.__objid_fmt0, content0)
                mat1 = re.match(self.__objid_fmt1, content1)
                objid = int(mat0.groups()[0])
                count = int(mat1.groups()[0])
            except (AttributeError, TypeError, ValueError):
                raise LookupError, 'job.content fomat is wrong'
        elif optype in ['snapshot', 'rollback', 'clone']:
            tokens = job.content.split(',')
	    try:
	        mat0 = re.match(self.__objid_fmt0, tokens[0])
		mat1 = re.match(self.__snapshot_fmt, tokens[1])
		objid = int(mat0.groups()[0])
		job.snapshot_uuid = mat1.groups()[0]
	    except:
		raise LookupError, 'job.content fomat is wrong'
        else:
            try:
                mat = re.match(self.__objid_fmt0, job.content)
                objid = int(mat.groups()[0])
            except (AttributeError, TypeError, ValueError):
                raise LookupError, 'object id not found in job.content'
        mapper = jobKey2Mapper[vtype]
        obj = self.session.query(mapper).get(objid)
        return obj

    def set_failed(self, job):
        '''Mark *job* failed: collect task_info from all of its tasks,
        roll the object back per failedTransTable, and for scale jobs
        reconcile worknode_count (deleting surplus VMIs).'''
        if not job.job_info:
            job.job_info = ''
        for task in self.session.query(Task).filter(Task.job_id == job.id):
            job.job_info = job.job_info + 'task ' + str(task.id) + ': ' + task.task_info + ';'
        job.status = 'failed'
        obj = self.__get_job_obj(job)
	obj.job_id = None
        obj.target_vmc_id = None
        obj.status = failedTransTable[obj.status]
        if job.job_type.split('_')[1] == 'scale':
            content0, content1 = job.content.split(',')
            try:
                mat1 = re.match(self.__objid_fmt1, content1)
                count = int(mat1.groups()[0])
            except (AttributeError, TypeError, ValueError):
                raise LookupError, 'job.content fomat is wrong'
            if count < obj.worknode_count:
                taskgen.delete_vmis(obj, count, self.session)
            obj.worknode_count = count
        self.session.flush()

    def set_finished(self, job):
        '''Mark *job* finished and apply the matching object transition:
        migrate moves the VMI onto its target VMC, undeploy removes the
        database records, scale reconciles worknode_count.'''
        job.status = 'finished'
        obj = self.__get_job_obj(job)
	obj.job_id = None
        obj.status = finishedTransTable[obj.status]
	if job.job_type.split('_')[1] == 'migrate':
	    obj.virtual_machine_container_id = obj.target_vmc_id
	    obj.target_vmc_id = None
        if job.job_type.split('_')[1] == 'undeploy':
            taskgen.deleterecords(obj, self.session)
        if job.job_type.split('_')[1] == 'scale':
            content0, content1 = job.content.split(',')
            try:
                mat1 = re.match(self.__objid_fmt1, content1)
                count = int(mat1.groups()[0])
            except (AttributeError, TypeError, ValueError):
                raise LookupError, 'job.content fomat is wrong'
            if count < obj.worknode_count:
                taskgen.delete_vmis(obj, count, self.session)
            obj.worknode_count = count
        self.session.flush()

    @jobExceptionHandler
    def scheduleJob(self, job):
        '''Validate a pending job, generate its tasks and queue the
        immediately-runnable ids; any exception marks the job failed.'''
        self.__check_job_obj(job)
        ids = taskgen.taskids(job, self.session)
        self.set_scheduling(job)

        for id in ids:
            self.task_pool.put(id)
        
    @jobExceptionHandler
    def finishJob(self, job):
        # Route a completed job to finished/failed handling.
        if self.__is_job_success(job):
            self.set_finished(job)
        else:
            self.set_failed(job)

    def set_task_timeout(self, task):
        '''Fail a task that sat too long in a non-terminal state and flip
        its owning object to 'error' (best effort).'''
        task.status = 'failed'
        task.task_info = 'pending or scheduling time out'
        try:
            # NOTE(review): eval() of DB-stored task content — acceptable
            # only while tasks are written exclusively by this service.
            content = eval(task.content)
            task.obj = InstanceGetter.by_key(content['op_obj'], self.session)
            task.obj.status = 'error'
        except Exception, e:
            task.task_info += ';' + str(e)
            pass

    def do_loop(self):
        '''Main polling loop: time out stale tasks, schedule pending jobs,
        close out finished jobs, release waiting tasks; one pass per second
        until stop() is called (or, with exit_when_no_job, until idle).'''
        while self.running:

            # 1) Fail tasks stuck in a non-terminal state for too long.
            pending_and_scheduling_tasks = self.session.query(Task).filter(and_(Task.status != 'finished', Task.status != 'failed')).all()
            for task in pending_and_scheduling_tasks:
                last_updated_time = task.updated_at
                # NOTE(review): `datetime` is not imported in this module
                # directly; assumed to arrive via the Model star-import — confirm.
                pending_or_scheduling_time = time.mktime(datetime.utcnow().timetuple()) - time.mktime(last_updated_time.timetuple())
                if pending_or_scheduling_time >= self.task_timeout * 60:
                    logger.debug('found timeout Task :id:%d, status:%s', task.id, task.status)
                    self.set_task_timeout(task)
            
            # 2) Expand newly created jobs into tasks.
            pending_jobs = self.__get_pending_jobs() 
            for job in pending_jobs:
                logger.debug('found pending Job: id:%d, type:%s, status:%s', job.id, job.job_type, job.status)
                self.scheduleJob(job)
            
            # 3) Finalise jobs whose tasks have all terminated.
            finished_jobs = self.__get_finished_jobs()
            for job in finished_jobs:
                logger.debug('found finished Job: id:%d, type:%s, status:%s', job.id, job.job_type, job.status)
                self.finishJob(job)

            # 4) Promote waiting tasks whose prerequisite has terminated.
            waiting_tasks = self.__get_waiting_tasks()
            for task in waiting_tasks:
                logger.debug('found continuable pending Task: id:%d, type:%s, status:%s', task.id, task.task_type, task.status)
                task.status = 'pending'
                self.session.flush()
                self.task_pool.put(task.id)
            self.session.flush()

            if self.exit_when_no_job and self.__is_no_scheduling_jobs():
                break

            # Drop the identity map so the next poll sees fresh rows.
            self.session.close()
            time.sleep(1)

    def run(self):
        '''Thread entry point: create the session, run the polling loop,
        then wait for every TaskRunner worker to exit.'''
        logger.info('jobdispatcher started')
        self.session = self.Session()
        
        try:
            self.do_loop()
        except Exception, e:
            logger.exception(e)
        
        self.running = False
        for r in self.runners:
            r.join()
            
        logger.info('jobdispatcher stopped')


##add by sishouyu
class VclusterMonitor(threading.Thread):
    '''One-shot notifier thread that POSTs a vcluster change event
    (deploy / undeploy / scale_more / scale_less) as XML to the configured
    monitoring URL via curl.

    Constructed with an already-open SQLAlchemy session and a
    ``[op, uuid]`` list; callers check send_url and uuid before start()ing.
    '''
    def __init__(self, Session, vcluster_changes):
        threading.Thread.__init__(self)
        self.Session = Session
        self.send_url = self.get_send_url()
        # BUG FIX: default both fields so curl_res()/callers can never hit
        # an AttributeError — previously self.op was left unset when
        # vcluster_changes was empty.
        self.op = ''
        self.uuid = ''
        if len(vcluster_changes) > 0:
            self.op = vcluster_changes[0]
            self.uuid = vcluster_changes[1]

    def get_certs(self):
        '''Load the client key, server certificate and monitor URL from
        /etc/ivic/monitor.yaml; returns an empty list on any error.'''
        config_monitor_path = project_path("/etc/ivic/monitor.yaml")
        certs = []
        try:
            config_monitor = Config.load(config_monitor_path)
            certs.append(config_monitor['client_key'])
            certs.append(config_monitor['server_cert'])
            certs.append(config_monitor['monitor_send_url'])
        except Exception as e:
            logger.exception(e)
        return certs

    def get_send_url(self):
        '''Return vsaas_send_url from /etc/ivic/vsched.yaml, or '' when the
        file is missing or malformed.'''
        vsaas_monitor_path = project_path("/etc/ivic/vsched.yaml")
        try:
            vsaas_monitor = Config.load(vsaas_monitor_path)
            return vsaas_monitor['vsaas_send_url']
        except Exception as e:
            logger.exception(e)
            return ''

    def curl_res(self):
        '''POST the change event to send_url with curl; no-op without a uuid.'''
        if self.uuid == '':
            return
        message = "<events><event><type>vcluster_change</type><op>" + self.op + "</op><uuid>" + self.uuid + "</uuid></event></events>"
        # SECURITY FIX: shell-quote the interpolated values so op/uuid/url
        # content can never break out of the command line (the command was
        # previously built by raw string interpolation).
        import pipes
        cmd = 'curl -X POST -d %s -H "Content-Type: text/xml" %s' % (pipes.quote(message), pipes.quote(self.send_url))
        os.system(cmd)
        logger.info(cmd)

    def run(self):
        logger.info('curl vcluster_changes')
        # NOTE(review): self.Session is an already-created session object
        # (callers pass their live session), not a factory; it is stored but
        # unused by curl_res() — kept for interface compatibility.
        self.session = self.Session

        try:
            self.curl_res()
        except Exception as e:
            logger.exception(e)