#!coding:utf-8
# vim: tabstop=4 shiftwidth=4 softtabstop=4
"""
    @author jay.han
"""
import os
import Image
import uuid
import logging
import errno
import libvirt

from eventlet import tpool

from cvirt.cnode.connection import get_connection
from cvirt.cnode.libvirtXmlObject import *
from cvirt.common.constant import AGENT_INSTANCE_PATH
from cvirt.common import exception,utils,config
from cvirt.cnode.vm import vm_utils
from cvirt.cnode.vm import power_state
from cvirt.common import constant
from cvirt.manager.models import FixedIp
from cvirt.cnode.network import bridge

LOG = logging.getLogger(__name__)

conf = config.load_settings()

class Driver(object):
    def __init__(self):
        """Set up the driver: make sure the instances directory exists
        and probe the best disk cache mode for it."""
        self.instances_path = AGENT_INSTANCE_PATH
        self.enable_vepa = False
        self._ensure_instance_path()
        self._disk_cachemode = self._get_cachemode()

    @property
    def _conn(self):
        """Return a fresh libvirt connection on every access: the
        connection does not support concurrent use, so each request
        gets its own."""
        return get_connection()

    def _ensure_instance_path(self):
        """Create the instances directory when it does not exist yet."""
        vm_utils.ensure_fold(self.instances_path)

    def _get_cachemode(self):
        """Pick the disk cache mode: 'none' when the instances path
        supports direct I/O, otherwise fall back to 'writethrough'
        (our cloud storage cannot do cache mode 'none')."""
        if self._supports_direct_io(self.instances_path):
            return 'none'
        return 'writethrough'
    # NOTE: our cloud storage currently does not support O_DIRECT
    def _supports_direct_io(self, dirpath):
        """Probe whether *dirpath* supports O_DIRECT by opening a
        throw-away test file with the flag.

        @param dirpath: directory to probe
        @return: True if direct I/O works, False when the filesystem
                 rejects O_DIRECT (EINVAL)
        @raise OSError: for any error other than EINVAL
        """
        testfile = os.path.join(dirpath, ".tcloud.dio")
        hasDirectIO = True
        try:
            f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            os.close(f)
            LOG.debug("Path '%(path)s' supports direct I/O" %
                      {'path': dirpath})
        # BUGFIX: old-style "except OSError, e" replaced with the
        # "as" form already used elsewhere in this class
        except OSError as e:
            if e.errno == errno.EINVAL:
                LOG.debug(("Path '%(path)s' does not support direct I/O: "
                            "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
                hasDirectIO = False
            else:
                LOG.error(("Error on '%(path)s' while checking direct I/O: "
                            "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
                # bare raise keeps the original traceback
                raise
        except Exception as e:
            LOG.error(("Error on '%(path)s' while checking direct I/O: "
                        "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
            raise
        finally:
            # best effort removal of the probe file
            try:
                os.unlink(testfile)
            except Exception:
                pass

        return hasDirectIO
    
    def list_instance_ids(self):
        """Return the ids of all running domains.

        listDomainsID() already returns an empty list when nothing is
        running, so the former numOfDomains() pre-check (which raced
        against domain creation/deletion and opened a second
        connection via the _conn property) is unnecessary.
        """
        return self._conn.listDomainsID()

    def list_instances(self):
        """Return the names of all running domains.

        Domains that disappear between listing and lookup are silently
        skipped.
        """
        names = []
        for dom_id in self.list_instance_ids():
            if dom_id == 0:
                # id 0 must not be looked up
                continue
            try:
                names.append(self._conn.lookupByID(dom_id).name())
            except libvirt.libvirtError:
                # deleted while we were listing -- ignore it
                pass
        return names

    def get_num_instances(self):
        """Return the number of domains known to libvirt.

        NOTE(review): this method is redefined later in this class
        (with libvirtError handling), so this earlier definition is
        shadowed and never called. The previous docstring here was
        copied from an instance_exists override and was wrong.
        """
        return self._conn.numOfDomains()

    # def _connect(self):
    #     auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
    #             'root',
    #             None]
    #
    #     return libvirt.openAuth(self.libvirt_uri, auth, 0)

    def _lookup_by_name(self, instance_name):
        """Return the libvirt domain with the given name.

        @raise exception.NotFound: when libvirt has no such domain
        """
        try:
            return self._conn.lookupByName(instance_name)
        except libvirt.libvirtError:
            raise exception.NotFound("can not found vm by name %s" % (instance_name))

    def _lookup_by_uuid(self, uuid):
        """Return the domain with this uuid string, or None (after a
        warning) when it cannot be found."""
        try:
            return self._conn.lookupByUUIDString(uuid)
        except libvirt.libvirtError:
            LOG.warn('can not found domain with uuid %s' % uuid)

    def fill_disk(self, disk):
        """Build a GuestDisk device object from a vdisk model."""
        gdisk = GuestDisk()
        gdisk.driver_name = 'qemu'
        gdisk.source_device = disk['device_type']
        # iscsi-backed disks are exposed as block devices
        if disk.storage.protocol == 'iscsi':
            gdisk.source_type = 'block'
        gdisk.source_path = disk['disk_path']

        # optional driver attributes -- copied only when set on the model
        if disk['format']:
            gdisk.driver_format = disk['format']
        if disk['cache']:
            gdisk.driver_cache = disk['cache']
        if disk['iomode']:
            gdisk.driver_io = disk['iomode']
        if disk['readonly']:
            gdisk.readonly = disk['readonly']

        gdisk.target_bus = disk['bus'] or "ide"
        gdisk.target_dev = disk['mountpoint']
        return gdisk

    def _prepare_xml_info(self, instance, snapshot=False, guest=None):
        """Build the libvirt domain XML for *instance*.

        There is no binding between our libvirt objects and the model,
        so a fresh Guest is rebuilt from scratch on every call (the
        *guest* argument is ignored and kept only for compatibility).

        @param instance: instance model object
        @param snapshot: when True (or when the instance is in reset
                         mode) qemu is started with '-snapshot'
        @param guest: unused
        @return: domain XML string
        """
        guest = Guest()

        #TODO instance names should not be duplicated
        guest.name = instance.name
        guest.uuid = instance['uuid']

        # instance.memory may arrive as a string -- convert (MiB -> KiB)
        guest.memory = int(instance['memory']) * 1024
        guest.vcpus = instance.vcpu

        #######################cpu ############################
        if instance.cpu_host_model:
            guest.cpu = self.get_guest_cpu_config() #cpu-feature
            if 'amd' in instance.host.cpu_type.lower():
                # expose AMD SVM for nested virtualization
                nested = GuestCPUFeature()
                nested.name = 'svm'
                guest.cpu.features.append(nested)

        guest.acpi = instance.acpi
        guest.apic = instance.apic

        #smbios
        sysinfo = GuestSysinfo()
        sysinfo.system_uuid = instance['uuid']
        guest.sysinfo = sysinfo
        guest.os_smbios = GuestSMBIOS()

        #bootorder
        guest.bootorder = instance.bootorder.split(',')

        #bootmenu
        guest.bootmenu = instance.bootmenu

        #clock
        clk = GuestClock()
        clk.offset = instance.clock
        guest.set_clock(clk)

        ##############disk#####################################
        for disk in instance.vdisk_set.all():
            if disk.device_type == 'disk':
                src_disk = self.fill_disk(disk)

            elif disk.device_type == 'cdrom':
                LOG.debug('cdrom isopath %s' % disk.isopath)
                src_disk = GuestCdrom()
                src_disk.source_path = disk.isopath
                src_disk.readonly = True  #cdrom must be read only
                src_disk.target_dev = disk['mountpoint']
            else:
                # BUGFIX: an unknown device_type used to reach
                # add_device() with src_disk unbound (NameError), or
                # re-add the device from the previous loop iteration
                LOG.warn('skipping vdisk with unknown device_type %s'
                         % disk.device_type)
                continue

            guest.add_device(src_disk)

        #################input #################################
        tablet = GuestInput()
        mouse = GuestInputMouse()
        guest.add_device(tablet)
        guest.add_device(mouse)

        #################interface###############################
        for iface in instance.interface_set.filter(enable=True):
            interface = GuestInterface()
            interface.net_type = "network"
            interface.mac_addr = iface['mac']

            interface.model = iface['model']
            # an interface is always bound to exactly one network
            interface.source_network = iface.network.libvirt_name

            #------netfilter-----------
            interface.filtername = "nwfilter-%s-%s" % (instance.name,iface.name)

            guest.add_device(interface)

        #################Grapics#################################
        graphics = GuestGraphics()
        graphics.type = instance.graphics_type
        graphics.port = instance.graphics_port
        graphics.passwd = instance.remote_password
        if instance.graphics_type == 'spice':
            graphics.image_comp = instance.image_compress
            graphics.streaming = instance.streaming_mode
            graphics.jpeg = instance.jpeg_compress

            channel = Channel()
            channel.target_name = "com.redhat.spice.0"
            guest.add_device(channel)
            if instance.usbredir:
                self.add_spice_usbredir(guest)

            # one qxl video card per configured monitor
            for _ in range(int(instance.monitor)):
                qxl = GraphicVideo()
                qxl.vram = str(instance.vram)
                guest.add_device(qxl)

        guest.add_device(graphics)

        #################Sound####################################
        if instance.soundcard:
            sound = GuestSound()
            sound.model = instance.soundcard
            guest.add_device(sound)
        #################USB######################################
        #TODO USB support

        # run-once, or the instance is in reset (non-persistent) mode
        if snapshot or instance.resetmode:
            guest.qemu_command = '-snapshot'

        #watchdog
        if instance.watchdog:
            watchdog = virtualWatchdog()
            watchdog.model = instance.watchdog
            watchdog.action = instance.dogaction
            guest.add_device(watchdog)

        return guest.to_xml()


    def create_domain(self, xml, need_launch=False, autostart=False, uuid=None):
        """Define a persistent domain from *xml* and optionally boot it.

        *autostart* and *uuid* are accepted for interface compatibility
        but are not used here: libvirt starts before our storage and
        network are ready, so autostart is handled in init_instance
        instead of via domain.setAutostart().

        @return: the defined libvirt domain
        """
        # a persistent domain is first defined, then launched
        domain = self._conn.defineXML(xml)
        if need_launch:
            LOG.debug("launch instance %s with uuid = %s" % (domain.name(), domain.UUIDString()))
            self._start_domain(domain)
        return domain

    def _start_domain(self, domain):
        """Boot a defined domain; apply hairpin mode when VEPA is on."""
        domain.create()
        if self.enable_vepa:
            self._hairpin_mode(domain)

    def _undefine_domain(self, domain, name):
        """Undefine *domain*, removing any managed save image first so
        libvirt does not refuse the undefine.

        @param domain: libvirt domain object; None is a no-op
        @param name: instance name, used only for error reporting
        @raise exception.InstanceDestroyFailure: when undefine fails
        """
        if not domain:
            return
        try:
            try:
                # drop the managed save image first; libvirt may
                # refuse a plain undefine while one exists
                domain.undefineFlags(
                        libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
            except libvirt.libvirtError as e:
                # BUGFIX: "destory" typo fixed; explicit dict instead
                # of the fragile "% locals()" formatting
                LOG.error("Error from libvirt during destroy domain with "
                          "managed save removal. Code=%(errcode)s "
                          "Error=%(e)s" % {'errcode': e.get_error_code(),
                                           'e': e})
                # fall back to a plain undefine
                domain.undefine()
        except libvirt.libvirtError as e:
            LOG.error("Error from libvirt during saved instance "
                      "removal. Code=%(errcode)s Error=%(e)s" %
                      {'errcode': e.get_error_code(), 'e': e})
            raise exception.InstanceDestroyFailure(name=name)


    def _hairpin_mode(self, domain):
        """Enable hairpin mode on every vnic bridge port of *domain* so
        reflected (VEPA) traffic may return over the same port.

        BUGFIX: the previous utils.execute('echo', '>', '1', path)
        passed '>' and '1' as plain argv entries (no shell), so it only
        echoed them and never wrote the sysfs file. Write it directly.
        """
        for vnic in self.get_interfaces(domain):
            with open('/sys/class/net/%s/brport/hairpin_mode' % vnic,
                      'w') as f:
                f.write('1')

    def _cleanup(self, path):
        """Delete the vdisk file at *path*.

        A missing file is not an error: EAFP removal instead of the
        former exists()/remove() pair, which raced against concurrent
        deletion.
        """
        LOG.info('deleting vdisk files %s' % path)
        try:
            os.remove(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    def _wait_for_running(self, instance_name):
        """LoopingCall target, called at an interval until the VM is
        RUNNING (or has vanished)."""
        try:
            state = self.get_info(instance_name)['state']
        except exception.NotFound:
            LOG.error("During start, instance %s disappeared." % instance_name)
            # raise an instance, consistent with _wait_for_destroy
            raise utils.LoopingCallDone()

        if state == power_state.RUNNING:
            LOG.info("Instance %s started successfully." % instance_name)
            raise utils.LoopingCallDone()

    def get_info(self, instance_name):
        """Return state/memory/cpu info for the named domain.

        @raise exception.NotFound: when the domain does not exist
        """
        virt_dom = self._lookup_by_name(instance_name)
        if not virt_dom:
            raise exception.NotFound("can not found domain")
        state, max_mem, mem, num_cpu, cpu_time = virt_dom.info()
        return dict(state=state,
                    max_mem=max_mem,
                    mem=mem,
                    num_cpu=num_cpu,
                    cpu_time=cpu_time)

    def get_num_instances(self):
        """Return the number of domains, or 0 when libvirt errors out.

        Note: this redefinition shadows the earlier get_num_instances
        in this class.
        """
        try:
            return self._conn.numOfDomains()
        except libvirt.libvirtError:
            return 0

    def instance_exists(self, instance_id):
        """Return True when libvirt knows a domain with this name
        (despite the parameter name, this is the instance *name*)."""
        try:
            self._conn.lookupByName(instance_id)
        except libvirt.libvirtError:
            return False
        return True

    def get_cpu_info(self):
        """Parse the hypervisor capabilities XML into a Caps object."""
        caps = Caps()
        caps.parse_xml(self._conn.getCapabilities())
        return caps

    def get_guest_cpu_config(self):
        """Build a guest CPU config mirroring the host CPU exactly,
        with every host feature marked as required."""
        host_cpu = self.get_cpu_info().host.cpu

        guest_cpu = GuestCPU()
        guest_cpu.model = host_cpu.model
        guest_cpu.vendor = host_cpu.vendor
        guest_cpu.arch = host_cpu.arch
        guest_cpu.match = "exact"

        for feat in host_cpu.features:
            guest_feat = GuestCPUFeature()
            guest_feat.policy = "require"
            guest_feat.name = feat.name
            guest_cpu.features.append(guest_feat)

        return guest_cpu

    def get_interfaces(self, domain):
        """Return the target device names (e.g. vnet0) of every
        interface attached to *domain*.

        Uses a list comprehension instead of the former manual
        append loop.
        """
        interface_list = GuestInterfaceList()
        interface_list.parse_xml(domain.XMLDesc(0))
        return [iface.target_dev for iface in interface_list]

    def get_max_vcpu(self):
        """Maximum vcpus the kvm hypervisor supports per guest."""
        return self._conn.getMaxVcpus("kvm")

    def create(self, instance, snapshot=False, needlaunch=False):
        """Generate the XML for *instance*, define the domain and
        optionally launch it.

        @return: the generated XML
        """
        try:
            xml = self._prepare_xml_info(instance, snapshot=snapshot)
            LOG.info("generate xml for instance %s: \n %s" % (instance.name, xml))
            self.create_domain(xml, need_launch=needlaunch)
        except libvirt.libvirtError as e:
            LOG.exception("Error from libvirt during define of %s. "
                          "Code=%s Error=%s" % (instance['name'],
                                                e.get_error_code(), e))
            raise
        return xml



    def ha_start_instance_remote(self, instance, need_launch=False):
        """HA path: re-define (and optionally start) an instance on
        this host from its saved libvirt.xml.

        A 'domain already exists with uuid' error from libvirt is
        ignored -- the instance was already registered here.
        """
        xml_path = os.path.join(instance.instance_path,
                                instance.name,
                                'libvirt.xml')
        # BUGFIX: open() instead of the py2-only file() builtin
        with open(xml_path) as f:
            xml = f.read()
        try:
            self.create_domain(xml, need_launch=need_launch)
        except libvirt.libvirtError as e:
            # BUGFIX: the old check did "... in e" on the exception
            # object itself (a TypeError); match against its message
            if 'already exists with uuid' not in str(e):
                raise

    def start(self, instance_name):
        """Start a defined instance and poll until it reaches RUNNING.

        @return: True (also when the instance was already running)
        """
        domain = self._lookup_by_name(instance_name)

        state = domain.info()[0]
        if state == power_state.RUNNING:
            LOG.warn('Instance %s is already running' % instance_name)
            return True

        try:
            self._start_domain(domain)
            poller = utils.LoopingCall(self._wait_for_running, instance_name)
            poller.start(interval=0.5, now=True)
            return True
        except libvirt.libvirtError as e:
            LOG.exception("Error from libvirt during create of %s. Code=%s "
                          "Error=%s" % (instance_name, e.get_error_code(), e))
            raise
        except Exception as e:
            LOG.exception('Start instance %s failed as exception %s' %
                          (instance_name, e))
            raise



    def soft_reboot(self, instance_name):
        """Reboot a running instance via the hypervisor; an instance
        that is not running is simply started instead."""
        dom = self._lookup_by_name(instance_name)
        state = dom.info()[0]

        if state != power_state.RUNNING:
            return self.start(instance_name)

        dom.reboot(0)  # 0: let the hypervisor pick the reboot method



    def soft_shutdown(self, instance_name):
        """Ask the guest OS of a RUNNING instance to shut down, and
        clear autostart so it stays down after a host reboot. Does
        nothing when the instance is not running."""
        dom = self._lookup_by_name(instance_name)
        state = dom.info()[0]
        if state != power_state.RUNNING:
            return
        dom.shutdown()
        dom.setAutostart(False)

    def destroy(self, instance, cleanup=True):
        """Forcibly stop *instance*; when *cleanup* is set, also
        undefine it and remove its disks and nwfilters.

        @return: True
        """
        try:
            domain = self._lookup_by_name(instance.name)
        except exception.NotFound:
            # BUGFIX: narrowed the former bare "except:" --
            # _lookup_by_name only raises exception.NotFound here
            domain = None

        if domain is not None:
            try:
                domain.destroy()
                domain.setAutostart(False)
            except libvirt.libvirtError as e:
                is_okay = False
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # vm is already shut off -- that is what we wanted
                    state = domain.info()[0]
                    if state == power_state.SHUTOFF:
                        is_okay = True
                if not is_okay:
                    LOG.exception("Error from libvirt during destroy of %s. "
                                "Code=%s Error=%s" % (instance.name, errcode, e))
                    raise
            except Exception:
                LOG.exception("Exception while doing domain destroy for instance %s"
                              % instance.name)
                raise

        timer = utils.LoopingCall(self._wait_for_destroy, instance.name)
        timer.start(interval=0.5)

        if cleanup:
            self.clean_domain_and_network(domain, instance)

        return True


    def _wait_for_destroy(self, instance_name):
        """LoopingCall target: finish once the instance is SHUTOFF or
        no longer exists."""
        try:
            state = self.get_info(instance_name)['state']
        except exception.NotFound:
            LOG.warn("Instance %s disappear successfully." % instance_name)
            raise utils.LoopingCallDone()

        if state == power_state.SHUTOFF:
            LOG.info("Instance %s shutdown successfully." % instance_name)
            raise utils.LoopingCallDone()

    def clean_domain_and_network(self, domain, instance):
        """Undefine the domain, delete its local disk files (cdroms
        and iscsi-backed disks are left alone) and drop its
        nwfilters."""
        LOG.info("Deleting instance %s" % instance.name)
        self._undefine_domain(domain, instance.name)

        doomed_paths = [d.disk_path for d in instance.vdisk_set.all()
                        if d.device_type != 'cdrom'
                        and d.storage.protocol != 'iscsi']
        LOG.debug("clear disk path %s " % doomed_paths)
        for path in doomed_paths:
            self._cleanup(path)

        for iface in instance.interface_set.filter(enable=True):
            nwfilter = self._look_up_nw_by_name(
                "nwfilter-%s-%s" % (instance.name, iface.name))
            if nwfilter:
                nwfilter.undefine()

    def _look_up_nw_by_name(self, nw_name):
        """Return the named nwfilter, or None (with a warning) when
        the lookup fails."""
        try:
            return self._conn.nwfilterLookupByName(nw_name)
        except libvirt.libvirtError as ex:
            LOG.warn("Error from libvirt while looking up %s: "
                                  "%s" % (nw_name, ex))

    def _look_up_network_by_name(self, name):
        """Return the named libvirt network, or None (with a warning)
        when the lookup fails."""
        try:
            return self._conn.networkLookupByName(name)
        except libvirt.libvirtError as ex:
            LOG.warn("Error from libvirt while looking up %s: "
                                  "%s" % (name, ex))

    def pause(self, instance_name):
        """Suspend a RUNNING instance.

        @raise exception.InstancePauseRequired: when it is not running
        @raise exception.InstancePauseFailure: on a libvirt error
        """
        domain = self._lookup_by_name(instance_name)
        state = domain.info()[0]
        if state != power_state.RUNNING:
            raise exception.InstancePauseRequired(name=instance_name)
        try:
            domain.suspend()
        except libvirt.libvirtError as e:
            LOG.exception("Error from libvirt during suspend of %s. Code=%s "
                          "Error=%s" % (instance_name, e.get_error_code(), e))
            raise exception.InstancePauseFailure(name=instance_name)
        LOG.info("Completed paused instance %s " % instance_name)
        return True




    def unpause(self, instance_name):
        """Resume a PAUSED instance.

        A RUNNING instance is tolerated with a warning; any other
        non-paused state raises InstanceUnpauseRequired.
        """
        domain = self._lookup_by_name(instance_name)
        state = domain.info()[0]

        if state == power_state.RUNNING:
            LOG.warn('instance is running,instance status should be paused')
            return
        if state != power_state.PAUSED:
            raise exception.InstanceUnpauseRequired(name=instance_name)
        try:
            domain.resume()
        except libvirt.libvirtError as e:
            LOG.exception("Error from libvirt during resume of %s. Code=%s "
                          "Error=%s" % (instance_name, e.get_error_code(), e))
            raise exception.InstanceUnpauseFailure(name=instance_name)
        LOG.info("Completed resume instance %s " % instance_name)
        return True



    def hibernate(self, instance_name):
        """Managed-save a RUNNING instance to disk (hibernate).

        @raise exception.InstanceHibernateRequired: when not running
        @raise exception.InstanceHibernateFailure: on a libvirt error
        """
        domain = self._lookup_by_name(instance_name)

        (state, _, _, _, _) = domain.info()
        if state != power_state.RUNNING:
            raise exception.InstanceHibernateRequired(name=instance_name)
        try:
            domain.managedSave(0)
            return True

        except libvirt.libvirtError as e:
            errcode = e.get_error_code()
            # BUGFIX: the message used to wrongly say "during resume"
            LOG.exception("Error from libvirt during managed save of %s. "
                          "Code=%s Error=%s" % (instance_name, errcode, e))

            raise exception.InstanceHibernateFailure(name=instance_name)


    def update(self, instance, snapshot=False):
        """Regenerate the XML for *instance* and re-define the domain.

        defineXML overrides a previous definition with the same name,
        so no explicit undefine step is needed.

        @return: the new domain XML
        """
        new_xml = self._prepare_xml_info(instance, snapshot=snapshot)
        LOG.debug("update instance for new xml : \n%s\n" % new_xml)

        try:
            if instance.autostart:
                LOG.debug("set autostart of instance %s " % instance.name)
            self.create_domain(new_xml, autostart=instance.autostart,
                               uuid=instance.uuid)
        except Exception:
            # BUGFIX: narrowed the former bare "except:"; it is
            # re-raised either way, so behavior is preserved
            LOG.exception("error in register new domain")
            raise
        LOG.debug("instance %s updated successful" % instance.name)
        return new_xml


    def _get_disk_xml(self, domain):
        """Parse the domain XML into a GuestDiskList."""
        disks = GuestDiskList()
        disks.parse_xml(domain.XMLDesc(0))
        return disks

    # def _get_disk_path(self,domain):
    #
    #     return self._get_disk_xml(domain).disks[0].source_path

    def _delete_snap(self, domain, instance_name, snap_name):
        """Delete the named snapshot's metadata only (the disk data
        itself is untouched)."""
        snap = domain.snapshotLookupByName(snap_name, 0)
        snap.delete(libvirt.VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY)
        LOG.debug('delete snapshot %s from instance %s' % (snap.getName(), instance_name))

    def snapshot_destory(self, instance_name):
        """Delete every snapshot of the instance. (The method name
        keeps its historical misspelling for existing callers.)"""
        domain = self._lookup_by_name(instance_name)
        for snap_name in domain.snapshotListNames(0):
            self._delete_snap(domain, instance_name, snap_name)

    def snapshot_delete(self, instance_name, snapshot):
        """Delete one snapshot (identified by its uuid) if the domain
        still has it."""
        domain = self._lookup_by_name(instance_name)
        snap_name = snapshot['uuid']
        if snap_name in domain.snapshotListNames(0):
            self._delete_snap(domain, instance_name, snap_name)

    def update_device(self, inst, xml, modify_live=True):
        """Apply a device XML update to the persisted config, and also
        to the live domain when it is active and *modify_live* is
        set."""
        domain = self._lookup_by_name(inst.name)
        flags = (libvirt.VIR_DOMAIN_DEVICE_MODIFY_CONFIG |
                 libvirt.VIR_DOMAIN_DEVICE_MODIFY_FORCE)
        if domain.isActive() and modify_live:
            flags |= libvirt.VIR_DOMAIN_DEVICE_MODIFY_LIVE

        LOG.debug('update device with xml : \n%s\n' % xml)
        return domain.updateDeviceFlags(xml, flags)


    def snapshot_restore(self, instance, snapshot):
        """Revert *instance* to a snapshot; the VM is powered off
        first when it is not already SHUTOFF."""
        snap_name = snapshot['uuid']
        domain = self._lookup_by_name(instance.name)
        state = domain.info()[0]

        if state != power_state.SHUTOFF:
            LOG.error("snapshot needs instance to be shutoff %s, so shutdown it" % snapshot.name)
            self.destroy(instance, cleanup=False)

        snap = domain.snapshotLookupByName(snap_name, 0)
        if not snap:
            LOG.error("missing snapshot %s" % snapshot.name)
            raise exception.SnapshotNotFound(snapshot_id=snapshot['name'])

        domain.revertToSnapshot(snap, 0)


    def _verify_domain(self, instance, is_template=False):
        """Look up the instance's domain and, for template creation,
        require that it is not running.

        @return: (domain, state) tuple
        @raise exception.InstanceIsRunning: template requested while
               the instance is RUNNING
        """
        domain = self._lookup_by_name(instance.name)
        state = domain.info()[0]

        # BUG make template always need vm to be shutdown
        if is_template and state == power_state.RUNNING:
            LOG.error("currently the snapshot needs vm not to be running")
            raise exception.InstanceIsRunning(name=instance.name)

        return domain, state

    def save_as_template(self,vdisk,image):
        """Export a vdisk as a template image file.

        For a qcow2 disk that has a backing image, a temporary qcow2
        snapshot is created, extracted into the image file and then
        removed; otherwise the disk file is copied as-is.

        @param vdisk: source vdisk model; its instance must not be
                      running (enforced by _verify_domain)
        @param image: target image model, providing the storage mount
                      point and the image file name
        @return: (file size on disk, virtual disk size) of the image
        """
        instance = vdisk.instance
        is_qcow2 = (vdisk.format == constant.KVM_DRIVER_COW)
        # raises InstanceIsRunning when the VM is still up
        self._verify_domain(instance,is_template=True)

        disk_path = vdisk.disk_path
        LOG.info('saving %s as template' % disk_path)

        # only disks that themselves derive from an image get the
        # snapshot/extract treatment
        backup = vdisk.image is not None

        image_path = os.path.join(image.storage.mount_point,"images")
        if not os.path.exists(image_path): vm_utils.ensure_fold(image_path)

        image_file = os.path.join(image_path,image.name)
        if is_qcow2 and backup:
            snapshot_uuid = uuid.uuid4().hex

#            if state == power_state.RUNNING:
#                domain.managedSave(0)

            vm_utils.create_snapshot(disk_path, snapshot_uuid)

            try:
                vm_utils.extract_snapshot(disk_path, 'qcow2',
                                               snapshot_uuid, image_file,
                                               'qcow2')
            finally:
                # the temporary snapshot is removed even when the
                # extraction fails
                vm_utils.delete_snapshot(disk_path, snapshot_uuid)
                # if state != power_state.RUNNING:
                #     domain.create()

        else:
            vm_utils.copy_file(disk_path,image_file)

        return os.path.getsize(image_file),vm_utils.vdisk_info(image_file)['size']

    def snapshot_take(self, uuid, instance, snap_desc=None):
        """Create snapshot from a running VM instance.

        This command only works with qemu 0.14+.

        @param uuid: snapshot name (a uuid string)
        @param instance: instance model object
        @param snap_desc: free-form description for the snapshot
        """
        domain, state = self._verify_domain(instance)

        xml = """<domainsnapshot>
                    <name>%s</name>
                    <description>it's %s snapshot,%s</description>
                </domainsnapshot>""" % (uuid, instance.name, snap_desc)

        # BUGFIX: state is a power_state constant, the old comparison
        # against the string 'running' was always False
        if state == power_state.RUNNING:
            LOG.debug("snapshot the memory and disk")
        else:
            LOG.debug('save disk checkpoint for snapshot')
        # run in a native thread so the eventlet loop is not blocked
        tpool.execute(domain.snapshotCreateXML, xml, 0)


    def _get_volume_device_info(self, device_path):
        if device_path.startswith('/dev/'):
            return 'block', None, None
        elif ':' in device_path:
            (protocol, name) = device_path.split(':')
            return 'network', protocol, name
        else:
            raise exception.InvalidDevicePath(path=device_path)




    def attach_volume(self, instance_name, device_path, mountpoint):
        """Hot-attach a volume to the named instance.

        Two kinds are supported: local block devices (/dev/...) and
        network volumes (protocol:name).
        """
        virt_dom = self._lookup_by_name(instance_name)

        mount_device = mountpoint.rpartition("/")[2]
        # renamed from "type" to avoid shadowing the builtin
        disk_type, protocol, name = self._get_volume_device_info(device_path)
        if disk_type == 'block':
            xml = """<disk type='block'>
                         <driver name='qemu' type='raw'/>
                         <source dev='%s'/>
                         <target dev='%s' bus='virtio'/>
                     </disk>""" % (device_path, mount_device)
        elif disk_type == 'network':
            xml = """<disk type='network'>
                         <driver name='qemu' type='raw'/>
                         <source protocol='%s' name='%s'/>
                         <target dev='%s' bus='virtio'/>
                     </disk>""" % (protocol, name, mount_device)
        virt_dom.attachDevice(xml)

    def usb_pass_through(self, instance_name, vender_id, product_id):
        """Pass a host USB device (vendor/product id pair, hex without
        the 0x prefix) through to the named instance."""
        virt_dom = self._lookup_by_name(instance_name)
        xml = """
        <hostdev mode='subsystem' type='usb' managed='yes'>
            <source>
            <vendor id='0x%s'/>
            <product id='0x%s'/>
            </source>
        </hostdev>
        """ % (vender_id, product_id)
        virt_dom.attachDevice(xml)


    def live_migration(self, instance_ref, dest_host,
                       offline=False, timeout=None, bandwidth=0):
        """Migrate *instance_ref* to *dest_host*, undefining it on the
        source and persisting it on the destination. Live migration
        unless *offline* is set."""
        flagvals = (libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                    libvirt.VIR_MIGRATE_PERSIST_DEST)

        if offline:
            title = "offline"
        else:
            flagvals |= libvirt.VIR_MIGRATE_LIVE
            title = 'live'

        LOG.debug('%s migrate %s from %s to %s' % (title, instance_ref.name, instance_ref.host.ip, conf['life.migration.uri'] % dest_host))

        dom = self._conn.lookupByName(instance_ref.name)
        destconn = get_connection(conf['life.migration.uri'] % dest_host)

        if timeout:
            # cap migration downtime asynchronously once it starts
            loop = utils.LoopingCall(self._async_set_max_downtime, dom, int(timeout))
            loop.start(interval=0.1)

        # tpool keeps the blocking migrate call from stalling celery
        tpool.execute(dom.migrate, destconn.conn,
                      flagvals, None, "tcp:%s" % dest_host, int(bandwidth))


    def _async_set_max_downtime(self, domain, max_downtime, flag=0):
        """LoopingCall target: retry setting the max migration
        downtime until the migration has actually started.

        Before the migration begins libvirt raises
        VIR_ERR_OPERATION_INVALID; any other error ends the loop.
        """
        try:
            domain.migrateSetMaxDowntime(max_downtime, flag)
            LOG.debug('setmexdowntime to %sms done' % max_downtime)
            raise utils.LoopingCallDone()
        except libvirt.libvirtError as e:
            # BUGFIX: the old test was
            #   isinstance(e, libvirt.libvirtError or errcode != ...)
            # -- the misplaced parenthesis made it always true, so
            # real errors were never reported
            if (not isinstance(e, libvirt.libvirtError) or
                    e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID):
                LOG.warn("Error setting migrate downtime: %s", e)
                raise utils.LoopingCallDone()

            # migration has not been started, wait 100 milliseconds


    def get_hypervisor_type(self):
        """Report the hypervisor type of the current connection (e.g. qemu)."""
        return self._conn.getType()

    def get_hypervisor_version(self):
        """Report the hypervisor version number (e.g. 12003)."""
        return self._conn.getVersion()

    def get_vcpu_used(self):
        """Total number of vcpus currently assigned to running domains.

        A domain whose vcpus() query yields None is counted as one vcpu.
        Note: self._conn is a property that opens a fresh connection on
        every access, matching the original per-use access pattern.
        """
        used = 0
        for dom_id in self._conn.listDomainsID():
            vcpu_info = self._conn.lookupByID(dom_id).vcpus()
            if vcpu_info is None:
                used += 1
            else:
                # vcpus()[1] is the per-vcpu cpumap list; its length is
                # the number of vcpus the domain has
                used += len(vcpu_info[1])
        return used

    def screen_shot(self, inst_name):
        """Take a screenshot of a running domain and return it as JPEG bytes.

        libvirt streams the raw image (pnm format) into /tmp/<inst_name>;
        it is converted to jpg to reduce its size. Returns None when the
        screenshot or the conversion fails (errors are logged).

        Fixes: the original leaked every file handle it opened (none were
        closed), wrote/read binary image data in text mode, and used a
        bare ``except:``.
        """
        def handler(stream, buf, fd):
            fd.write(buf)
            fd.flush()

        shot_file = '/tmp/%s' % inst_name
        stream = self._conn.newStream(0)
        try:
            domain = self._conn.lookupByName(inst_name)
            # screen 0, no flags
            domain.screenshot(stream, 0, 0)
            with open(shot_file, 'wb') as raw:
                stream.recvAll(handler, raw)
            # the stream delivers pnm; convert to jpg to shrink it
            Image.open(shot_file).save('%s.jpg' % shot_file)
            with open('%s.jpg' % shot_file, 'rb') as f:
                return f.read()
        except IOError:
            LOG.exception("cannot convert ppn to jpg")
        except Exception:
            LOG.exception("error in screenshot, please double check the reason")
        finally:
            stream.finish()

    def get_instance_stats(self, instance_name):
        """Collect disk, network, memory and cpu statistics for an instance.

        :param instance_name: libvirt domain name of the instance
        :returns: dict with keys disk_stats, interface_stats,
                  memory_stats and cpu_stats

        Bug fix: the original built the dict but never returned it, so
        callers always received None.
        """
        domain = self._lookup_by_name(instance_name)
        # NOTE(review): device names are hard-coded to the first virtio
        # disk/nic (vda / vnet0), as in the original implementation
        return {
                 'disk_stats': self.block_stats(domain, "vda"),
                 'interface_stats': self.interface_stats(domain, 'vnet0'),
                 'memory_stats': self.memoryStats(domain),
                 'cpu_stats': self.cpu_stats(domain)
                 }

    def resize_base_disk(self, instance_name, size):
        """Resize the base (root) disk of an instance to *size*.

        :param instance_name: libvirt domain name of the instance
        :param size: new disk size, passed through to vm_utils.resize_disk

        Bug fix: the original called ``self.vm_utils.resize_disk`` but
        ``vm_utils`` is a module-level import, not an instance attribute —
        the call would always raise AttributeError.
        """
        domain = self._lookup_by_name(instance_name)
        diskpath = self._get_disk_path(domain)

        vm_utils.resize_disk(diskpath, size)
        LOG.debug(" instance %s resizing disk success " % instance_name)

    def cpu_stats(self, domain):
        """Return per-vcpu cpu time for *domain* as {'cpuN_time': nanosecs}.

        Returns an empty dict when the domain cannot report vcpu info.

        Bug fix: the original populated the dict but never returned it.
        """
        output = {}
        try:
            # vcpus()[0] is a list of (number, state, cpuTime, cpu) tuples
            cpuinfo = domain.vcpus()[0]
            for i in range(len(cpuinfo)):
                output["cpu" + str(i) + "_time"] = cpuinfo[i][2]
        except libvirt.libvirtError:
            pass
        return output

    def block_stats(self, domain, disk):
        """Return libvirt block-device statistics for *disk* on *domain*.

        :param domain: a libvirt domain object
        :param disk: target device name, e.g. 'vda'
        """
        return domain.blockStats(disk)

    def interface_stats(self, domain, interface):
        """Return libvirt network-interface statistics for *interface*.

        :param domain: a libvirt domain object
        :param interface: host-side device name, e.g. 'vnet0'
        """
        return domain.interfaceStats(interface)

    def memoryStats(self, domain):
        """Return the domain's memory statistics (requires qemu >= 1.2)."""
        return domain.memoryStats()

    def delete_disk(self, vdisk):
        """Remove vdisk's device from its instance's domain definition.

        A disk that is not associated with any instance needs no
        domain-side changes, so the method is a no-op in that case.
        Returns the result of create_domain with the rewritten XML.

        Bug fix: the original removed elements from ``inst.disks`` while
        iterating the same list, which can skip adjacent matching devices.
        """
        if vdisk.instance:
            domain = self._lookup_by_name(vdisk.instance.name)
            if domain:
                inst = Guest(parse_xml=domain.XMLDesc(0))
                # iterate over a snapshot copy so removal is safe
                for device in list(inst.disks):
                    if device.target_dev == vdisk.mountpoint:
                        LOG.debug('find mountpoint %s' % vdisk.mountpoint)
                        inst.disks.remove(device)
                xml = inst.to_xml()
                LOG.debug('rest xml is %s\n' % xml)
                return self.create_domain(xml)

    def clone(self, disk_relattion, newvm, originvm):
        """Clone originvm into newvm by duplicating its disks.

        :param disk_relattion: mapping of source disk path -> destination
                               disk path
        :param newvm: model object for the clone to create
        :param originvm: model object for the vm being cloned

        A running source vm is paused before its disks are copied. When
        the source has snapshots, the latest disk state is extracted from
        the qcow2 chain instead of copying the file directly.
        """
        domain = self._lookup_by_name(originvm.name)
        if domain and domain.info()[0] == power_state.RUNNING:
            self.pause(originvm.name)

        for src_path, dst_path in disk_relattion.items():
            if originvm.snapshot_set.count() > 0:
                # the vm has snapshots: peel off the most recent disk state
                diskinfo = vm_utils.vdisk_info(src_path)
                if diskinfo['format'] != constant.KVM_DRIVER_COW:
                    # extraction only works on copy-on-write images
                    raise exception.cannotsnapshotwithraw()
                LOG.debug("extract disk with snapshot for vm %s" % newvm.name)
                snapshot_uuid = uuid.uuid4().hex
                vm_utils.create_snapshot(src_path, snapshot_uuid)
                try:
                    vm_utils.extract_snapshot(src_path, 'qcow2',
                                              snapshot_uuid, dst_path,
                                              'qcow2')
                finally:
                    # always drop the temporary snapshot, even on failure
                    vm_utils.delete_snapshot(src_path, snapshot_uuid)
            else:
                LOG.debug("vm has not snapshot,we can full clone it's disk")
                vm_utils.copy_file(src_path, dst_path)

        return self.create(newvm)


    def add_spice_usbredir(self, guest):
        """Append ich9 USB controllers and two spicevmc redirdevs to guest.

        Produces the xml-object equivalent of:
            <controller type='usb' index='0' model='ich9-ehci1'>
              <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x7'/>
            </controller>
            <controller type='usb' index='0' model='ich9-uhci1'>
              <master startport='0'/>
              <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0' multifunction='on'/>
            </controller>
            <redirdev bus='usb' type='spicevmc'>
              <address type='usb' bus='0' port='3'/>
            </redirdev>
            <redirdev bus='usb' type='spicevmc'>
              <address type='usb' bus='0' port='4'/>
            </redirdev>

        :param guest: Guest xml object whose devices list is extended
        :return: None (guest is mutated in place)
        """
        def _fill(obj, **attrs):
            # assign every keyword onto a freshly built xml object
            for key, value in attrs.items():
                setattr(obj, key, value)
            return obj

        ehci = _fill(virtualController(),
                     type='usb', index='0', model='ich9-ehci1')
        ehci.address = _fill(virtualDeviceAddress(),
                             type='pci', domain='0x0000', bus='0x00',
                             slot='0x08', function='0x7')
        guest.devices.append(ehci)

        uhci = _fill(virtualController(),
                     type='usb', index='0', model='ich9-uhci1')
        uhci.master = _fill(virtualDeviceMaster(), startport='0')
        uhci.address = _fill(virtualDeviceAddress(),
                             type='pci', domain='0x0000', bus='0x00',
                             slot='0x08', function='0x0', multifunction='on')
        guest.devices.append(uhci)

        # two spice usb redirection channels on ports 3 and 4
        for port in ('3', '4'):
            redir = _fill(virtualRedirdev(), bus='usb', type='spicevmc')
            redir.address = _fill(virtualDeviceAddress(),
                                  type='usb', bus='0', port=port)
            guest.devices.append(redir)


if __name__ == '__main__':
    # ad-hoc smoke test: query a known storage pool
    driver = Driver()
    print(driver.get_pool_info('test-nfs'))