#!coding:utf-8
import multiprocessing
import os
import shutil
import sys
import uuid
import json
import re
import logging
from eventlet import tpool

from cvirt.cnode.libvirtXmlObject.disk import GuestCdrom
from cvirt.common.constant import AGENT_INSTANCE_PATH, TO_MB
from cvirt.manager.models import Instance, Snapshot, Server, Interface
from cvirt.cnode.vm import power_state,vm_utils,libvirtDriver
from cvirt.common import utils,exception
from cvirt.cnode.nwfilter import api as nwapi
from cvirt.cnode.network import api as netapi
from cvirt.cnode.guest.api import api

import logging
LOG = logging.getLogger(__name__)

from cvirt.common import config
conf = config.load_settings()

"""
    @author jay.han
"""
WARNING_DISK = 50 # for warning disk 50G

class AgentService(object):

    def __init__(self, _conf):
        """Wire up the libvirt driver and read host identity from env/config.

        @param _conf: loaded settings (currently unused; the module-level
            ``conf`` is read instead -- NOTE(review): confirm this is intended)
        """
        self.driver = libvirtDriver.Driver()
        self.host = conf.get('cnode.host')
        # HOSTID must be a non-empty, non-whitespace value, otherwise None.
        # (os.environ.has_key() is Python-2-only; .get() works everywhere.)
        hostid = os.environ.get('HOSTID')
        self.hostid = hostid if hostid and hostid.strip() else None

    def init(self):
        """Bootstrap the agent service.

        Registers this host in the database first, then reconciles the
        power state of every instance assigned to it.
        """
        LOG.debug("begin to init storage")
        # storage initialisation placeholder

        LOG.debug("begin to init instance")
        self.register_host()
        self.init_instance()


    def init_instance(self):
        """Reconcile DB power state with libvirt for instances on this host.

        Instances flagged ``autostart`` that libvirt reports as SHUTOFF are
        started in a thread pool; an instance libvirt cannot report on is
        recorded as FAILED.
        """
        host = Server.objects.get_or_none(pk=self.hostid)
        if not host:
            return
        for instance in Instance.objects.filter(host=host):
            db_state = instance.power_status
            LOG.debug('Checking state of instance %s......' % instance.name)
            try:
                drv_state = self.driver.get_info(instance.name)["state"]
                if instance.autostart and drv_state == power_state.SHUTOFF:
                    LOG.debug("auto start instance %s " % instance.name)
                    # run in tpool so startup doesn't block this loop
                    tpool.execute(self.start, instance)
            except Exception:
                # a bare except here would also trap SystemExit/KeyboardInterrupt
                drv_state = power_state.FAILED

            LOG.debug('Current state is %(drv_state)s, state in DB is '
                      '%(db_state)s.' % {'drv_state': drv_state,
                                         'db_state': db_state})

            if db_state != drv_state:
                instance.update({'power_status': drv_state,
                                 'status': power_state.name(drv_state)})

    def _get_instance(self,instance_id):
        """Fetch an Instance row by primary key (raises if missing)."""

        return Instance.objects.get(id=instance_id)

    def _clean_dirty_inst(self, inst_ref):
        """Roll back a half-created instance: disk files, fixed IPs, DB row."""
        # remove disk files (cdrom entries carry no storage and are skipped)
        for vdisk in inst_ref.vdisk_set.all():
            if vdisk.storage is not None and os.path.exists(vdisk.disk_path):
                os.remove(vdisk.disk_path)
        # release every fixed IP held by the instance's interfaces
        for nic in inst_ref.interface_set.all():
            netapi.unallocate_fixed_ip(nic)
        inst_ref.delete()

    def _setup_inst_status(self, need_launch):
        """Map a launch flag to a (status-name, power-state) pair for the DB.

        NOTE(review): the non-launch branch names SHUTDOWN but stores SHUTOFF
        as the numeric state -- confirm this asymmetry is intended.
        """
        state = power_state.RUNNING if need_launch else power_state.SHUTOFF
        label = power_state.name(power_state.RUNNING if need_launch
                                 else power_state.SHUTDOWN)
        return label, state

    # def _create_local(self,disk, unit='G',
    #                     fs_type=None, label=None):
    #     if not fs_type:
    #
    #         vm_utils.create_image(disk['disk_format'], disk['disk_path'],
    #                                '%d%c' % (disk['disk_size'], unit))
    # TODO
    # if fs_type:
    #     vm_utils.mkfs(fs_type, target, label)


    def _prepare_disks(self, instance,base_path,update=False):
        """Create or resize the qcow files backing an instance's disks.

        Disks may live on several storages; a disk's on-disk path is
        <storage_mount_point>/<instance_uuid>/<vdisk_name_id>.

        @param instance: django instance model whose vdisk_set is processed
        @param base_path: directory holding base images (None on update)
        @param update: False -> create new disks; True -> create missing
            disks and grow existing ones (shrinking is not supported)
        """
        def create_disk(disk):
            # plain (non-backed) image of disk.size GB
            vm_utils.execute('qemu-img', 'create', '-f', disk['format'],
                                         disk.disk_path, "%sG" % disk.size)
            #TODO mkfs for second disk, you don't need to format
            #vm_utils.mkfs(constant.whichOS(os_type), fs_label='tcloud')

        for disk in instance.vdisk_set.all():
            #TODO device_type will be disk,cdrom,floppy
            if disk.device_type == 'disk' and disk.storage.protocol != 'iscsi':
                #make sure vdisks dir
                instancepath = os.path.join(disk.storage.mount_point, "vdisks")
                if not os.path.exists(instancepath): vm_utils.ensure_fold(instancepath)
                diskpath = disk.disk_path

                vm_utils.ensure_fold(os.path.dirname(diskpath))

                if not update:  # creating disks for a brand-new instance
                    if disk.image:
                        # copy-on-write image backed by the shared base image
                        vm_utils.execute('qemu-img', 'create', '-f', disk['format'], '-o',
                                         'cluster_size=2M,backing_file=%s' % os.path.join(base_path, disk.image.name),
                                         diskpath, "%sG" % disk.size)  # disk.size is in GB

                        if instance.os_type and instance.os_type.lower().startswith('windows'):
                            LOG.debug("change vm hostname to %s" % instance.name)
                            api.change_guest_name_offline(instance.name,diskpath)
                    else:
                        # no base image (iso install)
                        create_disk(disk)

                else:  # updating an existing instance's disks
                    if not os.path.exists(diskpath):
                        # a newly added disk is assumed to have no image
                        create_disk(disk)

                    #resize disk,but only support plus size
                    else:
                        diskinfo = vm_utils.vdisk_info(diskpath)
                        # diskinfo['size'] is in bytes; disk.size is in GB
                        if diskinfo['size']/1024/1024/1024 < disk.size:
                            try:
                                vm_utils.execute('qemu-img','resize',diskpath,'%sG' % disk.size)
                            except exception.ProcessExecutionError as e:
                                if "Can't resize an image which has snapshots" in e.stderr:
                                    raise exception.notallowresizewithsnapshot()
                                raise

                        else:
                            LOG.warn('disk size %s in new config less than or equals origin size %s' % (diskinfo['size']/1024/1024/1024,disk.size))


    def _prepare_base_img(self, instance):
        """Stage the base image next to the instance's first disk.

        The base image lives in the ``_bases`` directory of the storage that
        holds the first disk, and is (re)copied when missing or when its size
        differs from the image record.

        @return: the base directory path, or None when no eligible disk
        """
        cdisk = instance.vdisk_set.first()
        #TODO device_type will be disk,cdrom,floppy
        if cdisk and cdisk.device_type == 'disk' and cdisk.storage.protocol != 'iscsi':
            # base images are kept on the storage of the first disk
            base_path = os.path.join(cdisk.storage.mount_point, '_bases')
            if cdisk.image is not None:
                # the image's name doubles as its file name
                base_image = os.path.join(base_path, cdisk.image.name)
                # copy when the base image is missing or its size differs
                if not os.path.exists(base_image) or os.path.getsize(base_image) != cdisk.image.size:
                    tpool.execute(vm_utils.copy_image,os.path.join(cdisk.image.storage.mount_point,'images',cdisk.image.name), base_path)
            return base_path

    def _check_storage(self, instance):
        """Abort VM creation when a shared-storage mount point is unusable.

        Cdrom devices and local/iscsi storages are exempt; every other
        disk's storage must be an existing directory that is a mount point.
        """
        for vdisk in instance.vdisk_set.all():
            if vdisk.device_type == 'cdrom':
                continue
            if vdisk.storage.protocol in ('local', 'iscsi'):
                continue
            mnt = vdisk.storage.mount_point
            usable = (os.path.exists(mnt) and os.path.isdir(mnt)
                      and os.path.ismount(mnt))
            if not usable:
                raise exception.InvalidSharedStorageDeployment(
                    path=mnt, name=vdisk.storage.name)


    def allocate_ip(self, instance):
        """(Re-)allocate fixed IPs for every interface of *instance*.

        The previous fixed IP is always released first because updates are
        applied wholesale; the old address is remembered so an enabled NIC
        can be given the same IP back when possible.  (Also normalises the
        previously broken loop indentation.)
        """
        # TODO should call linux_net.dhcprealse
        for interface in instance.interface_set.all():
            # remember the old address so it can be re-requested after release
            address = interface.fixedip.address if interface.fixedip else None
            # release first, then re-allocate: this is a wholesale update, so
            # the previous state cannot be kept -- otherwise disk updates etc.
            # would leave stale fixedip records behind
            netapi.unallocate_fixed_ip(interface)
            if interface.enable:  # only enabled NICs get a (new) binding
                if interface and interface.network.should_allocate_ip():
                    netapi.allocate_fixed_ip(interface, address)
                    LOG.info('allocate fixedip %s for interface %s'
                             % (interface.fixedip.address, interface.name))


    def create(self, instance, snapshot=False, needlaunch=False):
        '''
        Build a virtual machine.

        Steps: validate storage, stage the base image (if any), create the
        disks, (re)build the per-instance nwfilter, allocate fixed IPs and
        finally define (and optionally launch) the libvirt domain.  On any
        failure the half-created instance is cleaned up and the error
        re-raised.

        @param instance: django model
        @param snapshot: build from a snapshot definition
        @param needlaunch: start the domain right after defining it
        @return: instance model
        '''
        try:
            self._check_storage(instance)
            base_path = self._prepare_base_img(instance)
            self._prepare_disks(instance, base_path)

            # one nwfilter per VM, referenced from the domain XML:
            #   instance xml ==> instancefilter ==> networkfilter or builtin
            # rule updates then take effect immediately without rewriting
            # the domain XML
            nwapi.refresh_filter_for_instance(instance)

            # networks themselves are assumed to exist already
            self.allocate_ip(instance)

            LOG.debug('begin to build instance %s' % instance.name)
            xml = self.driver.create(instance, snapshot=snapshot, needlaunch=needlaunch)
            status, p_state = self._setup_inst_status(needlaunch)

            instance.update(dict(status=status, power_status=p_state, xml=xml))
            # attribute access used consistently (was instance['name'])
            LOG.debug('finish to build instance %s' % instance.name)
        except Exception:
            LOG.exception('create %s instance failed' % instance.name)
            self._clean_dirty_inst(instance)
            raise

        return instance


    def delete_vdisk(self, vdisk, cleanup=False):
        """Detach a vdisk from its domain; optionally delete file and record.

        @param cleanup: True -> remove the backing file and the DB row;
            False -> just detach the vdisk from its instance
        """
        self.driver.delete_disk(vdisk)
        LOG.debug('domain disk remove successful')
        if not cleanup:
            vdisk.update(dict(instance=None))
            return
        if vdisk.device_type == 'disk':
            if os.path.exists(vdisk.disk_path):
                LOG.debug("remove storage disk file %s" % vdisk.disk_path)
                os.remove(vdisk.disk_path)
            vdisk.delete()

    def update_cdrom(self, vdisk):
        '''
        Hot-plug/unplug a cdrom device; SATA disk buses support this too.

        @param vdisk: cdrom vdisk model providing ``mountpoint``/``isopath``
        @return: None; re-raises on failure after logging
        '''
        try:
            cdrom = GuestCdrom()
            cdrom.target_dev = vdisk.mountpoint
            cdrom.source_path = vdisk.isopath
            self.driver.update_device(vdisk.instance, cdrom.to_xml())
        except Exception:
            # a bare except would also swallow SystemExit/KeyboardInterrupt
            LOG.exception("update cdrom for instance %s failed" % vdisk.instance.name)
            raise

    #TODO ide disk doesn't support update
    # def update_disk(self,vdisk):
    #     try:
    #
    #         diskdomain = self.driver.fill_disk(vdisk)
    #         self.driver.update_device(vdisk.instance,diskdomain.to_xml(),modify_live=False)
    #     except:
    #         LOG.exception("update disk for instance %s failed" % vdisk.instance.name)
    #         raise

    def update(self,instance,snapshot=False):
        '''
        Apply a full re-configuration of an existing instance.

        Disk deletion/resizing/creation is handled by _prepare_disks; apart
        from disks, every update rewrites the whole instance definition, so
        the basic pattern is: clear the old data first, then re-apply
        everything.

        @param instance: django instance model to update
        @param snapshot: whether the update originates from a snapshot
        @return: the updated instance (new domain xml persisted)
        '''
        @utils.synchronized(instance.name)
        def handle_instance(instance):
            LOG.debug("begin to update instance %s" % instance.name)
            try:
                #check instance exsist, or raise vm not found
                self.driver._lookup_by_name(instance.name)

                self._prepare_disks(instance,None,update=True)


                LOG.debug("(un)allocate ip for instance %s " % instance.name)
                self.allocate_ip(instance)

                xml = self.driver.update(instance,snapshot=snapshot)
                # drop the previously applied network rules
                nwapi.remove_instance_filter(instance)
                # then apply the new rules
                nwapi.refresh_filter_for_instance(instance)
                #FIXME the nwfilter api must be called twice to take effect
                nwapi.refresh_filter_for_instance(instance)

                return instance.update(dict(xml=xml))
            except:
                LOG.exception("error in update instance %s " % instance.name)
                instance.update({'status':power_state.name(power_state.FAILED)})
                raise
        return handle_instance(instance)

    def wait_for_running(self, instance):
        """Poll (every 0.5s via LoopingCall) until *instance* is running."""
        poller = utils.LoopingCall(self.driver._wait_for_running, instance)
        poller.start(interval=0.5, now=True)


    def start(self, inst):
        """Power on *inst* and persist running status; mark FAILED on error."""
        @utils.synchronized(inst.name)
        def handle_instance(inst):
            try:
                is_start = self.driver.start(inst.name)
                status, p_state = self._setup_inst_status(need_launch=True)
                inst.update(dict(status=status, power_status=p_state))
                return is_start
            except Exception:
                # narrowed from a bare except
                LOG.exception("error in start instance %s" % inst.name)
                inst.update({'status': power_state.name(power_state.FAILED)})
                raise
        return handle_instance(inst)

    def shutdown(self, inst, is_soft=True):
        """Stop a running instance, gracefully by default.

        @param is_soft: True -> soft (ACPI) shutdown; False -> hard power-off
        @return: whether the instance actually stopped
        """
        @utils.synchronized(inst.name)
        def _do_shutdown(inst):
            try:
                if is_soft:
                    stopped = self.driver.soft_shutdown(inst.name)
                else:
                    stopped = self.driver.destroy(inst, cleanup=False)

                if stopped:
                    status, p_state = self._setup_inst_status(need_launch=False)
                    inst.update(dict(status=status, power_status=p_state))
                return stopped
            except Exception as e:
                LOG.exception("error in shutdown instance %s: %s" % (inst.name, e))
                inst.update({'status': power_state.name(power_state.FAILED)})
                raise
        return _do_shutdown(inst)

    def reboot(self, inst, is_soft=False):
        """Restart an instance; hard reboot (destroy + start) by default."""
        @utils.synchronized(inst.name)
        def _do_reboot(inst):
            try:
                if is_soft:
                    rebooted = self.driver.soft_reboot(inst.name)
                else:
                    self.driver.destroy(inst, cleanup=False)
                    rebooted = self.driver.start(inst.name)
                if rebooted:
                    status, p_state = self._setup_inst_status(need_launch=True)
                    inst.update(dict(status=status, power_status=p_state))
                return rebooted
            except Exception as e:
                LOG.exception("error in reboot instance %s : %s" % (inst.name, e))
                inst.update({'status': power_state.name(power_state.FAILED)})
                raise

        return _do_reboot(inst)

    def destroy(self, instance):
        """Tear an instance down completely: snapshots, domain, IPs, filters."""
        @utils.synchronized(instance.name)
        def handle_instance(instance):
            try:
                # drop snapshots first, then the domain itself (with cleanup)
                self.driver.snapshot_destory(instance.name)
                self.driver.destroy(instance, cleanup=True)
                # release fixed IPs held by enabled interfaces
                for iface in instance.interface_set.filter(enable=True):
                    if iface.network.should_allocate_ip():
                        netapi.unallocate_fixed_ip(iface)

                nwapi.remove_instance_filter(instance)

                return True
            except Exception:
                # message previously said "shutdown"; this is destroy
                LOG.exception("error in destroy instance %s" % instance.name)
                instance.update({'status': power_state.name(power_state.FAILED)})
                raise
        return handle_instance(instance)

    def pause(self, instance):
        """Suspend a running instance and record the paused state."""
        @utils.synchronized(instance.name)
        def handle_instance(instance):
            try:
                self.driver.pause(instance.name)
                instance.update(dict(status='paused', power_status=power_state.PAUSED))
            except Exception:
                # narrowed from a bare except
                LOG.exception("error in pause instance %s" % instance.name)
                instance.update({'status': power_state.name(power_state.FAILED)})
                raise

            return instance
        return handle_instance(instance)

    def unpause(self, inst):
        """Resume a paused instance and mark it running; FAILED on error."""
        @utils.synchronized(inst.name)
        def handle_instance(inst):
            try:
                self.driver.unpause(inst.name)
                inst.update(dict(status='running', power_status=power_state.RUNNING))
            except Exception:
                # narrowed from a bare except
                LOG.exception("error in resume instance %s" % inst.name)
                inst.update({'status': power_state.name(power_state.FAILED)})
                raise

            return inst
        return handle_instance(inst)


    def hibernate(self, inst):
        """Save the instance's state to disk (driver hibernate) and stop it."""
        @utils.synchronized(inst.name)
        def handle_instance(inst):
            try:
                self.driver.hibernate(inst.name)
                inst.update(dict(status='saved', power_status=power_state.SAVED))
            except Exception:
                # narrowed from a bare except
                LOG.exception("error in hibernate instance %s" % inst.name)
                inst.update({'status': power_state.name(power_state.FAILED)})
                raise

            return inst
        return handle_instance(inst)


    def snapshot_delete(self, inst, snap):
        """Delete snapshot *snap* of *inst*; mark the snapshot FAILED on error."""
        @utils.synchronized(inst.name)
        def handle_instance(inst, snap):
            try:
                self.driver.snapshot_delete(inst.name, snap)
            except Exception:
                # narrowed from a bare except
                LOG.exception('error in delete snapshot %s' % snap.name)
                snap.update({"status": power_state.name(power_state.FAILED)})
                raise

            return snap
        return handle_instance(inst, snap)

    def snapshot(self, inst, snapshot):
        """Take snapshot *snapshot* of *inst* and flip its status to ok."""
        @utils.synchronized(inst.name)
        def handle_instance(inst):
            try:
                self.driver.snapshot_take(snapshot.uuid, inst, snapshot.name)
                snapshot.update({"status": 'ok'})
            except Exception:
                # narrowed from a bare except
                LOG.exception("error in snapshot instance %s" % inst.name)
                snapshot.update({'status': power_state.name(power_state.FAILED)})
                raise

            return snapshot
        return handle_instance(inst)

    def snapshot_restore(self, instance, snapshot):
        """Revert *instance* to *snapshot*; mark the instance FAILED on error."""
        @utils.synchronized(instance.name)
        def handle_instance(instance):
            try:
                self.driver.snapshot_restore(instance, snapshot)
            except Exception:
                # narrowed from a bare except
                LOG.exception("error in restore snapshot %s of instance %s" % (snapshot.name, instance.name))
                instance.update({'status': power_state.name(power_state.FAILED)})
                raise

            return snapshot
        return handle_instance(instance)

    def save_as_template(self, vdisk, image):
        '''
        Export *vdisk* as template *image* and update the image record.

        @param vdisk: source vdisk model (belongs to an instance)
        @param image: image model to receive size/status once exported
        @return: None; re-raises on failure after logging
        '''
        LOG.debug("Save Template for vdisk %s of instance %s begin....." % (vdisk.name, vdisk.instance.name))
        try:
            # tpool.execute keeps the copy from blocking the celery main thread
            image_size, virtualsize = tpool.execute(self.driver.save_as_template, vdisk, image)

            image.update({'status': 'alive', 'size': image_size, 'virtualsize': virtualsize})

        except Exception:
            # narrowed from a bare except
            LOG.exception("error in save %s instance's template" % vdisk.instance.name)
            raise

        LOG.debug("Save Template for instance %s end....." % vdisk.instance.name)

    #TODO implement instance backup (optionally to another host)
    def backup(self,instance_id,host=None):
        """Placeholder -- instance backup is not implemented yet."""
        pass


    def check_if_has_disk(self, on_disk):
        """True when free space (minus the 50G safety margin) covers *on_disk*.

        @param on_disk: requested size, in the unit TO_MB converts from
            (presumably MB -> GB; confirm against callers)
        """
        requested = on_disk / TO_MB
        free = self.get_local_gb_total() - self.get_local_gb_used()
        return free - WARNING_DISK > requested

    def get_vcpu_total(self):
        """Number of CPUs on this host, or 0 when the platform can't tell."""
        try:
            return multiprocessing.cpu_count()
        except NotImplementedError:
            LOG.warn("function is not implemented for this platform. ")
            return 0

    def get_memory_mb_total(self):
        """Total host RAM in MB from /proc/meminfo; 0 on non-Linux.

        Fixed: the old check compared against 'LINUX2'/'LINUX3', which never
        matches Python 3's sys.platform == 'linux', so it always returned 0
        there; the meminfo handle is now also closed deterministically.
        """
        if not sys.platform.startswith('linux'):
            return 0

        with open('/proc/meminfo') as f:
            meminfo = f.read().split()
        idx = meminfo.index('MemTotal:')
        # kB -> MB; floor division keeps the historical int result
        return int(meminfo[idx + 1]) // 1024

    def get_local_gb_total(self):
        """
        returns:
            The total amount of HDD(GB).
            Note that this value shows a partition where
             /home/vmos/instances mounts.
        """
        stats = vm_utils.get_fs_info(AGENT_INSTANCE_PATH)
        # stats['total'] scaled by TO_MB -- presumably MB -> GB; confirm units
        return stats['total'] / TO_MB
    def get_cpu_type(self):
        """CPU model name from /proc/cpuinfo (None when not found)."""
        output, _ = vm_utils.execute('cat', '/proc/cpuinfo')

        for row in output.split("\n"):
            if "model name" in row:
                # strip everything up to and including the first colon
                return re.sub(".*model name.*:", "", row, 1)

    def get_cpu_arch(self):
        """Machine hardware name, e.g. ``x86_64``."""
        from platform import machine
        return machine()

    def get_vcpu_used(self):
        """Total number of vcpus currently in use on this host.

        :returns: the count reported by the libvirt driver
        """
        return self.driver.get_vcpu_used()

    def get_local_gb_used(self):
        """
        returns:
           The total usage of HDD(GB).
           Note that this value shows a partition where
           /home/vmos/instances mounts.
        """
        stats = vm_utils.get_fs_info(AGENT_INSTANCE_PATH)
        # stats['used'] scaled by TO_MB -- presumably MB -> GB; confirm units
        return stats['used'] / TO_MB

    def get_memory_mb_used(self):
        """Used host RAM in MB (total minus free+buffers+cached); 0 on non-Linux.

        Fixed: the 'LINUX2'/'LINUX3' comparison never matched Python 3's
        sys.platform == 'linux', so this always returned 0 there; the
        meminfo handle is now also closed deterministically.
        """
        if not sys.platform.startswith('linux'):
            return 0

        with open('/proc/meminfo') as f:
            m = f.read().split()
        # kB -> MB with floor division, matching the historical int result
        avail = (int(m[m.index('MemFree:') + 1])
                 + int(m[m.index('Buffers:') + 1])
                 + int(m[m.index('Cached:') + 1])) // 1024
        return self.get_memory_mb_total() - avail

    def get_hypervisor_type(self):
        # hypervisor type string straight from the libvirt driver
        return self.driver.get_hypervisor_type()


    def get_hypervisor_version(self):
        # hypervisor/qemu version straight from the libvirt driver
        return self.driver.get_hypervisor_version()

    def migrate(self, instance, dest, offline=False, timeout=None, bandwidth=0):
        """Migrate *instance* to host *dest* via the libvirt driver.

        @param offline: request an offline rather than live migration
        @param timeout: abort after this long (None = no limit)
        @param bandwidth: bandwidth cap (0 = unlimited; units per libvirt --
            presumably MiB/s, confirm against the driver)
        """
        self.driver.live_migration(instance, dest, offline=offline,
                                   timeout=timeout, bandwidth=bandwidth)


    def pre_migrate(self,instance):
        #检查迁移必须的存储位置
        for disk in instance.vdisk_set.all():
            filepath = None
            #fixme 在isopath被使用时，iso存储不能卸载，在可以卸载的的节点上将不能迁移
            if disk.device_type=='cdrom':
                filepath = disk.isopath
            if disk.device_type=='disk':
                filepath = disk.disk_path
            if not os.path.exists(filepath):
                raise exception.InvalidMigrateStorage(path=filepath)

        #检查迁移必须的network
        for iface in instance.interface_set.all():
            if iface.enable:
                #检查nwfilter数据，没有则创建
                nwapi.refresh_filter_for_interface(iface)
                for sgroup in iface.securitygroups.all():
                    nwapi.refresh_secruity_group(sgroup)
                network = iface.network
                if network:
                    ndomain = self.driver._look_up_network_by_name(network.libvirt_name)
                    if not ndomain:
                        raise exception.InvalidMigrateNetwork(name=network.libvirt_name)


    def post_migrate(self,instance):
        '''
        Remove the instance's nwfilter data after the migration completes.
        @param instance: migrated instance model
        @return: None
        '''
        nwapi.remove_instance_filter(instance)


    def register_host(self):
        """Create or refresh this host's Server row and sync host facts."""
        ip = conf.get('cnode.host')
        LOG.info("look up server by host id %s" % self.hostid)
        server = Server.objects.get_or_none(pk=self.hostid)
        # a known hostid means a normal re-registration (2); otherwise new (1)
        status = 2 if server else 1
        if not server:
            server = Server.objects.first(ip=ip)
            if not server:
                server = Server()

        server.update({'ip':ip,
                       'status':status,
                       'host_name':vm_utils.get_hostname(),
                       'password':conf.get('cnode.password'),
                       'port':conf.get('cnode.ssh_port'),
                       'use_sshkey':False,
                       'cpu':self.get_vcpu_total(),
                        'mem':self.get_memory_mb_total(),
                        'arch':self.get_cpu_arch(),
                        'os':'linux',
                        'hypervisor':self.get_hypervisor_type(),
                        'cpu_type':self.get_cpu_type(),
                        'qemu_version':self.get_hypervisor_version()})
        self.turn_ksm(server.memshare)

    def turn_ksm(self, memshare):
        """Enable/disable kernel samepage merging via /sys/kernel/mm/ksm/run.

        Best-effort: failures (e.g. missing sysfs entry, no permission) are
        logged but never raised.

        @param memshare: truthy -> write '1' (enable), falsy -> '0' (disable)
        """
        try:
            # the file() builtin no longer exists on Python 3; open() works
            # on both, so this no longer fails silently via the except below
            with open('/sys/kernel/mm/ksm/run', 'w') as f:
                f.write(str(int(memshare)))
            LOG.debug('set ksm to %s successful' % memshare)
        except Exception:
            import traceback
            traceback.print_exc()
            LOG.warn('set ksm to %s failed' % memshare)

    def unapply_sgroup(self, sgroup,interfaces):
        """Remove *sgroup*'s rules from detached interfaces on this host.

        Amounts to re-rendering each interface's filter without the group --
        see the FIXME in update() about the nwfilter api needing two calls.

        @param sgroup: security group being detached
        @param interfaces: iterable of interface primary keys
        """
        for iface_id in interfaces:
            iface = Interface.objects.get_or_none(pk=iface_id)
            if iface and iface.instance.host.ip == self.host:
                LOG.debug("unapply sgroup %s " % sgroup.name)
                nwapi.refresh_filter_for_interface(iface)

    def apply_sgroup(self, sgroup, interfaces=None):
        """Apply *sgroup* to its associated interfaces on this host.

        @param interfaces: ids of interfaces the group was detached from
            (default None == empty; the previous mutable [] default was a
            shared-state hazard)
        """
        self.unapply_sgroup(sgroup, interfaces or [])
        # re-apply the group on every interface currently associated with it
        for interface in sgroup.interface_set.filter(instance__host__ip=self.host):
            nwapi.refresh_filter_for_interface(interface)

    def remove_sgroup(self, sgroup, interfaces=None):
        """Delete *sgroup*: detach it from interfaces, then drop the nwfilter.

        @param interfaces: ids of interfaces previously associated with the
            group (default None == empty; replaces a mutable [] default)
        """
        if sgroup:
            LOG.debug("delete sgroup %s " % sgroup.name)
            # detach rules from previously associated interfaces first
            self.unapply_sgroup(sgroup, interfaces or [])
            # deleting afterwards also avoids the "nwfilter in use" problem
            nwapi.remove_nwfilter(sgroup.name)

    def update_sgroup(self,sgroup):
        """Re-render *sgroup*'s nwfilter and refresh its local interfaces."""
        LOG.debug("apply sgroup %s " % sgroup.name)
        nwapi.refresh_secruity_group(sgroup)

        for interface in sgroup.interface_set.filter(instance__host__ip = self.host):
            nwapi.refresh_filter_for_interface(interface)


    def delete_template(self, template_path):
        """Remove a template file from disk if it exists (idempotent)."""
        if os.path.exists(template_path):
            os.remove(template_path)
            # the previous message never interpolated the path into the '%s'
            LOG.debug('remove template %s successful' % template_path)


    def clone(self,disk_relattion,newvm,originvm, fullclone=False):
        """Clone *originvm* into *newvm* under the source VM's lock.

        @param disk_relattion: mapping of source disks to clone targets
            (sic: misspelled parameter name kept for caller compatibility)
        @param newvm: pre-created model for the clone
        @param originvm: source instance model
        @param fullclone: unused by this path -- TODO confirm intent
        """
        @utils.synchronized(originvm.name)
        def handle_instance():
            try:
                nwapi.refresh_filter_for_instance(newvm)
                self.allocate_ip(newvm)
                # run the copy in a thread pool so it doesn't block eventlet
                xml = tpool.execute(self.driver.clone,disk_relattion,newvm,originvm)
                # NOTE(review): status name says RUNNING while power_status is
                # SHUTOFF -- looks inconsistent; confirm which is intended
                newvm.update(dict(status=power_state.name(power_state.RUNNING),power_status=power_state.SHUTOFF,xml=xml))
                LOG.debug("clone to vm %s successful" % newvm.name)
            except:
                self._clean_dirty_inst(newvm)
                raise
            finally:
                # the source may have been paused for the copy; resume it
                domain = self.driver._lookup_by_name(originvm.name)
                if domain and domain.info()[0] == power_state.PAUSED:
                    self.driver.unpause(originvm.name)

        return handle_instance()


# Module-level singleton: the agent API used by the rest of the process.
API = AgentService(conf)

if __name__ == '__main__':
    # ad-hoc manual smoke test: start a known instance by name
    API.start("i-00000013")
