#!/usr/bin/env python2
#-*- coding: utf-8 -*-

import re
import time
import json
import datetime
import traceback

from datetime import timedelta

from Ump import utils
from Ump.common import log
from Ump.common.utils import inspect_func
from Ump.common import exception

from Ump.objs.db import models
from Ump.objs.session_wrapper import enable_log_and_session, _sw, enable_oplog
from Ump.objs.manager_base import Manager
from Ump.jsonify import sqlalchemy_obj_to_dict, data2json

from Ump.umptypes import UmpPath
from agentapi import volume as volume_api

from Ump.lich.volume import LichVolume, LichVolumeParam


LOG = log.get_log('Ump.objs.volume.manager')


# Volume lifecycle states stored in the DB ``status`` column, and clone
# markers stored in ``clone_type``.
CREATING = 'creating'      # backend create task still in flight
COPYING = 'copying'        # volume copy in progress
FAIL = 'fail'              # last backend operation failed
CLONE = 'clone'            # clone_type: clone still depends on its source
WAITING = 'waiting'        # queued, not yet started
FLATTENING = 'flattening'  # flatten task in flight
FLATTENED = 'flattened'    # clone_type: fully independent copy

# Transient states during which sync/cleanup jobs must leave the volume alone.
SKIP_STATUS = [CREATING, WAITING, FAIL]

@models.add_model(models.Volume)
class VolumeManager(Manager):

    def __init__(self):
        """Initialize the manager and the lich backend volume helper."""
        super(VolumeManager, self).__init__()
        self.lichVolume = LichVolume()

    def sync_create(self, kwargs):
        """Create a volume synchronously (blocks until the backend answers).

        :param kwargs: dict with at least 'path'; forwarded to _create().
        :return: the created Volume row.
        """
        kwargs['path']  # required key; raises KeyError when absent
        kwargs['is_sync'] = True
        return self._create(**kwargs)


    def create(self, kwargs):
        """Create a volume asynchronously; returns the DB row immediately.

        :param kwargs: dict with at least 'path'; forwarded to _create().
        :return: the created Volume row (backend create completes async).
        """
        kwargs['path']  # required key; raises KeyError when absent
        return self._create(**kwargs)

    @inspect_func
    def _create(self, path, size, provisioning, qos_id=None, snappolicy_id=None, is_sync=False,
                access_policy_id=None, cluster_id=None, protection_domain_id=None, priority=None,
                analysis_latency=None, username=None, ec_data=None, ec_code=None, repnum=None, chap_name=None, chap_password=None, **kw):
        """Create the Volume DB row and post a backend create command.

        :param path: UmpPath addressing pool and volume name.
        :param size: size in bytes.
        :param provisioning: 'thin' or 'thick' (anything else raises).
        :param qos_id: QOS row id; overridden by kw['qos_name'] when given.
        :param snappolicy_id: id(s) of snapshot policies to attach.
        :param is_sync: when True, block on the backend task (sync_post).
        :param access_policy_id: optional access-policy row id.
        :param cluster_id: defaults to the current cluster.
        :param protection_domain_id: optional; validated when given.
        :param priority: volume I/O priority; '-1' means backend default.
        :param ec_data/ec_code: erasure-coding parameters; both must be set
            to take effect, otherwise repnum replication is used.
        :param repnum: replica count (cleared when EC is in effect).
        :param chap_name/chap_password: iSCSI CHAP credentials.
        :return: the created Volume row.
        :raises Exception: on bad provisioning value.
        :raises exception.AlreadyExists / PoolNotFound / PDomainNotFound.
        """
        if provisioning not in ['thin', 'thick']:
            raise Exception("provisioning's value must be thin or thick")

        # NOTE(review): when the path already exists but is owned by the
        # 'cinder' user, execution continues and a second DB row is created
        # for the same path — confirm this duplication is intended.
        volume = _sw.get_volume(path)
        if volume:
            if volume.user.name != 'cinder':
                raise exception.AlreadyExists(path=path)

        pool = _sw.get_pool(path)
        if not pool:
            raise exception.PoolNotFound()

        if protection_domain_id is not None:
            pdomain = _sw.get_one(models.ProtectionDomain, id_or_spec=protection_domain_id)
            if not pdomain:
                raise exception.PDomainNotFound(id=protection_domain_id)

        if not cluster_id:
            cluster_id = _sw.get_clusterid()

        values = {
            'name': path.vol_name,
            'cluster_id': cluster_id,
            'protection_domain_id': protection_domain_id,
            'protocol': pool.protocol,
            'user_id': _sw.get_user_id(path.username),
            'pool_id': pool.id,
            'size': size,
            'used': 0,
            'ec-data': ec_data,
            'ec-code': ec_code,
            'repnum': repnum,
            'provisioning': provisioning,
            'is_share': False,
            'chap_name': chap_name,
            'chap_password': chap_password,
            'analysis_latency': analysis_latency
        }

        if access_policy_id:
            values['access_policy_id'] = access_policy_id

        # qos_name is a UUID coming from OpenStack Cinder; it takes
        # precedence over an explicitly passed qos_id.
        # NOTE(review): an unknown qos_name makes ``qos`` None and the
        # ``qos.id`` access raise AttributeError — confirm callers guarantee
        # the name exists.
        if kw.get('qos_name'):
            qos_name = kw.get('qos_name')
            qos = models.QOS.query.filter_by(name=qos_name).first()
            qos_id = qos.id
        if qos_id:
            values['qos_id'] = qos_id

        if priority:
            values['priority'] = priority

        if snappolicy_id:
            # Resolve policy ids to ORM rows for the relationship column.
            values['snapshot_policys'] = _sw.get_list_by_ids(models.SnapshotPolicy, snappolicy_id)

        self.check_quota(size, pool)
        # EC and replication are mutually exclusive: EC wins only when both
        # ec-data and ec-code are provided.
        if values['ec-data'] and values['ec-code']:
            values['repnum'] = None
        else:
            values['ec-data'] = None
            values['ec-code'] = None

        volume = models.Volume(values).save()

        # '-1' means "no explicit priority": send None to the backend.
        if volume.priority == '-1':
            priority = None
        else:
            priority = volume.priority

        volume.update({'status': CREATING})

        cmd = volume_api.VolumeCreateCmd()
        if is_sync:
            cmd = volume_api.VolumeSyncCreateCmd()
            self._package_cmd(volume, cmd)
            cmd.priority = priority

            # Synchronous path: roll the DB row back when the backend fails.
            try:
                self.taskm.sync_post(cmd, callback=self.volume_create_callback)
            except Exception, e:
                volume.delete()
                raise e
        else:

            self._package_cmd(volume, cmd)

            cmd.priority = priority

            # Fire-and-forget; the callback moves status to normal/fail.
            response_or_task = self.taskm.async_post(cmd, callback=self.volume_create_callback)

        return volume

    def _package_cmd(self, volume, cmd):
        """Populate an agentapi volume command from *volume*.

        Copies path/size/provisioning/replica settings, the protection
        domain, CHAP credentials, QoS/ACL/snapshot-policy JSON payloads and
        a 12-hour timeout onto the command object.

        (Removed the unused local ``pool = volume.pool``.)

        :param volume: models.Volume row to read from.
        :param cmd: agentapi command object; mutated in place.
        :return: the same *cmd*, for call chaining.
        """
        cmd.target = volume.path
        cmd.path = sqlalchemy_obj_to_dict(volume.path, is_sql_obj=False)
        cmd.size = volume.size
        cmd.lich_system_repnum = volume.repnum
        cmd.provisioning = volume.provisioning

        # The storage area maps to the volume's protection domain, if any.
        cmd.storage_area = None
        if volume.pd:
            cmd.storage_area = volume.pd.name

        # iSCSI CHAP credentials.
        cmd.lich_system_username = volume.sp_chap_name
        cmd.lich_system_password = volume.sp_chap_password

        cmd.analysis_latency = volume.analysis_latency

        # Volume operations can be slow on large volumes; allow 12 hours.
        cmd.timeout = 12 * 60 * 60

        cmd.snapshot_policys = data2json(volume.snapshot_policys)
        cmd.access_policy = data2json(volume.access_policy)
        cmd.qos = data2json(volume.qos)

        cmd.target_id = volume.id
        return cmd

    def check_quota(self, size, pool, skip_volume_id=-1):
        """Raise QuotaError when *size* bytes would exceed the applicable quota.

        Pool quota takes precedence over the owning user's quota; with no
        quota configured the check always passes.

        :param size: requested size in bytes.
        :param skip_volume_id: when resizing, the volume whose current size
            should not count against the free quota (-1 = none).
        """
        size_gb = utils.byte2GB(int(size), unit=1000)

        if pool.quota:
            # Pool-level quota: free = quota minus the sum of existing volumes.
            allocated_gb = 0
            for vol in pool.volumes:
                allocated_gb += int(vol.size_gb)
            free = int(pool.quota) - allocated_gb
        elif pool.sp_quota:
            # User-level quota.
            free = pool.user.free_quota
        else:
            # No quota configured: choose a value that always passes.
            free = size_gb * 2

        # Do not count the skipped volume's own size against the free quota.
        if skip_volume_id != -1:
            free = free + self._get_one(skip_volume_id).size_gb

        if size_gb > free:
            raise exception.QuotaError(free=free)
    
    @enable_oplog(resource='volume', event='create')
    def volume_create_callback(self, _logger, rsp):
        """Finalize a create task: mark the volume 'normal' or 'fail',
        then nudge cluster recovery/balance.
        """
        vol = self._get_one(rsp.target_id)
        _logger.update_props(oplog_obj=vol.path)

        new_status = 'normal' if rsp.success else 'fail'
        vol.update({'status': new_status})

        self._trigger_recovery_and_balance(vol.cluster_id)
        return vol

    def _trigger_recovery_and_balance(self, cluster_id):
        """Ask every host in the cluster to start recovery and balance now.

        Per-host failures are swallowed (best effort) via
        utils.exception_pass.

        Fixed: used ``self.utils.exception_pass`` — no ``utils`` attribute is
        set anywhere in this class (base Manager may differ; verified against
        sync_volume_connections(), which uses the module-level helper).
        """
        cluster = _sw.db_cluster(cluster_id)
        for host in cluster.hosts:
            utils.exception_pass(self.api_sync_call, host.ip,
                                 'echo 1 > /dev/shm/lich4/nodectl/recovery/immediately')
            utils.exception_pass(self.api_sync_call, host.ip,
                                 'echo 1 > /dev/shm/lich4/nodectl/balance/immediately')


    def scan_cluster_avail(self, cluster_id):
        """Refresh used/capacity/avail for a cluster from ``cluster.py stat``.

        Tries each host in turn until one answers; parses the first 'used'
        and 'capacity' lines of the output.

        :return: available capacity (capacity - used) as an int.
        """
        cluster = _sw.db_cluster(cluster_id)

        cmd = '%s/lich/admin/cluster.py stat -h' % (self.lich_home)
        res = '0'
        for host in cluster.hosts:
            try:
                res = self.api_sync_call(host.ip, cmd)
                break
            except Exception as e:
                LOG.error('get cluster capacity %s' % e)

        used = capacity = 0
        for line in res.split('\n'):
            # Lines look like "used     : <n>" / "capacity : <n>".
            if 'used' in line:
                fields = [f for f in line.split(':') if f]
                used = fields[-1]
            if 'capacity' in line:
                fields = [f for f in line.split(':') if f]
                capacity = fields[-1]

        avail = int(capacity) - int(used)
        cluster.update({'avail': int(avail), 'capacity': int(capacity), 'used': int(used)})
        return avail

    @enable_log_and_session(resource='volume', event='delete')
    def delete(self, _logger, kwargs):
        """Delete the addressed volume (DB and lich backend).

        Marks the oplog entry as failed when the low-level delete reports
        failure, but still returns True to the caller.
        """
        volume = _sw.get_volume(kwargs)
        if not volume:
            raise exception.VolumeNotFound(kwargs)

        _logger.update_props(oplog_obj=volume.path)

        kwargs['id'] = volume.id
        if not self._delete(volume, **kwargs):
            _logger.ok = False
        return True

    @inspect_func
    def _delete(self, volume, force=None, fake_delete=False, **kwargs):
        """Remove *volume*; optionally only soft-delete a healthy one.

        :param force: accepted for interface compatibility; unused here.
        :param fake_delete: when True and the volume is 'normal', soft-delete
            the DB row and skip the lich-side removal.
        :return: result of the DB delete (truthy on success).
        """
        LOG.info('delete volume: %s: %s' % (volume.id, volume.name))

        self._check_before_delete(volume)

        # Capture addressing info before the row goes away.
        path = volume.path
        cluster_id = volume.cluster_id

        if fake_delete and volume.status == 'normal':
            return volume.fake_delete()

        is_succ = volume.delete()
        # TODO: tolerate "No such file" from the backend.
        self.lichVolume.delete(LichVolumeParam(path, cluster_id=cluster_id))
        return is_succ

    def _check_before_delete(self, volume):
        """Refuse deletion while snapshots or snapshot policies reference the volume.

        :raises exception.VolumeReferenced: when snapshots exist.
        :raises exception.SnapshotReferenced: when snapshot policies are attached.
        """
        if volume.snapshots:
            for snap in volume.snapshots:
                LOG.info('--- snapshot %s %s' % (snap.id, snap.path))
            raise exception.VolumeReferenced('卷上有快照')
        if volume.snapshot_policys:
            raise exception.SnapshotReferenced('不能删除已设置快照策略的卷')
        return True

    @enable_log_and_session(resource='volume', event='expunge')
    def expunge(self, _logger, kwargs):
        """Hard-delete a volume from both the database and the lich backend,
        bypassing the pre-delete reference checks.
        """
        volume = _sw.get_volume(kwargs)
        if not volume:
            raise exception.VolumeNotFound(kwargs)

        # Capture addressing info before the row goes away.
        path = volume.path
        cluster_id = volume.cluster_id
        _logger.update_props(oplog_obj=volume.path)

        volume.delete()
        self.lichVolume.delete(LichVolumeParam(path, cluster_id=cluster_id))
        return True

    @enable_log_and_session(resource='volume', event='restore')
    def restore(self, _logger, kwargs):
        """Undo a fake-delete on the addressed volume."""
        volume = _sw.get_volume(kwargs)
        if not volume:
            raise exception.VolumeNotFound(kwargs)

        _logger.update_props(oplog_obj=volume.path)
        return volume.restore()

    def expunge_auto(self):
        """Expunge fake-deleted volumes older than the configured retention.

        The retention period in days comes from the ``volume_delete.period``
        row of SysconfigForUMP, defaulting to 7 when the row is absent.

        NOTE(review): when the config row exists, ``interval`` is the ORM row
        object itself, so ``int(interval)`` below will raise unless the model
        defines __int__ — this likely should read the row's value column.
        Confirm and fix.
        """
        volumes = models.Volume.query.filter_by(fake_deleted=1).all()
        interval = models.SysconfigForUMP.query.filter_by(name='volume_delete.period').first() or 7
        for volume in volumes:
            if datetime.datetime.now() - volume.fake_deleted_at > timedelta(days=int(interval)):
                volume.delete()
                param = LichVolumeParam(volume.path, cluster_id=volume.cluster_id)
                self.lichVolume.delete(param)

    @enable_log_and_session(resource='volume', event='update')
    def update(self, _logger, kwargs):
        """Look up the addressed volume and apply the requested changes."""
        volume = _sw.get_volume(kwargs)
        if not volume:
            raise exception.VolumeNotFound(kwargs)

        _logger.update_props(oplog_obj=volume.path)
        return self._update(volume, **kwargs)

    def _update(self, volume, size=None, repnum=None, access_policy_id=None, qos_id=None, priority=None,
                analysis_latency=None, snappolicy_id=None, chap_name=None, chap_password=None, **kwargs):
        """Apply the given attribute changes to *volume*.

        Updates the DB row, then posts an async VolumeUpdateCmd so the
        backend converges. Only parameters that are not None are applied.

        Fixed: removed a duplicated
        ``utils.update_values(values, 'access_policy_id', ...)`` call and the
        unused ``pre_values`` local.

        :return: the updated Volume row (backend update completes async).
        """
        values = {}
        utils.update_values(values, 'access_policy_id', access_policy_id)
        utils.update_values(values, 'qos_id', qos_id)
        utils.update_values(values, 'repnum', repnum)
        utils.update_values(values, 'size', size)
        utils.update_values(values, 'chap_name', chap_name)
        utils.update_values(values, 'chap_password', chap_password)
        utils.update_values(values, 'analysis_latency', analysis_latency)

        if snappolicy_id is not None:
            # Resolve policy ids to ORM rows for the relationship column.
            snapshot_policys = _sw.get_list_by_ids(models.SnapshotPolicy, snappolicy_id)
            utils.update_values(values, 'snapshot_policys', snapshot_policys, is_list=True)

        if size:
            # Skip this volume's own current size when checking the quota.
            self.check_quota(size, volume.pool, skip_volume_id=volume.id)

        if priority:
            # '-1' means "reset to the backend default priority".
            if priority == '-1':
                volume = self._priority(volume, 'default')
            else:
                volume = self._priority(volume, priority)
            utils.update_values(values, 'priority', priority)

        # DB
        volume.update(values)

        # Backend: push the full updated volume state asynchronously.
        cmd = volume_api.VolumeUpdateCmd()
        self._package_cmd(volume, cmd)
        cmd.priority = volume.priority

        self.taskm.async_post(cmd, callback=self.volume_update_callback)

        return volume

    def volume_update_callback(self, rsp):
        """Finish an update task and nudge cluster recovery/balance.

        NOTE(review): unlike volume_create_callback, ``rsp.success`` is not
        inspected — status is forced to 'normal' even on failure; confirm
        this is intended.
        """
        vol = self._get_one(rsp.target_id)
        vol.update({'status': 'normal'})

        self._trigger_recovery_and_balance(vol.cluster_id)
        return vol

    def copy(self, kwargs):
        """Copy a volume to a new path (async).

        :param kwargs: dict with 'path' (copy source), 'path2' (copy
            destination) and optionally 'priority'.
        :return: the new (destination) Volume row, in COPYING state until
            the backend task finishes.
        :raises exception.VolumeNotFound: when the source does not exist.
        :raises exception.TipException: when the destination pool is missing.
        """
        path = kwargs['path']
        volume = _sw.get_volume(path)
        if not volume:
            raise exception.VolumeNotFound(path=path)

        path2 = kwargs.get('path2')
        pool2 = _sw.get_pool(path2)
        if not pool2:
            raise exception.TipException(code=300)

        priority = kwargs.get('priority')

        # The new row inherits size/provisioning from the source but lives
        # in the destination pool; it is marked as a clone.
        values = {
            'status': COPYING,
            'name': path2.vol_name,
            'cluster_id': volume.cluster_id,
            'protection_domain_id': volume.protection_domain_id,
            'protocol': pool2.protocol,
            'user_id': pool2.user_id,
            'pool_id': pool2.id,
            'clone_type': CLONE,
            'size': volume.size,
            'used': volume.used,
            'provisioning': volume.provisioning,
            'is_share': False,
            'priority': priority
        }
        new_volume = models.Volume(values).save()

        cmd = volume_api.VolumeCopyCmd()
        cmd.src_volume_path = str(path)
        cmd.dist_volume_path = str(path2)
        cmd.target = str(path2)
        cmd.target_id = new_volume.id

        # Copies can be slow on large volumes; allow 12 hours.
        cmd.timeout = 12 * 60 * 60

        # '-1' (or unset) means "no explicit priority" on the backend.
        if priority == '-1' or priority is None:
            cmd.priority = None
        else:
            cmd.priority = priority

        self.taskm.async_post(cmd, callback=self.copy_callback)

        return new_volume

    @enable_oplog(resource='volume', event='copy')
    def copy_callback(self, _logger, rsp):
        """Finish a copy task: mark 'normal' on success, drop the row on failure."""
        vol = self._get_one(rsp.target_id)

        _logger.update_props(oplog_obj=vol.path)

        if not rsp.success:
            vol.delete()
        else:
            vol.update({'status': 'normal'})

        return vol


    @enable_log_and_session(resource='volume', event='resize')
    def resize(self, _logger, kwargs):
        """Resize the addressed volume to kwargs['size'] bytes."""
        new_size = kwargs['size']

        volume = _sw.get_volume(kwargs)
        if not volume:
            raise exception.VolumeNotFound(kwargs)

        _logger.update_props(oplog_obj=volume.path)
        return self._resize(volume, new_size)

    def _resize(self, volume, new_size):
        """Resize *volume* to *new_size* bytes in the DB and on the backend."""
        # TODO check size
        _sw.check_pool_quota(volume.pool, new_size, old_size=volume.size)

        # DB: direct attribute write (session flush handled elsewhere).
        volume.size = new_size

        # LICH
        self.lichVolume.resize(LichVolumeParam(volume.path, size=new_size))

        return volume

    @enable_log_and_session(resource='volume', event='priority')
    def priority(self, _logger, kwargs):
        """Set the addressed volume's I/O priority to kwargs['priority']."""
        new_priority = kwargs['priority']

        volume = _sw.get_volume(kwargs)
        if not volume:
            raise exception.VolumeNotFound(kwargs)

        _logger.update_props(oplog_obj=volume.path)
        return self._priority(volume, new_priority)

    def _priority(self, volume, priority):
        """Apply an I/O priority in the DB and on the lich backend."""
        # DB: direct attribute write (session flush handled elsewhere).
        volume.priority = priority

        # LICH
        self.lichVolume.set_priority(LichVolumeParam(volume.path, priority=priority))

        return volume

    @enable_log_and_session(resource='volume', event='list_snapshots', disable_oplog=True)
    def list_snapshots(self, _logger, params):
        """Return the snapshot rows belonging to the addressed volume.

        Fixed: the decorator event was 'resize' (copy-paste from resize()),
        mislabelling log entries for this read-only operation; oplog stays
        disabled either way.
        """
        volume = _sw.get_volume(params)
        if not volume:
            raise exception.VolumeNotFound(params)

        return _sw.db_snapshots({'volume_id': volume.id})

    @enable_log_and_session(resource='volume', event='flatten')
    def flatten(self, _logger, kwargs):
        """Detach a cloned volume from its source via an async flatten task."""
        volume = _sw.get_volume(kwargs)
        if not volume:
            raise exception.VolumeNotFound(kwargs)

        # DB: transitional state until the callback fires.
        volume.update({'status': FLATTENING})

        # LICH: flattening can take long; allow up to 6 hours.
        cmd = volume_api.VolumeFlattenCmd()
        cmd.target = volume.path
        cmd.target_id = volume.id
        cmd.timeout = 6 * 60 * 60
        cmd.path = sqlalchemy_obj_to_dict(volume.path, is_sql_obj=False)

        self.taskm.async_post(cmd, callback=self.flatten_callback)
        return volume

    def flatten_callback(self, rsp):
        """Record a flatten result: clone_type becomes FLATTENED on success,
        reverts to CLONE on failure; status returns to 'normal' either way.
        """
        vol = self._get_one(rsp.target_id)
        clone_type = FLATTENED if rsp.success else CLONE
        vol.update({'status': 'normal', 'clone_type': clone_type})
        return vol

    @enable_log_and_session(resource='volume', event='rename')
    def rename(self, _logger, kwargs):
        """Rename a volume within its pool.

        TODO: renaming across pools fails.
        """
        path = kwargs['path']
        path2 = kwargs['path2']

        # Only same-pool renames are supported.
        if path.pool_path != path2.pool_path:
            raise exception.InvalidParameter(path=path, path2=path2)

        volume = _sw.get_volume(path)
        if not volume:
            raise exception.VolumeNotFound(path)

        if _sw.get_volume(path2):
            raise exception.VolumeFound(path2)

        # LICH first, then DB.
        self.lichVolume.rename(LichVolumeParam(volume.path), path2.vol_name)
        volume.name = path2.vol_name

        return volume

    def _get_one(self, volume_id):
        """Fetch a Volume row by id/spec or raise VolumeNotFound."""
        volume = _sw.get_one(models.Volume, volume_id)
        if volume:
            return volume
        raise exception.VolumeNotFound(volume_id=volume_id)

    ############## SYNC

    def volume_sync(self, cluster_id, isDay=False, is_create=False):
        """Reconcile DB volume rows with the luns actually present in lich.

        :param isDay: also refresh size/used/repnum stats (slow path).
        :param is_create: create DB rows for luns missing from the DB.
        """
        cluster = _sw.db_cluster(cluster_id)
        pools = _sw.db_pools({'cluster_id': cluster.id})

        # TODO host_ip
        host_ip = self._select_host(cluster_id)

        for pool in pools:
            cmd = '%s/lich/libexec/lichbd vol ls %s -p %s' % (self.lich_home, pool.realname, pool.protocol)
            res = self.api_sync_call(host_ip, cmd)
            # Last whitespace-separated field of each non-empty line is the lun name.
            lich_luns = [ln.strip().split()[-1]
                         for ln in res.split('\n') if ln and ln.strip().split()]
            self._delete_vol_not_inlich(pool, lich_luns)

            for lun in lich_luns:
                self._sync_single_lich_lun(pool, lun, isDay=isDay, is_create=is_create)

    def _delete_vol_not_inlich(self, pool, lich_luns):
        """Delete DB volume rows whose luns no longer exist in lich.

        Volumes younger than ~8 seconds are skipped (their backend create may
        still be in flight), as are volumes in a transient SKIP_STATUS state.

        Fixed: used ``timedelta.seconds``, which only counts the
        within-a-day remainder (e.g. "1 day, 3 seconds" -> 3), so volumes
        created almost exactly N days ago could be spuriously skipped;
        ``total_seconds()`` gives the real age.
        """
        for volume in pool.volumes:
            age = datetime.datetime.now() - volume.created_at
            if age.total_seconds() < 8:
                continue
            if volume.name not in lich_luns and volume.status not in SKIP_STATUS:
                volume.delete()

    def _sync_single_lich_lun(self, pool, lich_volume_name, isDay=False, is_create=False):
        """Sync one lich lun into the DB.

        Reads provisioning and latency attrs from lich, optionally refreshes
        size/used/repnum (isDay), updates the existing row or creates a new
        one (is_create).

        NOTE(review): when the row exists, ``volume.update(values)`` runs
        twice (once early with partial values, once at the end) — confirm the
        early update is intentional and not just redundant.

        :return: the (possibly updated) Volume row, or None when the lun has
            no DB row and is_create is False.
        """
        volume_path = UmpPath('%s/%s' % (pool.name, lich_volume_name),
                             protocol=pool.protocol, username=pool.username)

        values = {
            'provisioning': 'thin',
        }
        volume = _sw.db_volume({'pool_id': pool.id, 'name': lich_volume_name})
        if volume:
            if isDay:
                size, used, repnum = self.get_lun_stat(pool, volume.name)
                values.update({'used': used, 'repnum': repnum})

            volume.update(values)

            # Preserve the row's current latency setting as the default.
            values['analysis_latency'] = volume.analysis_latency

        # LICH: read authoritative attributes from the backend.
        param = LichVolumeParam(volume_path)

        provisioning = self.lichVolume.get_attr(param, 'provisioning')
        if provisioning:
            # Backend may return a quoted value, e.g. "thin".
            provisioning = provisioning.strip().strip('"')
            values['provisioning'] = provisioning

        analysis_latency = self.lichVolume.get_attr(param, 'lich_system_latency')
        if analysis_latency:
            values['analysis_latency'] = analysis_latency

        values.update({
            'name': lich_volume_name.strip(),
            'cluster_id': pool.cluster_id,
            'protocol':pool.protocol,
            'user_id': pool.user_id,
            'pool_id': pool.id,
            'is_share': False,
        })

        if isDay:
            size, used, repnum = self.get_lun_stat(pool, lich_volume_name)
            values.update({'used':used, 'size': size, 'repnum':repnum})

        if volume :
            # Volumes in a transient state get provisioning forced to
            # 'thick' — NOTE(review): intent unclear; confirm.
            if volume.status in SKIP_STATUS:
                values['provisioning'] = 'thick'
            volume.update(values)
        else:
            if is_create:
                models.Volume(values).save()
        return volume

    def _sync_volume_used(self):
        """Refresh size/used/repnum for every non-transient volume from lich."""
        for volume in models.Volume.query.all():
            if volume.status in SKIP_STATUS:
                continue

            size, used, repnum = self.get_lun_stat(volume.pool, volume.name)
            volume.update({'used': used, 'size': size, 'repnum': repnum})
        
    def get_lun_stat(self, pool=None, lun=None):
        """Query lich for a lun's size, used bytes and replica count.

        Runs stat/inspect/chunkinfo on a cluster host and parses the output;
        retries once when size parses to 0.

        NOTE(review): the ``pool is None`` branch is unreachable/broken —
        ``pool.protocol_root`` is dereferenced on the first line (raising
        AttributeError before the check), and its format string has two %s
        placeholders but only one argument. Confirm intent and repair.

        :return: (size_bytes:int, used_bytes:int, repnum:str)
        :raises Exception: when no size can be parsed from the output.
        """
        lun_path ='/%s/%s/%s' % (pool.protocol_root, pool.realname.strip(), lun)
        if pool is None:
            cmd = '%s/lich/admin/storage.py --stat /%s' % (self.lich_home)
        else:
            cmd = '%s/lich/libexec/lichfs --stat %s' % (self.lich_home, lun_path)

        host_ip = self._select_host(pool.cluster_id)

        used = count = 0
        size = allocated = repnum = ''
        inspect_cmd = '%s/lich/libexec/lich.inspect --stat %s' % (self.lich_home, lun_path)
        dump_cmd = '%s/lich/libexec/lich.inspect --chunkinfo %s' % (self.lich_home, lun_path)
        # All three commands run in one shell round-trip.
        cmd = ";".join([cmd, inspect_cmd, dump_cmd])
        while count < 2:
            count += 1
            res = self.api_sync_call(host_ip, cmd)
            lun_info = res.split('\n')
            for info in lun_info :
                info = info.strip()
                if info.startswith('Size'):
                    size = info

                if info.startswith('allocated'):
                    allocated = info

                if 'repnum' in info:
                    regexp = re.compile(r'repnum:([\s\S]*)')
                    repnums = regexp.findall(info)
                    repnum = ''.join(repnums[:1]).strip()

            # Python 2 filter() on a str returns a str of the digit chars.
            size = filter(str.isdigit, str(size))
            if not size:
                raise Exception('存储服务异常，无法获取卷的大小')

            allocated_num = filter(str.isdigit, str(allocated))
            if allocated_num:
                # 'allocated' is reported in MB.
                used = int(allocated_num) * 1024 * 1024

            size = int(size)
            # Retry once when size parsed to 0; count=3 forces loop exit.
            if size == 0 and count<2:
                continue
            else:
                count = 3

        lun_info = res.split('\n')
        return size, used, repnum

    @enable_log_and_session(resource='sync', event='sync', disable_oplog=True)
    def volume_get_used(self, _logger, kwargs=None):
        """Refresh (throttled) a volume's size/used stats; result is discarded.

        Fixed: the default argument was a mutable ``kwargs={}`` — a shared
        dict that any in-place mutation would leak between calls. ``None``
        plus an internal fallback is behavior-compatible.
        """
        self._volume_get_used(**(kwargs or {}))

    def _volume_get_used(self, id, **kwargs):
        """Return (size_gb, used) for a volume, refreshing from lich at most
        once per 300 seconds (tracked via ``locked_at``).

        COPYING volumes always return the cached DB values; 'waiting' /
        'creating' volumes always refresh. Refresh failures fall back to the
        cached values silently.

        NOTE(review): ``locked_at.fromtimestamp(now)`` calls the datetime
        classmethod through an instance — equivalent to
        ``datetime.datetime.fromtimestamp(now)`` but confusing; and the bare
        ``except Exception: pass`` hides refresh errors. Confirm and clean up.

        :param id: volume id (parameter name shadows the builtin but is part
            of the keyword interface for callers).
        """
        volume_id = id
        volume = _sw.db_volume(volume_id)
        now = time.time()
        delta = timedelta(seconds=300)
        locktime = datetime.datetime.today()
        locked_at = volume.locked_at
        if locked_at is None:
            # No previous refresh: fabricate a timestamp old enough (>300s)
            # to force one now.
            locked_at = locktime - timedelta(seconds=407)

        nowdate =  locked_at.fromtimestamp(now)
        if volume.status == COPYING:
            return volume.size_gb, volume.used_gb

        if nowdate - locked_at > delta or volume.status in ['waiting', 'creating']:
            try :
                size, used, repnum = self.get_lun_stat(volume.pool, volume.name)
                volume.update({
                    'locked_at': locktime,
                    'size': size,
                    'used': used,
                    'repnum': repnum
                })
                size_gb = size/1000/1000/1000
                return size_gb, used
            except Exception,e:
                pass

        return volume.size_gb, volume.used_gb

    def sync_volume_connections(self):
        """Refresh iSCSI connection rows for every volume; per-volume errors
        are swallowed (best effort).
        """
        for volume in _sw.db_volumes():
            utils.exception_pass(self._get_volume_connections, volume)

    def _get_volume_connections(self, volume):
        """Reconcile the volume's Connection rows with live iSCSI sessions.

        Queries the backend for {server: [client addresses]}, deletes DB
        connections no longer present and inserts new ones.

        NOTE(review): inside the per-server loop, connections absent from
        THIS server's client list are deleted even if they belong to another
        server in ``connections_dict`` — verify this cross-server deletion
        is intended. Also ``host`` may be None when the lookup misses,
        making ``host.id`` raise; and rows are deleted while iterating the
        ``connections`` collection.

        :return: the raw {server: clients} mapping, or None for volumes in a
            transient SKIP_STATUS state.
        """
        if volume.status in SKIP_STATUS:
            return

        param = LichVolumeParam(volume.path)
        connections_dict = self.lichVolume.iscsi_connection(param)
        # Hosts are matched by hostname unless the cluster runs without
        # /etc/hosts entries ('globals.nohosts'), then by IP.
        spec_column = 'hostname' if volume.cluster.config.get('globals.nohosts') == 'off' else 'ip'
        for srv, clients in connections_dict.iteritems():
            connections = volume.connections

            # Drop DB connections whose client address is no longer live.
            for connection in connections:
                if connection.address in clients:
                    continue

                connection.delete()

            # Insert rows for newly seen client addresses.
            for address in clients:
                connection = _sw.get_one(models.Connection, id_or_spec={'volume_id':volume.id, 'address':address})
                host = _sw.get_one(models.Host, id_or_spec={spec_column:srv})

                if connection:
                    continue

                values = {
                    'address': address.strip(),
                    'volume_id': volume.id,
                    'host_id': host.id,
                }
                models.Connection(values).save()

        return connections_dict

if __name__ == '__main__':
    # Ad-hoc manual testing entry point.
    vom = VolumeManager()
    # vom.volume_sync(1)
    # vom._get_volume_connections(volume)
    # vom.sync_volume_connections()
