#!/usr/bin/env python2
#-*- coding: utf-8 -*-

import os
import re
import json
import time
import copy
import traceback

from datetime import datetime, timedelta

from Ump.common import log
from Ump.common import config
from Ump import utils

from Ump.objs.db import models
from Ump.objs.manager_base import Manager
from Ump.objs.iops._iops import mkdir_, volume_iops, host_iops_
from Ump.objs.session_wrapper import _sw

LOG = log.get_log('Ump.objs.iops.manager')

def utc(x):
    """Convert a unix timestamp (seconds) to UTC+8 milliseconds.

    Adds the 8-hour (28800 s) offset, then scales to milliseconds so the
    value can be used directly by the frontend chart series.
    (Was a lambda bound to a name; PEP 8 prefers ``def``.)
    """
    return (float(x) + 28800) * 1000

class IopsManager(Manager):
    """Collects, aggregates and persists IOPS statistics.

    Raw per-host dumps are fetched through the ump-proxy-download
    service and unpacked under ``<install>/tmp/ump/host/``; samples are
    rolled up per volume, host, pool and cluster.
    """

    def __init__(self):
        super(IopsManager, self).__init__()

        # Directory where per-host IOPS tarballs are unpacked.
        # (self.install_path is provided by the Manager base class.)
        self.io_path = '%s/tmp/ump/host/' % self.install_path
        # TCP port of the ump-proxy-download service.
        self.proxy_download_port = int(config.proxy_download_port)
    
    def check_port(self, host):
        """Ensure the ump-proxy-download daemon is alive and listening.

        Cross-checks ``lsof`` on the download port against the recorded
        pid file: removes a stale pid file, and (re)starts the daemon
        whenever either the listener or the pid file is missing.

        :param host: host object; only ``host.ip`` is used (for logging).
        """
        recode, output, outerr = utils._exec_pipe('lsof -i:%s' % (config.proxy_download_port))
        recode, pidput, err = utils._exec_pipe('cat /var/run/ump_proxy_download.pid')

        # A pid file that does not match any listener on the port is stale.
        if pidput != '' and output != '':
            if pidput.strip() not in output:
                recode, put, err = utils._exec_pipe('rm -rf  /var/run/ump_proxy_download.pid')

        # Restart the daemon if either side is missing.  This is exactly
        # equivalent to the original three-way condition
        # (o&p) | (!o&p) | (!p&o) == o | p.
        if output == '' or pidput == '':
            os.system('ump-proxy-download --stop > /dev/null 2>&1')
            returncode, stdout, stderr = utils._exec_pipe('ump-proxy-download --start')
            if returncode != 0:
                msg_err = '%s download fail,ump-proxy-download 启动失败:%s' % (host.ip, stderr)
                LOG.error(msg_err)

    def download_io_tar(self, host):
        """Download io.tar.gz from *host* and unpack it.

        On download failure, partially fetched files under
        ``<install>/tmp/ump/host/<hostname>/`` are removed and '' is
        returned; on success the tarball is extracted there (and the
        tarball itself deleted unless config.is_save_iops is 'true').
        """
        self.check_port(host)

        tg_dir = '%s/tmp/ump/host/%s/' % (self.install_path, host.name.strip())
        try:
            fileobj = self._http_down('proxy', self.proxy_download_port, host.ip)
        except Exception as e:
            traceback.print_exc()
            msg_err = '获取%s的IOPS数据包失败,检查存储节点lich_dump.py进程是否出错' % host.ip
            # BUG FIX: was 'rm -rf %s* 2 >/dev/null', which passed a
            # literal "2" to rm and redirected stdout; '2>/dev/null'
            # silences stderr as intended.
            os.system('rm -rf %s* 2>/dev/null' % (tg_dir))
            LOG.warn(msg_err, extra={'cluster_id': host.cluster_id})
            return ''

        mkdir_(tg_dir)
        recode = os.system('tar zxf %s -C %s' % (fileobj, tg_dir))
        if not config.is_save_iops.lower() == 'true':
            os.system('rm -rf %s* 2>/dev/null' % (fileobj))

        LOG.info('tar to %s' % tg_dir)
        return ''
    
    def _fetch_chunkid(self, volume):
        """Return *volume* with its ``chunkid`` attribute populated.

        If the chunkid is already known the volume is returned untouched;
        otherwise it is parsed from the output of ``lichfs --stat`` run
        on any host of the volume's cluster.
        """
        if volume.chunkid:
            return volume

        host_ip = self._select_host(volume.cluster_id)
        cmd = '%s/lich/libexec/lichfs --stat %s' % (self.lich_home, volume.path)
        res = self._exec_remote(host_ip, cmd)
        # 'File ... Id: <chunkid>' -- compile once instead of per line;
        # as before, the last matching 'File' line wins.
        regexp = re.compile(r'Id:\s*(\S+)')
        for line in res.split('\n'):
            if line.strip().startswith('File'):
                _ids = regexp.findall(line)
                _id = ''.join(_ids[:1]).strip()
                volume.update({'chunkid': _id})
        return volume

    def cluster_volume_iops(self, cluster):
        """Sample and persist one VolumeIops record per active volume."""
        for volume in cluster.no_fake_volumes:
            # Volumes not fully provisioned have no IOPS data yet.
            if volume.status in ['creating', 'waiting', 'fail']:
                continue
            try:
                volume = self._fetch_chunkid(volume)
            except Exception as e:  # was py2-only 'except Exception,e'
                LOG.info(e)
                continue
            io, read, write, speed, in_, out_, timestamp = volume_iops(volume)

            values = {
                '_iops': io, '_read': read, '_write': write,
                '_in_out': speed, '_in': in_, '_out': out_,
                'volume_id': volume.id, 'timestamp': int(timestamp)
            }
            models.VolumeIops(values).save()

    def _host_iops(self, host):
        """Aggregate per-volume IOPS into one sample for *host*.

        Sums the per-volume averages read from the newest dump directory,
        stores the result both on the host object (``iops_str``) and as a
        new HostIops row, then returns the host.
        """
        volumes = host.cluster.no_fake_volumes
        avg_in = avg_out = avg_iops = avg_read = avg_write = avg_speed = 0
        # Dump location depends only on the host -- hoisted out of the loop.
        host_io_path = '%s/%s' % (self.io_path, host.name)
        for volume in volumes:
            obj = volume.chunkid
            # NOTE(review): mkdir_/listdir look loop-invariant too, but are
            # kept inside the loop in case host_iops_ adds files -- confirm.
            mkdir_(host_io_path)
            dirs = os.listdir(host_io_path)
            if len(dirs) == 0:
                continue
            # Newest dump directory (names sort chronologically).
            dir_ = '%s/%s' % (host_io_path, sorted(dirs)[-1])
            avg_iops_, avg_read_, avg_write_, \
                avg_in_, avg_out_, avg_speed_, times = host_iops_(dir_, obj)
            avg_in += avg_in_
            avg_out += avg_out_
            avg_iops += avg_iops_
            avg_read += avg_read_
            avg_write += avg_write_
            avg_speed += avg_speed_

        # Convert once; both payloads below share these totals.
        _in = int(avg_in)
        _out = int(avg_out)
        _iops = int(avg_iops)
        _read = int(avg_read)
        _write = int(avg_write)
        _speed = int(avg_speed)

        host.update({'iops_str': json.dumps({
            '_in': _in,
            '_out': _out,
            '_iops': _iops,
            '_read': _read,
            '_write': _write,
            '_speed': _speed,
        })})

        models.HostIops({
            '_in': _in,
            '_out': _out,
            '_iops': _iops,
            '_read': _read,
            '_write': _write,
            '_in_out': _speed,
            'host_id': host.id,
            'timestamp': time.time(),
        }).save()
        return host

    def del_oneday_ago_data(self, datas, start_time):
        """Trim leading samples older than *start_time*.

        Walks the leading run of stale points (never past the final
        element) and cuts just after it -- but only advances past the
        last stale point when more than three entries would remain.
        """
        threshold = float(start_time)
        cut = 0
        for idx, point in enumerate(datas[:-1]):
            if point[0] >= threshold:
                break
            cut = idx

        if len(datas) - cut > 3:
            cut += 1

        return datas[cut:]

    def add_zero(self, data, start_time, delta, stat):
        """Insert explicit zero samples into gaps of a time series.

        Trims points older than *start_time*, then pads any gap wider
        than *delta* (+200 ms slack) with zeros so charts drop to the
        axis instead of interpolating across missing intervals.
        Returns the padded series (a new list).
        """
        # BUG FIX: the original tested ``not (stat == 'month' and stat ==
        # 'week')``, which is always true (stat cannot equal both), so the
        # 4x widening applied to EVERY period.  Week/month arrive with
        # their own larger delta and must not be widened -- 'or' intended.
        if stat != 'month' and stat != 'week':
            delta = delta * 4

        # Work on a deep copy so the caller's series is never mutated.
        datas = self.del_oneday_ago_data(copy.deepcopy(data), start_time)
        if not datas:
            # Nothing left after trimming: nothing to pad.  (The original
            # would have raised IndexError on datas[-1] here.)
            return []

        redata = []
        # Leading gap between the window start and the first sample.
        if datas[0][0] - float(start_time) > delta:
            redata.append([start_time, 0])

        for d in range(len(datas) - 1):
            redata.append(datas[d])
            gap = datas[d + 1][0] - datas[d][0]
            # Drop to zero just after a non-zero point, and rise from
            # zero just before the next non-zero point.
            if gap > delta + 200 and datas[d][1] != 0:
                redata.append([datas[d][0] + delta, 0])
            if gap > delta + 200 and datas[d + 1][1] != 0:
                redata.append([datas[d + 1][0] - delta - 1, 0])
        redata.append(datas[-1])
        return redata

    def volumes_iops(self, volumes, num):
        """Average the last *num* samples per volume and sum across volumes.

        :param volumes: volume objects exposing a ``.iops`` sample list.
        :param num: how many of the newest samples to average per volume.
        :returns: (io, reads, writes, in_out, in_, out_, timestamp) where
            io = reads + writes and in_out = in_ + out_ (the direct sums
            were dead code in the original), and timestamp is the newest
            sample time seen (now() when there are no samples at all).
        """
        reads = 0
        writes = 0
        in_ = 0
        out_ = 0
        all_times = []
        for volume in volumes:
            # (The original fetched this slice twice -- duplicate removed.)
            iops = volume.iops[-num:]
            if not iops:
                continue
            avg_reads = [io._read for io in iops]
            avg_writes = [io._write for io in iops]
            avg_in_ = [io._in for io in iops]
            avg_out_ = [io._out for io in iops]
            all_times.extend(io.timestamp for io in iops)

            reads += sum(avg_reads) / len(avg_reads)
            writes += sum(avg_writes) / len(avg_writes)
            in_ += sum(avg_in_) / len(avg_in_)
            out_ += sum(avg_out_) / len(avg_out_)

        timestamp = max(all_times) if all_times else time.time()
        # Totals are derived, not sampled.
        io_ = reads + writes
        in_out = in_ + out_
        return io_, reads, writes, in_out, in_, out_, timestamp
            
    def pool_iops(self, cluster, now_time):
        """Append one aggregated point to each pool's day/week/month series.

        For every pool in *cluster*, averages its volumes' recent samples
        (volumes_iops) and appends the point to the JSON series stored on
        the pool's IOPS row (created on first use).  Per-pool contributions
        are summed and returned as three tuples
        (io, read, write, speed, in_, out_, timestamps) -- one each for
        day, week and month -- so cluster_iops() can reuse them.

        :param now_time: unix timestamp (seconds) of this collection run.
        """
        day_io = day_read = day_write = day_speed = day_in_ = day_out_ = 0
        day_timestamp = []
        week_io = week_read = week_write = week_speed = week_in_ = week_out_ = 0
        week_timestamp = []
        month_io = month_read = month_write = month_speed = month_in_ = month_out_ = 0
        month_timestamp = []
        for pool in cluster.pools:
            LOG.info('start pool === %s'%pool.name)
            volumes = pool.volumes
            types = ['day','week','month']
            for status in types:
#                now_time = time.time()
                # Window length (seconds) of each chart period.
                if status == 'day':
                    ptime = 24*60*60
                if status == 'week':
                    ptime = 7*24*60*60
                    # NOTE(review): start_week is computed but never read.
                    start_day = utils.get_week(now_time)[0] + ' 00:00:00'
                    start_week = time.mktime(time.strptime(start_day,'%Y-%m-%d %H:%M:%S'))
                if status == 'month':
                    ptime = 30*24*60*60
                    # NOTE(review): start_month is computed but never read.
                    start_day = utils.get_month(now_time)[0] + ' 00:00:00'
                    start_month = time.mktime(time.strptime(start_day,'%Y-%m-%d %H:%M:%S'))
                start_time = utc(now_time - ptime)
                # Default series: a single zero point at the window start.
                seed = [[utc(now_time - ptime), 0]]

                iodata = copy.deepcopy(seed)
                rdata = copy.deepcopy(seed)
                wdata = copy.deepcopy(seed)
                sdata = copy.deepcopy(seed)
                in_data = copy.deepcopy(seed)
                out_data = copy.deepcopy(seed)

                # Load previously stored series if this pool already has an
                # IOPS row for this period.
                iops = _sw.get_one(models.IOPS, id_or_spec={'pool_id':pool.id, 'status': status})
                if iops:
                    iodata = json.loads(iops._iops_data)
                    rdata = json.loads(iops._read_data)
                    wdata = json.loads(iops._write_data)
                    sdata = json.loads(iops._swallow_spit_data)
                    in_data = json.loads(iops._swallow_data)
                    out_data = json.loads(iops._spit_data)

                # Gap threshold (milliseconds) used by add_zero below.
                delta = 200 * 1000
                if status == 'day':
                    # Day chart: only the newest sample per volume; also
                    # persists a PoolIops history row.
                    num = 1
                    io,read,write,speed,in_,out_,timestamp = self.volumes_iops(volumes, num)
                    day_io    += io
                    day_read  += read
                    day_write += write
                    day_speed += speed
                    day_in_   += in_
                    day_out_  += out_
                    day_timestamp.append(timestamp)

                    pool_values = {
                        '_in': int(in_),
                        '_out': int(out_),
                        '_iops': int(io),
                        '_read': int(read),
                        '_write': int(write),
                        '_in_out': int(speed),
                        'pool_id': pool.id,
                        'timestamp': time.time(),
                    }
                    models.PoolIops(pool_values).save()

                if status == 'week':
                    # Week chart: average of the 6 newest samples, throttled
                    # to one stored point per ~1500 s.
                    num = 6
                    if iops:
                        delta = 1500 * 1000
                        if  utc(now_time) - iodata[-1][0] < delta:
                            continue
                    io, read, write, speed, in_, out_, timestamp = self.volumes_iops(volumes, num)
                    week_io    += io
                    week_read  += read
                    week_write += write
                    week_speed += speed
                    week_in_   += in_
                    week_out_  += out_
                    week_timestamp.append(timestamp)
                if status == 'month':
                    # Month chart: average of the 12 newest samples, throttled
                    # to one stored point per ~3300 s.
                    num = 12
                    if iops:
                        delta = 3300 * 1000
                        if  utc(now_time) - iodata[-1][0]  < delta:
                            continue
                    io, read, write, speed, in_, out_, timestamp = self.volumes_iops(volumes, num)
                    month_io    += io
                    month_read  += read
                    month_write += write
                    month_speed += speed
                    month_in_   += in_
                    month_out_  += out_
                    month_timestamp.append(timestamp)
                # No sample newer than the last stored point: record an
                # explicit zero instead of repeating the old value.
                if int(utc(timestamp)) == int(iodata[-1][0]):
                    io,read,write,speed,in_,out_,timestamp = 0,0,0,0,0,0,now_time
                LOG.info('@@@@@@@@@@@@@@@@@stop %s@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@' % pool.name)

                iodata.append([utc(timestamp), io])
                rdata.append([utc(timestamp), read])
                wdata.append([utc(timestamp), write])
                sdata.append([utc(timestamp), speed])
                in_data.append([utc(timestamp), in_])
                out_data.append([utc(timestamp), out_])
                # Trim old points and pad gaps with zeros.
                iodata   = self.add_zero(iodata, start_time, delta, status)
                rdata    = self.add_zero(rdata, start_time, delta, status)
                wdata    = self.add_zero(wdata, start_time, delta, status)
                sdata    = self.add_zero(sdata, start_time, delta, status)
                in_data  = self.add_zero(in_data, start_time, delta, status)
                out_data = self.add_zero(out_data, start_time, delta, status)

                values = {
                    '_iops':io,'_iops_data':json.dumps(iodata),\
                    '_iops_read':read,'_read_data':json.dumps(rdata),\
                    '_iops_write':write,'_write_data':json.dumps(wdata),'pool_id':pool.id,\
                    '_swallow_spit':speed,'_swallow_spit_data':json.dumps(sdata),\
                    '_swallow':in_,'_swallow_data':json.dumps(in_data),\
                    '_spit':out_,'_spit_data':json.dumps(out_data),'status':status\
                }

                if iops:
                    iops.update(values)
                else:
                    models.IOPS(values).save()

        day = day_io, day_read, day_write, day_speed, day_in_, day_out_, day_timestamp
        week = week_io, week_read, week_write, week_speed, week_in_, week_out_, week_timestamp
        month = month_io, month_read, month_write, month_speed, month_in_, month_out_, month_timestamp
        return day, week, month

    def cluster_iops(self, cluster):
        """Append cluster-level day/week/month points and refresh hosts.

        Reuses the sums returned by pool_iops() for the whole cluster,
        then records one HostIops sample per host and clears each host's
        unpacked dump directories.
        """
        now_time = time.time()
        day, week, month = self.pool_iops(cluster, now_time)
        types = ['day','week','month']
        for status in types:
            # Window length (seconds) of each chart period.
            if status == 'day':
                ptime = 24*60*60
            if status == 'week':
                ptime = 7*24*60*60
                # NOTE(review): start_week is computed but never read.
                start_day = utils.get_week(now_time)[0] + ' 00:00:00'
                start_week = time.mktime(time.strptime(start_day, '%Y-%m-%d %H:%M:%S'))
            if status == 'month':
                ptime = 30*24*60*60
                # NOTE(review): start_month is computed but never read.
                start_day = utils.get_month(now_time)[0] + ' 00:00:00'
                start_month = time.mktime(time.strptime(start_day, '%Y-%m-%d %H:%M:%S'))
            start_time = utc(now_time - ptime)
            seed = [[utc(now_time - ptime),0]]

            # Load previously stored cluster-level series, if any.
            iops = _sw.get_one(models.IOPS, id_or_spec={'cluster_id':cluster.id, 'status': status})
            iodata = []
            if iops:
                iodata   = json.loads(iops._iops_data)
                rdata    = json.loads(iops._read_data)
                wdata    = json.loads(iops._write_data)
                sdata    = json.loads(iops._swallow_spit_data)
                in_data  = json.loads(iops._swallow_data)
                out_data = json.loads(iops._spit_data)

            # First run (or empty stored series): start from the seed.
            if iodata == []:
                iodata = copy.deepcopy(seed)
                rdata = copy.deepcopy(seed)
                wdata = copy.deepcopy(seed)
                sdata = copy.deepcopy(seed)
                in_data = copy.deepcopy(seed)
                out_data = copy.deepcopy(seed)

            # Gap threshold (milliseconds) used by add_zero below.
            delta = 300 * 1000
            if status == 'day':
                io, read, write, speed, in_, out_, timestamps = day
            if status == 'week':
                io, read, write, speed, in_, out_, timestamps = week
                # NOTE(review): pool_iops() throttles with ``if iops:``;
                # ``if not iops:`` here only throttles when no stored row
                # exists (iodata is then the seed) -- looks inverted,
                # confirm intent before changing.
                if not iops:
                    delta = 1500 *1000
                    if  utc(now_time) - iodata[-1][0] < delta:
                        continue
                io, read, write, speed, in_, out_, timestamps = week
            if status == 'month':
                # NOTE(review): same suspected inversion as the week branch;
                # the month unpack is also duplicated inside/outside the if.
                if not iops:
                    delta = 3300 *1000
                    io,read,write,speed,in_,out_,timestamps = month
                    if  utc(now_time) - iodata[-1][0]  < delta:
                        continue
                io,read,write,speed,in_,out_,timestamps = month
            # Newest sample time across all pools (now() if none).
            timestamp = time.time()
            if not timestamps == []:
                timestamp = max(timestamps)
            # No sample newer than the last stored point: record zeros.
            if int(utc(timestamp)) == int(iodata[-1][0]):
                io,read,write,speed,in_,out_,timestamp = 0,0,0,0,0,0,now_time

            iodata.append([utc(timestamp), io])
            rdata.append([utc(timestamp), read])
            wdata.append([utc(timestamp), write])
            sdata.append([utc(timestamp), speed])
            in_data.append([utc(timestamp), in_])
            out_data.append([utc(timestamp),out_])
            # Trim old points and pad gaps with zeros.
            iodata   = self.add_zero(iodata, start_time, delta, status)
            rdata    = self.add_zero(rdata, start_time, delta, status)
            wdata    = self.add_zero(wdata, start_time, delta, status)
            sdata    = self.add_zero(sdata, start_time, delta, status)
            in_data  = self.add_zero(in_data, start_time, delta, status)
            out_data = self.add_zero(out_data, start_time, delta, status)

            values = {
                '_iops':io,'_iops_data':json.dumps(iodata),\
                '_iops_read':read,'_read_data':json.dumps(rdata),\
                '_iops_write':write,'_write_data':json.dumps(wdata),'cluster_id':cluster.id,\
                '_swallow_spit':speed,'_swallow_spit_data':json.dumps(sdata),\
                '_swallow':in_,'_swallow_data':json.dumps(in_data),\
                '_spit':out_,'_spit_data':json.dumps(out_data),'status':status\
            }

            # NOTE(review): re-fetches the row already loaded above.
            iops = _sw.get_one(models.IOPS, id_or_spec={'cluster_id':cluster.id, 'status': status})
            if iops:
                iops.update(values)
            else:
                models.IOPS(values).save()

        for host in cluster.hosts:
            # Record one HostIops sample, then drop the unpacked dumps so
            # the next collection starts clean.
            self._host_iops(host)
            data_dir = '%s/tmp/ump/host/%s/' % (self.install_path, host.name.strip())
            mkdir_(data_dir)
            dirs = os.listdir(data_dir)
            for dir_ in sorted(dirs):
                os.system('rm -rf %s/%s '%(data_dir, dir_))
                pass

    def default_io(self, cluster_id, pool='cluster'):
        """Write empty (all-zero) IOPS series for every chart period.

        Seeds the day/week/month charts before real samples exist; an
        existing cluster-level row is reset in place instead of
        duplicated.
        """
        for status in ('day', 'week', 'month'):
            now_time = time.time()
            if status == 'day':
                ptime = 24*60*60
                start_time = now_time - ptime
            if status == 'week':
                start_day = utils.get_week(now_time)[0] + ' 00:00:00'
                start_time = time.mktime(time.strptime(start_day,'%Y-%m-%d %H:%M:%S'))
            if status == 'month':
                start_day = utils.get_month(now_time)[0] + ' 00:00:00'
                start_time = time.mktime(time.strptime(start_day,'%Y-%m-%d %H:%M:%S'))

            # Two zero samples spanning the whole period.
            seed = [[utc(start_time), 0], [utc(now_time), 0]]

            iops = None
            if pool == 'cluster':
                iops = _sw.get_one(models.IOPS, id_or_spec={'cluster_id':cluster_id, 'status': status})

            # Existing row: blank series; fresh row: seeded series.
            if iops:
                series = [[], [], [], [], [], []]
            else:
                series = [copy.deepcopy(seed) for _ in range(6)]
            iodata, rdata, wdata, sdata, in_data, out_data = series

            io, read, write, speed, in_, out_ = 0, 0, 0, 0, 0, 0
            values = {
                '_iops': io, '_iops_data': json.dumps(iodata),
                '_iops_read': read, '_read_data': json.dumps(rdata),
                '_iops_write': write, '_write_data': json.dumps(wdata),
                '_swallow_spit': speed, '_swallow_spit_data': json.dumps(sdata),
                '_swallow': in_, '_swallow_data': json.dumps(in_data),
                '_spit': out_, '_spit_data': json.dumps(out_data),
                'status': status,
            }

            if pool == 'cluster':
                values['cluster_id'] = cluster_id

            if iops:
                iops.update(values)
            else:
                models.IOPS(values).save()

    def delete_tars(self, cluster_id):
        """Delete per-host IOPS tarballs older than 24 hours.

        Tarball names start with their unix timestamp, so the sorted
        listing can stop at the first file that is still recent.
        (Unused ``start_day``/``month_ago`` locals removed; the cutoff is
        now computed once instead of per file.)
        """
        cluster = _sw.db_cluster(cluster_id)
        cutoff = time.time() - 24 * 60 * 60
        for host in cluster.hosts:
            data_dir = '%s/tmp/ump/host/%s/' % (self.install_path, host.ip.strip())
            if not os.path.exists(data_dir):
                continue
            for tar in sorted(os.listdir(data_dir)):
                timestamp = tar.split('.')[0]
                # Skip files whose names are not timestamp-prefixed.
                if not timestamp.isdigit():
                    continue
                if float(timestamp) < cutoff:
                    os.system('rm -rf %s/%s' % (data_dir, tar))
                else:
                    break

    def cluster_iops_sync(self):
        """Run a full IOPS collection cycle for cluster #1.

        Downloads the per-host dumps, records per-volume samples, then
        rolls them up into pool/cluster/host series.  Returns '' when
        the cluster has no hosts, otherwise the (currently always empty)
        message list.
        """
        msgs = []
        cluster = _sw.db_cluster(1)
        if cluster.hosts == []:
            return ''

        # Fetch the IOPS dump tarball from every host.
        for host in cluster.hosts:
            self.download_io_tar(host)

        self.cluster_volume_iops(cluster)
        self.cluster_iops(cluster)
        # print-statement -> function call: same output on py2 and py3.
        print('complete iops')
        return msgs
    
    def delete_volume_iops(self):
        """Hard-delete VolumeIops rows older than 30 days.

        Rows are assumed stored oldest-first, so the per-volume scan
        stops at the first row younger than the cutoff.
        """
        delta = timedelta(days=30)
        models.Volume.is_check_deleted = 1
        volumes = models.Volume.query.all()

        # BUG FIX: the original reset models.Volume.is_check_deleted inside
        # the loop although it queries VolumeIops; the generic delete_iops()
        # sets the flag on the iops model, so do the same here.
        models.VolumeIops.is_check_deleted = 1
        # Current time as a datetime, computed once (loop-invariant; the
        # original called the classmethod via an instance, per row).
        nowdate = datetime.fromtimestamp(time.time())

        for volume in volumes:
            sql_iops = models.VolumeIops.query.filter_by(volume_id=volume.id).all()
            if not sql_iops:
                continue

            for iops in sql_iops:
                if nowdate - iops.created_at > delta:
                    if not iops.deleted:
                        iops.delete()
                    iops.hard_delete()
                else:
                    break

    def delete_iops(self, model_name, model_iops):
        """Hard-delete *model_iops* rows older than 30 days.

        :param model_name: parent model name: 'Host', 'Pool' or 'Volume'.
        :param model_iops: matching iops model: 'HostIops', 'PoolIops'
            or 'VolumeIops'.
        """
        print('start delete @@@@@@@@@@@')

        parent_model = getattr(models, model_name)
        parent_model.is_check_deleted = 1
        parents = parent_model.query.all()

        delta = timedelta(days=30)
        # Current time as a datetime, computed once (loop-invariant; the
        # original called the classmethod via an instance, per row).
        nowdate = datetime.fromtimestamp(time.time())

        # Model lookup and flag are loop-invariant -- hoisted.
        iops_model = getattr(models, model_iops)
        iops_model.is_check_deleted = 1
        fk = '%s_id' % model_name.lower()

        for parent in parents:
            sql_iops = iops_model.query.filter_by(**{fk: parent.id}).all()
            if not sql_iops:
                continue

            # Rows are assumed oldest-first: stop at the first young one.
            for iops in sql_iops:
                if nowdate - iops.created_at > delta:
                    if not iops.deleted:
                        iops.delete()
                    iops.hard_delete()
                else:
                    break




if __name__ == '__main__':
    # Manual maintenance entry point: purge aged Host/Pool IOPS rows.
    manager = IopsManager()
    manager.delete_iops(model_name='Host', model_iops='HostIops')
    manager.delete_iops(model_name='Pool', model_iops='PoolIops')
