#!/usr/bin/env python2
#-*- coding: utf-8 -*-

import os
import re
import json
import time
import copy
import traceback

from datetime import timedelta

from Ump.common import log
from Ump.common import config
from Ump import utils 

from Ump.objs.db import models
from Ump.objs.manager_base import Manager
from Ump.objs.iops._iops import mkdir_, volume_iops, host_iops_
from Ump.objs.session_wrapper import _sw

from Ump.lich.shell import LichShell, LichShellParam


LOG = log.get_log('Ump.objs.iops.manager')


def utc_time(timestamp, offset_seconds=28800):
    """Convert an epoch timestamp (seconds) to chart milliseconds.

    The timestamp is shifted by ``offset_seconds`` before scaling; the
    default 28800 (8 * 3600) shifts into UTC+8 (China Standard Time),
    matching the original hard-coded behavior.

    :param timestamp: epoch seconds; anything ``float()`` accepts
                      (int, float or numeric string).
    :param offset_seconds: timezone shift in seconds (default UTC+8).
    :returns: float milliseconds, suitable for ms-based chart axes.
    """
    return (float(timestamp) + offset_seconds) * 1000


class LatencyManager(Manager):
    """Collects per-volume read/write latency from lich hosts and rolls it
    up into per-cluster latency history records (``models.Latency``)."""

    # Matches the chunk id in a `lichfs --stat` "File ... Id: <id>" line.
    # Compiled once instead of per-line inside the parse loop.
    _ID_REGEXP = re.compile(r'Id:\s*(\S+)')

    def __init__(self):
        super(LatencyManager, self).__init__()
        # Directory on each host where lich exposes per-chunk latency files.
        self.latency_dir = '/dev/shm/lich4/volume/latency'
        self.lichShell = LichShell()

    def _fetch_chunkid(self, volume):
        """Ensure *volume* has its lich chunkid populated.

        Runs ``lichfs --stat`` on a host of the volume's cluster and parses
        the chunk id out of the 'File' line.

        :returns: the (possibly updated) volume.
        """
        if volume.chunkid:
            return volume

        host_ip = self._select_host(volume.cluster_id)
        cmd = '%s/lich/libexec/lichfs --stat %s' % (self.lich_home, volume.path)
        res = self._exec_remote(host_ip, cmd)
        for line in res.split('\n'):
            if line.strip().startswith('File'):
                _ids = self._ID_REGEXP.findall(line)
                _id = ''.join(_ids[:1]).strip()
                volume.update({'chunkid': _id})
                break  # only one 'File' line is expected in the stat output
        return volume

    def cluster_volume_latency(self):
        """Average the latest read/write latency over all real (non-fake)
        volumes of cluster 1 and persist the result via create_latency."""
        cluster = _sw.db_cluster(1)

        latency_num = 0
        cluster_read_latency = cluster_write_latency = 0
        for volume in cluster.no_fake_volumes:
            # Volumes that are not fully created cannot report latency.
            if volume.status in ['creating', 'waiting', 'fail']:
                continue
            try:
                volume = self._fetch_chunkid(volume)
            except Exception as e:
                # Best-effort per volume: skip it, but record the full
                # traceback so the failure remains diagnosable.
                LOG.info(e)
                LOG.info(traceback.format_exc())
                continue

            _inquire_ok, volume_latency = self._inquire_volume_latency(volume)
            if _inquire_ok:
                latency_num += 1
                cluster_read_latency += int(volume_latency.read_latency)
                cluster_write_latency += int(volume_latency.write_latency)

        # Python 2 int division (floor) — preserved from the original.
        read_latency_avg = write_latency_avg = 0
        if latency_num != 0:
            read_latency_avg = cluster_read_latency / latency_num
            write_latency_avg = cluster_write_latency / latency_num

        LOG.info('read_latency: %s write_latency: %s' % (read_latency_avg, write_latency_avg))
        self.create_latency(cluster, read_latency_avg, write_latency_avg)

    def create_latency(self, cluster, read_latency_avg, write_latency_avg):
        """Append the new averages to the cluster's Latency record.

        Creates the record (seeded with a flat zero baseline spanning the
        last 24h) if none exists yet; otherwise appends the new sample and
        re-normalizes the stored series with handler_data.

        :returns: the created or updated ``models.Latency`` instance.
        """
        now_timestamp = time.time()
        start_time = now_timestamp - 24 * 60 * 60

        utc_start_timestamp = utc_time(start_time)
        utc_timestamp = utc_time(now_timestamp)
        # Zero baseline: start of window and one minute ago, then the sample.
        default_base = [[utc_start_timestamp, 0], [utc_timestamp - 60 * 1000, 0]]
        default_read_data = default_base + [[utc_timestamp, read_latency_avg]]
        default_write_data = default_base + [[utc_timestamp, write_latency_avg]]
        values = {
            'timestamp': now_timestamp,
            'cluster_id': cluster.id,
            'read_latency_val': read_latency_avg,
            'write_latency_val': write_latency_avg,
            'read_latency_data': json.dumps(default_read_data),
            'write_latency_data': json.dumps(default_write_data),
        }

        latency = _sw.get_one(models.Latency, id_or_spec={'cluster_id': cluster.id})
        if latency:
            read_latency_data = latency.read_latency or []
            write_latency_data = latency.write_latency or []

            read_latency_data.append([utc_timestamp, read_latency_avg])
            write_latency_data.append([utc_timestamp, write_latency_avg])

            read_latency_data = self.handler_data(read_latency_data, utc_start_timestamp)
            write_latency_data = self.handler_data(write_latency_data, utc_start_timestamp)

            values['read_latency_data'] = json.dumps(read_latency_data)
            values['write_latency_data'] = json.dumps(write_latency_data)
            latency.update(values)
        else:
            latency = models.Latency(values).save()
        return latency

    def handler_data(self, datas, utc_start_time):
        """Normalize a day's worth of ``[timestamp_ms, value]`` samples.

        Drops samples older than *utc_start_time*, anchors the series with
        a zero point at the window start, and inserts zero-valued sentinel
        points around gaps larger than ~200s so charts render drops to
        zero instead of interpolating across missing samples.

        Fixes vs the original: the conditional second append of the
        identical ``[utc_start_time, 0]`` point was redundant and is
        removed; an empty series no longer raises IndexError on data[-1].
        """
        data = self.del_oneday_ago_data(datas, utc_start_time)

        delta = 200 * 1000  # gap threshold in milliseconds (~200s)
        redata = [[utc_start_time, 0]]
        if not data:
            return redata

        for d in range(len(data) - 1):
            redata.append(data[d])
            gap = data[d + 1][0] - data[d][0]
            # Close the gap down to zero on the left edge...
            if gap > delta + 200 and data[d][1] != 0:
                redata.append([data[d][0] + delta, 0])
            # ...and ramp up from zero just before the right edge.
            if gap > delta + 200 and data[d + 1][1] != 0:
                redata.append([data[d + 1][0] - delta - 1, 0])

        redata.append(data[-1])
        return redata

    def del_oneday_ago_data(self, datas, utc_start_time):
        """Drop samples older than *utc_start_time* (one day ago).

        Returns the suffix of *datas* starting at the first sample whose
        timestamp is >= *utc_start_time*.  The original loop iterated
        ``range(len(datas) - 1)`` so the last sample was never examined
        and up to two stale points could be retained; here, when every
        sample is stale (or *datas* is empty), only the newest sample is
        kept so callers still get chartable data.
        """
        start = float(utc_start_time)
        for index in range(len(datas)):
            if datas[index][0] >= start:
                return datas[index:]
        # All samples predate the window (or datas is empty).
        return datas[-1:]

    def _inquire_volume_latency(self, volume):
        """Ask each host of the volume's cluster for its latency file.

        Stops at the first host that returns data.  If no host answers,
        the zero defaults are used (the original overwrote the defaults
        with the falsy result, crashing create_volume_latency on None).

        :returns: (inquire_ok, VolumeLatency) tuple.
        """
        lich_latency_file = os.path.join(self.latency_dir, volume.chunkid)
        latencies = {
            'read_latency': 0,
            'write_latency': 0,
        }
        _inquire_ok = False
        for host in volume.cluster.hosts:
            param = LichShellParam(host.ip, is_http=True)
            result = self.lichShell.cat_latency(param, lich_latency_file)
            if result:
                latencies = result
                _inquire_ok = True
                break

        return _inquire_ok, self.create_volume_latency(volume, latencies)

    def create_volume_latency(self, volume, lich_latencies):
        """Persist one VolumeLatency sample for *volume*.

        Copies *lich_latencies* before stamping it so the caller's dict
        is not mutated as a side effect.
        """
        values = dict(lich_latencies)
        values['timestamp'] = time.time()
        values['volume_id'] = volume.id
        volume_latency = models.VolumeLatency(values).save()
        return volume_latency


if __name__ == '__main__':
    # Manual entry point: run a single cluster-wide aggregation pass.
    manager = LatencyManager()
    manager.cluster_volume_latency()
