#!/usr/bin/env python2
#-*- coding:utf-8 -*-

"""The summary api is not opened yet!!!
"""

import json
import datetime
from flask import Blueprint, request

from Ump import webutil
from Ump.common.utils import inspect_func, yy, conv_float, retains_decimal, percent
from Ump.objs.db import models
from Ump.objs.cluster.manager import ClusterManager
from Ump.objs.session_wrapper import enable_log_and_session, _sw
from sqlalchemy import text

from base import BaseHandler, add_manager

# Module-level manager instance shared by the @add_manager-decorated handlers below.
clusterm = ClusterManager()


@add_manager(clusterm)
class ClusterBase(BaseHandler):
    """Shared helpers that annotate Cluster rows with IOPS / latency data.

    Subclasses call cluster_get_info(), which attaches either the latest
    aggregated (status='day') sample or a raw time-windowed series,
    depending on whether the request supplied start_time/end_time.
    """

    def cluster_set_iops(self):
        """Attach the latest daily IOPS sample and latency to every cluster."""
        clusters = self._ss.query(models.Cluster).all()
        for cluster in clusters:
            sample = self._sw.get_one(
                models.IOPS,
                id_or_spec={'cluster_id': cluster.id, 'status': 'day'})
            if sample:
                cluster.iops = sample._iops
                cluster.iops_data = sample._iops_data
                cluster.iops_read = sample._iops_read
                cluster.read_data = sample._read_data
                cluster.iops_write = sample._iops_write
                cluster.write_data = sample._write_data
                cluster.swallow_spit = sample._swallow_spit
                cluster.swallow_spit_data = sample._swallow_spit_data
                cluster.iops_spit = sample._spit
                cluster.spit_data = sample._spit_data
                cluster.iops_swallow = sample._swallow
                cluster.swallow_data = sample._swallow_data
            lat = self._sw.get_one(models.Latency,
                                   id_or_spec={'cluster_id': cluster.id})
            if lat:
                # NOTE(review): latency is hard-coded to 0 here while the
                # *_bytime variant returns the real series -- confirm intent.
                cluster.latency = 0
                cluster.read_latency = lat.read_latency_val
                cluster.write_latency = lat.write_latency_val
        return clusters

    def cluster_set_iops_bytime(self, start_time, end_time):
        """Attach raw IOPS / latency series for created_at in (start, end)."""
        iops_stmt = text("SELECT * FROM iops "
                        "WHERE created_at>:from_time and created_at<:end_time and cluster_id=:cluster_id")
        latency_stmt = text("SELECT * FROM latency "
                        "WHERE created_at>:from_time and created_at<:end_time and cluster_id=:cluster_id")

        clusters = self._ss.query(models.Cluster).all()
        for cluster in clusters:
            rows = self._ss.query(
                    models.IOPS.created_at,
                    models.IOPS._iops,
                    models.IOPS._iops_data,
                    models.IOPS._iops_read,
                    models.IOPS._read_data,
                    models.IOPS._iops_write,
                    models.IOPS._write_data,
                    models.IOPS._swallow_spit,
                    models.IOPS._swallow_spit_data,
                    models.IOPS._spit,
                    models.IOPS._spit_data,
                    models.IOPS._swallow,
                    models.IOPS._swallow_data
                    ).from_statement(iops_stmt).params(
                    from_time=start_time,
                    end_time=end_time,
                    cluster_id=cluster.id
                ).all()
            if rows:
                # Transpose row tuples into per-column time series; index 0 is
                # created_at and is intentionally skipped.
                series = [list(col) for col in zip(*rows)]
                (cluster.iops, cluster.iops_data, cluster.iops_read,
                 cluster.read_data, cluster.iops_write,
                 cluster.write_data) = series[1:7]
                (cluster.swallow_spit, cluster.swallow_spit_data,
                 cluster.iops_spit, cluster.spit_data,
                 cluster.iops_swallow, cluster.swallow_data) = series[7:13]

            rows = self._ss.query(
                    models.Latency.created_at,
                    models.Latency.latency_val,
                    models.Latency.read_latency_val,
                    models.Latency.write_latency_val,
                    ).from_statement(latency_stmt).params(
                    from_time=start_time,
                    end_time=end_time,
                    cluster_id=cluster.id
                ).all()
            if rows:
                series = [list(col) for col in zip(*rows)]
                cluster.latency, cluster.read_latency, cluster.write_latency = series[1:4]

        return clusters

    def cluster_get_info(self):
        """Return clusters annotated with IOPS/latency, windowed if the
        request carried both start_time and end_time."""
        params = self.custom_data
        start_time = params.get('start_time')
        end_time = params.get('end_time')
        if start_time is None or end_time is None:
            return self.cluster_set_iops()
        return self.cluster_set_iops_bytime(start_time, end_time)


class ClusterInfo(ClusterBase):

    def get(self):
        """Serialize every cluster, annotated with IOPS/latency, as JSON."""
        return webutil.success2json(self.cluster_get_info())


@add_manager(clusterm)
class Cluster_cpu(BaseHandler):
    """Reports cluster CPU usage."""

    @webutil.authenticated(auth=False)
    def get(self):
        """Return the CPU usage of the last cluster in the table as JSON.

        Fix: `cpu` was unbound when the cluster table was empty, raising
        NameError; it now defaults to None.
        NOTE(review): the loop discards every value but the last cluster's --
        presumably a single-cluster deployment; confirm intended output shape.
        """
        cpu = None
        clusters = _sw.query(models.Cluster).all()
        for cluster in clusters:
            cpu = cluster.usage_cpu
        return webutil.success2json(cpu)


@add_manager(clusterm)
class Cluster_memory(BaseHandler):
    """Reports cluster memory usage."""

    def set_memory(self, cluster):
        """Attach display attributes memory_total_show / usage_memory_show.

        Fix: memory_total_show was left unset when mem_total was falsy, so
        readers crashed with AttributeError; it now defaults to 0.
        """
        cluster.memory_total_show = int(cluster.mem_total) if cluster.mem_total else 0
        cluster.usage_memory_show = '0.0'
        if cluster.usage_mem:
            # usage_mem is a fraction; render as a percentage string.
            cluster.usage_memory_show = "%.2f" % conv_float(cluster.usage_mem * 100.0)
        return cluster

    @webutil.authenticated(auth=False)
    def get(self):
        """Return the memory total of the last cluster as JSON.

        Fix: `total` was unbound (NameError) when no clusters exist; the
        unused `used` local was dropped.
        NOTE(review): only the last cluster's value is returned -- confirm
        intended output shape.
        """
        total = None
        clusters = _sw.query(models.Cluster).all()
        for cluster in clusters:
            cluster = self.set_memory(cluster)
            total = cluster.memory_total_show
        return webutil.success2json(total)


class Cluster_iops(ClusterBase):

    def get(self):
        """Per-cluster IOPS rows:
        (name, iops, iops_data, iops_read, read_data, iops_write, write_data)."""
        rows = [
            (c.name, c.iops, c.iops_data, c.iops_read,
             c.read_data, c.iops_write, c.write_data)
            for c in self.cluster_get_info()
        ]
        return webutil.success2json(rows)


class Cluster_io(ClusterBase):

    def get(self):
        """Per-cluster I/O throughput rows:
        (name, swallow_spit, swallow_spit_data, iops_spit, spit_data,
         iops_swallow, swallow_data)."""
        rows = [
            (c.name, c.swallow_spit, c.swallow_spit_data, c.iops_spit,
             c.spit_data, c.iops_swallow, c.swallow_data)
            for c in self.cluster_get_info()
        ]
        return webutil.success2json(rows)


class Cluster_storage(ClusterBase):

    def get(self):
        """Expose each cluster's disk_percent as disk_used_per and return all
        clusters as JSON."""
        clusters = self._ss.query(models.Cluster).all()
        for c in clusters:
            c.disk_used_per = c.disk_percent
        return webutil.success2json(clusters)

class Cluster_volume(ClusterBase):

    def get(self):
        """Attach volume usage percentage to each cluster and return them.

        NOTE(review): the `/ 2` presumably compensates for replica double
        counting -- confirm against how volume_used is recorded.
        """
        clusters = self._ss.query(models.Cluster).all()
        for cluster in clusters:
            cluster.volume_total_ = cluster.volume_total
            if cluster.volume_total and int(cluster.volume_total) != 0:
                ratio = conv_float(cluster.volume_used / 2 / cluster.volume_total * 100.0)
                cluster.volume_used_per = "%.2f" % ratio
            else:
                cluster.volume_used_per = "%.2f" % 0
        return webutil.success2json(clusters)


class Cluster_rebuild(ClusterBase):

    def get(self):
        """Annotate each cluster with its chunk-recovery (rebuild) statistics
        copied from the joined Health rows, plus a recovery progress percent."""
        clusters = self._ss.query(models.Cluster).all()
        for cluster in clusters:
            health_list = self._ss.session.query(models.Health).join(
                models.Health.cluster).filter(
                models.Cluster.id == cluster.id).all()
            for health in health_list:
                if not health:
                    continue
                cluster.chunk_recovery_offline = health.chunk_recovery_offline
                cluster.chunk_need_recovery = health.chunk_need_recovery
                cluster.chunk_recovery_success = health.chunk_recovery_success
                cluster.chunk_recovery_lost = health.chunk_recovery_lost
                cluster.chunk_recovery_fail = health.chunk_recovery_fail
                cluster.node_offline = health.node_offline
                cluster.last_scan = health.last_scan
                cluster.chunk_recovery_total = health.chunk_recovery_total
                cluster.disk_offline = health.disk_offline
                # success / total, scaled to 100.
                cluster.progress_per = percent(
                    health.chunk_recovery_success, health.chunk_recovery_total, 100)

        return webutil.success2json(clusters)



class CapacityInfo(ClusterBase):

    def get(self):
        """Per-cluster capacity rows: (name, capacity, used, available)."""
        rows = [
            (c.name, c.capacity, c.used, c.available)
            for c in self._ss.query(models.Cluster).all()
        ]
        return webutil.success2json(rows)


class Capacity_total(ClusterBase):

    def get(self):
        """Per-cluster (name, capacity) pairs."""
        rows = [(c.name, c.capacity)
                for c in self._ss.query(models.Cluster).all()]
        return webutil.success2json(rows)


class Capacity_used(ClusterBase):

    def get(self):
        """Per-cluster (name, used) pairs."""
        rows = [(c.name, c.used)
                for c in self._ss.query(models.Cluster).all()]
        return webutil.success2json(rows)


class Capacity_available(ClusterBase):

    def get(self):
        """Per-cluster (name, available) pairs."""
        rows = [(c.name, c.available)
                for c in self._ss.query(models.Cluster).all()]
        return webutil.success2json(rows)


class ClusterDisk(ClusterBase):

    def get(self):
        """Serialize clusters annotated with disk statistics.

        NOTE(review): cluster_get_disk is not defined in this file --
        presumably inherited from BaseHandler; verify.
        """
        return webutil.success2json(self.cluster_get_disk())


class Cluster_DiskTotal(ClusterBase):

    def get(self):
        """Per-cluster (name, disk_total_gb) pairs.

        NOTE(review): cluster_get_disk comes from outside this file; verify.
        """
        rows = [(c.name, c.disk_total_gb) for c in self.cluster_get_disk()]
        return webutil.success2json(rows)


class Cluster_DiskUsed(ClusterBase):

    def get(self):
        """Per-cluster (name, disk_used_gb) pairs.

        NOTE(review): cluster_get_disk comes from outside this file; verify.
        """
        rows = [(c.name, c.disk_used_gb) for c in self.cluster_get_disk()]
        return webutil.success2json(rows)


class Cluster_DiskAvailable(ClusterBase):

    def get(self):
        """Per-cluster (name, disk_available_gb) pairs.

        NOTE(review): cluster_get_disk comes from outside this file; verify.
        """
        rows = [(c.name, c.disk_available_gb) for c in self.cluster_get_disk()]
        return webutil.success2json(rows)


class Cluster_DiskPercent(ClusterBase):

    def get(self):
        """Per-cluster (name, disk_percent) pairs.

        NOTE(review): cluster_get_disk comes from outside this file; verify.
        """
        rows = [(c.name, c.disk_percent) for c in self.cluster_get_disk()]
        return webutil.success2json(rows)


class HostBase(BaseHandler):
    """Shared helper that decorates Host rows with display-ready strings."""

    def host_get_info(self):
        """Attach usage_cpu_/usage_mem_/mem_total_ display fields to hosts.

        Fixes:
        - usage_mem_ for the missing-data branch was "%.2f%%" % 0 ("0.00%"),
          inconsistent with every other value this method produces (plain
          "0.00", no percent sign).
        - int(host.mem_total) raised TypeError when mem_total was None;
          mem_total_ now falls back to 0.
        """
        hosts = self._sw.query(models.Host).all()
        for host in hosts:
            # cpu_util is stored as a JSON-encoded fraction.
            if host.cpu_util is not None:
                host.usage_cpu_ = "%.2f" % (float(json.loads(host.cpu_util)) * 100)
            else:
                host.usage_cpu_ = '0.00'

            if host.usage_mem is not None:
                host.usage_mem_ = "%.2f" % (float(host.usage_mem) * 100)
            else:
                host.memory_total = 0
                host.usage_mem_ = "%.2f" % 0
            # mem_total is in bytes; yy() scales it (divisor 1024*1024).
            host.mem_total_ = yy(int(host.mem_total), 1024 * 1024) if host.mem_total else 0

            # Unknown hosts report zeroed metrics.
            if host.status is None:
                host.status = 'unknown'
            if host.status == 'unknown':
                host.usage_cpu_ = "0.00"
                host.usage_mem_ = "0.00"
                host.mem_total_ = 0

        return hosts


class HostInfo(HostBase):

    def get(self):
        """Serialize every host with decorated usage fields as JSON."""
        return webutil.success2json(self.host_get_info())


class Host_cpu(HostBase):

    def get(self):
        """Per-host (name, usage_cpu_) pairs."""
        rows = [(h.name, h.usage_cpu_) for h in self.host_get_info()]
        return webutil.success2json(rows)


class Host_mem(HostBase):

    def get(self):
        """Per-host (name, usage_mem_, mem_total_) rows."""
        rows = [(h.name, h.usage_mem_, h.mem_total_)
                for h in self.host_get_info()]
        return webutil.success2json(rows)


class Host_swap(BaseHandler):
    """Reports per-host swap usage."""

    @webutil.authenticated(auth=False)
    def get(self):
        """Attach a usage_swap_ percentage string to every host and return
        the hosts as JSON.

        Fix: the original called an undefined name `xx(...)`, so this view
        raised NameError whenever usage_swap was present. conv_float is the
        numeric-coercion helper this module already uses for the analogous
        memory percentage (Cluster_memory.set_memory) -- TODO confirm `xx`
        was not meant to scale the value (e.g. *100).
        """
        hosts = _sw.query(models.Host).all()
        for host in hosts:
            if host.usage_swap is not None:
                host.usage_swap_ = "%.2f%%" % float(conv_float(host.usage_swap))
            else:
                host.usage_swap_ = "%.2f%%" % 0

        return webutil.success2json(hosts)



class VolumeIopsBase(BaseHandler):
    """Shared helpers that annotate Volume rows with IOPS/bandwidth data."""

    # Fix: this was a dead local variable inside __init__; as a class
    # attribute the accepted query parameters are actually visible to the
    # framework / subclasses.
    _get_params = {
        'optional': [
            'skip',
            'limit',
            'order',
            'desc',
            'start_time',
            'end_time',
        ]
    }

    @webutil.authenticated(auth=False)
    def __init__(self):
        # NOTE(review): deliberately does not call BaseHandler.__init__
        # (matches the original); confirm that is intended.
        pass

    def volume_get_iops(self):
        """Attach the latest IOPS sample to every volume.

        Fix: readbw/writebw/readiops/writeiops were only set by the
        *_bytime variant, so Volume_readbw & friends raised AttributeError
        whenever the request had no time range.
        """
        volumes = _sw.query(models.Volume).all()
        for volume in volumes:
            iops = _sw.get_one(models.VolumeIops, id_or_spec={'volume_id': volume.id})
            iops_ = iops_read = iops_write = swallow_spit = spit = swallow = 0
            if iops:
                iops_, iops_read, iops_write = iops._iops, iops._read, iops._write
                swallow_spit, spit, swallow = iops._in_out, iops._out, iops._in

            volume.IOPS = 'r：%s，w：%s' % (iops_read, iops_write)
            volume.swallow = 'in：%s，out：%s' % (swallow, spit)
            volume.readbw = swallow
            volume.writebw = spit
            volume.readiops = iops_read
            volume.writeiops = iops_write

        return volumes

    def volume_get_iops_bytime(self, start_time, end_time):
        """Attach IOPS time series for created_at in (start_time, end_time)."""
        stmt = text("SELECT * FROM volume_iops "
                        "WHERE created_at>:from_time and created_at<:end_time and volume_id=:volume_id")

        volumes = _sw.query(models.Volume).all()
        for volume in volumes:
            iops = _sw.query(
                    models.VolumeIops.created_at,
                    models.VolumeIops._iops,
                    models.VolumeIops._read,
                    models.VolumeIops._write,
                    models.VolumeIops._in_out,
                    models.VolumeIops._in,
                    models.VolumeIops._out
                    ).from_statement(stmt).params(
                    from_time=start_time,
                    end_time=end_time,
                    volume_id=volume.id
                ).all()
            iops_ = iops_read = iops_write = swallow_spit = spit = swallow = 0

            if iops:
                # Transpose rows to per-column series; index 0 is created_at.
                content = map(list, zip(*iops))
                iops_, iops_read, iops_write = content[1], content[2], content[3]
                swallow_spit, swallow, spit = content[4], content[5], content[6]

            volume.IOPS = 'r：%s，w：%s' % (iops_read, iops_write)
            volume.swallow = 'in：%s，out：%s' % (swallow, spit)
            volume.readbw = swallow
            volume.writebw = spit
            volume.readiops = iops_read
            volume.writeiops = iops_write

        return volumes

    def volume_get_info(self):
        """Return volumes annotated with IOPS info, windowed if the request
        carried both start_time and end_time."""
        x = self.custom_data
        start_time = x.get('start_time')
        end_time = x.get('end_time')

        if start_time is not None and end_time is not None:
            volumes = self.volume_get_iops_bytime(start_time, end_time)
        else:
            volumes = self.volume_get_iops()

        return volumes


class VolumeInfo(VolumeIopsBase):

    def get(self):
        """Serialize every volume annotated with IOPS info as JSON."""
        return webutil.success2json(self.volume_get_info())


class Volume_readbw(VolumeIopsBase):
    def get(self):
        """Per-volume (name, readbw) pairs."""
        rows = [(v.name, v.readbw) for v in self.volume_get_info()]
        return webutil.success2json(rows)


class Volume_writebw(VolumeIopsBase):
    def get(self):
        """Per-volume (name, writebw) pairs."""
        rows = [(v.name, v.writebw) for v in self.volume_get_info()]
        return webutil.success2json(rows)


class Volume_readiops(VolumeIopsBase):
    def get(self):
        """Per-volume (name, readiops) pairs."""
        rows = [(v.name, v.readiops) for v in self.volume_get_info()]
        return webutil.success2json(rows)


class Volume_writeiops(VolumeIopsBase):
    def get(self):
        """Per-volume (name, writeiops) pairs."""
        rows = [(v.name, v.writeiops) for v in self.volume_get_info()]
        return webutil.success2json(rows)


class VolumeLatencyBase(BaseHandler):
    """Shared helpers that annotate Volume rows with latency data."""

    # Fix: this was a dead local variable inside __init__; as a class
    # attribute the accepted query parameters are actually visible to the
    # framework / subclasses.
    _get_params = {
        'optional': [
            'skip',
            'limit',
            'order',
            'desc',
            'start_time',
            'end_time',
        ]
    }

    @webutil.authenticated(auth=False)
    def __init__(self):
        # NOTE(review): deliberately does not call BaseHandler.__init__
        # (matches the original); confirm that is intended.
        pass

    def volume_get_latency(self):
        """Attach the latest latency sample to every volume.

        Fix: _sw.get_one returns a single row (see its uses in
        cluster_set_iops / volume_get_iops), but the old code iterated over
        the result as if it were a list.
        """
        volumes = _sw.query(models.Volume).all()
        for volume in volumes:
            latency = _sw.get_one(models.VolumeLatency, id_or_spec={'volume_id': volume.id})
            if latency:
                volume.read_latency, volume.read_last = latency.read_latency, latency.read_last
                volume.write_latency, volume.write_last_ = latency.write_latency, latency.write_last
                # Alias without the trailing underscore, read by Volume_iolatency.
                volume.write_last = latency.write_last

        return volumes

    def volume_get_latency_bytime(self, start_time, end_time):
        """Attach latency time series for created_at in (start_time, end_time)."""
        stmt = text("SELECT * FROM volume_latency "
                        "WHERE created_at>:from_time and created_at<:end_time and volume_id=:volume_id")

        volumes = _sw.query(models.Volume).all()
        for volume in volumes:
            latency = _sw.query(
                    models.VolumeLatency.created_at,
                    models.VolumeLatency.read_latency,
                    models.VolumeLatency.read_last,
                    models.VolumeLatency.write_latency,
                    models.VolumeLatency.write_last,
                    ).from_statement(stmt).params(
                    from_time=start_time,
                    end_time=end_time,
                    volume_id=volume.id
                ).all()

            if latency:
                # Transpose rows to per-column series; index 0 is created_at.
                content = map(list, zip(*latency))
                volume.read_latency, volume.read_last = content[1], content[2]
                volume.write_latency, volume.write_last_ = content[3], content[4]
                # Alias without the trailing underscore, read by Volume_iolatency.
                volume.write_last = content[4]

        return volumes

    def volume_latency_info(self):
        """Return volumes annotated with latency, windowed if the request
        carried both start_time and end_time."""
        x = self.custom_data
        start_time = x.get('start_time')
        end_time = x.get('end_time')

        if start_time is not None and end_time is not None:
            volumes = self.volume_get_latency_bytime(start_time, end_time)
        else:
            volumes = self.volume_get_latency()

        return volumes


class Volume_iolatency(VolumeLatencyBase):
    def get(self):
        """Per-volume latency rows:
        (name, read_latency, read_last, write_latency, write_last_).

        Fix: the base class stores the write timestamp as `write_last_`
        (trailing underscore, see volume_get_latency[_bytime]); reading
        `write_last` raised AttributeError.
        """
        res = []
        volumes = self.volume_latency_info()
        for vol in volumes:
            res.append((vol.name, vol.read_latency, vol.read_last,
                        vol.write_latency, vol.write_last_))

        return webutil.success2json(res)

# Blueprint wiring: every view above is exposed under /summary/*.
# Fix: template_folder was misspelled 'remplates'.
app = Blueprint('summary', __name__, template_folder='templates')
app.add_url_rule('/summary/cluster', view_func=ClusterInfo.as_view('cluster_info'))

# NOTE(review): endpoint name 'cluater_iops' is a typo, but renaming it would
# break any url_for('summary.cluater_iops') callers -- left as-is.
app.add_url_rule('/summary/iops', view_func=Cluster_iops.as_view('cluater_iops'))
app.add_url_rule('/summary/io', view_func=Cluster_io.as_view('cluster_io'))
app.add_url_rule('/summary/memory', view_func=Cluster_memory.as_view('cluster_memory'))
app.add_url_rule('/summary/cpu', view_func=Cluster_cpu.as_view('cluster_cpu'))
app.add_url_rule('/summary/storage', view_func=Cluster_storage.as_view('cluster_storage'))
app.add_url_rule('/summary/volume', view_func=Cluster_volume.as_view('cluster_volume'))
app.add_url_rule('/summary/rebuild', view_func=Cluster_rebuild.as_view('cluster_rebuild'))

app.add_url_rule('/summary/capacity', view_func=CapacityInfo.as_view('cluster_capacity'))
app.add_url_rule('/summary/capacity/total', view_func=Capacity_total.as_view('cluster_total'))
app.add_url_rule('/summary/capacity/available', view_func=Capacity_available.as_view('cluster_available'))
app.add_url_rule('/summary/capacity/used', view_func=Capacity_used.as_view('cluster_used'))

app.add_url_rule('/summary/cluster/disk', view_func=ClusterDisk.as_view('disk'))
app.add_url_rule('/summary/cluster/disk/total', view_func=Cluster_DiskTotal.as_view('disk_total'))
app.add_url_rule('/summary/cluster/disk/available', view_func=Cluster_DiskAvailable.as_view('disk_available'))
app.add_url_rule('/summary/cluster/disk/used', view_func=Cluster_DiskUsed.as_view('disk_used'))
app.add_url_rule('/summary/cluster/disk/percent', view_func=Cluster_DiskPercent.as_view('disk_percent'))

app.add_url_rule('/summary/host', view_func=HostInfo.as_view('host_info'))
app.add_url_rule('/summary/host/cpu', view_func=Host_cpu.as_view('host_cpu'))
app.add_url_rule('/summary/host/memory', view_func=Host_mem.as_view('host_memory'))
# NOTE(review): Host_swap has no route registered -- intentional? confirm.

# NOTE(review): '/summary/volume' is registered twice (Cluster_volume above
# and VolumeInfo here); only one rule can actually serve the path -- confirm
# which path VolumeInfo was meant to own.
app.add_url_rule('/summary/volume', view_func=VolumeInfo.as_view('volume'))
app.add_url_rule('/summary/volume/read_bw', view_func=Volume_readbw.as_view('volume_read_bw'))
app.add_url_rule('/summary/volume/write_bw', view_func=Volume_writebw.as_view('volume_write_bw'))
app.add_url_rule('/summary/volume/write_iops', view_func=Volume_writeiops.as_view('volume_write_iops'))
app.add_url_rule('/summary/volume/read_iops', view_func=Volume_readiops.as_view('volume_read_iops'))
app.add_url_rule('/summary/volume/io_latency', view_func=Volume_iolatency.as_view('volume_io_latency'))


# TODO: pool-level summary endpoints are not implemented yet:
#   Pool_total, Pool_available, Pool_used,
#   Pool_readbw, Pool_writebw, Pool_readiops, Pool_writeiops,
#   Pool_iolatency, Pool_restructuring

