#!/usr/bin/env python2
# -*- coding: utf-8 -*-

import os
import web
import json
import csv
from datetime import datetime, timedelta

from Umpweb.base import render_jinja
from Umpweb.db import api as db_api
from Umpweb.common.utils import login_required, conv_float, yy, byte2GB 
from Umpweb.common import utils, log
from Umpweb.webapps import base
from Umpweb import defs



# Module-level logger for the summary web application.
LOG = log.get_log('Umpweb.webapps.summary')


# URL routing table for this sub-application: path -> handler class name.
# NOTE(review): 'HealthSummary' is routed but no class of that name is
# defined in this file -- confirm it is provided elsewhere, otherwise the
# '/health' route will fail at dispatch time.
urls = (
        '/fusionstor',              'Summary',
        '/fusionnas',		    'FusionNas',
        '/iops',                    'IopsSummary',
        '/iopss',                   'IopssSummary',
        '/io',                      'IoSummary',
        '/memory',                  'MemorySummary',
        '/cpu',                     'CpuSummary',
        '/storage',                 'StorageSummary',
        '/volume',                  'VolumeSummary',
        '/health',                  'HealthSummary',
        '/rebuild',                 'RebuildSummary',
        '/iops_export',             'IopsExport',

)

# web.py sub-application serving the routes above; handlers resolved
# from this module's namespace via locals().
app = web.application(urls, locals())
# Jinja renderer rooted at the summary template directory.
render = render_jinja('static/templates/summary', encoding='utf-8',)

# NOTE(review): base_cache is never read or written in this file --
# possibly dead code or used by templates; confirm before removing.
base_cache = {'value':None}

class Base(base.ClusterLicenseBase):
    """Shared context builder for the summary pages.

    Gathers cluster, host, folder, disk, volume and user information from
    the database into a single dict (``self.value``) that the summary
    handlers pass to their templates.
    """

    def __init__(self):
        # Built once per request; handlers render templates from it.
        self.value = self.get_values()

    def get_values(self):
        """Build and return the template context dict for the summary pages."""
        # LICENSE STAT -- cluster record decorated with license information.
        # (The previous code fetched cluster_get(1) first, but that result
        # was immediately overwritten here, so the extra query is removed.)
        cluster = self.cluster_license()

        hosts = db_api.host_get_all()
        users = db_api.user_get_all()
        alerts_num = db_api.alert_count_nothandled()

        if cluster.mem_total:
            cluster.memory_total_show = int(cluster.mem_total)

        # A host is "failed" when it is not running, or (on non-FusionNAS
        # deployments) its lichd service is not running.
        failhosts = [x for x in hosts if x.status != 'running' or (x.lichd_status != 'running' and not defs.is_fusionnas)]
        normalhosts = [x for x in hosts if x not in failhosts]

        folders = db_api.folder_get_all()
        failfolders = [x for x in folders if x.status != 'normal']
        normalfolders = [x for x in folders if x.status == 'normal']

        # Disks that have not joined the cluster (is_join=False).
        disks = db_api.disk_get_with_isjoin(False)
        faildisks = [x for x in disks if x.stat == 'Failed']
        normaldisks = [x for x in disks if x.stat != 'Failed']

        # Total disk capacity in GB and the share of it used by volumes.
        disk_total_ = conv_float(byte2GB(cluster.disk_total))
        cluster.disk_total_ = disk_total_
        cluster.volume_used_per = "%.2f" % 0
        if cluster.disk_total and int(cluster.disk_total) != 0:
            cluster.volume_used_per = "%.2f" % conv_float(cluster.volume_used / disk_total_ * 100.0)

        volumes = db_api.volume_get_all()
        iscis_volume_num = len([x for x in volumes if x.protocol == 'iscsi'])
        nbd_volume_num = len([x for x in volumes if x.protocol == 'nbd'])
        abnormal_volume_num = len([x for x in volumes if x.status == 'fail'])

        # Volume usage percentage; the "/ 2" presumably compensates for a
        # 2x replica factor -- TODO confirm against the storage layer.
        _volume_total = cluster.volume_total
        volume_used_per = "%.2f" % 0
        if _volume_total and int(_volume_total) != 0:
            volume_used_per = "%.2f" % conv_float(cluster.volume_used / 2 / _volume_total * 100.0)

        cluster.usage_memory_show = '0.0'
        if cluster.usage_mem:
            cluster.usage_memory_show = "%.2f" % conv_float(cluster.usage_mem * 100.0)

        value = {
            'db_api': db_api,
            'cluster': cluster,
            'alerts_num': alerts_num,
            'users': users,
            'hosts': hosts,
            'folders': folders,
            'disks': disks,
            'failfolders': failfolders,
            'normalfolders': normalfolders,
            'faildisks': faildisks,
            'normaldisks': normaldisks,
            'failhosts': failhosts,
            'normalhosts': normalhosts,
            'iscis_volume_num': iscis_volume_num,
            'nbd_volume_num': nbd_volume_num,
            'abnormal_volume_num': abnormal_volume_num,
            'volume_used_per': volume_used_per,
            # Reuse the user list fetched above instead of issuing a
            # second identical user_get_all() query.
            'users_number': users,
        }
        return value

class Summary(Base):
    """Render the FusionStor cluster summary page."""

    @login_required
    def GET(self):
        """Serve the fusionstor summary template as plain text."""
        web.header("Content-Type", "text/plain")
        context = self.value
        return render.fusionstor_summary(**context)

class FusionNas(Base):
    """Render the FusionNAS cluster summary page.

    Decorates the cluster with the three busiest hosts by CPU and by
    memory utilisation before rendering the template.
    """

    @login_required
    def GET(self):
        web.header("Content-Type", "text/plain")
        cluster = self.value['cluster']
        hosts = self.value['hosts']

        # Top-3 hosts by CPU utilisation, highest first (single descending
        # sort replaces the previous sort-then-reverse, matching the style
        # used by MemorySummary).
        cluster.cpu_top3_hosts = sorted(hosts, key=lambda h: h.cpu_util,
                                        reverse=True)[:3]

        # Top-3 hosts by memory utilisation, with a display-ready
        # percentage string attached to each.
        memory_top3_hosts = sorted(hosts, key=lambda h: h.usage_mem,
                                   reverse=True)[:3]
        for host in memory_top3_hosts:
            host.usage_mem_show = "%.0f%%" % (float(host.usage_mem) * 100)
        cluster.memory_top3_hosts = memory_top3_hosts

        return render.fusionnas_summary(**self.value)

class IOBase(object):
    """Mixin that decorates cluster records with IO statistics."""

    def cluster_set_iops(self):
        """Attach day-granularity IOPS, throughput and latency figures.

        Returns all cluster records, each annotated in place when the
        corresponding statistics rows exist.
        """
        clusters = db_api.cluster_get_all()
        for cluster in clusters:
            iops = db_api.iops_get_with_cluster_status(cluster.id, 'day')
            if iops:
                # IOPS counters and their time-series data (explicit
                # per-attribute copies instead of a mass tuple unpack).
                cluster.iops = iops._iops
                cluster.iops_data = iops._iops_data
                cluster.iops_read = iops._iops_read
                cluster.read_data = iops._read_data
                cluster.iops_write = iops._iops_write
                cluster.write_data = iops._write_data
                # Throughput counters: swallow (in) / spit (out).
                cluster.swallow_spit = iops._swallow_spit
                cluster.swallow_spit_data = iops._swallow_spit_data
                cluster.iops_spit = iops._spit
                cluster.spit_data = iops._spit_data
                cluster.iops_swallow = iops._swallow
                cluster.swallow_data = iops._swallow_data
            latency = db_api.latency_get_with_cluster(cluster.id)
            if latency:
                cluster.latency = 0
                cluster.read_latency = latency.read_latency_val
                cluster.write_latency = latency.write_latency_val
        return clusters


class IopsSummary(IOBase):
    """Render the IOPS summary fragment."""

    # NOTE(review): unlike Summary/FusionNas this endpoint carries no
    # @login_required guard -- confirm whether that is intentional.
    def GET(self):
        web.header("Content-Type", "text/plain")
        annotated = self.cluster_set_iops()
        return render.iops_summary(clusters=annotated)
    
class IopssSummary(IOBase):
    """Render the alternate IOPS summary fragment."""

    def GET(self):
        web.header("Content-Type", "text/plain")
        annotated = self.cluster_set_iops()
        return render.iopss_summary(clusters=annotated)
    

class IoSummary(IOBase):
    """Render the IO throughput summary fragment."""

    def GET(self):
        web.header("Content-Type", "text/plain")
        annotated = self.cluster_set_iops()
        return render.io_summary(clusters=annotated)


class IopsExport(object):
    """Export IOPS statistics for cluster 1 as a downloadable CSV."""

    def __init__(self):
        # Directory of this module; the temporary CSV is written here.
        self.abs_path = os.path.dirname(os.path.realpath(__file__))

    @login_required
    def GET(self):
        """Stream a CSV of IOPS/read/write/throughput records.

        The ``mode`` query parameter selects the statistics granularity
        (e.g. 'day') and doubles as the download file name.
        """
        x = web.input()
        # mode comes from the request; basename() strips any path
        # components so the value cannot escape self.abs_path.
        mode = os.path.basename(x.get('mode') or '')
        LOG.info('%s' % mode)
        web.header("Content-Type", "text/plain")
        iops = db_api.iops_get_with_cluster_status(1, mode)
        if iops:
            # Timestamps are in milliseconds; the -8h shift converts the
            # stored values for display -- presumably UTC+8 data, confirm.
            time_record = [(datetime.fromtimestamp(item[0] / 1000) - timedelta(hours=8)).strftime('%Y年%m月%d日 %H:%M')
                           for item in iops._iops_data]
            iops_record = [item[1] for item in iops._iops_data]
            read_record = [item[1] for item in iops._read_data]
            write_record = [item[1] for item in iops._write_data]
            swallow_spit_record = [item[1] for item in iops._swallow_spit_data]
            swallow_record = [item[1] for item in iops._swallow_data]
            spit_record = [item[1] for item in iops._spit_data]

            export_file = os.path.join(self.abs_path, '%s.csv' % mode)
            LOG.info('%s' % mode)
            with open(export_file, 'w+') as csv_file:
                writer = csv.writer(csv_file)
                writer.writerow(('时间', 'iops', 'read', 'write', '吞吐量', 'in', 'out'))
                # The series may differ in length; export complete rows only.
                record_num = min(len(iops_record), len(read_record), len(write_record),
                                 len(swallow_spit_record), len(swallow_record), len(spit_record))
                for i in range(record_num):
                    writer.writerow((time_record[i], iops_record[i], read_record[i], write_record[i],
                                     swallow_spit_record[i], swallow_record[i], spit_record[i]))

            # Pre-bind f so the finally clause is safe even if open() fails
            # (previously a failed open raised NameError inside finally).
            f = None
            try:
                f = open(export_file, "rb")
                web.header('Content-Type', 'application/octet-stream')
                web.header('Content-disposition', 'attachment; filename=%s.csv' % mode)
                # Stream in bounded chunks instead of reading the whole
                # file into memory at once.
                while True:
                    c = f.read(65536)
                    if c:
                        yield c
                    else:
                        break
            except Exception as e:
                LOG.info('%s' % (e))
                yield 'Error'
            finally:
                if f:
                    f.close()

            # Delete the temp file without a shell: the previous
            # os.system('rm -rf %s') was injectable via mode.
            try:
                os.remove(export_file)
            except OSError:
                pass


class MemorySummary():
    """Render the memory summary fragment and answer usage refreshes."""

    def GET(self):
        """Render the memory summary, decorated with the top-3 CPU hosts."""
        cluster = db_api.cluster_get(1)
        hosts = db_api.host_get_all()
        # Three busiest hosts by CPU usage, highest first.
        busiest = sorted(hosts, key=lambda h: float(h.usage_cpu), reverse=True)
        cluster.cpu_top3_hosts = busiest[:3]
        return render.memory_summary(cluster=cluster)

    def POST(self):
        """Return the current memory-usage display string."""
        # NOTE(review): set_memory is not defined anywhere in this file;
        # presumably inherited/monkey-patched elsewhere -- confirm,
        # otherwise this raises AttributeError at runtime.
        cluster = db_api.cluster_get(1)
        cluster = self.set_memory(cluster)
        return cluster.usage_memory_show


class CpuSummary():
    """Render the CPU summary fragment."""

    def GET(self):
        return render.cpu_summary(cluster=db_api.cluster_get(1))


class StorageSummary():
    """Render the storage summary fragment."""

    def GET(self):
        cluster = db_api.cluster_get(1)
        # Expose the stored percentage under the name the template expects.
        cluster.disk_used_per = cluster.disk_percent
        return render.storage_summary(cluster=cluster)


class VolumeSummary(IOBase):
    """Render the volume summary fragment."""

    def GET(self):
        cluster = db_api.cluster_get(1)
        cluster.volume_total_ = cluster.volume_total

        # Default to 0.00% when the total is missing or zero.
        used_per = "%.2f" % 0
        total = cluster.volume_total
        if total and int(total) != 0:
            # The "/ 2" mirrors Base.get_values(); presumably compensates
            # for a 2x replica factor -- TODO confirm.
            used_per = "%.2f" % conv_float(cluster.volume_used / 2 / total * 100.0)
        cluster.volume_used_per = used_per
        return render.volume_summary(cluster=cluster)


class RebuildSummary():
    # Renders the data-rebuild (chunk recovery) progress summary page.

    def GET(self):
        cluster = db_api.cluster_get(1)
        if cluster.mem_total:
            cluster.memory_total_show = int(cluster.mem_total)

        # Latest health record for the cluster; fall back to a zeroed
        # Struct when no health rows exist yet.
        health_list = db_api.health_get_with_cluster(cluster.id)
        values = {'chunk_recovery_success':0, 'chunk_recovery_total':0, 'last_scan':'0'}
        health = health_list[0] if health_list else db_api.Struct(values)
        # Recovery progress via utils.percent(success, total, 100);
        # presumably a percentage -- confirm utils.percent semantics.
        progress_per = utils.percent(health.chunk_recovery_success, health.chunk_recovery_total, 100)
        cluster.health = health

        # NOTE(review): cluster.health was just assigned above, so this
        # branch only runs when `health` itself is falsy (depends on
        # db_api.Struct / row truthiness) -- looks like dead or inverted
        # logic; confirm before changing.
        if not cluster.health :
            cluster.health['progress_per'] = progress_per 
        else:
            cluster.health.progress_per = progress_per
        users = db_api.user_get_all()
        return render.rebuild_summary(cluster=cluster, users=users)
        
