# coding:utf-8
from flask import Blueprint, render_template, jsonify, abort, request
from flask_login import login_required, current_user
from user.role_check import get_all_cn
from config import mysql_database
from utilities.mysql_helper import getresult
from utilities.paramiko_helper import get_message_from_remote
from datetime import timedelta, datetime
from config import hue_oozie, ppd_data_sync, kettle_crontab_task_command, kettle_log_path, kettle_hdfs_log_path
import os
from collections import OrderedDict
import time, copy

# Shared ECharts line-chart option template.  Endpoints deep-copy it with
# copy.deepcopy before filling in legend/xAxis/series data, so this
# module-level dict itself is never mutated.
option = {
    'grid': {
        'top': '10%',
        'left': '2%',
        'right': '3%',
        'bottom': '0%',
        'containLabel': True
    },
    'legend': {
        'x': 'center',
        'data': [],
    },
    'tooltip': {
        'trigger': 'axis',
    },
    'xAxis': {
        'type': 'category',
        'boundaryGap': False,
        'data': []
    },
    'yAxis': {
        'type': 'value',
        'name': 'Mb/s'  # default unit; overridden per chart ('%', 'GB', 'TB')
    },
    'series': [

    ]
}

# Blueprint holding all data-monitoring routes; registered by the main app.
monitor = Blueprint('monitor', __name__)


@monitor.route('/monitor_oozie')
@login_required
def monitor_oozie():
    """Render the Oozie monitoring page for users holding permission '4.1' (or 'all')."""
    # BUG FIX: the original indexed the function object itself
    # (get_all_cn[...]) instead of calling it, raising TypeError at runtime;
    # every sibling route uses get_all_cn()[...].
    permissions = get_all_cn()[current_user.get_cn()]
    if '4.1' in permissions or 'all' in permissions:
        return render_template('data_monitor/monitor_oozie.html')
    abort(401)


@monitor.route('/monitor_get_latest_error')
@login_required
def monitor_oozie_get_latest_error():
    """Return two 7-day error counters as JSON.

    count1: non-SUCCEEDED/RUNNING Oozie coordinator actions belonging to
        currently RUNNING coordinator jobs.
    count2: total lines across non-SUCCEED ppd-data-sync status files.
    """
    time_now = datetime.now()
    time_pre7 = time_now - timedelta(days=6)
    # Both timestamps are generated server-side (not user input), so string
    # interpolation is injection-safe here.
    sql = 'select count(1) from COORD_ACTIONS where status not in ("SUCCEEDED","RUNNING") and last_modified_time>"%s" and last_modified_time<"%s" and job_id in (select id from COORD_JOBS where status="RUNNING")' % (
        str(time_pre7), str(time_now))
    result = getresult(sql, (), mysql_database['oozie'])
    count1 = 0
    for row in result:
        count1 = row[0]

    time_now = time_now.strftime('%Y%m%d')
    time_pre7 = time_pre7.strftime('%Y%m%d')
    status_files = os.listdir(ppd_data_sync)
    # Robustness: the original raised ValueError when no 'log' entry existed.
    if 'log' in status_files:
        status_files.remove('log')
    count2 = 0
    for f in status_files:
        date = f[f.index('.') + 1:]  # status files are named <STATUS>.<YYYYMMDD>
        if time_pre7 <= date <= time_now:
            if 'SUCCEED' not in f:
                # FIX: close the file handle (the original leaked it).
                with open(os.path.join(ppd_data_sync, f), 'r') as fr:
                    for line in fr:
                        count2 += 1
    return jsonify(data={'count1': count1, 'count2': count2})


@monitor.route('/monitor_oozie_get_tasks')
@login_required
def monitor_oozie_get_tasks():
    """List all RUNNING Oozie coordinator jobs, each with its count of
    non-success actions over the last 7 days, plus the Hue/Oozie URL."""
    now = datetime.now()
    week_ago = now - timedelta(days=6)
    sql = (
        "select a.id,a.app_name,a.app_path,a.created_time,a.end_time,a.frequency,a.last_modified_time,a.next_matd_time,a.user_name,IFNULL(b.count,0) FROM COORD_JOBS as a"
        + " left JOIN (select job_id,count(1) as count from COORD_ACTIONS where status not in ('SUCCEEDED','RUNNING') and  last_modified_time>'%s' and last_modified_time<'%s' GROUP BY job_id ) as b" % (str(week_ago), str(now))
        + " on a.id=b.job_id where a.status='RUNNING'"
    )
    rows = getresult(sql, (), mysql_database['oozie'])
    # Datetime columns (indices 3, 4, 6, 7) are stringified for JSON.
    tasks = [
        (r[0], r[1], r[2], str(r[3]), str(r[4]), r[5], str(r[6]), str(r[7]), r[8], r[9])
        for r in rows
    ]
    return jsonify(data=tasks, oozie_url=hue_oozie)


@monitor.route('/monitor_kettle')
@login_required
def monitor_kettle():
    """Render the Kettle monitoring page; requires permission '4.2' or 'all'."""
    perms = get_all_cn()[current_user.get_cn()]
    if '4.2' not in perms and 'all' not in perms:
        abort(401)
    return render_template('data_monitor/monitor_kettle.html')


@monitor.route('/monitor_ppdsync')
@login_required
def monitor_ppddata():
    """Render the ppd data-sync monitoring page; requires permission '4.3' or 'all'."""
    perms = get_all_cn()[current_user.get_cn()]
    if '4.3' not in perms and 'all' not in perms:
        abort(401)
    return render_template('data_monitor/monitor_ppddata_sync.html')


@monitor.route('/monitor_ppddatasync_get_data')
@login_required
def monitor_ppddata_get_data():
    """Return per-day ppd data-sync failure lines for the last 7 days.

    Reads the status files under ``ppd_data_sync`` (named <STATUS>.<YYYYMMDD>);
    for non-SUCCEED days, every line of the status file is reported with the
    '__' markers stripped.
    """
    time_now = datetime.now()
    time_pre7 = time_now - timedelta(days=6)
    time_now = time_now.strftime('%Y%m%d')
    time_pre7 = time_pre7.strftime('%Y%m%d')
    status_files = os.listdir(ppd_data_sync)
    # Robustness: the original raised ValueError when no 'log' entry existed.
    if 'log' in status_files:
        status_files.remove('log')
    result = OrderedDict()
    for f in status_files:
        date = f[f.index('.') + 1:]  # date suffix of the status file name
        if time_pre7 <= date <= time_now:
            # NOTE(review): a later file with the same date resets this list;
            # assumes at most one status file per day — confirm.
            result[date] = []
            if 'SUCCEED' not in f:
                # FIX: close the file handle (the original leaked it).
                with open(os.path.join(ppd_data_sync, f), 'r') as fr:
                    for line in fr:
                        result[date].append(line.replace('__', '').strip())
    return jsonify(data=result)


@monitor.route('/monitor_kettle_get_tasks')
@login_required
def monitor_kettle_get_tasks():
    """Report, per kettle crontab job and per day (last 7 days), whether a
    local log file exists and which HDFS log paths match the job.

    The original spelled out seven near-identical blocks (one per day); this
    version drives them from a single date list, preserving the original
    command order (today first, then 6..1 days back).
    """
    now = datetime.now()
    # dates[0] is today; dates[1..6] are 6..1 days back — the same order the
    # remote 'ls' commands are issued, so tasks_detail[i + 1] pairs dates[i].
    dates = [now.strftime('%Y%m%d')] + \
            [(now - timedelta(days=d)).strftime('%Y%m%d') for d in range(6, 0, -1)]
    paramiko_command = [kettle_crontab_task_command] + \
                       ['ls ' + kettle_log_path + d for d in dates]
    tasks_detail = get_message_from_remote(paramiko_command)

    result = {}
    hdfs_task_names_commands = []
    # tasks_detail[0] is the crontab listing; extract each kettle job name.
    for line in tasks_detail[0]:
        if line.startswith('/usr/local/data-integration/kitchen.sh'):
            a = line.index('"PPC_DB_JOB')
            b = line.index(' ', a)
            task_name = line[a + 1:b - 1]  # strip the surrounding quotes
            hdfs_task_names_commands.append(
                "hadoop fs -ls " + kettle_hdfs_log_path + task_name.replace('PPC_DB_JOB_', '').lower())
            result[task_name] = {d: [] for d in dates}

    hdfs_tasks_detail = get_message_from_remote(hdfs_task_names_commands)
    # Local log presence: record '<task>.log' for days where it exists, else ''.
    for key in result.keys():
        log_name = key + '.log'
        for i, d in enumerate(dates):
            if log_name in tasks_detail[i + 1]:
                result[key][d].append(log_name)
            else:
                result[key][d].append('')
    # HDFS log presence: match listing paths back to tasks by lowercased name.
    for details in hdfs_tasks_detail:
        for key in result.keys():
            for detail in details:
                if key.replace('PPC_DB_JOB_', '').lower() in detail.split('/'):
                    time_curr = detail.split('/')[-1]
                    if time_curr in result[key]:
                        result[key][time_curr].append(detail)
    return jsonify(data=result)


@monitor.route('/monitor_kettle_get_tasks_log')
@login_required
def monitor_kettle_get_tasks_log():
    """Return the content of one kettle log file, identified by its path
    relative to ``kettle_log_path``."""
    file_path = request.args['path']
    # SECURITY FIX: 'path' is user-controlled and was concatenated straight
    # into a remote shell command; reject directory traversal and shell
    # metacharacters before building the command.
    if '..' in file_path or any(c in file_path for c in ';|&$`<>\'"\\ \n'):
        abort(400)
    result = get_message_from_remote('cat ' + kettle_log_path + file_path)
    return jsonify(data=result)


@monitor.route('/get_kettle_task_consist')
@login_required
def get_kettle_task_consist():
    """Compare per-table row counts between source (MySQL) and Hive for each
    day in [start, end] (query args, default: yesterday) and tag each row
    OK / ERROR / EXCEPTION."""
    yesterday = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
    start_date = request.args.get('start', yesterday)
    end_date = request.args.get('end', yesterday)
    # SECURITY FIX: start/end come from the query string; pass them as bind
    # parameters instead of interpolating them into the SQL text.
    sql_select = 'select b.date,b.table_name,a.hive_count,a.mysql_count,case 1 WHEN hive_count is null or mysql_count is null THEN "EXCEPTION" WHEN hive_count=mysql_count then "OK" when hive_count!=mysql_count then "ERROR" end from (select a.date,b.table_name from dim_date as a INNER JOIN  kettle_sync_log_table as b where a.date<=%s and a.date>=%s ORDER BY a.date desc) as b LEFT JOIN kettle_sync_log_table_count as a on a.count_source_date=b.date and a.table_name=b.table_name'
    result_counts = getresult(sql_select, (end_date, start_date), mysql_database['portal'])
    result = {'columns': [{'title': 'Date'}, {'title': 'Table Name'}, {'title': 'Source Count'},
                          {'title': 'Hive Count'}, {'title': 'Status'}], 'dataset': result_counts}
    return jsonify(data=result)


@monitor.route('/monitor_eagle')
@login_required
def monitor_eagle():
    """Render the Eagle monitoring page.

    NOTE(review): checks permission '4.3', the same code as /monitor_ppdsync —
    possibly a copy-paste; confirm whether Eagle should have its own code.
    """
    perms = get_all_cn()[current_user.get_cn()]
    if '4.3' not in perms and 'all' not in perms:
        abort(401)
    return render_template('data_monitor/monitor_eagle.html')


@monitor.route('/monitor_cloudera')
@login_required
def monitor_cloudera():
    """Render the Cloudera cluster-metrics page (no extra permission check)."""
    return render_template('data_monitor/monitor_cloudera.html')


@monitor.route('/get_monitor_eagle_type')
@login_required
def get_monitor_eagle_type():
    """Return per-application Eagle alert counts for the last 24 hours."""
    one_day_ms = 24 * 60 * 60 * 1000
    since_ms = int(time.time()) * 1000 - one_day_ms  # cutoff in epoch millis
    sql = 'select a.application,IFNULL(b.count,0) from alertexecutor_alertexecutor as a LEFT JOIN (select application,count(1) as count from alertdetail_hadoop where timestamp>%d GROUP BY application) as b on a.application=b.application' % since_ms
    return jsonify(data=getresult(sql, (), mysql_database['eagle']))


@monitor.route('/get_monitor_eagle_detail')
@login_required
def get_monitor_eagle_detail():
    """Return last-24h Eagle alert details for one application ('type' arg),
    newest first, shaped for a DataTables-style client."""
    from utilities.time_format import timestamp_datetime
    import json
    task_type = request.args['type']
    timestamp = int(time.time()) * 1000 - 24 * 60 * 60 * 1000
    # SECURITY FIX: 'type' is user-controlled; use bind parameters instead of
    # interpolating it into the SQL text.
    sql = 'select alertContext,TIMESTAMP from alertdetail_hadoop where application=%s and timestamp>=%s ORDER by timestamp desc '
    result = getresult(sql, (task_type, timestamp), mysql_database['eagle'])
    dataset = []
    for row in result:
        jr = json.loads(row[0])  # alertContext is a JSON blob
        dataset.append(
            (timestamp_datetime(row[1] / 1000.0), jr['user'], jr['policyId'], jr['alertMessage'], jr['alertDetailUrl']))
    columns = ['alertTimestamp', 'user', 'policyId', 'alertMessage', 'detail']
    data = {'columns': [{'title': item} for item in columns], 'dataset': dataset}
    return jsonify(data=data)


@monitor.route('/get_monitor_cloudera_data')
@login_required
def get_monitor_cloudera_data():
    """Build six ECharts option payloads (CPU %, disk IO, network IO, HDFS IO,
    YARN memory, DFS capacity) from Cloudera Manager time-series data.

    Query arg:
        time_zone: window size in minutes; >= 1440 switches the x-axis label
            format to include month/day.

    Returns JSON with one deep-copied `option` object per chart.
    """
    from cm_api.api_client import ApiResource, timeseries
    time_zone = request.args['time_zone']
    # NOTE(review): CM host and admin credentials are hard-coded here — move
    # them into config.
    cm_host = "192.168.213.23"
    api = ApiResource(cm_host, server_port=7180, username="admin", password="ppc@hadoop", version=12)
    # Each chart gets its own deep copy of the shared module-level template.
    cpu_option = copy.deepcopy(option)
    cpu_option['yAxis']['name'] = '%'
    diskio_option = copy.deepcopy(option)
    netio_option = copy.deepcopy(option)
    hdfsio_option = copy.deepcopy(option)
    memory_option = copy.deepcopy(option)
    memory_option['yAxis']['name'] = 'GB'
    dfs_option = copy.deepcopy(option)
    dfs_option['yAxis']['name'] = 'TB'
    # Windows of a day or more include the date in the axis labels.
    if int(time_zone) >= 1440:
        time_format = '%m-%d %H:%M'
    else:
        time_format = '%H:%M'
    from_time = datetime.fromtimestamp(time.time() - int(time_zone) * 60)
    to_time = datetime.fromtimestamp(time.time())
    # One tsquery string with six ';'-separated statements, matched below by
    # metricName inside the result loop.
    query = "select cpu_percent_across_hosts where category = CLUSTER;select stats(read_bytes_rate_across_disks, total), stats(write_bytes_rate_across_disks, total) where category = CLUSTER;select stats(bytes_receive_rate_across_network_interfaces, total), stats(bytes_transmit_rate_across_network_interfaces, total) where category = CLUSTER;select stats(bytes_read_rate_across_datanodes, total), stats(bytes_written_rate_across_datanodes, total) where category = SERVICE and serviceType = HDFS;select dfs_capacity, dfs_capacity_used, dfs_capacity_used_non_hdfs where entityName=hdfs:nameservice1;SELECT allocated_memory_mb_cumulative, available_memory_mb where category=YARN_POOL and serviceName=yarn and queueName=root"
    result = api.query_timeseries(query, from_time, to_time)
    # Deduplication flag for non-'HDFS nameservice1' HDFS series: 0 = no HDFS
    # read series taken yet, 1 = read taken (write may follow), 2 = both taken.
    hdfs_flag = 0
    for ts_list in result:
        for ts in ts_list.timeSeries:
            # print "--- %s: %s ---" % (ts.metadata.entityName, ts.metadata.metricName)
            if ts.metadata.metricName == 'cpu_percent_across_hosts':
                cpu_data = {
                    'name': u'整个集群主机CPU使用率',
                    'type': 'line',
                    'smooth': True,
                    'data': []
                }
                cpu_data_max = {
                    'name': u'使用率最高',
                    'type': 'line',
                    'smooth': True,
                    'data': []
                }
                cpu_data_min = {
                    'name': u'使用率最低',
                    'type': 'line',
                    'smooth': True,
                    'data': []
                }
                for point in ts.data:
                    # Shift timestamps by +8h for display — assumes the API
                    # returns UTC and the audience is UTC+8; confirm.
                    time_point = (point.timestamp + timedelta(hours=8)).strftime(time_format)
                    cpu_option['xAxis']['data'].append(time_point)
                    cpu_data['data'].append('%.2f' % point.value)
                    cpu_data_max['data'].append('%.2f' % point.aggregateStatistics.max)
                    cpu_data_min['data'].append('%.2f' % point.aggregateStatistics.min)
                cpu_option['series'].append(cpu_data)
                cpu_option['series'].append(cpu_data_max)
                cpu_option['series'].append(cpu_data_min)
                cpu_option['legend']['data'].append(cpu_data['name'])
                cpu_option['legend']['data'].append(cpu_data_max['name'])
                cpu_option['legend']['data'].append(cpu_data_min['name'])
            elif ts.metadata.metricName == 'stats(read_bytes_rate_across_disks, total)':
                # Disk read series also supplies the x-axis labels for the
                # disk chart; the write branch below reuses them.
                disk_data = {
                    'name': u'集群磁盘读取',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                for point in ts.data:
                    time_point = (point.timestamp + timedelta(hours=8)).strftime(time_format)
                    diskio_option['xAxis']['data'].append(time_point)
                    disk_data['data'].append(point.value / 1024.0 / 1024)  # bytes/s -> MB/s
                diskio_option['series'].append(disk_data)
                diskio_option['legend']['data'].append(u'集群磁盘读取')
            elif ts.metadata.metricName == 'stats(write_bytes_rate_across_disks, total)':
                disk_data = {
                    'name': u'集群磁盘写入',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                for point in ts.data:
                    disk_data['data'].append(point.value / 1024.0 / 1024)
                diskio_option['series'].append(disk_data)
                diskio_option['legend']['data'].append(u'集群磁盘写入')
            elif ts.metadata.metricName == 'stats(bytes_receive_rate_across_network_interfaces, total)':
                netio_data = {
                    'name': u'集群网络接口接收',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                for point in ts.data:
                    time_point = (point.timestamp + timedelta(hours=8)).strftime(time_format)
                    netio_option['xAxis']['data'].append(time_point)
                    netio_data['data'].append(point.value / 1024.0 / 1024)
                netio_option['series'].append(netio_data)
                netio_option['legend']['data'].append(u'集群网络接口接收')
            elif ts.metadata.metricName == 'stats(bytes_transmit_rate_across_network_interfaces, total)':
                netio_data = {
                    'name': u'集群网络接口传送',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                for point in ts.data:
                    netio_data['data'].append(point.value / 1024.0 / 1024)
                netio_option['series'].append(netio_data)
                netio_option['legend']['data'].append(u'集群网络接口传送')
            elif ts.metadata.metricName == 'stats(bytes_read_rate_across_datanodes, total)':
                hdfs_data = {
                    'name': '',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                if ts.metadata.entityName == 'HDFS nameservice1':
                    hdfs_data['name'] = u'HDFS nameservice1 读取'
                    for point in ts.data:
                        # Cap the x-axis at 30 labels — presumably the API
                        # returns 30 samples per series; confirm.
                        if len(hdfsio_option['xAxis']['data']) != 30:
                            time_point = (point.timestamp + timedelta(hours=8)).strftime(time_format)
                            hdfsio_option['xAxis']['data'].append(time_point)
                        hdfs_data['data'].append(point.value / 1024.0 / 1024)
                    hdfsio_option['series'].append(hdfs_data)
                    hdfsio_option['legend']['data'].append(u'HDFS nameservice1 读取')
                else:
                    # Only the first non-nameservice1 HDFS read series is kept.
                    if hdfs_flag == 0:
                        hdfs_data['name'] = u'HDFS 读取'
                        hdfs_flag += 1
                        for point in ts.data:
                            if len(hdfsio_option['xAxis']['data']) != 30:
                                time_point = (point.timestamp + timedelta(hours=8)).strftime(time_format)
                                hdfsio_option['xAxis']['data'].append(time_point)
                            hdfs_data['data'].append(point.value / 1024.0 / 1024)
                        hdfsio_option['series'].append(hdfs_data)
                        hdfsio_option['legend']['data'].append(u'HDFS 读取')
            elif ts.metadata.metricName == 'stats(bytes_written_rate_across_datanodes, total)':
                hdfs_data = {
                    'name': '',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                if ts.metadata.entityName == 'HDFS nameservice1':
                    hdfs_data['name'] = u'HDFS nameservice1 写入'
                    for point in ts.data:
                        hdfs_data['data'].append(point.value / 1024.0 / 1024)
                    hdfsio_option['series'].append(hdfs_data)
                    hdfsio_option['legend']['data'].append(u'HDFS nameservice1 写入')
                else:
                    # Write series is only taken after the read series (flag==1).
                    if hdfs_flag == 1:
                        hdfs_data['name'] = u'HDFS 写入'
                        hdfs_flag += 1
                        for point in ts.data:
                            hdfs_data['data'].append(point.value / 1024.0 / 1024)
                        hdfsio_option['series'].append(hdfs_data)
                        hdfsio_option['legend']['data'].append(u'HDFS 写入')
            elif ts.metadata.metricName == 'allocated_memory_mb_cumulative':
                memory_data = {
                    'name': u'已分配的内存',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                for point in ts.data:
                    time_point = (point.timestamp + timedelta(hours=8)).strftime(time_format)
                    memory_option['xAxis']['data'].append(time_point)
                    memory_data['data'].append(point.value / 1024)  # MB -> GB
                memory_option['series'].append(memory_data)
                memory_option['legend']['data'].append(u'已分配的内存')
            elif ts.metadata.metricName == 'available_memory_mb':
                memory_data = {
                    'name': u'可用内存',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                for point in ts.data:
                    time_point = (point.timestamp + timedelta(hours=8)).strftime(time_format)
                    memory_option['xAxis']['data'].append(time_point)
                    memory_data['data'].append(point.value / 1024)
                memory_option['series'].append(memory_data)
                memory_option['legend']['data'].append(u'可用内存')
            elif ts.metadata.metricName == 'dfs_capacity':
                hdfs_data = {
                    'name': u'配置的容量',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                for point in ts.data:
                    time_point = (point.timestamp + timedelta(hours=8)).strftime(time_format)
                    dfs_option['xAxis']['data'].append(time_point)
                    hdfs_data['data'].append(point.value / 1024 / 1024 / 1024 / 1024)  # bytes -> TB
                dfs_option['series'].append(hdfs_data)
                dfs_option['legend']['data'].append(u'配置的容量')
            elif ts.metadata.metricName == 'dfs_capacity_used':
                hdfs_data = {
                    'name': u'使用的HDFS',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                for point in ts.data:
                    time_point = (point.timestamp + timedelta(hours=8)).strftime(time_format)
                    dfs_option['xAxis']['data'].append(time_point)
                    hdfs_data['data'].append(point.value / 1024 / 1024 / 1024 / 1024)
                dfs_option['series'].append(hdfs_data)
                dfs_option['legend']['data'].append(u'使用的HDFS')
            elif ts.metadata.metricName == 'dfs_capacity_used_non_hdfs':
                hdfs_data = {
                    'name': u'使用的非HDFS',
                    'type': 'line',
                    'smooth': False,
                    'data': []
                }
                for point in ts.data:
                    time_point = (point.timestamp + timedelta(hours=8)).strftime(time_format)
                    dfs_option['xAxis']['data'].append(time_point)
                    hdfs_data['data'].append(point.value / 1024 / 1024 / 1024 / 1024)
                dfs_option['series'].append(hdfs_data)
                dfs_option['legend']['data'].append(u'使用的非HDFS')
    # print memory_option
    # print dfs_option
    # Fallback: if the nameservice1 read/write series were missing, pad the
    # HDFS IO chart with 30 zero points so the front end still renders.
    if len(hdfsio_option['series']) != 4:
        hdfs_data = {
            'name': u'HDFS nameservice1 读取',
            'type': 'line',
            'smooth': False,
            'data': [0 for i in range(30)]
        }
        hdfs_data1 = {
            'name': u'HDFS nameservice1 写入',
            'type': 'line',
            'smooth': False,
            'data': [0 for i in range(30)]
        }
        hdfsio_option['series'].append(hdfs_data)
        hdfsio_option['series'].append(hdfs_data1)
        hdfsio_option['legend']['data'].append(u'HDFS nameservice1 读取')
        hdfsio_option['legend']['data'].append(u'HDFS nameservice1 写入')
    return jsonify(cpu_option=cpu_option, diskio_option=diskio_option, netio_option=netio_option,
                   hdfsio_option=hdfsio_option, memory_option=memory_option, dfs_option=dfs_option)


@monitor.route('/monitor_cloudera_services')
@login_required
def monitor_cloudera_services():
    """Render the Cloudera service-status page (no extra permission check)."""
    return render_template('data_monitor/monitor_cloudera_services.html')


@monitor.route('/monitor_cloudera_services_data')
@login_required
def monitor_cloudera_services_data():
    """Count role entity statuses per role type for the main CDH5 cluster's
    YARN/HBase/HDFS/Hive/Kafka/ZooKeeper/Oozie services.

    Returns one JSON object per service mapping role type ->
    {entityStatus: count}.
    """
    from cm_api.api_client import ApiResource

    cm_host = "172.19.100.12"
    # NOTE(review): credentials are hard-coded; move them into config.
    api = ApiResource(cm_host, server_port=7180, username="user", password="user", version=12)

    hbase_ = {'MASTER': {}, 'REGIONSERVER': {}}
    yarn_ = {'NODEMANAGER': {}, 'RESOURCEMANAGER': {}}
    hdfs_ = {'NAMENODE': {}, 'DATANODE': {}}
    hive_ = {'HIVEMETASTORE': {}, 'HIVESERVER2': {}}
    kafka_ = {'KAFKA_BROKER': {}}
    zk_ = {'SERVER': {}}
    oozie_ = {'OOZIE_SERVER': {}}
    # Map service type -> its role-type buckets; replaces the original seven
    # copy-pasted elif branches with one generic counting loop.
    buckets_by_service = {'YARN': yarn_, 'HBASE': hbase_, 'HDFS': hdfs_, 'HIVE': hive_,
                          'KAFKA': kafka_, 'ZOOKEEPER': zk_, 'OOZIE': oozie_}
    for c in api.get_all_clusters():
        if c.version != "CDH5":
            continue
        for s in c.get_all_services():
            buckets = buckets_by_service.get(s.type)
            if buckets is None:
                continue  # service type we don't report on
            for r in s.get_all_roles():
                if r.type in buckets:
                    counts = buckets[r.type]
                    counts[r.entityStatus] = counts.get(r.entityStatus, 0) + 1
    return jsonify(hive_=hive_, hdfs_=hdfs_, hbase_=hbase_, zk_=zk_, kafka_=kafka_, yarn_=yarn_, oozie_=oozie_)


@monitor.route('/monitor_streaming')
@login_required
def monitor_streaming():
    """Render the streaming-applications monitoring page."""
    return render_template('data_monitor/monitor_streaming.html')


@monitor.route('/monitor_streaming_result')
@login_required
def monitor_streaming_result():
    """List YARN applications from the 'streaming' pool seen in the last
    minute, keyed by application name (first occurrence wins)."""
    from cm_api.api_client import ApiResource

    cm_host = "172.19.100.12"
    api = ApiResource(cm_host, server_port=7180, username="user", password="user", version=12)
    yarn = api.get_cluster('Cluster 1').get_service('yarn')
    apps = yarn.get_yarn_applications(start_time=(datetime.now() - timedelta(minutes=1)), end_time=datetime.now(),
                                      filter_str='pool=streaming')
    application_list = {}
    for app in apps.applications:
        if app.name not in application_list:
            application_list[app.name] = [app.startTime.strftime('%Y-%m-%d %H:%M'),
                                          app.applicationId, app.state]
    return jsonify(list=application_list)
