import json

import requests
from flask import jsonify

from blues.mon import bp
from blues.cluster.cluster_base import clusterPromQuery

from public.time_utils import get_week, get_date_from_week_num, get_last_seven_days
from config import UA

from model.cluster import Cluster, ClusterRelatedInstance
from model.weekly_report import WeeklyReport, WeeklyReportService


@bp.route('/api/v1/mon/weekly', methods=['GET'])
def api_weekly_all_nodes():
    """Return all cluster/node weekly reports, enriched with cluster name and date range.

    Response JSON: {"code": 200, "data": [...]} on success,
                   {"code": 500, "message": "..."} on any failure.
    """
    w = WeeklyReport()
    c = Cluster()
    try:
        reports = w.find_all()  # renamed from `list` to stop shadowing the builtin
        cluster_d = c.get_cluster_dict()
        for report in reports:
            # A cluster_id missing from the mapping raises KeyError and is
            # surfaced as a 500 below (original behavior preserved).
            report["cluster"] = cluster_d[report["cluster_id"]]
            report["date_range"] = get_date_from_week_num(report['year'], report['week'])
        return jsonify({"code": 200, "data": reports})
    except Exception as e:
        print(e)
        return jsonify({"code": 500, "message": str(e)})


@bp.route('/api/v1/mon/weekly/service', methods=['GET'])
def api_weekly_all_service():
    """Return all per-service weekly reports, enriched with cluster name and date range.

    Response JSON: {"code": 200, "data": [...]} on success,
                   {"code": 500, "message": "..."} on any failure.
    """
    w = WeeklyReportService()
    c = Cluster()
    try:
        reports = w.find_all()  # renamed from `list` to stop shadowing the builtin
        cluster_d = c.get_cluster_dict()
        for report in reports:
            # A cluster_id missing from the mapping raises KeyError and is
            # surfaced as a 500 below (original behavior preserved).
            report["cluster"] = cluster_d[report["cluster_id"]]
            report["date_range"] = get_date_from_week_num(report['year'], report['week'])
        return jsonify({"code": 200, "data": reports})
    except Exception as e:
        print(e)
        return jsonify({"code": 500, "message": str(e)})


def generate_weekly_cluster():
    """Generate weekly usage reports for every cluster that has a Prometheus endpoint.

    For each such cluster: query 7-day averaged CPU / memory / disk usage plus
    network in/out, insert one cluster-scope row, then per-node rows (via
    generate_weekly_node) and per-web availability rows (via generate_weekly_web).
    Errors are accumulated into `message` and printed; nothing is returned.
    """
    def getQueryData(cluster_id, query_str, point=0):
        """Run an instant PromQL query and return the first series value rounded
        to `point` decimals, or 0 when the query fails or returns no series.

        Previously an empty result list slipped past the truthiness check and
        crashed the whole job with an IndexError on result[0].
        """
        d = clusterPromQuery(cluster_id, query_str)
        if not d:
            return 0
        try:
            value = d['data']['result'][0]['value'][1]
        except (KeyError, IndexError):
            return 0
        return round(float(value), point)

    # step1: find all clusters that have a Prometheus endpoint configured
    cluster_model = Cluster()
    clusters_have_prom = cluster_model.find_all_have_prom()
    weekly = WeeklyReport()
    year, week = get_week()

    # step2: prepare the 7-day PromQL expressions
    query_str_cluster_cpu = "1-avg(rate(node_cpu_seconds_total{mode='idle'}[7d]))"
    query_str_cluster_mem = "avg((1 - avg_over_time(node_memory_MemAvailable_bytes[7d]) / avg_over_time(node_memory_MemTotal_bytes[7d])))"
    query_str_cluster_disk = "avg((avg_over_time(node_filesystem_size_bytes{fstype=~'ext.?|xfs'}[7d])-avg_over_time(node_filesystem_free_bytes{fstype=~'ext.?|xfs'}[7d]))/((avg_over_time(node_filesystem_size_bytes{fstype=~'ext.?|xfs'}[7d]))))"
    query_str_cluster_net_in = "sum(irate(container_network_receive_bytes_total{namespace=~'.+'}[7d]))/1024"
    query_str_cluster_net_out = "sum(irate(container_network_transmit_bytes_total{namespace=~'.+'}[7d]))/1024"

    message = ''
    # Loop variable renamed: the original reused `c`, shadowing the Cluster
    # model instance created above.
    for cluster in clusters_have_prom:
        cluster_id = cluster['id']
        print("--> cluster weekly:", cluster_id, cluster['name'])
        cluster_cpu = getQueryData(cluster_id, query_str_cluster_cpu, 3)
        cluster_mem = getQueryData(cluster_id, query_str_cluster_mem, 3)
        cluster_disk = getQueryData(cluster_id, query_str_cluster_disk, 3)
        cluster_net_in = getQueryData(cluster_id, query_str_cluster_net_in)
        cluster_net_out = getQueryData(cluster_id, query_str_cluster_net_out)
        # Only record clusters whose Prometheus returned at least one non-zero
        # metric; all-zero means the queries all failed or there is no data.
        if cluster_cpu + cluster_mem + cluster_disk + cluster_net_in + cluster_net_out > 0:
            # insert the cluster-level row
            weekly.insert(cluster_id, year, week, 'cluster', '', cluster_cpu, cluster_mem, cluster_disk, cluster_net_in,
                          cluster_net_out)
            # per-node weekly metrics for this cluster
            print("--> node weekly")
            node_data, msg = generate_weekly_node(cluster_id)
            message += msg

            # persist each node's aggregated metrics
            for k, v in node_data.items():
                print(k, v)
                try:
                    weekly.insert(cluster_id=cluster_id, year=year, week=week, scope='node', node=k,
                                  d_cpu=v['cpu'], d_mem=v['mem'], d_disk=v['disk'], d_net_in=v['net_in'],
                                  d_net_out=v['net_out'])
                except Exception as e:
                    print(e)
                    message += str(e)

        # per-web availability for this cluster (runs even when cluster
        # metrics were all zero, matching the original control flow)
        print("--> web weekly")
        web_data, web_msg = generate_weekly_web(cluster_id)
        message += web_msg
        for k, v in web_data.items():
            print(k, v)
            try:
                weekly.insert_web(cluster_id=cluster_id, year=year, week=week, scope='web', web=k, d_aval=v)
            except Exception as e:
                print(e)
                message += str(e)
    if not message:
        message = "success"
    print(message)


def generate_weekly_node(cluster_id):
    """Collect per-node weekly metrics (7-day avg CPU/mem/disk, current net in/out in KB/s)
    for every node of the given cluster.

    Returns (data, message): `data` maps node name -> metric dict with keys
    cpu/mem/disk/net_in/net_out; `message` accumulates error strings
    (empty string means full success).
    """
    # fetch the node list from the k8s apiserver
    c = Cluster()
    cluster = c.find_by_id(cluster_id)
    url = cluster['apiserver'] + "/api/v1/nodes"
    headers = {"Authorization": "Bearer " + cluster['token'], "User-Agent": UA}

    message = ""  # any error text is appended here
    try:
        response = requests.get(url=url, headers=headers, verify=False)
        res = response.json()
    except Exception as e:
        print("获取node失败", e)
        message += str(e)
        # BUG FIX: the original returned only `message` here, but every caller
        # unpacks (data, message) — return an empty dict alongside the error.
        return {}, message

    # one metric dict per node, everything initialised to 0
    data = {}
    for item in res['items']:
        name = item['metadata']['name']
        # BUG FIX: the original initialised 'net_in' twice and never 'net_out',
        # which caused a KeyError at insert time whenever the transmit query failed.
        data[name] = {'cpu': 0, 'mem': 0, 'disk': 0, 'net_in': 0, 'net_out': 0}

    # query Prometheus for each metric and fill in per-instance values
    prom_url = cluster['prom'] + "/api/v1/query"

    def _fill(field, query, ndigits=None):
        """Run one instant PromQL query and store each series' value under
        data[<instance>][field]; errors are printed and appended to message."""
        nonlocal message
        try:
            resp = requests.get(url=prom_url, headers=headers, verify=False,
                                params={'query': query})
            for series in resp.json()['data']['result']:
                raw = float(series['value'][1])
                # ndigits=None reproduces the original round() -> int behavior
                value = round(raw, ndigits) if ndigits is not None else round(raw)
                if series['metric']['instance'] in data:
                    data[series['metric']['instance']][field] = value
        except Exception as e:
            print(e)
            message += str(e)

    _fill('cpu', "1-(avg(rate(node_cpu_seconds_total{mode='idle'}[7d])) by(instance) )", 3)
    _fill('mem', "1-avg_over_time(node_memory_MemAvailable_bytes[7d])/avg_over_time(node_memory_MemTotal_bytes[7d])", 3)
    _fill('disk', "1-(avg_over_time(node_filesystem_free_bytes{fstype=~'ext.?|xfs'}[7d]))/avg_over_time(node_filesystem_size_bytes{fstype=~'ext.?|xfs'}[7d])", 3)
    _fill('net_in', "rate(node_network_receive_bytes_total{device='eth0'}[2m])/1024")
    _fill('net_out', "rate(node_network_transmit_bytes_total{device='eth0'}[2m])/1024")
    return data, message


def generate_weekly_web(cluster_id):
    """Compute the 7-day probe availability for every instance linked to a cluster.

    Returns (availability, errors): `availability` maps instance -> rounded
    7-day average of probe_success; `errors` accumulates failure messages
    (empty string means full success).
    """
    errors = ''
    # look up the instances associated with this cluster
    related = ClusterRelatedInstance()
    instances = related.find_by_cluster_id(cluster_id)
    print(instances)

    availability = {}
    headers = {"User-Agent": UA}
    for entry in instances:
        target = entry['instance']
        endpoint = entry['prom_url'] + "/api/v1/query"
        params = {
            'query': "avg_over_time(probe_success{instance='" + target + "'}[7d])"
        }
        try:
            resp = requests.get(url=endpoint, headers=headers, verify=False, params=params)
            print(resp.text)
            body = resp.json()
            availability[target] = round(float(body['data']['result'][0]['value'][1]), 3)
        except Exception as err:
            print(err)
            errors += str(err)
    return availability, errors


def generate_weekly_service(cluster_id):
    """Collect last-7-day apdex / resp_time / sla / cpm for every SkyWalking
    service of the given cluster and persist one WeeklyReportService row each.

    Network or parsing failures propagate to the caller (original behavior).
    Returns the literal string 'ok' for existing callers.
    """
    # step1: resolve the cluster's SkyWalking endpoint and the report window
    c = Cluster()
    cluster = c.find_by_id(cluster_id)
    before, now = get_last_seven_days()

    w = WeeklyReportService()
    year, week = get_week()
    url = cluster['skw'] + "/graphql"
    headers = {
        "User-Agent": UA,
        "Content-Type": "application/json"
    }
    # shared GraphQL duration argument (key order matters only for payload
    # byte-stability; json.dumps preserves insertion order)
    duration = {
        "start": str(before),
        "end": str(now),
        "step": "DAY"
    }

    def _post(payload):
        """One GraphQL round-trip; raises on network/JSON errors like the original."""
        response = requests.request("POST", url, headers=headers, data=payload)
        return response.json()

    def _condition(metric_name, service):
        """Build the MetricsCondition block for a Service-scope metric."""
        return {
            "name": metric_name,
            "entity": {
                "scope": "Service",
                "serviceName": service,
                "normal": True
            }
        }

    def _read_value(metric_name, service):
        """Fetch a single-value metric (apdex / sla / cpm) for one service."""
        payload = json.dumps({
            "query": "query queryData($condition: MetricsCondition!, $duration: Duration!) {\n  readMetricsValue: readMetricsValue(condition: $condition, duration: $duration)}",
            "variables": {
                "duration": duration,
                "condition": _condition(metric_name, service)
            }
        })
        return _post(payload)['data']['readMetricsValue']

    # list all services observed during the window
    payload = json.dumps({
        "query": "query queryServices($duration: Duration!,$keyword: String!) {\n    services: getAllServices(duration: $duration, group: $keyword) {\n      key: id\n      label: name\n      group\n    }\n  }",
        "variables": {
            "duration": duration,
            "keyword": ""
        }
    })
    response = requests.request("POST", url, headers=headers, data=payload)
    print(response.text)
    data = response.json()
    service_data = {i['label']: {"apdex": 0, "resp_time": 0, "sla": 0, "cpm": 0}
                    for i in data['data']['services']}

    # step2: fetch the four metrics per service
    for service in service_data.keys():
        # 1 apdex (single value)
        apdex = _read_value("service_apdex", service)

        # 2 resp_time: daily series, averaged over the window
        payload_resp_time = json.dumps({
            "query": "query queryData($condition: MetricsCondition!, $duration: Duration!) {\n  readMetricsValues: readMetricsValues(condition: $condition, duration: $duration) {\n    label\n    values {\n      values {value}\n    }\n  }}",
            "variables": {
                "duration": duration,
                "condition": _condition("service_resp_time", service)
            }
        })
        points = _post(payload_resp_time)['data']['readMetricsValues']['values']['values']
        total = sum(p['value'] for p in points)
        # BUG FIX: an empty series previously raised ZeroDivisionError
        resp_time = int(total / len(points)) if points else 0

        # 3 sla and 4 cpm (single values)
        sla = _read_value("service_sla", service)
        cpm = _read_value("service_cpm", service)

        # SkyWalking returns apdex and sla scaled by 10000
        service_data[service]['apdex'] = apdex / 10000
        service_data[service]['resp_time'] = resp_time
        service_data[service]['sla'] = sla / 10000
        service_data[service]['cpm'] = cpm
        print(cluster_id, service, service_data[service])

    # persist the aggregated rows
    for name, metrics in service_data.items():
        print("insert to db ...")
        w.insert(cluster_id, year, week, name, metrics['apdex'], metrics['resp_time'], metrics['sla'], metrics['cpm'])

    return 'ok'


def generate_weekly_skw():
    """Run the SkyWalking weekly service report for every cluster that has a
    skw endpoint configured."""
    print("BEGIN ---------------> generate_weekly_skw_service")
    cluster_model = Cluster()
    for item in cluster_model.find_all_have_skw():
        generate_weekly_service(item['id'])
    print('generate_weekly_skw_service --------------->  END')
