from common.prometheus.prometheus import *
from .model import PlatformUsers

# Time-unit conversion constants shared by the helpers below.
HOUR_UNIT_TIME_MILL_SECOND = 60*60*1000
DAY_UNIT_TIME_MILL_SECOND = 24*HOUR_UNIT_TIME_MILL_SECOND
HOUR_UNIT_TIME_SECOND = 60*60
DAY_UNIT_TIME_SECOND = 24*HOUR_UNIT_TIME_SECOND
TOP_NUM = 10
MILL_SECOND = 1000

def sample_prometheus_series(prometheus_result, begin_day_time, interval):
    """Down-sample a Prometheus range-query result into per-day averages.

    Args:
        prometheus_result: decoded JSON of a Prometheus range query; samples
            are read from data.result[0].values as [timestamp, value] pairs
            (timestamps in epoch seconds, values as numeric strings).
        begin_day_time: start of the first day bucket, epoch seconds.
        interval: number of consecutive day buckets to produce.

    Returns:
        List of [day_start_ms, avg_percent] pairs, one per day; avg_percent
        is the mean of that day's samples * 100 rounded to 2 decimals, or 0
        for days with no samples.
    """
    series = prometheus_result["data"]["result"]
    if series and "values" in series[0]:
        samples = series[0]["values"]
    else:
        samples = []
    print("[sample_prometheus_series] utilization_hour is {}".format(samples))

    # Bucket every sample by day in one pass instead of rescanning the whole
    # sample list once per day (was O(interval * samples)).
    day_buckets = [[] for _ in range(interval)]
    for timestamp, value in samples:
        day_index = int((timestamp - begin_day_time) // DAY_UNIT_TIME_SECOND)
        if 0 <= day_index < interval:
            day_buckets[day_index].append(float(value))

    utilization_list = []
    for day_index, bucket in enumerate(day_buckets):
        if bucket:
            # round() replaces the float("{:.2f}".format(...)) round-trip.
            util_day = round(sum(bucket) / len(bucket) * 100, 2)
        else:
            util_day = 0
        # Use the MILL_SECOND constant instead of the magic literal 1000.
        day_start_ms = (begin_day_time + day_index * DAY_UNIT_TIME_SECOND) * MILL_SECOND
        utilization_list.append([day_start_ms, util_day])

    return utilization_list

#Build an all-zero time series: [[timestamp_ms, 0], [timestamp_ms, 0], ...]
def make_zero_time_series(begin_day_time, interval):
    """Return `interval` daily [timestamp_ms, 0] points starting at begin_day_time (ms)."""
    return [
        [begin_day_time + day * DAY_UNIT_TIME_MILL_SECOND, 0]
        for day in range(interval)
    ]

#Return the day-aligned window edges, in seconds, for the last `interval` days.
def get_time_day_edge_second(interval):
    """Return dict(start_day, end_day, now_time) in epoch seconds.

    end_day is the start of the current epoch day, start_day is the start of
    the day (interval - 1) days earlier, now_time is the raw current time.
    """
    current = int(time.time())
    today_start = current - current % DAY_UNIT_TIME_SECOND
    window_start = today_start - (interval - 1) * DAY_UNIT_TIME_SECOND
    return dict(start_day=window_start, end_day=today_start, now_time=current)

#Return the day-aligned window edges, in milliseconds, for the last `interval` days.
def get_time_day_edge_millsecond(interval):
    """Return dict(start_day, end_day, now_time) in epoch milliseconds."""
    now_second = int(time.time())
    day_start_second = now_second - now_second % DAY_UNIT_TIME_SECOND
    window_start_second = day_start_second - (interval - 1) * DAY_UNIT_TIME_SECOND
    return dict(
        start_day=window_start_second * MILL_SECOND,
        end_day=day_start_second * MILL_SECOND,
        now_time=now_second * MILL_SECOND,
    )

#resource_info: {"user_id": resource (numeric)}
def top_number_users_resource(db_session, top_user_number, resource_info):
    """Return the top-N resource consumers as [{"username": ..., "value": ...}].

    Args:
        db_session: SQLAlchemy session used to resolve user ids to accounts.
        top_user_number: how many top users to return at most.
        resource_info: mapping of user id -> numeric resource usage.

    Only strictly positive usages are ranked (the original repeated arg-max
    started from max_value = 0 with a strict '>'); a stable descending sort
    preserves the original first-inserted-wins tie order while replacing the
    O(top_user_number * len(resource_info)) selection loop.
    """
    top_users_list = sorted(
        ((user_id, value) for user_id, value in resource_info.items() if value > 0),
        key=lambda item: item[1],
        reverse=True,
    )[:top_user_number]
    top_user_ids = [user_id for user_id, _ in top_users_list]

    # Resolve the selected user ids to account/email via the users table.
    users_info = db_session.query(PlatformUsers.id, PlatformUsers.account, PlatformUsers.email)\
        .filter(PlatformUsers.id.in_(top_user_ids)).all()
    user_account_map = {
        user_id: dict(account=account, email=email)
        for user_id, account, email in users_info
    }

    # Build the final output; prefer the account name, fall back to email
    # when the account field is empty.
    top_user_account_list = []
    for user_id, value in top_users_list:
        info = user_account_map.get(user_id)
        if info is None:
            continue  # user row not found in the DB; skip silently as before
        username = info["account"] if info["account"] else info["email"]
        top_user_account_list.append(dict(username=username, value=value))

    return top_user_account_list

def get_job_info_from_prometheus(job_name, cluster_prometheus_server_uri = 'http://192.168.8.82:9091'):
    """Query per-container GPU utilization for the PAI job named `job_name`."""
    query = 'container_GPUPerc{{container_label_PAI_JOB_NAME="{}"}}'.format(job_name)
    return query_prometheus_resource(cluster_prometheus_server_uri, query)

def get_node_info_from_prometheus(cluster_prometheus_server_uri = 'http://192.168.8.82:9091'):
    """Collect per-node resource metrics from Prometheus and return them.

    Fix: the original bound every query result to a local and returned None,
    so all 13 query results were discarded. They are now returned in a dict
    keyed by metric name (callers that ignored the return are unaffected).

    NOTE(review): several expressions are pre-URL-encoded ('+' for spaces,
    %XX escapes), so query_prometheus_resource presumably sends them
    verbatim in the request URL — confirm against its implementation.
    """
    metrics = {}

    # Total memory per node
    metrics["memory_total"] = query_prometheus_resource(cluster_prometheus_server_uri, "node_memory_MemTotal")

    # Used memory = total - free - buffers - cached
    node_memory_used_exp = "node_memory_MemTotal+-+node_memory_MemFree+-+node_memory_Buffers+-+node_memory_Cached"
    metrics["memory_used"] = query_prometheus_resource(cluster_prometheus_server_uri, node_memory_used_exp)

    # GPU memory utilization averaged per node
    node_gpu_memory_used_exp = "avg+(nvidiasmi_utilization_memory)+by+(instance)"
    metrics["gpu_memory_used"] = query_prometheus_resource(cluster_prometheus_server_uri, node_gpu_memory_used_exp)

    # GPU utilization averaged per node
    node_gpu_used_exp = "avg+(nvidiasmi_utilization_gpu)+by+(instance)"
    metrics["gpu_used"] = query_prometheus_resource(cluster_prometheus_server_uri, node_gpu_used_exp)

    # Total number of GPUs
    node_gpu_total_exp = "sum(nvidiasmi_attached_gpus)"
    metrics["gpu_total"] = query_prometheus_resource(cluster_prometheus_server_uri, node_gpu_total_exp)

    # Number of GPUs currently in use (non-zero utilization)
    node_gpu_active_exp = "count(nvidiasmi_utilization_gpu > 0)"
    metrics["gpu_active"] = query_prometheus_resource(cluster_prometheus_server_uri, node_gpu_active_exp)

    # Number of GPUs already allocated to containers
    node_gpu_allocated_exp = "count(container_GPUPerc)"
    metrics["gpu_allocated"] = query_prometheus_resource(cluster_prometheus_server_uri, node_gpu_allocated_exp)

    # CPU utilization: 100 - idle% (URL-encoded PromQL)
    node_cpu_used_exp = "100%20-%20(avg%20by%20(instance)(irate(node_cpu%7Bmode%3D%22idle%22%7D%5B5m%5D))%20*%20100)"
    metrics["cpu_used"] = query_prometheus_resource(cluster_prometheus_server_uri, node_cpu_used_exp)

    # Disk write/read throughput per node
    node_io_write_exp = "sum+(rate(node_disk_bytes_written%5B5m%5D))+by+(instance)"
    metrics["io_write"] = query_prometheus_resource(cluster_prometheus_server_uri, node_io_write_exp)

    node_io_read_exp = "sum+(rate(node_disk_bytes_read%5B5m%5D))+by+(instance)"
    metrics["io_read"] = query_prometheus_resource(cluster_prometheus_server_uri, node_io_read_exp)

    # Network receive throughput per node
    node_network_receive_exp = "sum+(rate(node_network_receive_bytes%5B5m%5D))+by+(instance)"
    metrics["network_receive"] = query_prometheus_resource(cluster_prometheus_server_uri, node_network_receive_exp)

    # Filesystem total/free size in GiB
    node_disk_size_total_exp = '(max(node_filesystem_size{fstype=~"ext3|ext4|xfs"})' \
                               ' by(instance, device))/1024/1024/1024'
    metrics["disk_size_total"] = query_prometheus_resource(cluster_prometheus_server_uri, node_disk_size_total_exp)

    node_disk_size_free_exp = '(max(node_filesystem_free{fstype=~"ext3|ext4|xfs"})' \
                              ' by(instance, device))/1024/1024/1024'
    metrics["disk_size_free"] = query_prometheus_resource(cluster_prometheus_server_uri, node_disk_size_free_exp)

    return metrics

def get_current_alarms(cluster_prometheus_server_uri = 'http://192.168.8.82:9091'):
    """Query and return the alerts currently firing on the Prometheus server.

    Fix: the original printed the query result but never returned it, so
    callers could not use it. It is now returned (print kept for parity
    with the module's existing logging style).
    """
    alert_exp = 'ALERTS{alertstate="firing"}'
    current_alert = query_prometheus_resource(cluster_prometheus_server_uri, alert_exp)
    print(current_alert)
    return current_alert

# Manual smoke test: when run directly, dump the alerts currently firing
# on this specific Prometheus instance.
if __name__ == '__main__':
    get_current_alarms('http://10.12.3.2:9091')
