from common.prometheus.prometheus import query_range_prometheus_resource
import time
from config.cluster_config_new import cluster_client

def get_job_gpu_info_from_prometheus(job_name, cluster_prometheus_server_uri = 'http://192.168.8.82:9091'):
    """Map host instance -> list of GPU minor numbers used by *job_name*.

    Queries the last 5 minutes of the ``task_gpu_percent`` metric from
    Prometheus and groups the returned series by their ``instance`` label.
    Returns an empty dict when the query fails or the response is malformed.
    """
    # job_info_exp = 'container_GPUPerc{{container_label_PAI_JOB_NAME="{}"}}'.format(job_name)
    query = 'task_gpu_percent{{job_name="{}"}}'.format(job_name)

    now = time.time()
    # Look back over the most recent 5-minute window.
    response = query_range_prometheus_resource(cluster_prometheus_server_uri, query, now - 5 * 60, now)

    gpu_info = {}
    # Bail out on a failed query or an unexpected response shape.
    if not response or 'data' not in response or 'result' not in response['data']:
        return gpu_info

    for series in response['data']['result']:
        instance = series['metric']['instance']
        gpu_info.setdefault(instance, []).append(series['metric']['minor_number'])

    return gpu_info

def get_jobs_status(jobs_list):
    """Fetch status records for *jobs_list*, batching requests at 100 jobs each.

    Small lists (< 100) are fetched with a single call; larger lists are split
    into consecutive 100-element slices and the results concatenated, in input
    order.

    Returns whatever ``get_batch_jobs`` returns, concatenated into one list.
    """
    batch_size = 100
    if len(jobs_list) < batch_size:
        return cluster_client.task_client.get_batch_jobs(jobs_list)

    # Stride directly over the list instead of computing a batch count with
    # math.ceil(len/100), which silently breaks under integer division and
    # relied on the loop variable leaking out of the for-loop for the last slice.
    status_list = []
    for start in range(0, len(jobs_list), batch_size):
        status_list += cluster_client.task_client.get_batch_jobs(jobs_list[start:start + batch_size])

    return status_list


def get_running_jobs_from_prometheus(cluster_prometheus_server_uri = 'http://192.168.8.82:9091'):
    """Return the names of jobs that reported container metrics in the last 5 minutes.

    Counts ``task_mem_limit_byte`` series grouped by (job_name, instance); every
    group observed in the window yields one entry in the result, so a job running
    on several hosts appears once per host. Returns an empty list when the query
    fails or the response is malformed (previously this crashed with a
    TypeError/KeyError on a bad response).
    """
    jobs_list = []
    # running_container_exp = "count(container_CPUPerc) by(container_label_PAI_JOB_NAME)"
    running_container_exp = "count(task_mem_limit_byte) by (job_name, instance)"
    end_time = time.time()
    start_time = end_time - 5*60
    running_containers = query_range_prometheus_resource(cluster_prometheus_server_uri,
                                                         running_container_exp,
                                                         start_time, end_time)
    # Guard against a failed query, consistent with get_job_gpu_info_from_prometheus.
    if not running_containers or 'data' not in running_containers \
            or 'result' not in running_containers['data']:
        return jobs_list

    for job in running_containers['data']['result']:
        # job_name = job['metric']['container_label_PAI_JOB_NAME']
        jobs_list.append(job['metric']['job_name'])

    return jobs_list

def get_zombie_jobs_info(frameworkluanch_uri, cluster_prometheus_server_uri = 'http://192.168.8.82:9091'):
    """Return zombie jobs: containers still emitting metrics although the job is stopped.

    A job is a zombie when Prometheus saw its containers in the last 5 minutes
    but the framework reports ``executionType == 'STOP'``.

    NOTE(review): *frameworkluanch_uri* is accepted but never used here; it is
    kept for interface compatibility with callers.

    Returns a list of dicts: ``{'name': ..., 'userName': ..., 'gpu': {host: [minor_numbers]}}``.
    """
    jobs_list = []
    # running_container_exp = "count(container_CPUPerc) by(container_label_PAI_JOB_NAME, instance)"
    running_container_exp = "count(task_mem_limit_byte) by (job_name, instance)"
    end_time = time.time()
    start_time = end_time - 5*60
    running_containers = query_range_prometheus_resource(cluster_prometheus_server_uri,
                                                         running_container_exp,
                                                         start_time, end_time)
    # Guard against a failed query, consistent with get_job_gpu_info_from_prometheus.
    if not running_containers or 'data' not in running_containers \
            or 'result' not in running_containers['data']:
        return []

    jobs_host_map = {}
    for job in running_containers['data']['result']:
        # job_name = job['metric']['container_label_PAI_JOB_NAME']
        job_name = job['metric']['job_name']
        host_id = job['metric']['instance']
        jobs_host_map.setdefault(job_name, []).append(host_id)
        jobs_list.append(job_name)

    print("jobs_list is {}".format(jobs_list))
    jobs_status = []
    if jobs_list:
        jobs_status = get_jobs_status(jobs_list)

    zombie_jobs = []
    for job in jobs_status:
        if job['executionType'] != 'STOP':
            continue

        # BUG FIX: `job` is a dict, so the original `job.name()` raised
        # AttributeError; the job name lives under the "frameworkName" key.
        framework_name = job["frameworkName"]
        gpu_info = get_job_gpu_info_from_prometheus(framework_name,
                                                    cluster_prometheus_server_uri)

        # CPU-only tasks are zombie containers too: make sure every host that
        # reported this job appears in the map, even with no GPUs attached.
        for host_id in jobs_host_map[framework_name]:
            if host_id not in gpu_info:
                gpu_info[host_id] = []

        zombie_jobs.append(dict(name=framework_name,
                                userName=job["userName"],
                                gpu=gpu_info))

    return zombie_jobs

"""
{'10.11.2.32': 
    {'gpu': ['0'], 
     'jobs': [{'name': 'debug_yolact-2gm5wu', 'userName': '_bace8e5b43c9435289f43197ab88c799', 'gpu': ['0']
     }]
     }
}
"""
def get_zombie_info(frameworkluanch_uri, cluster_prometheus_server_uri = 'http://192.168.8.82:9091'):
    """Aggregate zombie-job information per host.

    Builds, for every host that runs at least one zombie job, a dict with the
    sorted union of occupied GPU minor numbers and the list of zombie jobs on
    that host (each with its own name, userName and GPU list).
    """
    zombies_info = {}

    for job in get_zombie_jobs_info(frameworkluanch_uri, cluster_prometheus_server_uri):
        for host, gpus in job['gpu'].items():
            # One aggregate entry per host, created lazily on first sight.
            entry = zombies_info.setdefault(host, {'gpu': [], 'jobs': []})
            entry['gpu'] += gpus
            entry['jobs'].append(dict(name=job['name'],
                                      userName=job['userName'],
                                      gpu=gpus))

    # Present GPU ids in a stable order for each host.
    for entry in zombies_info.values():
        entry['gpu'].sort()

    return zombies_info
