#!/bin/env python
# -*- coding: utf8 -*-
import subprocess
import sys

modules = {
    "sys": None,
    "os": None,
    "pymongo": "pymongo==3.7.1",
    "json": None,
    "subprocess": None,
    "math": None,
    "pytz": "pytz==2024.1",
    "dateutil": "python-dateutil==2.9.0.post0",
    "collections": None,
    "requests": "requests==2.27.1",
    "re": None,
    "datetime": None,
    "time": None,
    "numpy": "numpy==1.16.6",
    "pandas": "pandas==0.24.2",
    "scipy": "scipy==1.2.3",
    "sklearn": "scikit-learn==0.20.3",
    "prometheus-client": "prometheus-client==0.12.0"
}

def install(package):
    """Install *package* (a pip requirement string) into the interpreter
    currently running this script."""
    cmd = [sys.executable, "-m", "pip", "install", package]
    subprocess.check_call(cmd)

# Best-effort dependency bootstrap: try to import every known module and
# install the pinned requirement for any that are missing.
for module_name, requirement in modules.items():
    try:
        __import__(module_name)
        continue
    except ImportError:
        pass
    if requirement:
        print("Installing {}".format(requirement))
        install(requirement)
    else:
        print("Module {} does not require installation or is part of Python's standard library.".format(module_name))


import os
from pymongo import MongoClient
import json
import math
import pytz
from dateutil.relativedelta import relativedelta
from collections import defaultdict
import requests
import re
import time
from datetime import datetime,timedelta
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from prometheus_client import start_http_server, Gauge

# Prometheus metrics exported by this script. Every gauge is labelled with
# the CRUSH root name ('root_type') and the cluster identifier
# ('cluster_identity'); the trend gauge additionally labels each forecast
# sample with its date ('future_time').
root_capacity_gauge = Gauge('root_capacity_tb', 'root capacity in TB', ['root_type','cluster_identity'])
root_utilization_gauge = Gauge('current_root_utilization', 'Current root utilization', ['root_type','cluster_identity'])
root_capacity_remain_gauge = Gauge('root_capacity_remain_tb','root remain capacity in TB',['root_type','cluster_identity'])
root_ratio_gauge = Gauge('root_ratio','the sum of all pool ratio for each root',['root_type','cluster_identity'])
root_utilization_trend_gauge = Gauge('root_utilization_trend_tb','root utilization trend in TB',['root_type','cluster_identity','future_time'])
root_available_time_week_gauge = Gauge('root_available_time_recentweek_rate','based on recent week rate,root writable time', ['root_type','cluster_identity'])
root_available_time_month_gauge = Gauge('root_available_time_recentmonth_rate','based on recent month rate,root writable time', ['root_type','cluster_identity'])
root_need_capacity_week_gauge = Gauge('root_need_capacity_next_week_tb','the capacity of root need(TB) in next week',['root_type','cluster_identity'])
root_need_capacity_month_gauge = Gauge('root_need_capacity_next_month_tb','the capacity of root need(TB) in next month',['root_type','cluster_identity'])

def get_divmod_nums(a, b):
    """Divide *a* by *b* as floats and return the quotient rounded to two
    decimal places. Accepts anything float() can convert."""
    return round(float(a) / float(b), 2)

def get_root_structure(root):
    """Return the failure-domain level of the cluster topology: 'rack' when
    `ceph osd tree` lists any rack buckets, otherwise 'host'.

    NOTE(review): the *root* argument is currently unused -- the check is
    cluster-wide, not per-root. Confirm whether a per-root check was intended.
    """
    proc = subprocess.Popen("ceph osd tree | grep rack",
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            bufsize=1, shell=True)
    output = proc.stdout.read()
    return 'rack' if output else 'host'
 
# jq pipelines that sum one OSD field (second %s: 'kb' or 'kb_used') beneath a
# named CRUSH root (first %s), for rack-level and host-level failure domains.
_RACK_SUM_CMD = "tp=\"root\"; name=%s; ceph osd df tree -f json | jq -r --arg tp \"$tp\" --arg name \"$name\" '.nodes as $nodes | $nodes[] | select(.type == $tp and .name == $name) | .children[] as $rack_id | $nodes[] | select(.id == $rack_id) | .children[] as $host_id | $nodes[] | select(.id == $host_id) | .children[] as $osd_id | $nodes[] | select(.id == $osd_id) | .%s' | jq -s add"

_HOST_SUM_CMD = "tp=\"root\"; name=%s; ceph osd df tree -f json | jq -r --arg tp \"$tp\" --arg name \"$name\" '.nodes as $nodes | $nodes[] | select(.type == $tp and .name == $name) | .children[] as $host_id | $nodes[] | select(.id == $host_id) | .%s' | jq -s add"

def _sum_osd_field_tb(cmd):
    """Run *cmd* (a shell pipeline printing a KB total) and return the value
    converted to TB, rounded to 2 decimal places; 0 when the pipeline prints
    nothing usable."""
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             bufsize=1, shell=True)
    out = popen.stdout.read().strip()
    if out.isdigit():
        # BUG FIX: use float division. Under Python 2 the original
        # `int(out)/1024/1024/1024` was integer (floor) division, which
        # truncated TB values and made the later used/capacity ratio
        # integer math (often yielding 0 utilization).
        return round(int(out) / 1024.0 / 1024 / 1024, 2)
    return 0

def get_rule_capacity():
    """Collect per-root raw capacity and utilization from `ceph osd df tree`.

    For every CRUSH root, sums the OSD `kb` / `kb_used` fields beneath it
    (walking root->rack->host->osd or root->host->osd depending on the
    failure domain reported by get_root_structure) and converts to TB.

    Returns:
        current_root_utilization: {root name -> used/capacity ratio, 2 dp}
        root_capacity:            {root name -> capacity in TB, 2 dp}
        Roots whose capacity sums to 0 are excluded from both.
    """
    popen = subprocess.Popen("ceph osd df tree -f json-pretty",
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             bufsize=1, shell=True)
    raw = popen.stdout.read()
    # Empty OSDs report 'nan' fields which json.loads cannot parse.
    nodes = json.loads(raw.replace('nan', '0').replace('-nan', '0'))['nodes']

    root_used = {}
    root_capacity = {}
    current_root_utilization = {}

    for node in nodes:  # each node is a dict describing one CRUSH bucket/OSD
        if node['type'] != 'root':
            continue
        name = node['name']
        template = _RACK_SUM_CMD if get_root_structure(name) == 'rack' else _HOST_SUM_CMD
        root_used[name] = _sum_osd_field_tb(template % (name, 'kb_used'))
        root_capacity[name] = _sum_osd_field_tb(template % (name, 'kb'))

    # Drop roots that report zero capacity.
    root_capacity = {k: v for k, v in root_capacity.items() if v != 0}

    for name in root_capacity:
        current_root_utilization[name] = round(root_used[name] / root_capacity[name], 2)

    print('current_root_utilization:{}'.format(current_root_utilization))
    print('root_capacity:{}'.format(root_capacity))

    return current_root_utilization, root_capacity
   
def get_rule_info():
    """Parse `ceph osd crush rule dump` into two lookup tables.

    Returns:
        rule_id:   {rule_id -> rule_name}
        rule_info: {rule_name -> root bucket of the rule's first step, with
                    any '~ssd' / '~hdd' device-class suffix stripped}
    """
    proc = subprocess.Popen("ceph osd crush rule dump -f json",
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            bufsize=1, shell=True)
    rules = json.loads(proc.stdout.read())  # list of rule dicts

    rule_id = {}
    rule_info = {}
    for rule in rules:
        rule_id[rule['rule_id']] = rule['rule_name']
        rule_info[rule['rule_name']] = rule['steps'][0]['item_name']

    # Strip device-class suffixes, e.g. 'root-sata~hdd' -> 'root-sata'.
    for rule_name in rule_info:
        target = rule_info[rule_name]
        if target.endswith('~ssd') or target.endswith('~hdd'):
            rule_info[rule_name] = target.split('~')[0]

    return rule_id, rule_info

def get_pool_info():
    """Map each pool name to its CRUSH rule id via `ceph osd pool ls detail`."""
    proc = subprocess.Popen("ceph osd pool ls detail -f json",
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            bufsize=1, shell=True)
    pools = json.loads(proc.stdout.read())
    pool_rule = {}
    for pool in pools:
        # Older Ceph releases name the field 'crush_ruleset'; newer ones
        # use 'crush_rule'.
        field = 'crush_ruleset' if 'crush_ruleset' in pool else 'crush_rule'
        pool_rule[pool['pool_name']] = pool[field]
    return pool_rule

def compute_ratio(pool_name):
    """Return the logical RBD data volume of *pool_name* in TB, tripled.

    Sums the `objects` count that `rbd info` reports for every image in the
    pool (8-way parallel via xargs), converts objects (4 MB each) to TB, and
    multiplies by 3 -- matching the 3-replica convention used elsewhere in
    this script.
    """
    popen = subprocess.Popen('rbd ls %s | xargs -P 8 -i rbd info %s/{}| grep objects$| awk \"{sum+=\$(NF-1)}END{print sum*4/1024/1024}\" ' % (pool_name,pool_name),stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)
    tmp_ratio = popen.stdout.read().strip()

    # ROBUSTNESS FIX: an empty pool (or a failed pipeline) prints nothing,
    # and float('') raised ValueError, killing the whole metrics pass.
    # Default to 0.0, consistent with the 0-defaults used elsewhere here.
    if not tmp_ratio:
        return 0.0
    return float(tmp_ratio) * 3

def get_rule_ratio(root_capacity):
    """Aggregate the logical RBD data volume (TB, 3-replica) per CRUSH root.

    The *root_capacity* argument is accepted for interface compatibility but
    is not used by the current implementation.
    """
    rule_id, rule_info = get_rule_info()   # {rule_id: rule_name}, {rule_name: root}
    pool_rule = get_pool_info()            # {pool_name: rule_id}

    # Sum each pool's data volume under its CRUSH rule id.
    per_rule = defaultdict(int)
    for pool_name, ruleset_id in pool_rule.items():
        per_rule[ruleset_id] += round(compute_ratio(pool_name), 2)

    # Re-key the totals by the rule's root bucket name.
    rule_ratio = {}
    for ruleset_id, total in per_rule.items():
        root_name = rule_info[rule_id[ruleset_id]]
        rule_ratio[root_name] = total

    return rule_ratio

def get_cluster_capacity():
    """Collect capacity, utilization and remaining headroom per CRUSH root.

    Returns:
        current_root_utilization: {root -> used/total ratio}
        root_capacity:            {root -> 3-replica raw capacity, TB}
        capacity_remain:          {root -> single-replica usable TB, >= 0}
    """

    popen = subprocess.Popen("hostname",stdout=subprocess.PIPE,stderr=subprocess.PIPE, bufsize=1, shell=True)
    hostname = popen.stdout.read()
    # NOTE(review): cluster_name is computed here but never used in this
    # function -- confirm whether it can be removed.
    cluster_name=extract_info(hostname) #rg26_hfb3
 
    current_root_utilization,root_capacity = get_rule_capacity()
    #print 'current_root_utilization:{},root_capacity:{}'.format(current_root_utilization,root_capacity)

    rule_ratio=get_rule_ratio(root_capacity)
    #print 'rule_ratio:{}'.format(rule_ratio)
    # Classify the cluster from the first ratio only (both branches break
    # immediately): a zero RBD ratio is printed as "object storage cluster",
    # otherwise "block storage cluster".
    for key,value in rule_ratio.items():
        if value == 0.0:
           print '对象存储集群'
           break
        else:
           print '块存储集群'
           break

    capacity_remain_ca={}
    capacity_remain_rat={}
    capacity_remain={}
    for key,value in root_capacity.items():
        if value != 0: 
           # Headroom up to the 70% utilization ceiling, single replica.
           tem_result_ca=round(value*(0.7-current_root_utilization[key])/3,2)
           capacity_remain_ca[key]=tem_result_ca

           # Headroom based on the logical RBD volume, single replica.
           tem_result_rat=round((value-rule_ratio[key])/3,2)
           capacity_remain_rat[key]=tem_result_rat

    for key,value in capacity_remain_ca.items():
        """root_capacity记录的是root三副本数据量，capacity_remain记录的是单副本可用量"""
        # (Translation: root_capacity holds the 3-replica total;
        # capacity_remain holds the single-replica usable amount.)
        # Take the smaller of the two headroom estimates, clamped to 0.
        if min(value,capacity_remain_rat[key]) < 0:
           #print 'root:{},单副本总容量为:{}T,可用空间为:{}T'.format(key,round(root_capacity[key]/3,2),0)
           capacity_remain[key]=0
        else:
           #print 'root:{},单副本总容量为:{}T,可用空间为:{}T'.format(key,round(root_capacity[key]/3,2),min(value,capacity_remain_rat[key]))
           capacity_remain[key]=min(value,capacity_remain_rat[key])
    
    return current_root_utilization,root_capacity,capacity_remain
 
def extract_info(domain):
    """Extract the cluster identifier (e.g. 'rg26_hfb3') from a hostname.

    Expects hostnames shaped like 'rg<N>-xxx.yyy.<site>.iflytek.net' and
    returns 'rg<N>_<site>'; returns None when the hostname does not match.
    """
    pattern = r"(rg\d+)-.*?\..*?\.([^.]+)\.iflytek\.net"
    found = re.match(pattern, domain)
    if not found:
        return None
    return "{}_{}".format(found.group(1), found.group(2))

def get_date_timestamp():
    """Build 31 days of past and future (date string, epoch seconds) pairs.

    Returns:
        past:   today back through 30 days ago, newest first
        future: tomorrow through 31 days ahead, nearest first
    """
    now = datetime.now()
    past = []
    future = []
    for offset in range(31):
        past_day = now - timedelta(days=offset)
        past.append((past_day.strftime('%Y-%m-%d'),
                     int(time.mktime(past_day.timetuple()))))

        future_day = now + timedelta(days=offset + 1)
        future.append((future_day.strftime('%Y-%m-%d'),
                       int(time.mktime(future_day.timetuple()))))
    return past, future

def get_root_utilization_trend():
    """Forecast per-root utilization for the next 30 days.

    Fetches 31 days of per-pool `ceph_pool_used_bytes` samples from the
    Prometheus server responsible for this cluster, fits a linear regression
    per pool, and aggregates both history and forecast per CRUSH root.

    Returns:
        root_utilization_trend:          {root -> 30 forecast values, TB}
        root_history_utilization_latest: {root -> TB used on the newest day}
        root_history_utilization_oldest: {root -> TB used on the oldest day}
        root_history_utilization_recent: {root -> TB used ~one week ago}
    """
    popen = subprocess.Popen("ceph -v" ,stdout=subprocess.PIPE,stderr=subprocess.PIPE, bufsize=1, shell=True)
    # NOTE(review): version is only printed; the version-dependent scaling
    # further down is commented out -- confirm it is intentionally disabled.
    version = popen.stdout.read()
    print 'ceph集群版本:{}'.format(version)

    popen = subprocess.Popen("hostname",stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)
    hostname = popen.stdout.read()    
    cluster_name=extract_info(hostname) #rg26_hfb3
    # Default Prometheus job / include names derived from the cluster name.
    job=cluster_name+'.ceph_exporter'
    filter_job=cluster_name+'-ceph' #'rg1_imkf-ceph'

    # Per-site exceptions where the Prometheus naming differs from the
    # convention above.
    if 'gw' in hostname and cluster_name == 'rg1_bjxq':
        filter_job = 'rg1_bjxq-lb'
        job = 'rg1_bjxq_gw.ceph_exporter'
   
    if cluster_name == 'rg12_bjsh':
       filter_job = 'rg12_bjsh_ceph'

    if 'obj'in hostname and cluster_name == 'rg1_hfb3':
       filter_job = 'rg36_hfb3-ceph'
       job = 'rg36_hfb3.ceph_exporter'

    if cluster_name == 'rg2_hfb3':
       filter_job = 'rg54_hfb3-ceph'
       job = 'rg54_hfb3.ceph_exporter'

    if cluster_name == 'rg52_csyc4':
       filter_job = 'rg52_hfb3-ceph'
       job='rg52_hfb3.ceph_exporter'


    # This endpoint returns metadata for every cluster (name, endpoints, and
    # the IP/port of the Prometheus instance scraping it); it is used here to
    # discover which Prometheus server to query for this cluster.
    mongo_url="http://xx.xx.xx.xx:xxxx/query?collection=cluster"
    response = requests.get(mongo_url)
    if response.status_code == 200:
       data = response.json()
    else:
       # NOTE(review): on failure `data` stays unbound and the loop below
       # raises NameError -- confirm whether this should abort earlier.
       print "请求失败，状态码:{}".format(response.status_code)

    for item in data:
       if "include" in item:
           if filter_job in item["include"]:
              prom_ip_port = "{}:{}".format(item["endpoints"][0][0],item["endpoints"][0][1])
              #print 'prom_ip_port:{}'.format(prom_ip_port)
    # NOTE(review): if no entry matches filter_job, prom_ip_port is unbound
    # when sub_str is built below -- verify the mongo data always matches.

    dates_and_timestamps,dates_future=get_date_timestamp()

    history_utilization=defaultdict(int)
    root_history_utilization_latest=defaultdict(int)
    root_history_utilization_oldest=defaultdict(int)
    root_history_utilization_recent=defaultdict(int)
    pool_utilization_trend = {}
    root_utilization_trend = defaultdict(int)

    rule_id,rule_info=get_rule_info()
    #rule_id: key----rulesetid value---ruleset   rule_info: key---sata1_ruleset  value---root-sata1
    pool_rule=get_pool_info() #pool_rule: key---pool_name value---ruleset_id

    # print 'rule_id:{}'.format(rule_id)
    # print 'rule_info:{}'.format(rule_info)
    # print 'pool_rule:{}'.format(pool_rule)

    all_pool=[]
    for key,value in pool_rule.items():
    #    if str(key).endswith('pool') or str(key) == 'vms' or str(key) == 'images':
         all_pool.append(key)
    #print 'all_pool:{}'.format(all_pool) # collect every pool name

    sub_str="http://{}/api/v1/query?query=ceph_pool_used_bytes".format(prom_ip_port)
    for pool in all_pool:
        # One instant query per (pool, day); accumulate TB per date string.
        for item in dates_and_timestamps:
            url='{}{{job="{}",pool="{}"}}&time={}'.format(sub_str,job,pool,item[1])
            #e.g. url=http://xx.xx.xx.xx:9090/api/v1/query?query=ceph_pool_used_bytes{job="rg3_hfb3.ceph_exporter",pool="ssd_pool"xx.xx.xx.xx10842
            #print 'url:{}'.format(url)
            response = requests.get(url)
            if response.status_code == 200:
                data = response.json()
                """注意通过接口获取到的数据是单副本数据"""
                # (Translation: the API returns single-replica data.)
                # *3 converts to the 3-replica footprint, then bytes -> TB.
                if len(data['data']['result']) != 0:
                    #if re.search(r'ceph version 14\..*nautilus',version) or re.search(r'ceph version 15\..*octopus',version):
                    #    #print 'ceph version is 14 or 15'
                    #    history_utilization[item[0]] += round(int(data['data']['result'][0]['value'][1])/1024/1024/1024/1024,2)
                    #else:
                    history_utilization[item[0]] += round(int(data['data']['result'][0]['value'][1])*3/1024/1024/1024/1024,2)

            else:
                print "请求失败，状态码:{}".format(response.status_code)
        #print 'pool:{}-----history_utilization:{}'.format(pool,history_utilization)

        ## Linear forecast: fit usage vs. timestamp and predict 30 days ahead.
        data = [(date, timestamp, history_utilization[date]) for date, timestamp in dates_and_timestamps if date in history_utilization]
        data = sorted(data, key=lambda x: x[1])  # sort by timestamp, oldest first
        #print 'linear predict data:{}'.format(data)
        timestamps = np.array([item[1] for item in data]).reshape(-1, 1)
        write_volumes = np.array([item[2] for item in data])
        model = LinearRegression()
        model.fit(timestamps, write_volumes)
        last_timestamp = timestamps[-1, 0]

        future_timestamps_1_month = np.array([last_timestamp + i * 86400 for i in range(1, 31)]).reshape(-1, 1)
        forecast_1_month = model.predict(future_timestamps_1_month)
        pool_utilization_trend[pool] = forecast_1_month        

        # Attribute this pool's history to its CRUSH root: newest sample,
        # oldest sample (30 days back) and the sample 6 days back.
        ruleset_id=pool_rule[pool]  
        ruleset_name=rule_id[ruleset_id]
        root_name=rule_info[ruleset_name]
        root_history_utilization_latest[root_name] += history_utilization[dates_and_timestamps[0][0]]
        root_history_utilization_oldest[root_name] += history_utilization[dates_and_timestamps[-1][0]] 
        root_history_utilization_recent[root_name] += history_utilization[dates_and_timestamps[6][0]]

        history_utilization.clear()
    
    # Sum the per-pool forecasts into per-root forecasts.
    for key,value in pool_rule.items():#pool_rule: key---pool_name value---ruleset_id
        ruleset_name=rule_id[value]
        root_name=rule_info[ruleset_name] #rule_info: key---sata1_ruleset  value---root-sata1
        if root_name not in root_utilization_trend:
           root_utilization_trend[root_name]=pool_utilization_trend[key]
        else:
           root_utilization_trend[root_name] = [x + y for x, y in zip(root_utilization_trend[root_name],pool_utilization_trend[key])]        
    print 'root_history_utilization_latest:{}'.format(root_history_utilization_latest)
    print 'root_history_utilization_oldest:{}'.format(root_history_utilization_oldest)
    print 'root_history_utilization_recent:{}'.format(root_history_utilization_recent)
    print 'root_utilization_trend：{}'.format(root_utilization_trend)
    return root_utilization_trend,root_history_utilization_latest,root_history_utilization_oldest,root_history_utilization_recent

def get_available_time(current_root_utilization,root_capacity,capacity_remain,root_utilization_trend,root_history_utilization_latest,root_history_utilization_oldest,root_history_utilization_recent):
    """Publish every per-root gauge: ratio, capacity, utilization, forecast,
    needed capacity, and the estimated time left before the 70% ceiling.

    Arguments mirror the return values of get_cluster_capacity() and
    get_root_utilization_trend().
    """
    # root_capacity holds the 3-replica total capacity of each root;
    # capacity_remain holds the single-replica usable capacity.

    popen = subprocess.Popen("hostname",stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)
    hostname = popen.stdout.read()
    cluster_name=extract_info(hostname) #rg26_hfb3
    print '集群标识:{}'.format(cluster_name)

    # Per-site exceptions to the cluster naming convention.
    if 'gw' in hostname and cluster_name == 'rg1_bjxq':
        cluster_name = 'rg1_bjxq_gw'

    if cluster_name == 'rg52_csyc4':
       cluster_name = 'rg52_hfb3' 

    if 'obj'in hostname and cluster_name == 'rg1_hfb3':
       cluster_name = 'rg36_hfb3'

    # NOTE(review): cluster_flag is assigned but never used -- confirm removal.
    cluster_flag=cluster_name+'-ceph'
    
    # Ratio of logical RBD volume to the root's raw capacity.
    root_ratio=get_rule_ratio(root_capacity)
    for key, ratio in root_ratio.items():
        root_ratio_gauge.labels(root_type=key,cluster_identity=cluster_name).set(round(ratio/root_capacity[key],2))
    
    dates_and_timestamps,dates_future=get_date_timestamp()
    for key, trend in root_utilization_trend.items():
        # Publish one forecast sample per future date.
        for trend_item,time_item in zip(trend,dates_future):
            root_utilization_trend_gauge.labels(root_type=key,cluster_identity=cluster_name,future_time=time_item[0]).set(trend_item)
        usage_capacity = current_root_utilization[key] * root_capacity[key]
        #need_capacity_week = trend[6] - usage_capacity
        #need_capacity_month = trend[29] - usage_capacity
        # Growth over the last week / month, used as the expected need.
        need_capacity_week = root_history_utilization_latest[key] - root_history_utilization_recent[key]
        need_capacity_month = root_history_utilization_latest[key] - root_history_utilization_oldest[key]
        print 'root:{},usage_capacity:{}'.format(key,usage_capacity)
        print 'root:{},need_capacity_week:{}'.format(key,need_capacity_week)
        print 'root:{},need_capacity_month:{}'.format(key,need_capacity_month)
        root_need_capacity_week_gauge.labels(root_type=key,cluster_identity=cluster_name).set(need_capacity_week)
        root_need_capacity_month_gauge.labels(root_type=key,cluster_identity=cluster_name).set(need_capacity_month)
          
    for key, capacity in root_capacity.items():
        root_capacity_gauge.labels(root_type=key,cluster_identity=cluster_name).set(capacity)

    for key, utilization in current_root_utilization.items():
        root_utilization_gauge.labels(root_type=key,cluster_identity=cluster_name).set(utilization)

        if utilization < 0.7 and  key in  root_capacity:
           # 'thread' is the remaining headroom (TB) before 70% utilization.
           thread = (0.7 * root_capacity[key]) - (utilization * root_capacity[key])
           # Writable days at the average daily growth of the last 30 days.
           avg_month_rate=round((root_history_utilization_latest[key] - root_history_utilization_oldest[key])/30,2)
           if avg_month_rate > 0:
              root_available_time_month_gauge.labels(root_type=key,cluster_identity=cluster_name).set(round(thread/avg_month_rate,2))
           else:
              # No growth (or shrinkage): effectively unlimited time.
              root_available_time_month_gauge.labels(root_type=key,cluster_identity=cluster_name).set(float('inf'))
           # Writable days at the average daily growth of the last 7 days.
           avg_week_rate=round((root_history_utilization_latest[key] - root_history_utilization_recent[key])/7,2)
           if avg_week_rate > 0:
              root_available_time_week_gauge.labels(root_type=key,cluster_identity=cluster_name).set(round(thread/avg_week_rate,2))
           else:
              root_available_time_week_gauge.labels(root_type=key,cluster_identity=cluster_name).set(float('inf'))
        else:
           # Already at/over 70% (or capacity unknown): report 0 writable time.
           root_available_time_week_gauge.labels(root_type=key,cluster_identity=cluster_name).set(0)
           root_available_time_month_gauge.labels(root_type=key,cluster_identity=cluster_name).set(0)
 
    for key, capacity in capacity_remain.items():
        # capacity_remain is single-replica; *3 converts back to raw TB.
        root_capacity_remain_gauge.labels(root_type=key,cluster_identity=cluster_name).set(round(capacity*3,2))
        #if capacity == 0:
        #   root_available_time_week_gauge.labels(root_type=key,cluster_identity=cluster_name).set(capacity)
        #   root_available_time_month_gauge.labels(root_type=key,cluster_identity=cluster_name).set(capacity)
        #if capacity != 0:
        #   avg_month_rate=round((root_history_utilization_latest[key] - root_history_utilization_oldest[key])/30,2)
        #   if avg_month_rate > 0:
        #      root_available_time_month_gauge.labels(root_type=key,cluster_identity=cluster_name).set(round(capacity*3/avg_month_rate,2))
        #   else:
        #      root_available_time_month_gauge.labels(root_type=key,cluster_identity=cluster_name).set(round(capacity*3/1,2))
        #      
        #   avg_week_rate=round((root_history_utilization_latest[key] - root_history_utilization_recent[key])/7,2)
        #   if avg_week_rate > 0:
        #      root_available_time_week_gauge.labels(root_type=key,cluster_identity=cluster_name).set(round(capacity*3/avg_week_rate,2))
        #   else:
        #      root_available_time_week_gauge.labels(root_type=key,cluster_identity=cluster_name).set(round(capacity*3/1,2))

if __name__ == "__main__":
    # Expose the metrics endpoint, then refresh every gauge once an hour.
    start_http_server(9911)
    print("Prometheus exporter running on port 9911")

    while True:
        print('执行时间：{}'.format(datetime.now()))
        (current_root_utilization,
         root_capacity,
         capacity_remain) = get_cluster_capacity()
        (root_utilization_trend,
         root_history_utilization_latest,
         root_history_utilization_oldest,
         root_history_utilization_recent) = get_root_utilization_trend()
        get_available_time(current_root_utilization, root_capacity, capacity_remain,
                           root_utilization_trend, root_history_utilization_latest,
                           root_history_utilization_oldest, root_history_utilization_recent)
        time.sleep(3600)


