import sys
import subprocess
import math
import time
import statistics
from openpyxl import Workbook, load_workbook
import requests

#************************************************************************************ Prometheus Query Function **********************************************************************************************

def query_prometheus(query, timeout=10):
    """
    Execute an instant PromQL query against the Prometheus HTTP API.

    Parameters
    ----------
    query : str
        PromQL expression to evaluate.
    timeout : float, optional
        Seconds to wait for the HTTP response (default 10). The original
        call had no timeout, so a hung Prometheus could block the whole
        monitoring loop forever.

    Returns
    -------
    dict or None
        Parsed JSON response body on success, None on any request error
        (connection failure, timeout, or non-2xx status).
    """
    url = "http://10.10.100.6:30090/api/v1/query"  # replace with your Prometheus address
    params = {'query': query}

    try:
        # timeout bounds both the connect and the read phase of the request
        response = requests.get(url, params=params, timeout=timeout)
        response.raise_for_status()  # raises HTTPError for non-2xx status codes
        return response.json()  # parsed JSON payload
    except requests.exceptions.RequestException as e:
        # Timeout, ConnectionError and HTTPError all derive from RequestException
        print(f"Error querying Prometheus: {e}")
        return None


#************************************************************************************ Monitor Component **********************************************************************************************

def Monitor():
    """
    Monitor phase of the MAPE loop for the `frontend` microservice.

    Polls kubectl and Prometheus every `query_interval` seconds until the
    number of replicas reporting CPU data (Operational_Replicas) matches the
    Deployment's available replicas, then collects the average per-replica
    CPU usage, the desired replica count and the per-replica CPU request,
    and returns them together with fixed SLA parameters.

    Returns
    -------
    tuple
        (microservice_name, Desired_Replicas, current_replicas, current_cpu,
         target_cpu, cpu_request, max_replica, min_replica)
    """
    microservice_name = "frontend"

    # Monitor the current replica count and the CPU usage of each replica.
    # Seeded unequal (4 != 0) so the while loop below runs at least once,
    # which guarantees cpu_usage_result is assigned before it is used later.
    Available_Replicas = 4
    Operational_Replicas = 0
    query_interval = 2  # poll every 2 seconds
    while Available_Replicas != Operational_Replicas:
        # Query the available replica count via kubectl
        print("Checking Available Replicas...")
        try:
            Available_Replicas = subprocess.check_output(
                "kubectl get deployment frontend -n boutique --output=jsonpath='{.status.availableReplicas}'".split()
            ).decode('utf-8').strip("'")  # no shell, so the jsonpath quotes come back literally; strip them
            Available_Replicas = int(Available_Replicas)
            print(f"Available Replicas: {Available_Replicas}")
        except subprocess.CalledProcessError as e:
            print(f"Error fetching available replicas: {e}")
            Available_Replicas = 0

        # Query per-replica CPU usage from Prometheus
        print("Checking CPU usage per replica...")
        cpu_query = 'max by(pod) (rate(container_cpu_usage_seconds_total{namespace="boutique", pod=~"frontend-.*", container="frontend"}[1m]))'
        cpu_usage_result = query_prometheus(cpu_query)

        if cpu_usage_result and cpu_usage_result['data']['result']:
            print(f"CPU Usage Raw Result: {cpu_usage_result}")
            cpu_usage_result = cpu_usage_result['data']['result']
            # One result series per pod, so the series count is the number
            # of replicas actually reporting CPU data
            Operational_Replicas = len(cpu_usage_result)
            print(f"Operational Replicas (based on CPU usage data): {Operational_Replicas}")
        else:
            print("Error: No CPU usage data found or result format unexpected")
            cpu_usage_result = []
            # Force the loop condition to become equal so we exit rather
            # than spin forever without CPU data
            Operational_Replicas = Available_Replicas

        current_replicas = Available_Replicas
        print(f"Current Replicas: {current_replicas}")

        # Pause to throttle the polling rate
        time.sleep(query_interval)

    # Compute the average CPU usage across the replicas seen above
    cpu_add = []
    for result in cpu_usage_result:
        try:
            # Prometheus instant-vector entries are {'value': [timestamp, "value"]}
            cpu_usage_value = float(result.get('value', [None, 0])[1])
            cpu_add.append(cpu_usage_value)
            print(f"Replica CPU usage: {cpu_usage_value}")
        except (TypeError, IndexError, ValueError):
            print(f"Error: Invalid CPU usage data format for result: {result}")

    if cpu_add:
        # NOTE(review): rate(container_cpu_usage_seconds_total) yields cores,
        # but this value is labelled "(m)" (millicores) and later divided by a
        # millicore cpu_request — confirm the intended unit.
        current_cpu = math.ceil(statistics.mean(cpu_add))
        print(f"Average CPU Usage (m): {current_cpu}")
    else:
        current_cpu = 0  # default to 0 when no CPU data is available
        print("No CPU usage data found, setting current CPU to 0.")

    # Fetch the desired replica count from the Deployment spec
    try:
        print("Fetching desired replica count...")
        Desired_Replicas = subprocess.check_output(
            "kubectl get deployment frontend -n boutique --request-timeout=60s -o=jsonpath='{.spec.replicas}'".split()
        ).decode('utf-8').strip("'")
        Desired_Replicas = int(Desired_Replicas)
        print(f"Desired Replicas: {Desired_Replicas}")
    except subprocess.CalledProcessError as e:
        print(f"Error fetching desired replicas: {e}")
        Desired_Replicas = 0

    # Fetch the CPU request configured for each replica
    try:
        print("Fetching CPU request for each replica...")
        cpu_request = subprocess.check_output(
            "kubectl get deployment frontend -n boutique --request-timeout=60s -o=jsonpath='{.spec.template.spec.containers[0].resources.requests.cpu}'".split()
        ).decode('utf-8').strip("'")
        cpu_request = int(cpu_request[:-1])  # drop the trailing 'm' (millicore suffix)
        print(f"CPU Request per Replica: {cpu_request}m")
    except subprocess.CalledProcessError as e:
        print(f"Error fetching CPU request: {e}")
        cpu_request = 0

    # SLA parameters: CPU utilisation threshold and replica bounds
    target_cpu = 50  # 50% threshold used in the experiment scenario
    max_replica = 5
    min_replica = 1

    print("Returning monitored data...\n")

    return microservice_name, Desired_Replicas, current_replicas, current_cpu, target_cpu, cpu_request, max_replica, min_replica


#***************************************************************************** Analyze Component *****************************************************************************************

def Analyse(Desired_Replicas, current_replicas, current_cpu, target_cpu, cpu_request, min_replica):
    """
    Analyse phase of the MAPE loop: derive a scaling decision from the
    monitored state using the standard HPA threshold formula
    desired = ceil(current_replicas * utilisation / target).

    Parameters
    ----------
    Desired_Replicas : int
        Replica count currently requested in the Deployment spec; used to
        avoid re-issuing an identical scaling decision.
    current_replicas : int
        Replicas actually available right now.
    current_cpu : int
        Average CPU usage per replica (same unit as cpu_request).
    target_cpu : int
        SLA CPU-utilisation threshold, in percent.
    cpu_request : int
        CPU requested per replica; 0 means unknown.
    min_replica : int
        Lower bound on the replica count.

    Returns
    -------
    tuple
        (cpu_percentage, scaling_action, desired_replica) where
        scaling_action is "scale up", "scale down" or "no scale".
    """
    if cpu_request > 0:
        cpu_percentage = (current_cpu / cpu_request) * 100  # utilisation as % of the request
    else:
        cpu_percentage = 0  # unknown request: report zero utilisation

    # Previous decision, kept to suppress duplicate scaling actions
    previous_desired_replicas = Desired_Replicas

    # Threshold-based scaling rule. Use exact float division: the previous
    # int() truncation of cpu_percentage and target_cpu lost precision and
    # could miss a required scale-up (e.g. 50.5% usage vs a 50% target
    # truncated to a ratio of exactly 1.0).
    if target_cpu > 0:
        desired_replica = math.ceil(int(current_replicas) * (cpu_percentage / target_cpu))
    else:
        desired_replica = int(current_replicas)  # no valid target: keep the current size

    # Scale up
    if (previous_desired_replicas != desired_replica) and (desired_replica > current_replicas) and (desired_replica >= min_replica):
        scaling_action = "scale up"

    # Scale down
    elif (previous_desired_replicas != desired_replica) and (desired_replica < current_replicas) and (desired_replica >= min_replica):
        scaling_action = "scale down"

    # No scaling
    else:
        scaling_action = "no scale"

    return cpu_percentage, scaling_action, desired_replica


#********************************************************************** Microservice_Main_Function ********************************************************************************

def frontend(Test_Time, folder_name):
    """
    Run one Monitor → Analyse iteration for the `frontend` microservice and
    append the result to the Knowledge Base spreadsheet.

    Parameters
    ----------
    Test_Time : int
        Elapsed test time (seconds) recorded for this iteration.
    folder_name : str
        Knowledge Base sub-folder holding this experiment's workbook.

    Returns
    -------
    list
        [microservice_name, scaling_action, desired_replica,
         current_replicas, cpu_request, max_replica]
    """
    microservice_name, P_Desired_Replicas, current_replicas, current_cpu, target_cpu, cpu_request, max_replica, min_replica = Monitor()
    cpu_percentage, scaling_action, desired_replica = Analyse(P_Desired_Replicas, current_replicas, current_cpu, target_cpu, cpu_request, min_replica)

    frontend_data = [microservice_name, scaling_action, desired_replica, current_replicas, cpu_request, max_replica]

    #********************************************************************** Storing Data for each iteration in the Knowledge Base ********************************************************************************

    # Single path variable: the load and save paths were previously two
    # duplicated literals that could silently drift apart.
    workbook_path = f'/home/micro/wmxstudy/Smart_HPA/Smart_HPA_Codebase/Knowledge_Base/{folder_name}/frontend.xlsx'
    workbook = load_workbook(workbook_path)
    sheet = workbook.active

    headers = ["Test Time (sec)", "CPU Usage Percentage", "Current Replicas",
               "Desired Replicas", "Max. Replicas", "Scaling Action"]
    for column, title in enumerate(headers, start=1):
        sheet.cell(row=1, column=column, value=title)

    # One shared row index: per-column lengths could desync if a previous
    # run left a ragged sheet. Fix: columns E/F had headers but were never
    # populated before; now every header column receives data.
    next_row = len(sheet['A']) + 1
    row_values = [Test_Time, cpu_percentage, current_replicas,
                  desired_replica, max_replica, scaling_action]
    for column, value in enumerate(row_values, start=1):
        sheet.cell(row=next_row, column=column, value=value)

    workbook.save(workbook_path)

    return frontend_data


if __name__ == "__main__":
    # Run a single monitor/analyse/store iteration for the frontend service.
    test_duration = 60   # test time (seconds) recorded in the spreadsheet
    scenario_folder = "50"  # Knowledge Base sub-folder for this experiment
    frontend(test_duration, scenario_folder)
