import math
import re
import time

import requests

from motor.coordinator.core.instance_manager import InstanceManager
from motor.resources.instance import Instance, PDRole, Endpoint
from motor.utils.logger import get_logger

logger = get_logger(__name__)

class MetricsStat(object):
    """Process-wide cache and aggregator for per-instance parsed metrics.

    All state lives in the class-level ``stat_metrics`` dict:
      - ``update_time``: timestamp of the most recent cache update.
      - ``pods_metrics_cached``: instance_id -> {"update_time", "metrics"}
        for active instances (``metrics`` is a parsed metric list).
      - ``inactive_pods_metrics_aggregate``: NAME -> metric dict accumulating
        counter/histogram/summary values of evicted instances, so monotonic
        series do not drop when a pod goes away.

    NOTE(review): shared mutable class state with no locking — assumes the
    coordinator polls serially; confirm before using from multiple threads.
    """

    stat_metrics: dict = {
        "update_time": 0,
        "inactive_pods_metrics_aggregate": {},  # only counter/histogram/summary
        "pods_metrics_cached": {}
    }

    @staticmethod
    def check_metrics_format(metrics):
        """Return True if ``metrics`` matches the shape of the cached metrics.

        A non-empty list is always accepted while the cache is empty;
        otherwise every entry must have the same keys and value types as the
        corresponding entry of an arbitrary cached instance.
        """
        if not isinstance(metrics, list) or not metrics:
            return False
        cached = MetricsStat.stat_metrics["pods_metrics_cached"]
        if not cached:
            return True
        base_metrics = next(iter(cached.values()))["metrics"]
        if len(base_metrics) != len(metrics):
            return False
        for base, new in zip(base_metrics, metrics):
            if set(base.keys()) != set(new.keys()):
                return False
            for key in base:
                # Exact type() comparison on purpose: keeps e.g. int/float
                # value blocks from silently mixing across scrapes.
                if type(base[key]) is not type(new[key]):
                    return False
        return True

    @staticmethod
    def check_and_update_metrics_cached(instance_id, instance_metrics, timestamp):
        """Validate ``instance_metrics`` and store it in the cache.

        Returns True on success; False if the metrics do not match the
        cached format (cache is left untouched in that case).
        """
        if not MetricsStat.check_metrics_format(instance_metrics):
            return False
        MetricsStat.stat_metrics["pods_metrics_cached"][instance_id] = {
            "update_time": timestamp,
            "metrics": instance_metrics
        }
        MetricsStat.stat_metrics["update_time"] = timestamp
        return True

    @staticmethod
    def aggregate_instance_metric_by_sum(collects, single_metric, index):
        """Fill ``single_metric["VALUE"]`` with the sum across instances.

        Gauges are summed over the currently collected, still-cached
        instances only.  Counter/histogram/summary blocks are summed
        element-wise over every cached instance plus the accumulated totals
        of evicted instances, so monotonic series never decrease.
        """
        pods_metrics_cached = MetricsStat.stat_metrics["pods_metrics_cached"]
        inactive = MetricsStat.stat_metrics["inactive_pods_metrics_aggregate"]

        if single_metric["TYPE"] == "gauge":
            total = 0.0  # 'sum' shadowed the builtin in the original
            for instance_id in collects:
                # Only aggregate pods that are still active in the cache.
                pod = pods_metrics_cached.get(instance_id)
                if pod is not None:
                    total += pod["metrics"][index]["VALUE"][0]
            single_metric["VALUE"].append(total)
            return

        if not pods_metrics_cached:
            return  # nothing cached; leave VALUE empty instead of crashing
        first_pod = next(iter(pods_metrics_cached.values()))
        block_num = len(first_pod["metrics"][index]["VALUE"])
        sum_arr = [0.0] * block_num
        for pod_info in pods_metrics_cached.values():
            values = pod_info["metrics"][index]["VALUE"]
            for i in range(block_num):
                sum_arr[i] += values[i]
        # The original indexed the inactive aggregate whenever the dict was
        # non-empty and raised KeyError for names that were never evicted.
        retired = inactive.get(single_metric["NAME"])
        if retired is not None:
            for i in range(block_num):
                sum_arr[i] += retired["VALUE"][i]
        single_metric["VALUE"] = sum_arr

    @staticmethod
    def aggregate_metrics_all_instance(collects):
        """Aggregate cached per-instance metrics into one cluster-wide list.

        ``collects`` selects which instances contribute to gauge sums.
        Returns [] when the cache is empty (the original raised
        StopIteration on ``next(iter(...))``).
        """
        pods_metrics_cached = MetricsStat.stat_metrics["pods_metrics_cached"]
        if not pods_metrics_cached:
            return []
        base_metrics = next(iter(pods_metrics_cached.values()))["metrics"]

        aggregate = []
        for i, base in enumerate(base_metrics):
            single_metric = {
                "NAME": base["NAME"],
                "HELP": base["HELP"],
                "TYPE": base["TYPE"],
                "LABEL": base["LABEL"],
                "VALUE": [],
            }
            MetricsStat.aggregate_instance_metric_by_sum(collects, single_metric, i)
            aggregate.append(single_metric)
        return aggregate

    @staticmethod
    def clear_inactive_metrics(keep_alive_time, max_metrics_cache_pods):
        """Evict instances not updated within ``keep_alive_time`` seconds.

        Before removal, counter/histogram/summary values of the evicted
        instance are folded into ``inactive_pods_metrics_aggregate`` so
        cluster-wide monotonic series stay continuous.

        ``max_metrics_cache_pods`` is kept for interface compatibility but
        is currently unused (NOTE(review): no size-based eviction exists).
        """
        update_time = MetricsStat.stat_metrics["update_time"]
        pods_metrics_cached = MetricsStat.stat_metrics["pods_metrics_cached"]
        inactive = MetricsStat.stat_metrics["inactive_pods_metrics_aggregate"]

        remove_ids = []
        for instance_id, pod_info in pods_metrics_cached.items():
            if update_time - pod_info["update_time"] < keep_alive_time:
                continue
            remove_ids.append(instance_id)
            for single_metric in pod_info["metrics"]:
                if single_metric["TYPE"] == "gauge":
                    continue  # gauges are point-in-time; nothing to carry over
                name = single_metric["NAME"]
                retired = inactive.get(name)
                if retired is None:
                    # Store a copy of VALUE so later in-place accumulation
                    # cannot mutate a list something else still references.
                    inactive[name] = dict(single_metric,
                                          VALUE=list(single_metric["VALUE"]))
                else:
                    for i, v in enumerate(single_metric["VALUE"]):
                        retired["VALUE"][i] += v
        for instance_id in remove_ids:
            del pods_metrics_cached[instance_id]

    @staticmethod
    def get_value_str(value):
        """Render ``value`` per the Prometheus text exposition format.

        Bug fix: the original tested ``value == float("nan")``, which is
        always False (NaN never compares equal to itself), and spelled the
        result "Nan"; the exposition format requires "NaN".
        """
        if math.isnan(value):
            return "NaN"
        if value == float("inf"):
            return "+Inf"
        if value == float("-inf"):
            return "-Inf"
        return str(value)

    @staticmethod
    def get_serialize_metrics(aggregate):
        """Serialize an aggregated metric list to Prometheus text format."""
        lines = []
        for item in aggregate:
            lines.append("# HELP {} {}".format(item["NAME"], item["HELP"]))
            lines.append("# TYPE {} {}".format(item["NAME"], item["TYPE"]))
            for label, value in zip(item["LABEL"], item["VALUE"]):
                lines.append("{} {}".format(label, MetricsStat.get_value_str(value)))
        return "\n".join(lines)

    @staticmethod
    def get_serialize_instance_metrics():
        """Return {instance_id: metrics} for instances updated this round.

        Only instances whose cache timestamp equals the global
        ``update_time`` (i.e. refreshed by the latest scrape) are included.
        """
        update_time = MetricsStat.stat_metrics["update_time"]
        pods_metrics_cached = MetricsStat.stat_metrics["pods_metrics_cached"]

        return {
            ins_id: info["metrics"]
            for ins_id, info in pods_metrics_cached.items()
            if info["update_time"] == update_time
        }

class Metrics(object):
    """Collects /metrics text from every vLLM server pod, parses it into a
    structured metric list, and aggregates values per instance via
    ``MetricsStat``."""

    # Seconds to wait for a pod's /metrics endpoint before giving up.
    METRICS_REQUEST_TIMEOUT = 5

    def parse_metric_help(self, single_metric, line):
        """Parse a '# HELP <name> <text>' line into NAME/HELP.

        Accepts an empty help text ('# HELP <name>'), which is legal in the
        exposition format but rejected by the original (it required >= 4
        tokens).  Returns True on success, logging and returning False
        otherwise.
        """
        parts = line.split()
        if len(parts) >= 3 and parts[0] == "#" and parts[1] == "HELP":
            single_metric["NAME"] = parts[2]
            single_metric["HELP"] = " ".join(parts[3:])
            return True
        logger.error("[Metrics] Parse metric help failed.")
        return False

    def parse_metric_type(self, single_metric, line):
        """Parse a '# TYPE <name> <type>' line; type must be one of the
        four Prometheus metric types.  Returns True on success."""
        parts = line.split()
        if (len(parts) == 4 and parts[0] == "#" and parts[1] == "TYPE"
                and parts[3] in ["counter", "gauge", "histogram", "summary"]):
            single_metric["TYPE"] = parts[3]
            return True
        logger.error("[Metrics] Parse metric type failed.")
        return False

    def parse_metric_body_block(self, single_metric, line):
        """Parse one '<label> <value>' sample line into LABEL/VALUE.

        The original let ``float()`` raise ValueError on a malformed value
        token; this version reports failure instead.
        """
        parts = line.split()
        if len(parts) == 2:
            try:
                value = float(parts[1])
            except ValueError:
                logger.error("[Metrics] Parse metric body failed.")
                return False
            single_metric["LABEL"].append(parts[0])
            single_metric["VALUE"].append(value)
            return True
        logger.error("[Metrics] Parse metric body failed.")
        return False

    def parse_metric_text(self, metrics_str):
        """Parse Prometheus exposition text into a list of metric dicts.

        Returns [] on any malformed input.  Blank lines (e.g. from the
        customary trailing newline) are skipped; the original indexed
        ``lines[i][0]`` and raised IndexError on them.  Input truncated
        after a HELP line is rejected instead of producing a metric that
        lacks a TYPE key.
        """
        metric_array = []
        lines = [ln for ln in metrics_str.split("\n") if ln.strip()]
        i = 0
        while i < len(lines):
            single_metric = {}
            if not self.parse_metric_help(single_metric, lines[i]):
                return []
            i += 1
            if i >= len(lines) or not self.parse_metric_type(single_metric, lines[i]):
                return []
            i += 1
            single_metric["LABEL"] = []
            single_metric["VALUE"] = []
            while i < len(lines) and not lines[i].startswith("#"):
                if not self.parse_metric_body_block(single_metric, lines[i]):
                    return []
                i += 1
            metric_array.append(single_metric)
        return metric_array

    def parse_metrics(self, collects):
        """Parse the raw metrics text of every pod in ``collects`` in place.

        Each pod dict gains a "metrics" key; every pod must yield the same
        number of metrics.  Returns 0 on success, -1 on any validation or
        parse failure (C-style status codes, matching existing callers).
        """
        if not isinstance(collects, dict) or not collects:
            logger.error("[Metrics] Invalid pods metric JSON file.")
            return -1

        metric_count = 0
        for instance_id, instance in collects.items():
            if not isinstance(instance, dict) or not instance or "endpoints" not in instance:
                logger.error("[Metrics] Invalid pods metric JSON file.")
                return -1
            for pod_info in instance["endpoints"].values():
                if "metrics_str" not in pod_info:
                    logger.error("[Metrics] Invalid 'metrics_str' in pod metrics JSON file.")
                    return -1
                parsed_metric = self.parse_metric_text(pod_info["metrics_str"])
                if metric_count == 0:
                    metric_count = len(parsed_metric)
                elif metric_count != len(parsed_metric):
                    # Metric-count mismatch across pods: treat as a parse
                    # failure (original assigned {}; keep list type here).
                    parsed_metric = []
                if not parsed_metric:
                    logger.error("[Metrics] Parse metric text failed.")
                    return -1
                pod_info["metrics"] = parsed_metric
        return 0

    def aggregate_metric_by_sum(self, pods, single_metric, index):
        """Sum metric ``index`` across ``pods`` into ``single_metric["VALUE"]``.

        Gauges sum only the first value block; counter/histogram/summary
        metrics sum every value block element-wise.
        """
        if single_metric["TYPE"] == "gauge":
            total = 0.0  # 'sum' shadowed the builtin in the original
            for pod_info in pods.values():
                total += pod_info["metrics"][index]["VALUE"][0]
            single_metric["VALUE"].append(total)
        else:
            first_pod = next(iter(pods.values()))
            block_num = len(first_pod["metrics"][index]["VALUE"])
            sum_arr = [0.0] * block_num
            for pod_info in pods.values():
                values = pod_info["metrics"][index]["VALUE"]
                for i in range(block_num):
                    sum_arr[i] += values[i]
            single_metric["VALUE"] = sum_arr

    def aggregate_metrics_by_instance(self, collects):
        """Sum pod-level metrics into one metric list per instance.

        Replaces each instance's "endpoints" entry with an aggregated
        "metrics" list and pushes the result into the ``MetricsStat`` cache.
        Returns True on success, False if the cache rejects the data.
        """
        timestamp = int(time.time())
        for instance_id, instance in collects.items():
            pods = instance["endpoints"]
            if not pods:
                continue

            base_metrics = next(iter(pods.values()))["metrics"]
            aggregate = []
            for i, base in enumerate(base_metrics):
                single_metric = {
                    "NAME": base["NAME"],
                    "HELP": base["HELP"],
                    "TYPE": base["TYPE"],
                    "LABEL": base["LABEL"],
                    "VALUE": [],
                }
                self.aggregate_metric_by_sum(pods, single_metric, i)
                aggregate.append(single_metric)
            instance["metrics"] = aggregate
            del instance["endpoints"]

            if not MetricsStat.check_and_update_metrics_cached(instance_id, aggregate, timestamp):
                logger.error("[Metrics] Update metrics state failed.")
                return False

        return True

    def aggregate_metrics(self, collects):
        """Backward-compatible wrapper; now propagates the success flag
        instead of silently discarding it."""
        return self.aggregate_metrics_by_instance(collects)

    def get_server_metrics_single(self, ip, port):
        """Fetch the raw metrics string from one pod; '' on any failure.

        Expects the pod to answer with JSON of the form
        {"metrics": "<prometheus text>"}.
        """
        url = f"http://{ip}:{port}/metrics"
        try:
            # A timeout keeps one unresponsive pod from hanging the whole
            # collection loop (the original request could block forever).
            response = requests.get(url, timeout=self.METRICS_REQUEST_TIMEOUT)
            if response.status_code == 200:
                # json() / key access may also fail on a malformed body;
                # the original let ValueError/KeyError escape the handler.
                return response.json()["metrics"]
            logger.warning(f"[Metrics] request metrics failed: code = {response.status_code}")
        except (requests.exceptions.RequestException, ValueError, KeyError) as e:
            logger.warning(f"[Metrics] request metrics failed: {e}")
        return ""

    def get_server_metrics(self):
        """Collect raw metrics text from every available P/D/U instance.

        Returns {instance_id: {"endpoints": {endpoint_id: {"metrics_str": str}}}}.
        Endpoints that fail to answer are skipped.
        """
        manager = InstanceManager()
        ins_all = {}
        for role in (PDRole.ROLE_P, PDRole.ROLE_D, PDRole.ROLE_U):
            ins_all.update(manager.get_available_instances(role))

        collects = {}
        for ins_info in ins_all.values():
            collects[ins_info.id] = {"endpoints": {}}
            # Instance.endpoints is a two-level mapping; iterate the inner
            # endpoint infos (assumes {group: {id: Endpoint}} — matches the
            # original traversal, confirm against Instance definition).
            for ens_info in ins_info.endpoints.values():
                for en_info in ens_info.values():
                    metrics_str = self.get_server_metrics_single(en_info.ip, en_info.port)
                    if not metrics_str:
                        continue
                    collects[ins_info.id]["endpoints"][en_info.id] = {
                        "metrics_str": metrics_str
                    }
        return collects

    def get_and_aggregate_metrics(self):
        """Collect, parse, and aggregate metrics from all instances.

        Returns (serialized_cluster_metrics, per_instance_metrics), or
        (None, None) when collection, parsing, or aggregation fails.  The
        original ignored aggregation failures.
        """
        collects = self.get_server_metrics()
        if self.parse_metrics(collects):
            logger.error("[Metrics] Parse vllm server metrics failed.")
            return None, None
        if not self.aggregate_metrics_by_instance(collects):
            logger.error("[Metrics] Aggregate vllm server metrics failed.")
            return None, None
        aggregate = MetricsStat.aggregate_metrics_all_instance(collects)
        return MetricsStat.get_serialize_metrics(aggregate), MetricsStat.get_serialize_instance_metrics()

