from lib.metric_type.metric_type import Metric, MetricReader,\
    Level, InsAggregationType
from lib.metric_exception import MetricCollectException, \
    MetricSettingsException
from conf.settings import NODE_LABEL, POD_LABEL,\
    CLUSTER_LABEL, POD_METRIC_TAG


class CapacityMetric(Metric):
    """Capacity-type metric: a utilization percentage collected from Prometheus.

    Supports metrics exposed either directly as a utilization value, or as a
    (usage, total) / (available, total) pair that must be combined locally.
    """

    def __init__(self, metric_reader: MetricReader, metric_settings,
                 level: Level):
        """Delegate construction to the Metric base class.

        :param metric_reader: reader used to query the metric backend
        :param metric_settings: settings object (schema defined by base class)
        :param level: aggregation level (Node / Pod / Cluster)
        """
        super().__init__(metric_reader, metric_settings, level)

    def _initalize_score_settings(self, score_setting):
        # NOTE(review): method name keeps the base class's "initalize" spelling
        # (typo) — renaming here would break the override.
        return super()._initalize_score_settings(score_setting)

    def _usage_total_process(self, is_avaliable: bool = False):
        """Compute utilization (%) from a usage/total or available/total pair.

        Runs two queries against the same metric name, distinguished only by
        a tag value (related_value[0] = usage/available, related_value[1] =
        total/limit), then combines them per level.

        :param is_avaliable: if True, the first series is "available", so the
            result is converted via usage = 100 - available/total*100.
            (Parameter name kept as-is for keyword-call compatibility.)
        :raises MetricCollectException: when the two result sets are empty,
            mismatched in length, or processing fails.
        :return: utilization percentage (float)
        """
        metric_name = self.settings.collect.metric_name
        node_tag = self.settings.collect.node_tag_name
        val_0 = self.settings.collect.related_value[0]
        val_1 = self.settings.collect.related_value[1]

        # Both queries share the same identity labels; only the tag value
        # (usage vs. limit) differs.
        if self.level == Level.Node:
            base_args = {NODE_LABEL: self.name[Level.Node]}
            tag = node_tag
        elif self.level == Level.Pod:
            base_args = {
                NODE_LABEL: self.name[Level.Node],
                POD_LABEL: self.name[Level.Pod],
            }
            tag = POD_METRIC_TAG
        else:  # Level.Cluster
            base_args = {CLUSTER_LABEL: self.name[Level.Cluster]}
            tag = node_tag
        usage_query_args = {**base_args, tag: val_0}
        limit_query_args = {**base_args, tag: val_1}

        usage_res = self._get_custom_metric(metric_name, **usage_query_args)
        limit_res = self._get_custom_metric(metric_name, **limit_query_args)
        # The two result sets must pair up one-to-one per instance/series.
        if not usage_res.data or not limit_res.data or \
                len(usage_res.data) != len(limit_res.data):
            raise MetricCollectException(
                f"Collect {metric_name},"
                f"Level: {self.level} from Prometheus failed!"
            )

        try:
            all_data_result = []
            usages = 0.0
            limits = 0.0
            for usage_series, limit_series in zip(usage_res.data,
                                                  limit_res.data):
                # Peak value over the collection window, per series.
                usage_value = max(
                    float(item[1])
                    for item in usage_series.to_dict()["values"]
                )
                limit_value = max(
                    float(item[1])
                    for item in limit_series.to_dict()["values"]
                )

                if self.level == Level.Cluster:
                    usages += usage_value
                    limits += limit_value
                else:
                    # Container-level: each container's utilization is
                    # usage / limit * 100.
                    all_data_result.append(usage_value / limit_value * 100)

            if self.level == Level.Cluster:
                # Cluster-level: cluster_util = sum(node usage) / sum(node limit).
                final_result = usages / limits * 100
            else:
                # Node-level: all_data_result holds a single element (node util).
                # Pod-level: pod_util = max(container utils), because some
                # containers in a pod may have a limit while others do not.
                final_result = self._aggregation(
                    all_data_result, InsAggregationType.Max)

        except Exception as exc:
            # Include context in the raised error (the original raised bare).
            raise MetricCollectException(
                f"Process {metric_name},"
                f"Level: {self.level} data failed!"
            ) from exc

        if is_avaliable:
            # usage = total - available, expressed in percent.
            final_result = 100 - final_result

        return final_result

    def _collect_process_metric(self):
        """Dispatch on the configured standard_type to collect the metric.

        We offer three standard methods to process a capacity metric:
        standard_type = 1: the metric is already a utilization value
        standard_type = 2: the metric is exposed as total && used
        standard_type = 3: the metric is exposed as total && available

        :raises MetricSettingsException: on an unknown standard_type
        """
        standard_type = self.settings.collect.standard_type

        if standard_type == 1:
            return super()._default_single_gauge(
                ins_agg_type=InsAggregationType.Sum
            )
        elif standard_type == 2:
            # Metric exposed as usage && total.
            return self._usage_total_process(is_avaliable=False)
        elif standard_type == 3:
            # Metric exposed as available && total.
            return self._usage_total_process(is_avaliable=True)
        else:
            raise MetricSettingsException(
                f'illegal standard type:{standard_type}'
            )

    def metric_score(self, pod: str, node: str,
                     cluster: str, last_end_time: float) -> (float, float):
        """Delegate scoring to the base class (returns (score, value))."""
        return super().metric_score(pod, node, cluster, last_end_time)
