import utils
import math
from scaleactionfactory import ScaleActionFactory
from configloader import ConfigLoader
from component.monitor import Monitoring
from component.clustercommunication import ClusterCommunication

# Module-level singletons shared by every function below:
# - cluster: API client used to query pods and scale deployments.
# - cl: configuration loader; loadConfig('config.json') populates the
#   attributes read later (cl.weights, cl.process_rates).
cluster = ClusterCommunication.get_instance()
cl = ConfigLoader.get_instance()
cl.loadConfig('config.json')

class QueueModel:
    """Entry point for queueing-model driven deployment autoscaling."""

    @staticmethod
    def apply_scale_deployments(namespace="default"):
        """Recompute replica counts for *namespace* and apply them.

        Reads current workload rates, derives the desired replica plan,
        shrinks it if the cluster would be overloaded, and executes the
        resulting scale actions. Returns 0 on success, 1 on scale failure.
        """
        workload = Monitoring.get_workload_rate(namespace)
        desired = build_cnext_seq(workload)
        capacity = cluster.plan_cpu_resources_by_weights(cl.weights)
        if cluster.will_over_load(desired):
            print('before adjust:', desired)
            desired = adjust_cnext_when_overload(
                cluster.get_pods_request_cpu_resources_map(), capacity, desired)
            print('after adjust:', desired)
        scale_actions = ScaleActionFactory.createAction(namespace, desired)
        return run_deployment_scale_action(cluster, scale_actions)

def service_rate(name):
    """Return the configured per-container processing rate for service *name*."""
    rates = cl.process_rates
    return rates[name]

def calculate_P0(rate, container_num, process_rate):
    """Return P0, the steady-state empty-system probability of an M/M/c queue.

    Parameters:
        rate: arrival rate (lambda).
        container_num: number of servers c (replicas).
        process_rate: per-server service rate (mu).

    Requires rate < container_num * process_rate (utilization < 1),
    otherwise the busy-term denominator is non-positive.
    """
    r = rate / process_rate  # offered load in Erlangs
    c = container_num
    utilization = rate / (c * process_rate)
    # States with all c servers busy: r^c / (c! * (1 - rho)), the summed
    # geometric tail of the birth-death chain.
    busy_term = pow(r, c) / (math.factorial(c) * (1 - utilization))
    # States with n < c customers: sum of r^n / n!.
    idle_term = sum(pow(r, n) / math.factorial(n) for n in range(c))
    return 1 / (busy_term + idle_term)

def find_fix_container_num(rate, process_rate, t):
    """Return the smallest server count c whose M/M/c queue meets the SLO.

    Searches upward from c = 1 for the first c where the probability of
    at most L = floor(t*c*mu + c - 1) customers in the system exceeds 0.95
    (i.e. the response-time bound *t* is met with 95% confidence).

    Parameters:
        rate: arrival rate (lambda).
        process_rate: per-server service rate (mu).
        t: target response-time bound used to derive the state cutoff L.
    """
    c = 0
    r = rate / process_rate
    while True:
        c = c + 1
        if r / c >= 1:
            # Queue is unstable at this size; steady state does not exist.
            continue
        P0 = calculate_P0(rate, c, process_rate)
        # BUG FIX: P must be recomputed from scratch for every candidate c.
        # The original accumulated P across outer iterations, inflating the
        # probability and returning an undersized replica count.
        P = 0.0
        L = int(math.floor(t * c * process_rate + c - 1))
        for i in range(L + 1):
            if i >= c:
                # P(N = i) for i >= c: r^i * P0 / (c^(i-c) * c!)
                P += pow(r, i) * P0 / (pow(c, i - c) * math.factorial(c))
            else:
                # P(N = i) for i < c: r^i * P0 / i!
                P += pow(r, i) * P0 / math.factorial(i)
        if P > 0.95:
            return c

def run_deployment_scale_action(cluster, action=None):
    """Execute a list of deployment scale actions against *cluster*.

    Parameters:
        cluster: object exposing scale_deployment_replicas(deployment_name,
            ns_name, replicas) returning 1 on failure.
        action: list of dicts with keys 'deployment_name', 'namespace',
            'replicas'. Defaults to no actions.

    Returns 0 when every action succeeds; 1 (stopping early) as soon as
    one scale call reports failure.
    """
    # None-sentinel instead of a mutable default argument.
    if action is None:
        action = []
    for a in action:
        dev = a['deployment_name']
        ns = a['namespace']
        replicas = a['replicas']
        ret = cluster.scale_deployment_replicas(deployment_name=dev, ns_name=ns, replicas=replicas)
        print(dev, " in namespace ", ns, " scale to ", replicas, " replicas ")
        if ret == 1:
            print("unexpected scale error")
            return 1
    return 0

def build_cnext_seq(rates):
    """Translate workload rates into desired replica counts per deployment.

    Parameters:
        rates: iterable of {deployment_name: arrival_rate} mappings.

    Returns a list of single-entry {deployment_name: replica_count} dicts,
    one per (mapping, deployment) pair, in input order.
    """
    cnext = []
    print("workload rate seq : ", rates)
    for rate_map in rates:
        for dev in rate_map:
            # t=0.4 is the response-time bound fed to the M/M/c sizing model.
            count = find_fix_container_num(rate=rate_map[dev],
                                           process_rate=service_rate(dev),
                                           t=0.4)
            # Build the entry directly instead of shadowing builtin `dict`.
            cnext.append({dev: count})
    return cnext

def adjust_cnext_when_overload(cpu_map, limits=None, cnext=None):
    """Shrink the desired replica plan so total CPU demand fits the budget.

    Parameters:
        cpu_map: {service: CPU request per replica}.
        limits: {service: CPU budget planned for that service}.
        cnext: list of single-entry {service: replica_count} dicts
            (the desired plan, as produced by build_cnext_seq).

    Returns the adjusted plan in the same list-of-dicts shape. Services
    whose full demand fits their limit keep it; the rest split the leftover
    budget proportionally to their configured weight.
    """
    # None-sentinels instead of mutable default arguments.
    if limits is None:
        limits = {}
    if cnext is None:
        cnext = []

    # Snapshot configured weights and their total.
    weight_map = {}
    total_weight = 0
    for k in cl.weights:
        v = cl.weights[k]
        weight_map[k] = v
        total_weight += v

    # Convert replica counts into absolute CPU demand per service.
    container_resources = utils.list2dict(cnext)
    for k in container_resources:
        container_resources[k] = container_resources[k] * cpu_map[k]

    # Total CPU budget available across all services.
    remain = 0
    for k in limits:
        remain += limits[k]

    # Process cheapest demands first so small services are satisfied in
    # full; whatever they leave unused enlarges the pool shared by the rest.
    sort_container_resource = sorted(container_resources.items(), key=lambda x: x[1])
    process_dict = {}
    for k, v in sort_container_resource:
        if v <= limits[k]:
            # Fits within its own limit: grant the full request and remove
            # this service from the weighted sharing pool.
            remain -= v
            total_weight -= weight_map[k]
            process_dict[k] = int(v / cpu_map[k])
        else:
            # NOTE(review): if total_weight has been driven to 0 by the
            # branch above, this divides by zero — confirm the weights and
            # limits guarantee this branch only runs while total_weight > 0.
            process_dict[k] = int(weight_map[k] * remain / (cpu_map[k] * total_weight))

    return utils.dict2list(process_dict)