import time
from common import utils
from eval_lib.common.ssh import SSHPool
from eval_lib.databases.influx.influx_db import InfulxDB
from common.const import TELEGRAF_TABLE_NAME_IN_INFLUX
from common.utils import ssh_pool_default
from eval_lib.common.logger import get_logger

log = get_logger()


def get_traffic_tool_data(
    vm_ip, ssh_pool: SSHPool=ssh_pool_default
):
    """Read traffic_result.log on the remote VM and parse latency/RPS metrics.

    Expects the log to contain at least three lines whose first
    whitespace-separated field is, in order: p50 latency, p90 latency and
    requests-per-second.

    :param vm_ip: IP of the VM holding the traffic tool's result log.
    :param ssh_pool: pool used to obtain an SSH client for ``vm_ip``.
    :return: dict with keys ``server.latency_p50``, ``server.latency_p90``
             and ``server.rps`` (empty dict when the log file is empty).
    :raises AssertionError: when stderr is non-empty or the log is malformed.
    """
    result = {}
    ssh_client = ssh_pool.get(vm_ip)
    cmd = "cat traffic_result.log "
    _, stdout, stderr = ssh_client.exec_command(cmd)
    logs = stdout.readlines()
    try:
        if logs:
            result["server.latency_p50"] = format_latency(
                logs[0].split()[0], "ms"
            )
            result["server.latency_p90"] = format_latency(
                logs[1].split()[0], "ms"
            )
            result["server.rps"] = logs[2].split()[0]
        err = stderr.readlines()
        if err:
            log.error(f"cat log err :{err}")
            # Raise explicitly instead of `assert False`: asserts are stripped
            # under `python -O`, and an AssertionError raised here must not be
            # re-captured by the broad handler below (which would log the
            # misleading "no found log" message a second time).
            raise AssertionError(f"cat traffic_result.log failed: {err}")
    except AssertionError:
        raise
    except Exception as e:
        # Covers missing/short log (IndexError) and bad latency values.
        log.error(f"no found log :{e}")
        raise AssertionError(f"traffic_result.log missing or malformed: {e}")
    return result

def reload_telegraf_conf(vm_ip, ssh_pool: SSHPool=ssh_pool_default):
    """Install the uploaded telegraf.conf on the VM and restart telegraf.

    :param vm_ip: IP of the VM whose telegraf service is reloaded.
    :param ssh_pool: pool used to obtain an SSH client for ``vm_ip``.
    :return: True when systemctl reports the service active, else False.
    """
    client = ssh_pool.get(vm_ip)
    reload_cmd = (
        "sudo mv telegraf.conf /etc/telegraf/telegraf.conf"
        " && sudo systemctl restart telegraf"
        " && sudo systemctl status telegraf"
    )
    _, stdout, stderr = client.exec_command(reload_cmd)
    status_text = stdout.read().decode()
    # systemctl status prints this marker only for a running unit
    if "Active: active (running)" not in status_text:
        log.error(
            f"telegraf restart failed, err: {stderr.read().decode()}"
        )
        return False
    log.info(f"telegraf restarted successfully and is running")
    return True
    
def get_total_memory_Kbyte(vm_ip, ssh_pool: SSHPool=ssh_pool_default):
    """Return the VM's total memory in kilobytes, as reported by ``free``.

    :param vm_ip: IP of the VM to query.
    :param ssh_pool: pool used to obtain an SSH client for ``vm_ip``.
    :return: total memory in KB as an int.
    :raises ValueError: when no value could be read from the remote host
        (previously this fell through to ``int("")`` and crashed with an
        opaque "invalid literal for int()" message).
    """
    ssh_client = ssh_pool.get(vm_ip)
    _, stdout, stderr = ssh_client.exec_command("free |awk '/Mem/{print $2}'")
    total_mem = stdout.read().decode().strip()
    err = stderr.read().decode()
    if err:
        log.error(f"get total memory Byte err: {err}")
    if not total_mem:
        # Same exception type int("") would raise, but with a useful message.
        raise ValueError(f"could not read total memory from {vm_ip}: {err!r}")
    return int(total_mem)

def get_process_usage_by_telegraf(vm_ip, process_name_list, start_time, end_time):
    '''
    Fetch each process's peak CPU/memory usage over a time window from the
    telegraf procstat data stored in InfluxDB on the VM.

    Key naming: a process whose name contains "-agent" is reported under the
    key "agent"; otherwise "-" and "." in the name are replaced with "_".

    return {'{key}.max_cpu': '10.00%', '{key}.max_mem': '10.00Mb'}
    '''
    influx_db = InfulxDB(
        host=vm_ip,
        database=TELEGRAF_TABLE_NAME_IN_INFLUX,
    )
    procstat_data = {}
    # total memory in KB; used below to convert the memory percentage to MB
    total_memory = get_total_memory_Kbyte(vm_ip)
    for process_name in process_name_list:
        procstat = influx_db.get_procstat_result(process_name, start_time, end_time)
        # convert memory percentage to MB: pct * total_KB / 100 / 1024
        mem_Mbyte = float(procstat["max_mem_usage"]) * total_memory / 100 / 1024
        if "-agent" in process_name:
            key = "agent"
        else:
            key = process_name.replace("-", "_").replace(".", "_")
        log.info(f"add key: {key}")
        procstat_data[f"{key}.max_cpu"] = "{:.2f}%".format(procstat["max_cpu_usage"]) 
        procstat_data[f"{key}.max_mem"] = "{:.2f}Mb".format(mem_Mbyte)
    return procstat_data

def install_istio(
    vm_ip, ssh_pool: SSHPool=ssh_pool_default
):
    """Install k8s plus the istio demo profile on the VM, then deploy the
    bookinfo sample with sidecar injection enabled on the default namespace.

    :param vm_ip: IP of the target VM.
    :param ssh_pool: pool used to obtain an SSH client for ``vm_ip``.
    :raises AssertionError: if istio pods never reach Running within 30min,
        or if the bookinfo deploy step writes to stderr.
    """
    utils.install_k8s(
        vm_ip=vm_ip,
        ssh_pool=ssh_pool,
    )
    client = ssh_pool.get(vm_ip)
    install_cmd = "sudo istio-1.17.1/bin/istioctl install --set profile=demo -y --set components.cni.enabled=true"
    log.info(f"exec cmd: {install_cmd}")
    _, stdout, stderr = client.exec_command(install_cmd)
    log.info(stdout.readlines())
    install_err = stderr.readlines()
    log.error(install_err)
    # istioctl may time out ("5m0s") while pods are still starting; in that
    # case poll pod status ourselves for up to 30 minutes.
    if any("5m0s" in line for line in install_err):
        status_cmd = "kubectl get pod  -n istio-system|awk 'NR>1{print $3}'"
        deadline = time.time() + 30 * 60
        while True:
            _, stdout, stderr = client.exec_command(status_cmd)
            statuses = stdout.readlines()
            log.info(f"istio_pod_status is {statuses}")
            if all("Running" in status for status in statuses):
                break
            assert time.time() <= deadline
            time.sleep(30)
    deploy_cmd = '''sudo kubectl label namespace default istio-injection=enabled && \
        sudo kubectl apply -f istio-1.17.1/samples/bookinfo/platform/kube/bookinfo.yaml'''
    log.info(f"exec cmd: {deploy_cmd}")
    _, stdout, stderr = client.exec_command(deploy_cmd)
    log.info(stdout.readlines())
    deploy_err = stderr.readlines()
    if deploy_err:
        log.error(deploy_err)
        assert False

def init_istio(
    vm_ip, ssh_pool: SSHPool=ssh_pool_default
):
    """Wait for bookinfo pods to become fully ready, then expose the gateway.

    Polls ``kubectl get pods`` (10s interval, up to 60 retries) until every
    pod is Running with 2/2 containers ready (sidecar injected), then checks
    the productpage responds and applies the bookinfo gateway manifest.

    :param vm_ip: IP of the target VM.
    :param ssh_pool: pool used to obtain an SSH client for ``vm_ip``.
    :raises AssertionError: if the pods are still not ready after 60 retries.
    """
    client = ssh_pool.get(vm_ip)
    max_retries = 60
    attempt = 0
    while True:
        log.info(
            'Wait for istio service status to be normal,about 300s, timeout is 600'
        )
        _, stdout, stderr = client.exec_command('kubectl get pods')
        pod_lines = stdout.readlines()
        log.info(pod_lines)
        all_ready = True
        # skip the header row; stop at the first pod that is not ready
        for line in pod_lines[1:]:
            log.info("get pod ========= > {}".format(line))
            fields = line.split()
            if 'Running' not in fields[2] or '2/2' not in fields[1]:
                all_ready = False
                break
        if all_ready:
            log.info('istio services is normal')
            break
        assert attempt < max_retries
        attempt += 1
        time.sleep(10)
    cmd = '''sudo kubectl exec "$(sudo kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}')" -c ratings -- curl -sS productpage:9080/productpage | grep -o "<title>.*</title>" && \
        sudo kubectl apply -f istio-1.17.1/samples/bookinfo/networking/bookinfo-gateway.yaml && \
        sudo istio-1.17.1/bin/istioctl analyze'''
    log.info(f"exec cmd: {cmd}")
    _, stdout, stderr = client.exec_command(cmd)
    log.info(stdout.readlines())
    log.error(stderr.readlines())

def get_istio_productpage_server_port(vm_ip, ssh_pool: SSHPool=ssh_pool_default):
    """Return the host NodePort mapped to port 80 of istio-ingressgateway.

    :param vm_ip: IP of the VM running the istio ingress gateway.
    :param ssh_pool: pool used to obtain an SSH client for ``vm_ip``.
    :return: the port as a string.
    :raises AssertionError: when the command produced no output.
    """
    client = ssh_pool.get(vm_ip)
    _, stdout, stderr = client.exec_command("sudo kubectl get  svc istio-ingressgateway -nistio-system | awk -F'[:,/]' '/80/{print $5}'")
    raw_output = stdout.read().decode()
    # guard clause: empty output means the lookup failed
    if not raw_output:
        log.error(
            f"get port failed, err: {stderr.read().decode()}"
        )
        assert False
    log.info(f"get port success")
    return raw_output.strip()

def deploy_traefik_by_docker_compose(vm_ip, ssh_pool: SSHPool=ssh_pool_default):
    """Bring up traefik via docker-compose on the VM.

    Requires a docker-compose.yaml in the remote user's home directory.

    :param vm_ip: IP of the target VM.
    :param ssh_pool: pool used to obtain an SSH client for ``vm_ip``.
    :raises AssertionError: when the compose file is missing or the
        docker-compose command exits non-zero.
    """
    client = ssh_pool.get(vm_ip)
    _, stdout, _ = client.exec_command("test -f docker-compose.yaml ")
    # early exit when the compose file is not present on the VM
    if stdout.channel.recv_exit_status() != 0:
        log.error(f"no found docker-compose.yaml")
        assert False
    _, stdout, stderr = client.exec_command("sudo docker-compose -f docker-compose.yaml up -d")
    out_text = stdout.read().decode()
    err_text = stderr.read().decode()
    if stdout.channel.recv_exit_status() == 0:
        log.info(f"deploy traefik success, out:{out_text} {err_text}")
    else:
        log.error(f"deploy traefik failed, err: {err_text}")
        assert False
    
def add_whoami_host(vm_ip, dip, ssh_pool: SSHPool=ssh_pool_default):
    """Append a whoami.fw.com entry pointing at ``dip`` to /etc/hosts on the VM.

    :param vm_ip: IP of the VM whose hosts file is modified.
    :param dip: IP address to map whoami.fw.com to.
    :param ssh_pool: pool used to obtain an SSH client for ``vm_ip``.
    """
    client = ssh_pool.get(vm_ip)
    _, stdout, _ = client.exec_command(f"echo '{dip} whoami.fw.com' | sudo tee -a /etc/hosts")
    # success/failure is only logged, never raised
    if stdout.channel.recv_exit_status() == 0:
        log.info(f"add host success")
    else:
        log.error(f"add host failed")

def init_go_server(vm_ip, ssh_pool: SSHPool=ssh_pool_default):
    """Start the go-server-sample stack via docker-compose and log containers.

    Best-effort: stderr output is logged as an error but never raised.

    :param vm_ip: IP of the target VM.
    :param ssh_pool: pool used to obtain an SSH client for ``vm_ip``.
    """
    client = ssh_pool.get(vm_ip)
    cmd = "cd go-server-sample-master&&docker-compose up -d&&sleep 3&&docker ps"
    log.info(f"exec cmd: {cmd}")
    _, stdout, stderr = client.exec_command(cmd)
    log.info(stdout.readlines())
    stderr_lines = stderr.readlines()
    if stderr_lines:
        log.error(f"init go server error: {stderr_lines}")
