
import time
import requests

from datetime import datetime
from urllib.parse import urlencode

from platform_tools.aliyun import ali_const
from common import utils
from deploy.utils import DeployUtils
from deploy.workers.deploy_base import DeployBase
from deploy import const
from common import const as common_const
from eval_lib.common.logger import get_logger
from common.utils import ssh_pool_default

log = get_logger()


class DeployDeepflowCE(DeployBase):
    """Deploy a DeepFlow CE environment on a newly created Aliyun instance.

    End-to-end flow (see ``install_deepflow_ce``):
      1. create an Aliyun VM,
      2. install k8s on it,
      3. helm-install deepflow-ce and wait for all pods to be Running,
      4. verify the first flow-metrics data point is queryable,
      5. install deepflow-ctl and register the Aliyun cloud platform.

    Deployment progress is exposed through ``self.status`` and published
    via ``to_redis_envs_info``.
    """

    type = const.ENV_NAME_MAP[const.ENV_TYPE_DEEPFLOW_CE]

    # "vX.Y" marker found in an image tag -> helm chart "--version" flag.
    # First match wins, mirroring the original if/elif chain.
    _CHART_VERSION_FLAGS = (
        ("v6.1", "--version 6.1.8"),
        ("v6.2", "--version 6.2.6"),
        ("v6.3", "--version 6.3.9"),
        ("v6.4", "--version 6.4.9"),
    )

    def __init__(
        self, uuid, instance_name="",
        df_server_image_tag="latest",
        df_agent_image_tag="latest"
    ):
        super().__init__(uuid)
        self._ssh_pool = ssh_pool_default
        self.Platform = utils.choose_platform()
        self.mgt_ip = ""
        self.server_query_port = 0
        self.server_controller_port = 0
        # Suffix the name with the short uuid unless it is already embedded.
        short_uuid = self.uuid[:8]
        self.instance_name = (
            instance_name if short_uuid in instance_name
            else f"{instance_name}-{short_uuid}"
        )
        self.df_server_image_tag = df_server_image_tag
        self.df_agent_image_tag = df_agent_image_tag
        # NOTE: df_mgt_ip is still "" here; it is refreshed once the VM
        # exists (see create_instance).
        self.deploy_utils = DeployUtils(
            df_mgt_ip=self.mgt_ip,
            df_server_image_tag=df_server_image_tag,
            df_agent_image_tag=df_agent_image_tag,
            ssh_pool=self._ssh_pool
        )
        self.status = "init"

    @classmethod
    def _helm_version_flag(cls, df_server_image_tag, df_agent_image_tag):
        """Return the helm '--version x.y.z' flag matching either image tag.

        Returns '' (install the default/latest chart) when neither tag
        contains a known "vX.Y" marker.
        """
        for marker, flag in cls._CHART_VERSION_FLAGS:
            if marker in df_server_image_tag or marker in df_agent_image_tag:
                return flag
        return ""

    @staticmethod
    def _pods_all_running(pod_lines, check_ready):
        """Return True when every pod row reports a Running status.

        pod_lines: 'kubectl get pods' output rows, header already stripped.
        check_ready: additionally require the READY column not to be '0/x'.

        An empty row list counts as NOT ready: a failed helm install leaves
        no pods at all, and reporting success in that case would be wrong.
        """
        if not pod_lines:
            return False
        for row in pod_lines:
            log.info("get pod ========= > {}".format(row))
            cols = row.split()
            if 'Running' not in cols[2]:
                return False
            if check_ready and '0/' in cols[1]:
                return False
        return True

    def create_instance(self, instance_name: str) -> str:
        """Create one Aliyun ECS instance and remember its management IP."""
        self.status = "creating aliyun instance"
        instance_info = self.Platform.create_instances(
            instance_names=[instance_name],
            image_id=ali_const.ali_image_id_deepflow_default,
            instance_type=ali_const.ali_instance_type_t5_c1m2_2x_large
        )
        self.mgt_ip = instance_info[instance_name]
        # Keep the helper in sync now that the real IP is known.
        self.deploy_utils.df_mgt_ip = self.mgt_ip
        self.status = "create aliyun instance complete"
        log.info(f"create instance, ip: {self.mgt_ip}")
        return self.mgt_ip

    def to_redis_envs_info(self):
        """Snapshot of this environment for publication to redis."""
        return {
            "name": self.instance_name,
            "deploy_status": self.status,
            "mgt_ip": self.mgt_ip,
            "server_query_port": self.server_query_port,
            "server_controller_port": self.server_controller_port,
            "type": self.type,
        }

    def install_deepflow_ce_latest(
        self, deepflow_mgt_ip="",
        df_server_image_tag="",
        df_agent_image_tag="",
        ):
        """Helm-install deepflow-ce on the target host and wait for its pods.

        Falls back to ``self.mgt_ip`` when ``deepflow_mgt_ip`` is empty.
        Empty image tags leave the values file at its default ("latest").

        Returns True once every pod is Running with ready containers,
        False when the 600s wait times out.
        Raises AssertionError if polling the pods fails unexpectedly.
        """
        res = False
        deepflow_mgt_ip = deepflow_mgt_ip if deepflow_mgt_ip else self.mgt_ip
        # Ship the custom helm values file to the target host.
        utils.upload_files(
            vm_ip=deepflow_mgt_ip,
            local_path=f"{common_const.LOCAL_PATH}/deploy/file/values-custom-latest.yaml",
            remote_path="values-custom-latest.yaml",
            ssh_pool=self._ssh_pool
        )
        ssh_client = self._ssh_pool.get(deepflow_mgt_ip)
        # Lines 7 / 10 of the values file hold the server / agent image tags.
        if df_server_image_tag:
            ssh_client.exec_command(
                f''' sed -i "7s/latest/{df_server_image_tag}/" values-custom-latest.yaml'''
            )
        if df_agent_image_tag:
            ssh_client.exec_command(
                f'''sed -i "10s/latest/{df_agent_image_tag}/" values-custom-latest.yaml'''
            )
        self.status = "start install deepflow-ce"
        version = self._helm_version_flag(df_server_image_tag, df_agent_image_tag)
        try:
            log.info(f'Start install deepflow-ce ip:{deepflow_mgt_ip}')
            cmds = [
                "helm repo update deepflow_stable",
                f"helm install deepflow -n deepflow deepflow_stable/deepflow {version} --create-namespace --set mysql.service.type=NodePort -f values-custom-latest.yaml"
            ]
            for cmd in cmds:
                _, stdout, stderr = ssh_client.exec_command(cmd)
                err = stderr.readlines()
                log.info(f"exec cmd: {cmd}")
                if len(err) > 0:
                    log.error(f'Install Deepflow-ce Error: {err}')
                else:
                    log.info(stdout.readlines())
        except Exception as e:
            # Best-effort: install errors are logged, the pod wait below
            # decides whether the deployment actually succeeded.
            log.error(f'Install Error: {e}')
        self.status = "waiting deepflow services normal"
        try:
            log.info(
                'DeepFlow is completed, waiting for the service status to be normal'
            )
            wait_num = int(10 * 60 / 10)  # 10s polls, 600s budget
            while wait_num:
                log.info(
                    'Wait for DeepFlow service status to be normal,about 10s, timeout is 600'
                )
                wait_num -= 1
                time.sleep(10)
                _, stdout, _ = ssh_client.exec_command(
                    'kubectl get pods -n deepflow'
                )
                logs = stdout.readlines()
                # logs[0] is the kubectl header row.
                res = self._pods_all_running(logs[1:], check_ready=True)
                if res:
                    log.info('DeepFlow services is normal')
                    self.status = "install deepflow-ce complete"
                    self.end_wait_running_time = datetime.now()
                    break
        except Exception as err:
            log.error(err)
            # `assert False` is stripped under `python -O`; raise explicitly.
            raise AssertionError(err) from err
        return res

    def upgrade_deepflow_ce(
        self,
        df_server_image_tag=None,
        df_agent_image_tag=None
        ):
        """Helm-upgrade the deepflow release to the given image tags.

        Missing tags default to "latest".  Patches the stable values file
        in place over SSH, runs ``helm upgrade`` and waits up to 1800s for
        the pods to be Running.

        Returns True when the pods settle, False on timeout.
        Raises AssertionError when helm reports errors on stderr or the
        pod polling itself fails.
        """
        res = False
        if not df_server_image_tag:
            df_server_image_tag = "latest"
        if not df_agent_image_tag:
            df_agent_image_tag = "latest"
        version = self._helm_version_flag(df_server_image_tag, df_agent_image_tag)
        ssh_client = self._ssh_pool.get(self.mgt_ip)

        def _run_logged(cmd):
            """Run a remote command; log (but tolerate) any stderr output."""
            _, _, stderr = ssh_client.exec_command(cmd)
            err = stderr.readlines()
            if err:
                log.error(err)

        # Patch the stable values file: fixed line numbers 18/16/21 hold the
        # tags to replace -- assumes the file layout shipped with this repo.
        _run_logged(
            f'''sed -i "18s/v6.1/latest/g" values-custom-stable.yaml'''
        )
        _run_logged(
            f'''sed -i "16s/v6.1/{df_server_image_tag}/g" values-custom-stable.yaml'''
        )
        _run_logged(
            f'''sed -i "21s/v6.1/{df_agent_image_tag}/g" values-custom-stable.yaml'''
        )
        # Append external DNS servers for the deepflow server.
        _run_logged(
            f'''sed -i "\$a\server:\\n  nameservers:\\n  - {common_const.ext_dns_server}" values-custom-stable.yaml'''
        )
        # Mount the host log directory into the server pods.
        extraVolumeMounts = '''  extraVolumeMounts:\\n  - name: log-volume\\n    mountPath: /var/log/deepflow\\n    readOnly: false\\n    hostPath: /root/deepflow'''
        _run_logged(
            f'''sed -i "\$a\{extraVolumeMounts}" values-custom-stable.yaml && mkdir deepflow'''
        )
        log.info(f'Start upgrade deepflow-ce ip:{self.mgt_ip}')
        _, stdout, stderr = ssh_client.exec_command(
            f'''helm repo update deepflow_stable && helm upgrade deepflow {version} -n deepflow deepflow_stable/deepflow -f values-custom-stable.yaml'''
        )
        err = stderr.readlines()
        if err:
            log.error(f'Install Deepflow-ce Error: {err}')
            # `assert False` is stripped under `python -O`; raise explicitly.
            raise AssertionError(err)
        try:
            out_lines = stdout.readlines()
            # Guard: the original indexed [-1] blindly and could IndexError
            # on empty helm output.
            if out_lines and 'Grafana auth: admin:deepflow' in out_lines[-1]:
                log.info(
                    'DeepFlow is completed, waiting for the service status to be normal'
                )
            wait_num = int(30 * 60 / 10)  # 10s polls, 1800s budget
            while wait_num:
                log.info(
                    'Wait for DeepFlow service status to be normal,about 1s, timeout is 1800'
                )
                wait_num -= 1
                time.sleep(10)
                _, stdout, _ = ssh_client.exec_command(
                    'kubectl get pods -n deepflow'
                )
                logs = stdout.readlines()
                # NOTE(review): the original skipped both the header and the
                # LAST row (logs[1:-1]); kept as-is -- confirm whether the
                # final pod row is intentionally ignored.
                res = self._pods_all_running(logs[1:-1], check_ready=False)
                if res:
                    log.info('DeepFlow services is normal')
                    break
        except Exception as err:
            log.error(err)
            raise AssertionError(err) from err
        return res

    def query_first_data(self, filters=''):
        """Query the querier API for the earliest vtap_flow_port record.

        filters: optional SQL fragment spliced into the query (e.g. a
        WHERE clause); empty by default.
        Returns (response_json, http_status_code).
        """
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        data = {
            'db': "flow_metrics",
            'sql': f"select pod_node from vtap_flow_port {filters} order by time limit 1",
            'data_precision': "1s"
        }
        response = requests.post(
            url='http://%s:%s/v1/query/' %
            (self.mgt_ip, self.server_query_port), headers=headers,
            data=urlencode(data, encoding='utf-8')
        )
        return response.json(), response.status_code

    def get_query_api_port(self):
        """Return the NodePort used to reach the DF querier API."""
        port = self.deploy_utils.get_query_port()
        log.info("用于查询DF-Querier的APi所用端口为{}".format(port))
        return port

    def get_controller_api_port(self):
        """Return the NodePort used to reach the DF controller API."""
        port = self.deploy_utils.get_controller_port()
        log.info("用于查询DF-Controller的APi所用端口为{}".format(port))
        return port

    def check_first_data(self, filters=''):
        """Poll the querier until the first data point appears.

        Retries every 5s, up to 240 attempts (~1200s).  Returns True as
        soon as a 200 response is received, False on timeout.
        """
        checked = False
        self.status = "check first data"
        self.server_query_port = self.get_query_api_port()
        self.server_controller_port = self.get_controller_api_port()
        log.info(
            f"Get Server Port, querier: {self.server_query_port}, controller: {self.server_controller_port}"
        )
        time_count = 0
        loop_num = 240
        while loop_num:
            try:
                loop_num -= 1
                time.sleep(5)
                time_count += 5
                if time_count % 10 == 0:
                    log.info(f"第{time_count}秒尝试获取数据")
                    self.status = f"check first data has waiting {time_count}s"
                response, code = self.query_first_data(filters=filters)
                if code != 200:
                    log.info(
                        f"check_first_data failed, code {code} response {response}"
                    )
                    continue
                log.info(
                    'check_first_data::response_json["result"]["values"] ==> {}'
                    .format(response["result"]["values"])
                )
                # Reaching here implies code == 200 (the != 200 case
                # continued above), so the data is available.
                checked = True
                break
            except Exception as e:
                # Best-effort polling: transient query errors are logged
                # and the loop keeps retrying until the budget runs out.
                log.error(e)
        return checked

    def add_aliyun_platform(self):
        """Register the Aliyun cloud platform in deepflow (idempotent)."""
        self.status = "add aliyun platform"
        if not self.deploy_utils.check_aliyun_cloud_isexist():
            self.deploy_utils.add_aliyun_cloud_platform()
            self.status = "check aliyun cloud status"
            return self.deploy_utils.check_aliyun_cloud_status()
        else:
            # Already registered: nothing to do.
            return True

    def install_deepflow_ce(self):
        """Run the full deployment pipeline, step by step.

        Sets ``self.status`` to a terminal failure message and returns
        early when data verification or platform registration fails.
        """
        with self.step("create instance"):
            self.mgt_ip = self.create_instance(self.instance_name)
            log.info(f"DeepFlow IP is {self.mgt_ip}")
            # Give the fresh VM a moment to finish booting before SSH use.
            time.sleep(20)

        with self.step("install k8s"):
            self.status = "install k8s"
            utils.install_k8s(self.mgt_ip)

        with self.step("install deepflow ce"):
            self.install_deepflow_ce_latest(
                deepflow_mgt_ip=self.mgt_ip,
                df_server_image_tag=self.df_server_image_tag,
                df_agent_image_tag=self.df_agent_image_tag
            )
        with self.step("check first data"):
            if self.check_first_data() is False:
                log.error("check_first_data failed! deploy failed!")
                self.status = "check first data failed"
                return
        with self.step("install deepflow ctl"):
            self.deploy_utils.install_deepflow_ctl()
        with self.step("add aliyun platform"):
            if self.add_aliyun_platform() is False:
                log.error("add_aliyun_platform failed! deploy failed!")
                self.status = "add aliyun platform failed"
                return
        self.status = "complete"
        log.info("install deepflow ce complete!")

    def run(self):
        """Worker entry point: execute the full deployment."""
        self.install_deepflow_ce()


