import os
import time
from typing import Tuple

import IPy
from kubernetes.utils import create_from_yaml
from lucommon.logger import lu_logger

import agent
import conf
from async_task.services import register_job, AsyncTask
from cert.views import gen_ca_cert, gen_kubeconfig
from deploy.impl import check_agent_status, check_port, check_concurrence_result, AsyncTaskFinishCreateCluster
from deploy.impl.bash_completion import AsyncTaskDeployBashCompletion
from deploy.models import Cluster, ClusterTypeEnum, ClusterStatusEnum
from deploy.services import kubernetes as KUBERNETES
from deploy.services import plugin as PLUGIN
from k8s.core_api import CoreAPI
from k8s.rbac_api import RbacAPI
from utils import gen_static_dir, get_static_url, concurrence_requests, clean_path


def check_install(node_list, check_node=False, check_docker=False, check_etcd=False):
    """Pre-install validation for the given nodes.

    Returns (0, []) when all checks pass, or (-1, err_ips) when some node
    agents are unreachable.  The check_node / check_docker / check_etcd
    switches are placeholders for future checks and currently do nothing.
    """
    # Agent reachability is the only check implemented so far.
    unreachable = check_agent_status(node_list)
    if unreachable:
        return -1, unreachable

    # todo: verify nodes meet the k8s installation requirements
    if check_node:
        pass
    # todo: verify docker status
    if check_docker:
        pass
    # todo: verify etcd status and certificates
    if check_etcd:
        pass

    return 0, []


class AsyncTaskDeployApiserver(AsyncTask):
    """Deploy kube-apiserver onto every master node via the per-node agent."""

    def task_impl(self, pkg,
                  config_dir, application_dir, cert_dir,
                  master_nodes,
                  token_path, apiserver_service_path, audit_config_path, admin_kubeconfig_path,
                  k8s_ca_cert, proxy_ca_cert, etcd_certs) -> Tuple[int, any]:
        """Fan one install request out to each master's agent, concurrently.

        Returns (0, results) when every agent call succeeded, otherwise
        (-1, results) with the per-node responses.
        """
        # Deploy kube-apiserver.  The env snippet points kubectl at the
        # admin kubeconfig that is shipped alongside the binaries below.
        kubectl_env = "export KUBECONFIG={}\n".format(
            clean_path("{}/{}".format(config_dir, KUBERNETES.KUBECONFIG_ADMIN)))
        apiserver_request_infos = {}
        for ip in master_nodes:
            apiserver_request_infos[str(ip)] = {
                "url": "http://{}:{}{}".format(ip, agent.AGENT_PORT, agent.INSTALL_KUBE_API_SERVER),
                "method": "POST",
                "json": {
                    "pkg_url": {"url": pkg.url, "save_path": "", "extract_type": pkg.extract_type, },
                    "application_dir": application_dir,
                    "kubectl_env": kubectl_env,
                    # Every file the agent must download and place before
                    # starting the kube-apiserver service.
                    "depend_files": [
                        {
                            "url": get_static_url(token_path),
                            "save_path": clean_path("{}/{}".format(config_dir, KUBERNETES.TOKEN_NAME))
                        },  # token file
                        {
                            "url": get_static_url(apiserver_service_path),
                            # NOTE(review): no '/' separator here unlike the other
                            # paths — presumably SERVICE_DIR ends with '/'; confirm.
                            "save_path": clean_path("{}{}".format(conf.SERVICE_DIR, KUBERNETES.APISERVER_SERVICE_NAME))
                        },  # service file
                        {
                            "url": get_static_url(audit_config_path),
                            "save_path": clean_path("{}/{}".format(config_dir, KUBERNETES.AUDIT_CONFIG_NAME))
                        },  # audit config file
                        {
                            "url": get_static_url(k8s_ca_cert["cert"]["ca"]),
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.K8S_CA_NAME))
                        },  # k8s-ca
                        {
                            "url": get_static_url(k8s_ca_cert["cert"]["kube-apiserver"]),
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.SERVER_CERT_NAME))
                        },  # server-cert
                        {
                            "url": get_static_url(k8s_ca_cert["cert"]["kube-apiserver-key"]),
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.SERVER_KEY_NAME))
                        },  # server-key
                        {
                            "url": get_static_url(k8s_ca_cert["cert"]["sa"]),
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.SA_CERT_NAME))
                        },  # sa-cert
                        {
                            "url": get_static_url(k8s_ca_cert["cert"]["sa-key"]),
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.SA_KEY_NAME))
                        },  # sa-key
                        {
                            "url": get_static_url(k8s_ca_cert["cert"]["apiserver-to-kubelet-client"]),
                            "save_path": clean_path(
                                "{}/{}".format(cert_dir, KUBERNETES.APISERVER_TO_KUBELET_CLIENT_CERT_NAME))
                        },  # apiserver-to-kubelet-client-cert
                        {
                            "url": get_static_url(k8s_ca_cert["cert"]["apiserver-to-kubelet-client-key"]),
                            "save_path": clean_path(
                                "{}/{}".format(cert_dir, KUBERNETES.APISERVER_TO_KUBELET_CLIENT_KEY_NAME))
                        },  # apiserver-to-kubelet-client-key
                        {
                            "url": get_static_url(proxy_ca_cert["cert"]["ca"]),
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.PROXY_CA_NAME))
                        },  # proxy-ca
                        {
                            "url": get_static_url(proxy_ca_cert["cert"]["proxy-client"]),
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.PROXY_CLIENT_CERT_NAME))
                        },  # proxy-client-cert
                        {
                            "url": get_static_url(proxy_ca_cert["cert"]["proxy-client-key"]),
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.PROXY_CLIENT_KEY_NAME))
                        },  # proxy-client-key
                        {
                            # etcd_certs entries are used as URLs directly,
                            # not passed through get_static_url like the rest.
                            "url": etcd_certs["ca"],
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.ETCD_CA_NAME))
                        },  # etcd-ca
                        {
                            "url": etcd_certs["cert"],
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.ETCD_CLIENT_CERT_NAME))
                        },  # etcd-client-cert
                        {
                            "url": etcd_certs["key"],
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.ETCD_CLIENT_KEY_NAME))
                        },  # etcd-client-key
                        {
                            "url": get_static_url(admin_kubeconfig_path),
                            "save_path": clean_path("{}/{}".format(config_dir, KUBERNETES.KUBECONFIG_ADMIN))
                        },  # admin-kubeconfig
                    ],
                }
            }
        lu_logger.debug("apiserver_request_infos:{}".format(apiserver_request_infos))
        apiserver_result = concurrence_requests(apiserver_request_infos)
        if check_concurrence_result(apiserver_result):
            return 0, apiserver_result
        return -1, apiserver_result

    def verify_impl(self, master_nodes, apiserver_port, k8s_ca_cert) -> Tuple[int, any]:
        """Poll each apiserver over TLS until it answers, or give up.

        Up to 5 rounds with a 3s sleep between rounds.  Returns (0, [])
        when every master answered, else (-1, list-of-still-down IPs).
        """
        connected_list, disconnected_list = [], []

        for i in range(5):
            for server_ip in set(master_nodes) - set(connected_list):
                # Connect using the cluster-admin client certificate.
                attr = {
                    "ca": k8s_ca_cert["cert"]['ca'],
                    "cert": k8s_ca_cert["cert"]['kube-admin-client'],
                    "key": k8s_ca_cert["cert"]['kube-admin-client-key'],
                    "apiserver": "https://{}:{}".format(server_ip, apiserver_port)
                }
                try:
                    with CoreAPI(**attr):
                        connected_list.append(server_ip)
                    # NOTE(review): this break exits after the FIRST successful
                    # connect, so at most one new master is verified per round;
                    # with only range(5) rounds, >5 masters could never all be
                    # verified.  Confirm whether checking every master in each
                    # round was the intent.
                    break
                except Exception as e:
                    lu_logger.debug(e)
                    lu_logger.info("apiserver {}:{} is not ready".format(server_ip, apiserver_port))

            disconnected_list = list(set(master_nodes) - set(connected_list))
            if not disconnected_list:
                return 0, []

            time.sleep(3)
        return -1, disconnected_list


class AsyncTaskDeployRbac(AsyncTask):
    def task_impl(self, cluster_connect_attr) -> Tuple[int, any]:
        """Configure cluster RBAC for kubelet bootstrap/renewal and apiserver->kubelet access."""
        with RbacAPI(**cluster_connect_attr) as api:

            def bind(role, binding, kind, name):
                # One ClusterRoleBinding with a single subject; returns the
                # API error (falsy on success).
                subject = api.client.V1Subject(
                    api_group=api.api_group, kind=kind, name=name)
                _, bind_err = api.create_cluster_role_binding(
                    role=role, role_binding=binding, subjects=[subject])
                return bind_err

            # kubelet-bootstrap user gets system:node-bootstrapper so the
            # kubelet is allowed to create CSRs.
            err = bind("system:node-bootstrapper", "kubelet-bootstrap",
                       "User", KUBERNETES.BOOTSTRAP_USER)
            if err:
                return -1, err

            # Permissions for the apiserver to reach kubelet endpoints.
            apiserver_to_kubelet_role, err = api.create_cluster_role(
                "system:kube-apiserver-to-kubelet",
                [api.client.V1PolicyRule(
                    api_groups=[""],
                    resources=["nodes/proxy", "nodes/stats", "nodes/log",
                               "nodes/spec", "nodes/metrics", "pods/log"],
                    verbs=["*"],
                )],
            )
            if err:
                return -1, err

            err = bind(apiserver_to_kubelet_role.metadata.name,
                       "system:kube-apiserver-to-kubelet",
                       "User", KUBERNETES.APISERVER_TO_KUBELET_USER)
            if err:
                return -1, err

            # Auto-approve the first CSR a kubelet sends (nodeclient CSR).
            err = bind("system:certificates.k8s.io:certificatesigningrequests:nodeclient",
                       "node-client-auto-approve-csr",
                       "Group", KUBERNETES.BOOTSTRAP_GROUP)
            if err:
                return -1, err

            # Auto-approve kubelet client-cert renewals (selfnodeclient CSR).
            err = bind("system:certificates.k8s.io:certificatesigningrequests:selfnodeclient",
                       "node-client-auto-renew-crt",
                       "Group", "system:nodes")
            if err:
                return -1, err

            # Auto-approve kubelet serving-cert (port 10250) renewals
            # (selfnodeserver CSR).
            selfnodeserver_role, err = api.create_cluster_role(
                "system:certificates.k8s.io:certificatesigningrequests:selfnodeserver",
                [api.client.V1PolicyRule(
                    api_groups=["certificates.k8s.io"],
                    resources=["certificatesigningrequests/selfnodeserver"],
                    verbs=["create"],
                )],
            )
            if err:
                return -1, err

            err = bind(selfnodeserver_role.metadata.name,
                       "node-server-auto-renew-crt",
                       "Group", "system:nodes")
            if err:
                return -1, err

        return 0, ""


class AsyncTaskDeployControllerManager(AsyncTask):
    def task_impl(self, pkg,
                  config_dir, application_dir, cert_dir,
                  master_nodes,
                  controller_service_path, controller_manager_kubeconfig_path,
                  k8s_ca_cert) -> Tuple[int, any]:
        """Deploy kube-controller-manager on every master via the node agent."""
        # The payload is identical for every master: package, unit file,
        # kubeconfig, and the CA key used for signing.
        payload = {
            "pkg_url": {"url": pkg.url, "save_path": "", "extract_type": pkg.extract_type, },
            "application_dir": application_dir,
            "depend_files": [
                {
                    "url": get_static_url(controller_service_path),
                    "save_path": clean_path(
                        "{}/{}".format(conf.SERVICE_DIR, KUBERNETES.CONTROLLER_MANAGER_SERVICE_NAME))
                },  # controller manager service
                {
                    "url": get_static_url(controller_manager_kubeconfig_path),
                    "save_path": clean_path(
                        "{}/{}".format(config_dir, KUBERNETES.KUBECONFIG_CONTROLLER_MANAGER))
                },  # controller manager kubeconfig
                {
                    "url": get_static_url(k8s_ca_cert["cert"]["ca-key"]),
                    "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.K8S_CA_KEY_NAME))
                },  # k8s-ca-key file
            ],
        }
        controller_request_infos = {
            str(ip): {
                "url": "http://{}:{}{}".format(ip, agent.AGENT_PORT, agent.INSTALL_KUBE_CONTROLLER_MANAGER),
                "method": "POST",
                "json": payload,
            }
            for ip in master_nodes
        }
        lu_logger.debug("controller_request_infos:{}".format(controller_request_infos))
        controller_result = concurrence_requests(controller_request_infos)
        ret = 0 if check_concurrence_result(controller_result) else -1
        return ret, controller_result


class AsyncTaskDeployScheduler(AsyncTask):
    def task_impl(self, pkg,
                  config_dir, application_dir,
                  master_nodes,
                  scheduler_service_path, scheduler_kubeconfig_path) -> Tuple[int, any]:
        """Deploy kube-scheduler on every master via the node agent."""
        # Identical payload for all masters: package + unit file + kubeconfig.
        payload = {
            "pkg_url": {"url": pkg.url, "save_path": "", "extract_type": pkg.extract_type, },
            "application_dir": application_dir,
            "depend_files": [
                {
                    "url": get_static_url(scheduler_service_path),
                    "save_path": clean_path("{}/{}".format(conf.SERVICE_DIR, KUBERNETES.SCHEDULER_SERVICE_PATH))
                },  # scheduler service
                {
                    "url": get_static_url(scheduler_kubeconfig_path),
                    "save_path": clean_path("{}/{}".format(config_dir, KUBERNETES.KUBECONFIG_SCHEDULER))
                },  # scheduler kubeconfig
            ],
        }
        scheduler_request_infos = {
            str(ip): {
                "url": "http://{}:{}{}".format(ip, agent.AGENT_PORT, agent.INSTALL_KUBE_SCHEDULER),
                "method": "POST",
                "json": payload,
            }
            for ip in master_nodes
        }
        lu_logger.debug("scheduler_request_infos:{}".format(scheduler_request_infos))
        scheduler_result = concurrence_requests(scheduler_request_infos)
        ret = 0 if check_concurrence_result(scheduler_result) else -1
        return ret, scheduler_result


class AsyncTaskDeployKubelet(AsyncTask):
    """Deploy kubelet onto worker nodes and verify they joined the cluster."""

    def task_impl(self, pkg,
                  config_dir, application_dir, cert_dir,
                  worker_nodes,
                  kubelet_bootstrap_kubeconfig_path, kubelet_config_path_map, kubelet_service_path_map,
                  k8s_ca_cert) -> Tuple[int, any]:
        """Fan one kubelet install request out to each worker's agent.

        Returns (0, results) when every agent call succeeded, otherwise
        (-1, results) with the per-node responses.
        """
        kubelet_request_infos = {}
        for ip in worker_nodes:
            # Config and unit files are generated per node.
            config_path = kubelet_config_path_map[ip]
            service_path = kubelet_service_path_map[ip]
            kubelet_request_infos[str(ip)] = {
                "url": "http://{}:{}{}".format(ip, agent.AGENT_PORT, agent.INSTALL_KUBELET),
                "method": "POST",
                "json": {
                    "pkg_url": {"url": pkg.url, "save_path": "", "extract_type": pkg.extract_type, },
                    "application_dir": application_dir,
                    "depend_files": [
                        {
                            "url": get_static_url(service_path),
                            "save_path": clean_path("{}/{}".format(conf.SERVICE_DIR, KUBERNETES.KUBELET_SERVICE_NAME))
                        },  # kubelet service
                        {
                            "url": get_static_url(kubelet_bootstrap_kubeconfig_path),
                            "save_path": clean_path("{}/{}".format(config_dir, KUBERNETES.KUBECONFIG_BOOTSTRAP_KUBELET))
                        },  # kubelet bootstrap kubeconfig
                        {
                            "url": get_static_url(config_path),
                            "save_path": clean_path("{}/{}".format(config_dir, KUBERNETES.KUBELET_CONFIG_NAME))
                        },  # kubelet-config file
                        {
                            "url": get_static_url(k8s_ca_cert["cert"]["ca"]),
                            "save_path": clean_path("{}/{}".format(cert_dir, KUBERNETES.K8S_CA_NAME))
                        },  # k8s-ca
                    ],
                }
            }
        lu_logger.debug("kubelet_request_infos:{}".format(kubelet_request_infos))
        kubelet_result = concurrence_requests(kubelet_request_infos)
        if check_concurrence_result(kubelet_result):
            return 0, kubelet_result
        return -1, kubelet_result

    def verify_impl(self, worker_nodes, cluster_connect_attr) -> Tuple[int, any]:
        """Verify every worker registered itself with the apiserver.

        Nodes normally show NotReady until the network plugin is deployed;
        stale configs under /etc/cni can make them show Ready immediately.
        Polls up to 6 times, 5s apart.  Returns (0, msg) when all workers
        joined, else (-1, comma-separated list of missing nodes).
        """
        invalid_nodes = []
        for i in range(6):
            time.sleep(5)
            with CoreAPI(**cluster_connect_attr) as api:
                valid_nodes = api.get_nodes().keys()
                lu_logger.debug("valid nodes:{}".format(",".join(valid_nodes)))
                invalid_nodes_set = set(worker_nodes) - set(valid_nodes)
                invalid_nodes = list(invalid_nodes_set)
                if invalid_nodes_set:
                    lu_logger.info("invalid nodes:{}".format(",".join(invalid_nodes_set)))
                else:
                    return 0, "all nodes added in cluster"

        # BUG FIX: previously '"{}".join(invalid_nodes)', which inserted the
        # literal text "{}" between node names; a comma separator was intended
        # (matching the log lines above).
        return -1, ",".join(invalid_nodes)


class AsyncTaskInitNode(AsyncTask):

    def task_impl(self, cluster_connect_attr, master_nodes, worker_nodes) -> Tuple[int, any]:
        """Label freshly joined nodes and taint the masters as NoSchedule."""
        with CoreAPI(**cluster_connect_attr) as api:
            for master in master_nodes:
                # Masters: role label first, then the NoSchedule taint.
                _, label_err = api.label_node(master, "node-role.kubernetes.io/master", "")
                if label_err:
                    return -1, label_err
                _, taint_err = api.taint_node(master, "NoSchedule", "node-role.kubernetes.io/master")
                if taint_err:
                    return -1, taint_err

            # Workers: role label only.
            for worker in worker_nodes:
                _, label_err = api.label_node(worker, "node-role.kubernetes.io/node", "")
                if label_err:
                    return -1, label_err
        return 0, ""


class AsyncTaskDeployProxy(AsyncTask):
    """Deploy kube-proxy onto worker nodes via the per-node agent."""

    def task_impl(self, pkg,
                  config_dir, application_dir,
                  worker_nodes,
                  kube_proxy_service_path_map, proxy_kubeconfig_path) -> Tuple[int, any]:
        """Fan one install request out to each worker's agent, concurrently.

        Returns (0, results) when every agent call succeeded, otherwise
        (-1, results) with the per-node responses.
        """
        # Deploy kube-proxy.
        proxy_request_infos = {}
        for ip in worker_nodes:
            # The unit file is generated per node; the kubeconfig is shared.
            service_path = kube_proxy_service_path_map[ip]
            proxy_request_infos[str(ip)] = {
                "url": "http://{}:{}{}".format(ip, agent.AGENT_PORT, agent.INSTALL_KUBE_PROXY),
                "method": "POST",
                "json": {
                    "pkg_url": {"url": pkg.url, "save_path": "", "extract_type": pkg.extract_type, },
                    "application_dir": application_dir,
                    "depend_files": [
                        {
                            "url": get_static_url(service_path),
                            "save_path": clean_path(
                                "{}/{}".format(conf.SERVICE_DIR, KUBERNETES.KUBE_PROXY_SERVICE_NAME))
                        },  # proxy service file
                        {
                            "url": get_static_url(proxy_kubeconfig_path),
                            "save_path": clean_path("{}/{}".format(config_dir, KUBERNETES.KUBECONFIG_PROXY))
                        },  # proxy kubeconfig
                    ],
                }
            }
        lu_logger.debug("proxy_request_infos:{}".format(proxy_request_infos))
        proxy_result = concurrence_requests(proxy_request_infos)
        if check_concurrence_result(proxy_result):
            return 0, proxy_result
        return -1, proxy_result


class AsyncTaskDeployPlugin(AsyncTask):
    """Apply a plugin manifest (e.g. calico / coreDNS / ingress) to the cluster."""

    def task_impl(self, cluster_connect_attr, yaml_path) -> Tuple[int, any]:
        """Create every object defined in yaml_path against the cluster.

        NOTE(review): reaches into CoreAPI's private `_api_client`; also,
        there is no error handling here, so presumably any failure from
        create_from_yaml propagates as an exception rather than an error
        tuple — confirm callers expect that.
        """
        with CoreAPI(**cluster_connect_attr) as api:
            create_from_yaml(
                k8s_client=api._api_client,
                yaml_file=yaml_path,
                verbose=True
            )

        return 0, ""


"""***********************************************job*********************************************"""


def job_deploy_kubernetes(params):
    """
    1、对于etcd，维护一套独立的ca、客户端证书
    2、对于kube-apiserver维护如下：
        ①client-ca，用于k8s各组件的客户端证书签发
        ②proxy-client-ca，用于代理转发到自定义server的客户端证书签发
    """

    cluster_name = params["cluster_name"]
    proxy_server = params.get("proxy_server", "")
    masters = params["masters"]
    nodes = params["nodes"]
    proxy_hosts = params["proxy_hosts"]
    etcd_clusters = params["etcd_clusters"]
    etcd_certs = params["etcd_certs"]
    cluster_cidr = params["cluster_cidr"]
    service_cluster_ip_range = params["service_cluster_ip_range"]
    service_node_port_range = params["service_node_port_range"]
    cluster_dns_service_ip_list = params["cluster_dns_service_ip_list"]
    application_dir = params["application_dir"]
    data_dir = params["data_dir"]
    cert_dir = params["cert_dir"]
    config_dir = params["config_dir"]
    audit_dir = params["audit_dir"]
    network_interface = params["network_interface"]
    calico_mode = params["calico_mode"]
    dns_resolve_list = params["dns_resolve_list"]
    upper_dns_server = params["upper_dns_server"]
    version = params["version"]

    all_nodes = list(set(masters + nodes))
    apiserver_ip = masters[0]
    apiserver_port = "6443"

    if len(masters) > 1:
        if not proxy_server:
            return -1, "多master节点需要提供代理服务", {}

        apiserver_ip, apiserver_port = proxy_server.split(":")
        if apiserver_ip in masters:
            return -1, "代理服务ip不能与master节点ip重合", {}

        if not check_port(apiserver_ip, apiserver_port):
            return -1, "{} 端口检查失败，请确认连接通畅".format(proxy_server), {}

    if not masters:
        return -1, "未指定masters", {}

    svc_ip_range, cluster_ip_range = IPy.IP(service_cluster_ip_range), IPy.IP(cluster_cidr)
    if svc_ip_range.len() <= 1:
        return -1, "service_cluster_ip_range 太小", {}
    if cluster_ip_range.len() <= 1:
        return -1, "cluster_cidr 太小", {}

    for ip in cluster_dns_service_ip_list:
        if IPy.IP(ip) not in svc_ip_range:
            return -1, "cluster_dns service ip {} not in service_cluster_ip_range".format(ip), {}
    if calico_mode not in PLUGIN.MODE_ENUM:
        return -1, "calico仅支持{}".format(",".join(PLUGIN.MODE_ENUM)), {}

    pkg = conf.KUBERNETES_PACKAGE.get(version)
    if not pkg:
        return -1, "无适用的kubernetes package,valid_version:{}".format(
            ",".join(list(conf.KUBERNETES_PACKAGE.keys()))), {}

    code, err_nodes = check_install(all_nodes)
    if code != 0:
        return code, "节点检查失败,{}".format(["{}-{}".format(n, str(e)) for n, e in err_nodes.items()]), {}

    # 生成配置存放目录
    static_dir = gen_static_dir()
    lu_logger.debug("static dir:{}".format(static_dir))
    for ip in all_nodes:
        node_dir = static_dir + "/" + ip + "/"
        os.mkdir(node_dir)

    # 各组件配置文件生成
    token_path, token = KUBERNETES.gen_token_file(static_dir)
    audit_config_path = KUBERNETES.gen_audit_config(static_dir)
    apiserver_service_path = KUBERNETES.gen_apiserver_service(
        out_dir=static_dir,
        k8s_version=version,
        cert_dir=cert_dir,
        config_dir=config_dir,
        etcd_clusters=etcd_clusters,
        service_cluster_ip_range=service_cluster_ip_range,
        service_node_port_range=service_node_port_range,
        audit_dir=audit_dir
    )
    scheduler_service_path = KUBERNETES.gen_scheduler_service(static_dir, config_dir)
    controller_service_path = KUBERNETES.gen_controller_manager_service(
        static_dir,
        cert_dir,
        config_dir,
        service_cluster_ip_range,
        cluster_cidr,
    )
    kubelet_service_path_map = KUBERNETES.gen_kubelet_service_map(
        static_dir,
        cert_dir,
        config_dir,
        data_dir,
        list(set(all_nodes)),
        conf.PAUSE_IMAGE["3.9"]["pause"]
    )
    kubelet_config_path_map = KUBERNETES.gen_kubelet_config_map(
        static_dir,
        cert_dir,
        list(set(all_nodes)),
        cluster_dns_service_ip_list
    )
    kube_proxy_service_path_map = KUBERNETES.gen_kube_proxy_service_map(
        static_dir,
        config_dir,
        list(set(all_nodes)),
        cluster_cidr
    )

    k8s_default_allow_hosts = [
        "localhost",
        "127.0.0.1",
        svc_ip_range[1].strNormal(),
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
    ]
    k8s_allow_hosts = k8s_default_allow_hosts + masters + proxy_hosts + [apiserver_ip]

    # 集群内各组件ca及证书签发
    k8s_ca_cert, err = gen_ca_cert({
        "ca": {
            "CN": "kubernetes",  # common name
            "expiry": 876000,  # ca有效时间
            "profiles": [
                {
                    "profile": "kubernetes",
                    "expiry": 876000,  # 签发证书有效期
                    "usages": ["signing", "key encipherment", "server auth", "client auth"],  # 签发证书用途
                }
            ],  # 指定不同签发证书使用场景
        },
        "certs": [
            {
                "name": "kube-apiserver",
                "CN": "kube-apiserver",
                "hosts": k8s_allow_hosts,
                "O": "",
                "profile": "kubernetes",
            },  # apiserver服务端证书
            {
                "name": "apiserver-to-kubelet-client",
                "CN": KUBERNETES.APISERVER_TO_KUBELET_USER,
                "hosts": [],
                "O": "",
                "profile": "kubernetes",
            },  # apiserver访问kubelet的客户端证书
            {
                "name": "sa",
                "CN": "sa",
                "hosts": [],
                "O": "",
                "profile": "kubernetes",
            },  # 签署sa token的证书
            {
                "name": "kube-admin-client",
                "CN": "system:masters",
                "hosts": [],
                "O": "system:masters",
                "profile": "kubernetes",
            },  # 具有超级管理员权限的客户端证书
            {
                "name": "kube-controller-manager-client",
                "CN": "system:kube-controller-manager",
                "hosts": [],
                "O": "",
                "profile": "kubernetes",
            },  # 具有kube-controller-manager权限的客户端证书
            {
                "name": "kube-scheduler-client",
                "CN": "system:kube-scheduler",
                "hosts": [],
                "O": "",
                "profile": "kubernetes",
            },  # 具有kube-scheduler权限的客户端证书
            {
                "name": "kube-proxy-client",
                "CN": "system:kube-proxy",
                "hosts": [],
                "O": "",
                "profile": "kubernetes",
            },  # 具有kube-proxy权限的客户端证书
        ],
    })
    if err:
        return -1, err, {}

    # 扩展API等外部程序ca及证书签发
    proxy_ca_cert, err = gen_ca_cert({
        "ca": {
            "CN": "kubernetes-proxy",  # common name
            "expiry": 876000,  # ca有效时间
            "profiles": [
                {
                    "profile": "kubernetes-proxy",
                    "expiry": 876000,  # 签发证书有效期
                    "usages": ["signing", "key encipherment", "server auth", "client auth"],  # 签发证书用途
                }
            ],  # 指定不同签发证书使用场景
        },
        "certs": [
            {
                "name": "proxy-client",
                "CN": "proxy-client",
                "hosts": [],
                "O": "",
                "profile": "kubernetes-proxy",
            },  # apiserver作为代理访问扩展API的客户端证书
        ],
    })
    if err:
        return -1, err, {}

    # admin-kubeconfig生成
    admin_kubeconfig_path, err = gen_kubeconfig({
        "default_context": "admin",  # 默认的上下文
        "config_name": "k8s-admin.kubeconfig",  # kubeconfig文件名称
        "clusters": [
            {
                "cluster": "kubernetes",  # 集群名称
                "apiserver": "https://{}:{}".format(apiserver_ip, apiserver_port),
                "user": "kube-admin",  # 用户名
                "context": "admin",  # 上下文
                "ca": k8s_ca_cert["cert"]["ca"],
                "cert": k8s_ca_cert["cert"]["kube-admin-client"],
                "cert_key": k8s_ca_cert["cert"]["kube-admin-client-key"],
            }
        ]
    })
    if err:
        return -1, err, {}

    # scheduler-kubeconfig生成
    scheduler_kubeconfig_path, err = gen_kubeconfig({
        "default_context": "scheduler",  # 默认的上下文
        "config_name": "kube-scheduler.kubeconfig",  # kubeconfig文件名称
        "clusters": [
            {
                "cluster": "kubernetes",
                "apiserver": "https://{}:{}".format(apiserver_ip, apiserver_port),
                "user": "kube-scheduler",
                "context": "scheduler",
                "ca": k8s_ca_cert["cert"]["ca"],
                "cert": k8s_ca_cert["cert"]["kube-scheduler-client"],
                "cert_key": k8s_ca_cert["cert"]["kube-scheduler-client-key"],
            }
        ]
    })
    if err:
        return -1, err, {}

    # controller-manager-kubeconfig生成
    controller_manager_kubeconfig_path, err = gen_kubeconfig({
        "default_context": "controller-manager",  # 默认的上下文
        "config_name": "kube-controller-manager.kubeconfig",  # kubeconfig文件名称
        "clusters": [
            {
                "cluster": "kubernetes",
                "apiserver": "https://{}:{}".format(apiserver_ip, apiserver_port),
                "user": "kube-controller-manager",
                "context": "controller-manager",
                "ca": k8s_ca_cert["cert"]["ca"],
                "cert": k8s_ca_cert["cert"]["kube-controller-manager-client"],
                "cert_key": k8s_ca_cert["cert"]["kube-controller-manager-client-key"],
            }
        ]
    })
    if err:
        return -1, err, {}

    # kubelet-bootstrap-kubeconfig生成
    kubelet_bootstrap_kubeconfig_path, err = gen_kubeconfig({
        "default_context": "kubelet-bootstrap",  # 默认的上下文
        "config_name": "kubelet-bootstrap.kubeconfig",  # kubeconfig文件名称
        "clusters": [
            {
                "cluster": "kubernetes",
                "apiserver": "https://{}:{}".format(apiserver_ip, apiserver_port),
                "user": KUBERNETES.BOOTSTRAP_USER,
                "context": "kubelet-bootstrap",
                "ca": k8s_ca_cert["cert"]["ca"],
                "token": token,
            }
        ]
    })
    if err:
        return -1, err, {}

    # proxy-kubeconfig生成
    proxy_kubeconfig_path, err = gen_kubeconfig({
        "default_context": "kube-proxy",  # 默认的上下文
        "config_name": "kube-proxy.kubeconfig",  # kubeconfig文件名称
        "clusters": [
            {
                "cluster": "kubernetes",
                "apiserver": "https://{}:{}".format(apiserver_ip, apiserver_port),
                "user": "kube-proxy",
                "context": "kube-proxy",
                "ca": k8s_ca_cert["cert"]["ca"],
                "cert": k8s_ca_cert["cert"]["kube-proxy-client"],
                "cert_key": k8s_ca_cert["cert"]["kube-proxy-client-key"],
            }
        ]
    })
    if err:
        return -1, err, {}

    cluster_connect_attr = {
        "ca": k8s_ca_cert["cert"]['ca'],
        "cert": k8s_ca_cert["cert"]['kube-admin-client'],
        "key": k8s_ca_cert["cert"]['kube-admin-client-key'],
        "apiserver": "https://{}:{}".format(apiserver_ip, apiserver_port)
    }  # 集群连接属性

    # 生成calico yaml
    calico_yaml_path = PLUGIN.gen_calico_yaml(
        static_dir,
        version,
        cluster_cidr,
        apiserver_ip,
        apiserver_port,
        network_interface,
        mode=calico_mode,
    )

    # 生成coreDNS yaml
    core_dns_yaml_path = PLUGIN.gen_core_dns_yaml(
        static_dir,
        len(masters),
        cluster_dns_service_ip_list[0],  # todo yaml module仅支持一个svc ip
        dns_resolve_list,
        upper_dns_server
    )

    # 生成nginx-ingress-controller yaml
    nginx_ingress_controller_yaml_path = PLUGIN.gen_nginx_ingress_controller_yaml(
        static_dir,
        version,
        len(masters),
    )
    config = {
        "version": version,
        "apiserver_ip": apiserver_ip,
        "apiserver_port": apiserver_port,
        "cluster_cidr": cluster_cidr,
        "service_cluster_ip_range": service_cluster_ip_range,
        "service_node_port_range": service_node_port_range,
        "cluster_dns_service_ip_list": cluster_dns_service_ip_list,
        "network_interface": network_interface,
        "calico_mode": calico_mode,
        "upper_dns_server": upper_dns_server,
        "token_path": token_path,
        "audit_config_path": audit_config_path,
        "apiserver_service_path": apiserver_service_path,
        "scheduler_service_path": scheduler_service_path,
        "controller_service_path": controller_service_path,
        "kubelet_service_path_map": kubelet_service_path_map,
        "kubelet_config_path_map": kubelet_config_path_map,
        "kube_proxy_service_path_map": kube_proxy_service_path_map,
        "k8s_ca_cert": k8s_ca_cert,
        "proxy_ca_cert": proxy_ca_cert,
        "admin_kubeconfig_path": admin_kubeconfig_path,
        "scheduler_kubeconfig_path": scheduler_kubeconfig_path,
        "controller_manager_kubeconfig_path": controller_manager_kubeconfig_path,
        "kubelet_bootstrap_kubeconfig_path": kubelet_bootstrap_kubeconfig_path,
        "proxy_kubeconfig_path": proxy_kubeconfig_path,
        "cluster_connect_attr": cluster_connect_attr,
        "etcd_clusters": etcd_clusters,
        "etcd_certs": etcd_certs,
        "calico_yaml_path": calico_yaml_path,
        "core_dns_yaml_path": core_dns_yaml_path,
        "nginx_ingress_controller_yaml_path": nginx_ingress_controller_yaml_path,
    }
    cluster = Cluster(
        name=cluster_name,
        config=config,
        type=ClusterTypeEnum.KUBERNETES
    )
    cluster.save()

    job = register_job(
        "部署k8s",
        [
            [
                AsyncTaskDeployApiserver(
                    "部署apiserver",
                    {
                        "pkg": pkg,
                        "config_dir": config_dir,
                        "application_dir": application_dir,
                        "cert_dir": cert_dir,
                        "master_nodes": masters,
                        "token_path": token_path,
                        "apiserver_service_path": apiserver_service_path,
                        "audit_config_path": audit_config_path,
                        "admin_kubeconfig_path": admin_kubeconfig_path,
                        "k8s_ca_cert": k8s_ca_cert,
                        "proxy_ca_cert": proxy_ca_cert,
                        "etcd_certs": etcd_certs,
                    },
                    {
                        "master_nodes": masters,
                        "apiserver_port": apiserver_port,
                        "k8s_ca_cert": k8s_ca_cert,
                    }

                ),
            ],
            [
                AsyncTaskDeployBashCompletion(
                    "部署bash_completion",
                    {
                        "nodes": masters,
                        "pkg": conf.BASH_COMPLETION_PACKAGE["2.11"],
                        "application_dir": application_dir,
                    }
                ),
            ],
            [
                AsyncTaskDeployRbac(
                    "创建集群rbac",
                    {
                        "cluster_connect_attr": cluster_connect_attr,
                    }
                ),
            ],
            [
                AsyncTaskDeployScheduler(
                    "部署scheduler",
                    {
                        "pkg": pkg,
                        "config_dir": config_dir,
                        "application_dir": application_dir,
                        "master_nodes": masters,
                        "scheduler_service_path": scheduler_service_path,
                        "scheduler_kubeconfig_path": scheduler_kubeconfig_path,
                    }
                ),
            ],
            [
                AsyncTaskDeployControllerManager(
                    "部署controller-manager",
                    {
                        "pkg": pkg,
                        "config_dir": config_dir,
                        "application_dir": application_dir,
                        "cert_dir": cert_dir,
                        "master_nodes": masters,
                        "controller_service_path": controller_service_path,
                        "controller_manager_kubeconfig_path": controller_manager_kubeconfig_path,
                        "k8s_ca_cert": k8s_ca_cert,
                    }
                ),
            ],
            [
                AsyncTaskDeployKubelet(
                    "部署kubelet",
                    {
                        "pkg": pkg,
                        "config_dir": config_dir,
                        "application_dir": application_dir,
                        "cert_dir": cert_dir,
                        "worker_nodes": all_nodes,
                        "kubelet_bootstrap_kubeconfig_path": kubelet_bootstrap_kubeconfig_path,
                        "kubelet_config_path_map": kubelet_config_path_map,
                        "kubelet_service_path_map": kubelet_service_path_map,
                        "k8s_ca_cert": k8s_ca_cert,
                    },
                    {
                        "worker_nodes": all_nodes,
                        "cluster_connect_attr": cluster_connect_attr,
                    }
                )
            ],
            [
                AsyncTaskInitNode(
                    "初始化节点标记",
                    {
                        "cluster_connect_attr": cluster_connect_attr,
                        "master_nodes": masters,
                        "worker_nodes": all_nodes,
                    }
                ),
            ],
            [
                AsyncTaskDeployProxy(
                    "部署proxy",
                    {
                        "pkg": pkg,
                        "config_dir": config_dir,
                        "application_dir": application_dir,
                        "worker_nodes": all_nodes,
                        "kube_proxy_service_path_map": kube_proxy_service_path_map,
                        "proxy_kubeconfig_path": proxy_kubeconfig_path,
                    }
                ),
            ],
            [
                AsyncTaskDeployPlugin(
                    "部署calico plugin",
                    {
                        "cluster_connect_attr": cluster_connect_attr,
                        "yaml_path": calico_yaml_path,
                    }
                ),
            ],
            [
                AsyncTaskDeployPlugin(
                    "部署coreDNS plugin",
                    {
                        "cluster_connect_attr": cluster_connect_attr,
                        "yaml_path": core_dns_yaml_path,
                    }
                ),
            ],
            [
                AsyncTaskDeployPlugin(
                    "部署nginx-ingress-controller plugin",
                    {
                        "cluster_connect_attr": cluster_connect_attr,
                        "yaml_path": nginx_ingress_controller_yaml_path,
                    }
                ),
            ],
            [
                AsyncTaskFinishCreateCluster(
                    "完成kubernetes集群部署",
                    {
                        "cluster": cluster
                    }
                ),
            ],
        ]
    )

    data = {
        "job_id": job.id,
        "config": get_static_url(config),
    }

    return 0, "", data


def job_add_node_to_kubernetes(params):
    """Register an async job that joins extra worker nodes to a running k8s cluster.

    Validates the target cluster and the candidate nodes, renders the per-node
    kubelet / kube-proxy service and config files plus a kube-proxy kubeconfig,
    then queues the deploy stages (kubelet -> node init -> kube-proxy).

    Args:
        params: dict carrying "nodes" (node IPs), "cluster_id", and the
            "application_dir" / "data_dir" / "cert_dir" / "config_dir" paths.

    Returns:
        (code, message, data) tuple; on success data holds the job id and a
        static URL for the generated config.
    """
    application_dir = params["application_dir"]
    data_dir = params["data_dir"]
    cert_dir = params["cert_dir"]
    config_dir = params["config_dir"]
    cluster_id = params["cluster_id"]
    node_ips = list(set(params["nodes"]))  # drop duplicate node entries

    # The target cluster must exist, be fully deployed, and actually be k8s.
    try:
        cluster = Cluster.objects.get(id=cluster_id)
    except Exception as e:
        return -1, str(e), {}
    if cluster.status != ClusterStatusEnum.DONE:
        return -1, "集群状态异常", {}
    if cluster.type != ClusterTypeEnum.KUBERNETES:
        return -1, "不可加入k8s节点到非k8s集群", {}

    # Agent reachability check on every candidate node.
    code, err_nodes = check_install(node_ips)
    if code != 0:
        detail = ["{}-{}".format(n, str(e)) for n, e in err_nodes.items()]
        return code, "节点检查失败,{}".format(detail), {}

    cluster_config = cluster.config
    pkg = conf.KUBERNETES_PACKAGE[cluster_config["version"]]

    # Fresh static directory with one sub-directory per node.
    static_dir = gen_static_dir()
    lu_logger.debug("static dir:{}".format(static_dir))
    for ip in node_ips:
        os.mkdir(os.path.join(static_dir, ip))

    kubelet_service_path_map = KUBERNETES.gen_kubelet_service_map(
        static_dir,
        cert_dir,
        config_dir,
        data_dir,
        node_ips,
        conf.PAUSE_IMAGE["3.9"]["pause"],
    )
    kubelet_config_path_map = KUBERNETES.gen_kubelet_config_map(
        static_dir,
        cert_dir,
        node_ips,
        cluster_config["cluster_dns_service_ip_list"],
    )
    kube_proxy_service_path_map = KUBERNETES.gen_kube_proxy_service_map(
        static_dir,
        config_dir,
        node_ips,
        cluster_config["cluster_cidr"],
    )

    # kube-proxy kubeconfig, signed by the cluster's existing CA material.
    k8s_certs = cluster_config["k8s_ca_cert"]["cert"]
    apiserver_url = "https://{}:{}".format(
        cluster_config["apiserver_ip"], cluster_config["apiserver_port"])
    proxy_kubeconfig_path, err = gen_kubeconfig({
        "default_context": "kube-proxy",  # default context
        "config_name": "kube-proxy.kubeconfig",  # kubeconfig file name
        "clusters": [
            {
                "cluster": "kubernetes",
                "apiserver": apiserver_url,
                "user": "kube-proxy",
                "context": "kube-proxy",
                "ca": k8s_certs["ca"],
                "cert": k8s_certs["kube-proxy-client"],
                "cert_key": k8s_certs["kube-proxy-client-key"],
            }
        ]
    })
    if err:
        return -1, err, {}

    config = {
        "kubelet_service_path_map": kubelet_service_path_map,
        "kubelet_config_path_map": kubelet_config_path_map,
        "kube_proxy_service_path_map": kube_proxy_service_path_map,
        "proxy_kubeconfig_path": proxy_kubeconfig_path,
    }

    kubelet_task = AsyncTaskDeployKubelet(
        "部署kubelet",
        {
            "pkg": pkg,
            "config_dir": config_dir,
            "application_dir": application_dir,
            "cert_dir": cert_dir,
            "worker_nodes": node_ips,
            "kubelet_bootstrap_kubeconfig_path": cluster_config["kubelet_bootstrap_kubeconfig_path"],
            "kubelet_config_path_map": kubelet_config_path_map,
            "kubelet_service_path_map": kubelet_service_path_map,
            "k8s_ca_cert": cluster_config["k8s_ca_cert"],
        },
        {
            "worker_nodes": node_ips,
            "cluster_connect_attr": cluster_config["cluster_connect_attr"],
        },
    )
    init_node_task = AsyncTaskInitNode(
        "初始化节点标记",
        {
            "cluster_connect_attr": cluster_config["cluster_connect_attr"],
            "master_nodes": [],  # only workers are being added here
            "worker_nodes": node_ips,
        },
    )
    proxy_task = AsyncTaskDeployProxy(
        "部署proxy",
        {
            "pkg": pkg,
            "config_dir": config_dir,
            "application_dir": application_dir,
            "worker_nodes": node_ips,
            "kube_proxy_service_path_map": kube_proxy_service_path_map,
            "proxy_kubeconfig_path": proxy_kubeconfig_path,
        },
    )
    # Stages run sequentially: kubelet first, then node labelling, then proxy.
    job = register_job(
        "部署k8s",
        [
            [kubelet_task],
            [init_node_task],
            [proxy_task],
        ]
    )

    return 0, "", {
        "job_id": job.id,
        "config": get_static_url(config),
    }
