import os
import time
import uuid
import fabric
import socket
import paramiko.ssh_exception
from flask import current_app
from celery.result import AsyncResult
from flask import send_from_directory

from utils.common_import import *
from .model import *
from common.ssh.utils import SSHManager

from utils.ext import inner_db, db_add_record, db_transaction_context, db_transaction, db_update_record, db_query_record


def _check_ssh_connection(node_info):
    """Try to open an SSH connection to the node described by *node_info*.

    Args:
        node_info: dict with keys "hostip", "sshport", "username", "password".

    Returns:
        dict: ``{"ok": True, "error": "", "connection": conn}`` on success —
        the CALLER owns the open connection and must close it — or
        ``{"ok": False, "error": <message>}`` on failure, in which case the
        connection has already been closed here.
    """
    conn = fabric.Connection(host=node_info["hostip"],
                             port=node_info["sshport"],
                             user=node_info["username"],
                             connect_kwargs={"password": node_info["password"]},
                             connect_timeout=2)
    # Let subsequent sudo commands authenticate with the same password.
    conn.config.sudo.password = node_info["password"]

    error_message = None
    try:
        conn.open()
    except socket.timeout as e:
        logger.info("connect time out ")
        error_message = str(e)
    except paramiko.ssh_exception.AuthenticationException as e:
        logger.info("Authentication failed")
        error_message = str(e)
    except Exception as e:
        # Any other failure (refused connection, DNS, ...) is reported the same way.
        logger.info(str(e))
        error_message = str(e)

    # Single failure path instead of three duplicated close-and-return arms.
    if error_message is not None:
        conn.close()
        return {"ok": False, "error": error_message}

    return {"ok": True, "error": "", "connection": conn}


def _check_node_exist(node_info):
    """Return the k8s node name whose InternalIP equals node_info["host_ip"].

    Returns None when no cluster node reports that address.

    Note: the original compared node_info["host_ip"] against a running
    `host_ip` accumulator inside the address loop, which could false-match
    an empty host_ip before any InternalIP address was seen; the comparison
    now only considers actual InternalIP addresses.
    """
    for node in cluster_client.kube_client.get_nodes().items:
        for address in node.status.addresses:
            if address.type == 'InternalIP' and address.address == node_info['host_ip']:
                return node.metadata.name

    return None

def label_node(node_info, label_info):
    """Apply *label_info* as labels to the k8s node matching node_info["host_ip"].

    A no-op when the node is not part of the cluster; patch failures are
    logged and swallowed (best-effort labeling).
    """
    target = _check_node_exist(node_info)
    if not target:
        return

    patch_body = {"metadata": {"labels": label_info}}
    try:
        cluster_client.kube_client.corev1_client.patch_node(target, patch_body)
    except Exception as err:
        logger.info("label_node {} {} error: {}".format(node_info, label_info, err))

class MachineConfig(Resource):
    """CRUD endpoints for per-node SSH configuration (NodeConfig) records."""

    # Request-schema keys that put() will copy into the update payload.
    key_list = ["host_ip", "ssh_port", "username", "password", "machine_type"]

    # Request-schema key -> NodeConfig column name, kept consistent with the
    # mapping post() uses when creating a record.
    _column_map = dict(host_ip="node_ip",
                       ssh_port="ssh_port",
                       username="ssh_user",
                       password="ssh_password",
                       machine_type="machine_type")

    def _check_node_exist(self, node_info):
        # Delegate to the module-level helper; the logic was duplicated verbatim.
        return _check_node_exist(node_info)

    @exception_response()
    def get(self):
        """Return all stored node configuration records."""
        result = db_query_record(NodeConfig)

        return make_success_response(result)

    @permission_auth("admin")
    @request_arg_check(MachineConfigAddSchema(), many=False)
    @exception_response()
    @db_transaction(inner_db.session)
    def post(self):
        """Register a node's SSH configuration after verifying connectivity."""
        node_config = NodeConfig.query.filter_by(node_ip=current_schema_instance["host_ip"]).first()
        if node_config:
            return make_error_response(status=500,
                                       code=500,
                                       message="node config already exist")
        result = _check_ssh_connection(dict(hostip=current_schema_instance["host_ip"],
                                            sshport=current_schema_instance["ssh_port"],
                                            username=current_schema_instance["username"],
                                            password=current_schema_instance["password"]))
        if not result["ok"]:
            return make_error_response(status=500, code=500, message=result["error"])
        # The probe connection has served its purpose; close it to avoid a leak.
        result["connection"].close()

        node_config = dict(node_ip=current_schema_instance["host_ip"],
                           ssh_port=current_schema_instance["ssh_port"],
                           ssh_user=current_schema_instance["username"],
                           ssh_password=current_schema_instance["password"],
                           )

        if "machine_type" in current_schema_instance:
            node_config["machine_type"] = current_schema_instance["machine_type"]
            label_node(current_schema_instance, dict(resourceType=node_config["machine_type"]))

        db_add_record(inner_db.session,
                      NodeConfig,
                      dict(node_ip=node_config["node_ip"]),
                      node_config)

        return make_no_data_response(status=200, code=0, message="success")

    @permission_auth("admin")
    @request_arg_check(MachineConfigAddSchema(), many=False)
    @exception_response()
    @db_transaction(inner_db.session)
    def put(self):
        """Update an existing node configuration after re-verifying SSH access."""
        node_config = NodeConfig.query.filter_by(node_ip=current_schema_instance["host_ip"]).first()
        if not node_config:
            return make_error_response(status=500,
                                       code=500,
                                       message="node config does not exist")

        result = _check_ssh_connection(dict(hostip=current_schema_instance["host_ip"],
                                            sshport=current_schema_instance["ssh_port"],
                                            username=current_schema_instance["username"],
                                            password=current_schema_instance["password"]))
        if not result["ok"]:
            # The original was missing `return` here, so the handler fell
            # through and crashed with KeyError on result["connection"].
            return make_error_response(status=500, code=500, message=result["error"])

        result["connection"].close()

        # Translate request-schema keys into NodeConfig column names; the
        # original wrote the raw schema keys (host_ip/username/password),
        # which do not match the model columns used by post().
        update_fields = {self._column_map[key]: current_schema_instance[key]
                         for key in self.key_list
                         if key in current_schema_instance}

        db_update_record(inner_db.session,
                         NodeConfig,
                         dict(node_ip=current_schema_instance["host_ip"]),
                         update_fields)

        if "machine_type" in current_schema_instance:
            label_node(dict(host_ip=current_schema_instance["host_ip"]),
                       dict(resourceType=current_schema_instance["machine_type"]))

        return make_no_data_response(status=200, code=0, message="success")

    @permission_auth("admin")
    @request_arg_check(MachineConfigDeleteSchema(), many=False)
    @exception_response()
    @db_transaction(inner_db.session)
    def delete(self):
        """Delete the configuration record for the given host_ip."""
        node_config = NodeConfig.query.filter_by(node_ip=current_schema_instance["host_ip"]).first()
        if not node_config:
            return make_error_response(status=500,
                                       code=500,
                                       message="node config does not exist")
        inner_db.session.delete(node_config)

        return make_no_data_response(status=200, code=0, message="success")

class K8sMachines(Resource):
    """Endpoints to list cluster nodes, add worker nodes asynchronously via
    celery, and remove worker nodes from the cluster."""

    @exception_response()
    def get(self):
        """List nodes: unfinished adds from the latest batch ("adding") plus
        nodes already registered in the cluster ("added")."""
        nodes = []
        least_task = query_least_task()
        for node_task in least_task:
            if not node_task["finished"]:
                nodes.append(dict(node_id=node_task["node_ip"],
                                  status="adding",
                                  create_time=node_task["create_time"]))

        nodes_list = cluster_client.kube_client.get_nodes().items
        for node in nodes_list:
            for address in node.status.addresses:
                if address.type == 'InternalIP':
                    nodes.append(dict(node_id=address.address,
                                      status="added",
                                      create_time=node.metadata.creation_timestamp))
                    # One entry per node. The original used `continue` (a no-op
                    # as the last statement), which could duplicate a node that
                    # reports several InternalIP addresses.
                    break
        return make_success_response(nodes)

    def _check_node_exist(self, node_info):
        # Delegate to the module-level helper; the logic was duplicated verbatim.
        return _check_node_exist(node_info)

    def _check_not_finished_task(self):
        """Sync celery task results into unfinished NodeOpertaion records.

        Returns:
            bool: True when at least one add-node task is still running.
        """
        time_stamp = time.time() * 1000
        exist_running_task = False
        need_update_db = False

        records = NodeOpertaion.query.filter_by(finished=False).all()
        for record in records:
            result = AsyncResult(record.node_task_id, app=current_app.celery)
            if result.ready():
                record.finished = True
                record.exit_code = result.result
                record.update_time = time_stamp
                need_update_db = True
            else:
                exist_running_task = True

        if need_update_db:
            try:
                logger.info("enter db_transaction_context before commit")
                inner_db.session.commit()
            except Exception as error:
                logger.info(error)
                inner_db.session.rollback()

        return exist_running_task

    def _record_failed_task(self, task_info, local_path, message):
        """Persist a synthetic failed operation (exit_code=500) and write
        *message* to the node's log file so the LogDownload endpoint can
        surface the reason."""
        task_info['node_task_id'] = str(uuid.uuid4())
        task_info['finished'] = True
        task_info['exit_code'] = 500
        db_add_record(inner_db.session,
                      NodeOpertaion,
                      dict(node_task_id=task_info['node_task_id']),
                      task_info)

        with open(local_path, "w") as f:
            f.write(message)

    def _add_one_node(self, node_para, task_id, time_stamp):
        """Validate one node and dispatch the celery add-node task for it.

        Pre-flight failures (already in cluster, missing config, unreachable,
        kubelet already installed) are recorded as failed operations instead
        of raising.
        """
        with db_transaction_context(inner_db.session):
            dir_path = "/node_operation/log/{0}".format(node_para["host_ip"])
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            local_path = "/node_operation/log/{0}/log-{0}".format(node_para["host_ip"])

            task_info = dict(task_id=task_id,
                             node_ip=node_para["host_ip"],
                             operation="ADD",
                             create_time=time_stamp)
            logger.info("adding node {}".format(node_para["host_ip"]))

            # Node is already a member of the current cluster.
            if self._check_node_exist(node_para):
                self._record_failed_task(task_info, local_path,
                                         "Node alreay exist in cluster \n")
                return

            # SSH credentials must have been registered via MachineConfig first.
            node_config = NodeConfig.query.filter_by(node_ip=node_para["host_ip"]).first()
            if not node_config:
                self._record_failed_task(task_info, local_path,
                                         "can not find config of host \n")
                return

            node_info = dict(hostip=node_para["host_ip"],
                             sshport=node_config.ssh_port,
                             password=node_config.ssh_password,
                             username=node_config.ssh_user,
                             force=node_para["force"])

            # The machine must be reachable over SSH.
            result = _check_ssh_connection(node_info)
            if not result["ok"]:
                self._record_failed_task(task_info, local_path,
                                         "can not collect to host \n")
                return

            conn = result["connection"]
            try:
                # A present kubelet suggests the host may already belong to
                # another cluster; refuse unless the caller passed force=True.
                # warn=True so a missing kubelet (non-zero exit) does not raise
                # UnexpectedExit — without it the exited != 0 branch below was
                # unreachable on a clean host.
                kubelet_result = conn.run('which kubelet', hide=True, warn=True)
                if kubelet_result.exited == 0 and not node_info["force"]:
                    self._record_failed_task(task_info, local_path,
                                             "kubelet already exist in host \n")
                    return

                # Wipe any previous kubernetes installation.
                ssh_manager = SSHManager(host=node_info["hostip"],
                                         port=node_info["sshport"],
                                         usr=node_info["username"],
                                         passwd=node_info["password"])
                if kubelet_result.exited == 0:
                    ssh_manager.ssh_exec_cmd("kubeadm reset -f", sudo=True)

                # Ship the install scripts to the target node and unpack them.
                file_path = os.path.dirname(os.path.abspath(__file__))
                tar_file_name = "worker_node_add.tar.gz"
                local_tmp_path = os.path.join(file_path, tar_file_name)

                remote_dir = "/home/{}".format(node_info['username'])
                remote_file_path = "{}/{}".format(remote_dir, tar_file_name)
                conn.put(local_tmp_path, remote_file_path)

                conn.run('tar -xzvf {} -C {}'.format(remote_file_path, remote_dir))
            finally:
                # The probe connection is no longer needed (the celery worker
                # opens its own); the original leaked it on several paths.
                conn.close()

            # node_info is a flat dict of scalars, so a shallow copy is enough
            # to decouple the task args from later mutations.
            async_result = current_app.celery.send_task('app.machine.add_node',
                                                        args=(dict(node_info),))
            logger.info(async_result.id)

            task_info["node_task_id"] = async_result.id
            db_add_record(inner_db.session,
                          NodeOpertaion,
                          dict(node_task_id=task_info["node_task_id"]),
                          task_info)

    @permission_auth("admin")
    @request_arg_check(MachineAddSchema(), many=True)
    @exception_response()
    def post(self):
        """Submit an add-node batch. Only one batch may run at a time."""
        if self._check_not_finished_task():
            return make_error_response(status=500,
                                       code=500,
                                       message="some task is still running, please check")

        time_stamp = time.time() * 1000
        task_id = str(uuid.uuid4())
        for node_info in current_schema_instance:
            self._add_one_node(node_info, task_id, time_stamp)

        return make_no_data_response(status=200, code=0, message="submit adding node task successfully")

    @permission_auth("admin")
    @request_arg_check(MachineDeleteSchema(), many=False)
    @exception_response()
    @db_transaction(inner_db.session)
    def delete(self):
        """Remove a worker node: kubeadm reset on the host, then delete the
        k8s Node object. Master nodes are protected from deletion."""
        node_name = self._check_node_exist(current_schema_instance)
        # Node is not in the cluster. (The original formatted
        # current_schema_instance["hostip"], a key the schema never produces,
        # which raised KeyError on this error path.)
        if not node_name:
            return make_error_response(status=500,
                                       code=500,
                                       message="node:{} does not exist".format(
                                           current_schema_instance["host_ip"]))

        # Refuse to delete k8s master nodes.
        master_nodes = cluster_client.kube_client.get_nodes(label_selector="node-role.kubernetes.io/master")
        master_nodes_list = master_nodes.items
        for node in master_nodes_list:
            for address in node.status.addresses:
                if address.type == 'InternalIP' \
                        and address.address == current_schema_instance['host_ip']:
                    return make_error_response(status=500, code=500,
                                               message="k8s master node can not be deleted")

        # Restore the node to its pre-join state.
        node_config = NodeConfig.query.filter_by(node_ip=current_schema_instance['host_ip']).first()
        if not node_config:
            return make_error_response(status=500, code=500,
                                       message="can not find config info")

        ssh_manager = SSHManager(host=node_config.node_ip,
                                 port=node_config.ssh_port,
                                 usr=node_config.ssh_user,
                                 passwd=node_config.ssh_password)

        try:
            ssh_manager.ssh_exec_cmd("kubeadm reset -f", sudo=True)
        except Exception as e:
            return make_error_response(status=500,
                                       code=500,
                                       message=str(e))

        # Remove the Node resource record from the cluster.
        cluster_client.kube_client.corev1_client.delete_node(node_name)

        return make_no_data_response(status=200, code=0, message="success")

class LogDownload(Resource):
    """Endpoint serving the add-node log file for a NodeOpertaion record."""

    @request_arg_check(DownloadArgsSchema(), many=False, location="args")
    @exception_response()
    def get(self):
        """Stream the log file of the operation identified by the `id` arg."""
        args = current_schema_instance
        record = NodeOpertaion.query.filter_by(id=args["id"]).first()
        if record is None:
            return make_error_response(status=403, code=403, message="record id:{} not found".format(args["id"]))

        log_dir = "/node_operation/log/{0}".format(record.node_ip)
        log_name = "log-{0}".format(record.node_ip)
        log_path = "/node_operation/log/{0}/log-{0}".format(record.node_ip)

        # Guard clause: report missing file instead of nesting the happy path.
        if not os.path.exists(log_path):
            logger.info("file {} not found".format(log_path))
            return make_error_response(status=403, code=403, message="file not found")

        logger.info("sending file {}".format(log_path))
        return send_from_directory(log_dir, log_name, as_attachment=True)


def query_least_task():
    """Refresh unfinished node-operation records from celery, then return the
    task list for the most recent add-node batch.

    Only the latest batch (records sharing the newest task_id) is reported;
    returns an empty list when no operations exist.
    """
    now_ms = time.time() * 1000
    dirty = False

    for record in NodeOpertaion.query.filter_by(finished=False).all():
        async_result = AsyncResult(record.node_task_id, app=current_app.celery)
        if not async_result.ready():
            continue
        record.finished = True
        record.exit_code = async_result.result
        record.update_time = now_ms
        dirty = True

        # On success, label the node with its configured machine type.
        if record.exit_code != 0:
            continue
        node_config = NodeConfig.query.filter_by(node_ip=record.node_ip).first()
        if node_config:
            label_node(dict(host_ip=record.node_ip), dict(resourceType=node_config.machine_type))

    if dirty:
        try:
            logger.info("enter db_transaction_context before commit")
            inner_db.session.commit()
        except Exception as error:
            logger.info(error)
            inner_db.session.rollback()

    last_record = NodeOpertaion.query.order_by(NodeOpertaion.id.desc()).first()
    if not last_record:
        return []

    batch = NodeOpertaion.query.filter_by(task_id=last_record.task_id).all()
    return [dict(id=r.id,
                 create_time=r.create_time,
                 node_ip=r.node_ip,
                 finished=r.finished,
                 exit_code=r.exit_code)
            for r in batch]

class K8sNodesTask(Resource):
    """Endpoint exposing the task records of the latest add-node batch."""

    @exception_response()
    def get(self):
        """Return the most recent add-node batch's task list."""
        return make_success_response(data=query_least_task())