#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse
import os
import shutil
import subprocess
import re
import time
from typing import Any, Dict, Iterator, List, Optional, Union

import yaml
from kubernetes import client
from kubernetes.client.rest import ApiException
from kubernetes.watch import watch

from airflow import AirflowException
from airflow.kubernetes.kube_client import get_kube_appsv1api
from airflow.patsnap.store.resource_sync import S3Resource
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults

try:
    import pwd
except ImportError:
    pass


class ArgumentParserError(Exception):
    """Raised by IDataArgumentParser when command-line parsing fails."""

    pass


class IDataArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that raises ArgumentParserError instead of exiting."""

    def error(self, message):
        # argparse.ArgumentParser.error() normally prints usage and calls
        # sys.exit(2); raising keeps parse failures catchable by the caller.
        raise ArgumentParserError(message)


def default_args():
    """Return the default Flink CLI argument string (yarn-cluster preset).

    The value is a multi-line, 4-space-indented block, matching the historical
    inline template byte-for-byte.
    """
    options = [
        "",
        "-m yarn-cluster",
        "-yjm 1024",
        "-ytm 1024",
        "-ys 1",
        "-p 2",
        "",
    ]
    return "\n    ".join(options)


def parse_args(cmd: list):
    """
    Parse a flink-style argument list into a plain dict.

    The first positional token is split on spaces: the first word becomes
    ``data['application']`` and any remaining words become
    ``data['application_args']``.  Options whose value is falsy
    (``None``, ``0``, ``''``, ``[]``) are omitted from the result.

    :param cmd: tokenized command-line arguments
    :return: dict of the parsed, non-falsy options
    :raises ArgumentParserError: if the arguments cannot be parsed
    """
    parser = IDataArgumentParser(description='flink arg parser.')
    parser.add_argument('-jc', '--jobManagerCpu', type=int, nargs='?', default=1)
    parser.add_argument('-tc', '--taskManagerCpu', type=int, nargs='?', default=1)
    parser.add_argument('-jm', '--jobManagerMemory', type=str, nargs='?', default="1G")
    parser.add_argument('-tm', '--taskManagerMemory', type=str, nargs='?', default="1G")
    parser.add_argument('-ts', '--slots', type=int, nargs='?', default=1)
    parser.add_argument('-p', '--parallelism', type=int, nargs='?', default=2)
    parser.add_argument('-c', '--class', nargs='?')
    parser.add_argument('-s', '--savepoint', type=str, nargs='?')
    parser.add_argument('-kd', action='append')
    parser.add_argument('application', nargs='+')

    args = parser.parse_args(cmd)
    data = dict()
    # BUG FIX: the original loop special-cased a 'yD' key that can never
    # occur (the parser defines '-kd', whose dest is 'kd'); that dead
    # branch has been removed.  Behavior is otherwise unchanged.
    for key, value in vars(args).items():
        if not value:
            # Drop unset options; note this also drops explicit falsy
            # values such as 0.
            continue
        if key == 'application':
            aps = value[0].split(' ')
            data['application'] = aps[0]
            if len(aps) > 1:
                data['application_args'] = aps[1:]
        else:
            data[key] = value
    return data


def get_user_id(name):
    """Return the numeric uid for *name* from the passwd database.

    Scans every passwd entry and returns the uid of the first one whose
    login name matches; returns ``None`` when no entry matches.
    """
    for entry in pwd.getpwall():
        if entry.pw_name == name:
            return entry.pw_uid


def demote(uid, gid):
    """Return a callable that drops privileges to *uid*/*gid*.

    Intended for use as ``preexec_fn`` of :class:`subprocess.Popen` so the
    child process runs as the given user.

    :param uid: numeric user id to switch to
    :param gid: numeric group id to switch to
    :return: zero-argument callable performing the privilege drop
    """
    def exe_cmd():
        # BUG FIX: the original passed uid to setgid and gid to setuid
        # (arguments swapped).  The group must also be set before the
        # user: once setuid() drops root, setgid() would be denied.
        os.setgid(gid)
        os.setuid(uid)

    return exe_cmd


class FlinkSubmitOperatorBak(BaseOperator):
    """Submit a Flink job in Kubernetes application mode and track its Deployment.

    Workflow (see :meth:`execute`):
      1. clean and re-create the task's jar directory on the shared PVC,
      2. download any remote files from S3 onto that PVC,
      3. build a ``flink run-application --target kubernetes-application``
         command line and launch it with :mod:`subprocess`,
      4. watch the resulting Kubernetes Deployment and fail the task if the
         Deployment is deleted or repeatedly fails to become ready.
    """

    @apply_defaults
    def __init__(
            self,
            *,
            jobmanager: Optional[str] = None,
            jobManagerCpu: Optional[int] = None,
            taskManagerCpu: Optional[int] = None,
            jobManagerMemory: Optional[str] = None,
            taskManagerMemory: Optional[str] = None,
            slots: Optional[int] = None,
            parallelism: Optional[int] = None,
            java_class: Optional[str] = None,
            name: Optional[str] = None,
            kd: Optional[List[Dict[str, Any]]] = None,
            yarnqueue: Optional[str] = None,
            jars: Optional[str] = None,
            remote_files=None,
            proxy_user: Optional[str] = None,
            flink_binary: Optional[str] = None,
            application_args: Optional[List[Any]] = None,
            application: str = '',
            keytab: Optional[str] = None,
            principal: Optional[str] = None,
            namespace: Optional[str] = None,
            savepoint: Optional[str] = None,
            **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        self._master = jobmanager
        self._jobmanager_memory = jobManagerMemory
        self._taskmanager_memory = taskManagerMemory
        self._jobmanager_cpu = jobManagerCpu
        self._taskmanager_cpu = taskManagerCpu
        self._slots = slots
        self._parallelism = parallelism
        self._java_class = java_class
        self._name = name
        self._jars = jars
        # ``kd`` entries are forwarded verbatim as extra ``-D<entry>``
        # settings on the flink command line (see build_flink_submit_command).
        self._conf = kd
        self._queue = yarnqueue
        self._remote_files = remote_files
        self._proxy_user = proxy_user
        self._flink_binary = flink_binary
        self._application_args = application_args
        self._application = application
        self._keytab = keytab
        self._principal = principal
        self._namespace = namespace
        self._savepoint = savepoint
        # Only the Kubernetes submission path is implemented here; the yarn
        # flag exists for _process_flink_submit_log's branching.
        self._is_yarn = False
        self._is_kubernetes = True
        self._submit_fl: Optional[Any] = None
        self._yarn_application_id: Optional[str] = None
        self._should_track_driver_status = None
        self._driver_id: Optional[str] = None
        self._driver_status: Optional[str] = None
        self._flink_exit_code: Optional[int] = None

    def execute(self, context: Dict[str, Any]) -> None:
        """Stage job files on the PVC and submit the Flink application.

        :param context: Airflow task context (``ti`` is read for the hostname)
        :raises AirflowException: if the submission or the deployment watch fails
        """
        task_id = self.task_id
        self._clean_pvc()
        target_dir = "/opt/jar_pvc/{}".format(task_id)
        if not os.path.exists(target_dir):
            os.mkdir(target_dir)
        if self._remote_files:
            sr = S3Resource()
            for file in self._remote_files:
                sr.download(file, './')
                source = file
                if "/" in file:
                    source = file.split("/")[-1]
                shutil.move(source, "/opt/jar_pvc/{}/{}".format(task_id, source))
        hostname = context.get("ti").hostname
        if self._conf is None:
            self._conf = []
        # Label the Flink pods with the submitting worker's hostname so they
        # can be traced back to this task instance.
        self._conf.append('kubernetes.jobmanager.labels="daemon:' + hostname + '"')
        self._conf.append('kubernetes.taskmanager.labels="daemon:' + hostname + '"')
        try:
            self.submit(self._application)
        except AirflowException as e:
            # Best-effort cleanup of the deployment before failing the task.
            self.on_kill()
            # BUG FIX: the original raised "submit flink error".format(e),
            # which has no placeholder and silently dropped the cause.
            raise AirflowException(
                "submit flink error: {}".format(e)
            )

    def on_kill(self) -> None:
        """Delete this task's Flink Deployment; a missing one is not an error."""
        self.log.debug("Kill Command is being called")
        cluster_id = self._get_cluster_id()
        try:
            v1: client.AppsV1Api = get_kube_appsv1api()
            # Read first so a 404 is raised before attempting the delete.
            v1.read_namespaced_deployment(name=cluster_id, namespace=self._namespace)
            v1.delete_namespaced_deployment(name=cluster_id, namespace=self._namespace)
        except ApiException as e:
            if e.status == 404:
                # Nothing to clean up; use the logger instead of print().
                self.log.info(
                    "Deployment '%s' does not exist in namespace '%s'.",
                    cluster_id, self._namespace
                )
            else:
                raise AirflowException(
                    "read deployment error app: {}. e is: {}.is not exist".format(
                        cluster_id, e
                    )
                )

    def get_flink_submit_command(self):
        """Return the full flink-submit command line for this operator."""
        return self.build_flink_submit_command(self._application)

    def update_application_id(self, app_id):
        """Propagate the YARN application id to the task instance, if present."""
        if self.task_instance:
            self.task_instance.update_application_id(app_id)

    def submit(self, application: str = "", **kwargs: Any) -> None:
        """
        Run the flink-submit command and watch the resulting Deployment.

        :param application: Submitted application, jar or py file
        :type application: str
        :param kwargs: extra arguments to Popen (see subprocess.Popen)
        :raises AirflowException: if the Deployment cannot be read, is
            deleted, or repeatedly fails to become ready
        """
        flink_submit_cmd = self.build_flink_submit_command(application)

        cwd = os.getcwd()
        # SECURITY FIX: run useradd without a shell so a crafted proxy_user
        # value cannot inject shell syntax (was: shell=True with %-format).
        subprocess.run(['useradd', self._proxy_user])
        pid = pwd.getpwnam(self._proxy_user)
        uid = pid.pw_uid  # numeric user id of the proxy user
        gid = pid.pw_gid  # numeric group id of the proxy user
        fc = demote(uid, gid)

        self._submit_fl = subprocess.Popen(
            flink_submit_cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            bufsize=-1,
            # preexec_fn=fc,
            universal_newlines=True,
            cwd=cwd
        )
        self._process_flink_submit_log(iter(self._submit_fl.stdout))  # type: ignore

        time.sleep(2)
        # Watch the Deployment created by the Flink client.
        v1: client.AppsV1Api = get_kube_appsv1api()
        cluster_id = self._get_cluster_id()

        try:
            # Make sure the Deployment exists before starting the watch.
            deployment = v1.read_namespaced_deployment(name=cluster_id, namespace=self._namespace)
        except ApiException as e:
            raise AirflowException(
                "read deployment error app: {}. e is: {}.".format(
                    cluster_id, e
                )
            )

        w = watch.Watch()
        stop = False
        # NOTE(review): ``stop`` is never set to True, so this loop exits only
        # by raising (deployment deleted / never ready); an expired watch
        # stream is simply restarted.  Confirm this is the intended contract.
        while True:
            try:
                rv = '0'
                retry_timestamp = int(time.time())
                j_retry = 0
                watch_event = w.stream(v1.list_namespaced_deployment, namespace=self._namespace,
                                       label_selector="app=" + cluster_id,
                                       resource_version=rv)
                for event in watch_event:
                    ready_replicas = event['object'].status.ready_replicas
                    event_type = event['type']
                    self.log.info("flink app running status:{}, ready_replicas:{}".format(event_type, ready_replicas))
                    if event_type == 'ERROR':
                        # Stale resource version reported by the API server;
                        # restart the watch from scratch.
                        rv = '0'
                        self.log.info("update resource version")
                        continue
                    new_version = event['object'].metadata.resource_version
                    if not rv or int(rv) < int(new_version):
                        rv = new_version
                    if event_type == 'DELETED':
                        w.stop()
                        raise AirflowException(
                            "flink deployment DELETED app: {}.replicas:{}".format(cluster_id, ready_replicas)
                        )
                    else:
                        current_timestamp = int(time.time())
                        retry_interval = current_timestamp - retry_timestamp
                        if j_retry > 4 and retry_interval < 1800:
                            # More than 5 not-ready events within half an
                            # hour: give up on this deployment.
                            # v1.delete_namespaced_deployment(namespace=self._namespace, name=cluster_id)
                            w.stop()
                            raise AirflowException(
                                "deployment is not ready: {}.".format(
                                    cluster_id
                                )
                            )
                        # IDIOM FIX: compare to None with ``is``, not ``==``.
                        if event_type == 'MODIFIED' and ready_replicas is None:
                            if retry_interval > 1800:
                                # Reset the failure window after 30 minutes.
                                retry_timestamp = current_timestamp
                                j_retry = 0
                            j_retry += 1
                if stop:
                    break
            except ApiException as e:
                self.log.error("watch deployment error app: {}".format(cluster_id))
        self.log.info("watch resource end")
        w.stop()
        self.log.debug("Should track driver: %s", self._should_track_driver_status)

    def build_flink_submit_command(self, application: str) -> str:
        """
        Construct the flink-submit command to execute.

        :param application: jar file name, resolved under this task's PVC dir
        :type application: str
        :return: full command to be executed (unmasked)
        """
        cluster_id = self._get_cluster_id()
        connection_cmd = self._get_flink_binary_path()
        connection_cmd += ["-Dkubernetes.cluster-id=" + cluster_id]
        connection_cmd += ["-Dkubernetes.namespace=" + self._namespace]
        connection_cmd += ["-Dkubernetes.service-account=flink-service-account"]
        connection_cmd += ["-Dkubernetes.pod-template-file.jobmanager=/opt/tpl/flink-pod-template.yaml"]
        connection_cmd += ["-Dkubernetes.pod-template-file.taskmanager=/opt/tpl/flink-pod-template.yaml"]
        connection_cmd += ["-Dkubernetes.rest-service.exposed.type=ClusterIP"]
        if self._jobmanager_cpu:
            # CPU request is scaled down to 1/8 of the declared vcores.
            j_virtual_cpu = self._jobmanager_cpu * 0.125
            connection_cmd += ["-Dkubernetes.jobmanager.cpu=" + str(j_virtual_cpu)]
        if self._taskmanager_cpu:
            t_virtual_cpu = self._taskmanager_cpu * 0.125
            connection_cmd += ["-Dkubernetes.taskmanager.cpu=" + str(t_virtual_cpu)]
        if self._jobmanager_memory:
            connection_cmd += ["-Djobmanager.memory.process.size=" + self._jobmanager_memory]
        if self._taskmanager_memory:
            connection_cmd += ["-Dtaskmanager.memory.process.size=" + self._taskmanager_memory]
        if self._slots:
            connection_cmd += ["-Dtaskmanager.numberOfTaskSlots=" + str(self._slots)]
        if self._parallelism:
            connection_cmd += ["-Dparallelism.default=" + str(self._parallelism)]
        if self._conf:
            # User-supplied key=value pairs become extra -D settings.
            modified_array = ['-D' + str(element) for element in self._conf]
            connection_cmd += modified_array
        if self._savepoint:
            connection_cmd += ["-s", str(self._savepoint)]
        if self._java_class:
            # NOTE(review): the Flink CLI documents this option as -c/--class;
            # confirm "-class" is accepted by the deployed Flink version.
            connection_cmd += ["-class", self._java_class]
        # The actual script to execute
        application = "local:///opt/jar_pvc/{}/{}".format(self.task_id, application)
        connection_cmd += [application]
        # Append any application arguments
        if self._application_args:
            connection_cmd += self._application_args
        self.log.info("Flink-Submit cmd: %s", self._mask_cmd(connection_cmd))
        # BUG FIX: return the real command line.  The original returned the
        # masked string, so any secret value would have been replaced by
        # '******' in the command that was actually executed.
        return ' '.join(connection_cmd)

    def _get_flink_binary_path(self) -> List[str]:
        """Return the leading tokens of the flink application-mode command."""
        connection_cmd = [self._flink_binary, "run-application", "--target", "kubernetes-application"]
        return connection_cmd

    def _process_flink_submit_log(self, itr: Iterator[Any]) -> None:
        """
        Processes the log files and extracts useful information out of it.

        If the deploy-mode is 'client', log the output of the submit command as those
        are the output logs of the flink worker directly.

        Remark: If the driver needs to be tracked for its status, the log-level of the
        flink deploy needs to be at least INFO (log4j.logger.org.apache.flink.deploy=INFO)

        :param itr: An iterator which iterates over the input of the subprocess
        """
        # Consume the iterator
        for line in itr:

            line = line.strip()
            # If we run yarn cluster mode, we want to extract the application id from
            # the logs so we can kill the application when we stop it unexpectedly
            if self._is_yarn:
                match = re.search('(application[0-9_]+)', line)
                if match and self._yarn_application_id is None:
                    self._yarn_application_id = match.groups()[0]
                    self.update_application_id(self._yarn_application_id)
                    self.log.info("Identified flink driver id: %s", self._yarn_application_id)

            # If we run Kubernetes cluster mode, we want to extract the driver pod id
            # from the logs so we can kill the application when we stop it unexpectedly
            elif self._is_kubernetes:
                match = re.search(r'successfully, JobManager Web Interface', line)
                if match:
                    self._kubernetes_driver_pod = self._get_cluster_id()
                    self.log.info("flink submit success clusterId: %s", self._kubernetes_driver_pod)
                else:
                    # NOTE(review): every non-matching line resets the exit
                    # code to 1, even after a success line was seen; the flag
                    # is not read elsewhere in this file — confirm intent.
                    self._flink_exit_code = 1

            # if we run in standalone cluster mode and we want to track the driver status
            # we need to extract the driver id from the logs. This allows us to poll for
            # the status using the driver id. Also, we can kill the driver when needed.
            elif self._should_track_driver_status and not self._driver_id:
                match_driver_id = re.search(r'(driver-[0-9\-]+)', line)
                if match_driver_id:
                    self._driver_id = match_driver_id.groups()[0]
                    self.log.info("identified flink driver id: %s", self._driver_id)

            self.log.info(line)

    def _mask_cmd(self, connection_cmd: Union[str, List[str]]) -> str:
        """Return the command with secret/password values replaced by ******.

        :param connection_cmd: command as a token list or a pre-joined string
        """
        # ROBUSTNESS FIX: the declared type allows a plain string, but
        # ' '.join on a string space-separates its characters; join only
        # token lists.
        cmd_str = connection_cmd if isinstance(connection_cmd, str) else ' '.join(connection_cmd)
        # Mask any password related fields in application args with key value pair
        # where key contains password (case insensitive), e.g. HivePassword='abc'
        connection_cmd_masked = re.sub(
            r"("
            r"\S*?"  # Match all non-whitespace characters before...
            r"(?:secret|password)"  # ...literally a "secret" or "password"
            # word (not capturing them).
            r"\S*?"  # All non-whitespace characters before either...
            r"(?:=|\s+)"  # ...an equal sign or whitespace characters
            # (not capturing them).
            r"(['\"]?)"  # An optional single or double quote.
            r")"  # This is the end of the first capturing group.
            r"(?:(?!\2\s).)*"  # All characters between optional quotes
            # (matched above); if the value is quoted,
            # it may contain whitespace.
            r"(\2)",  # Optional matching quote.
            r'\1******\3',
            cmd_str,
            flags=re.I,
        )
        return connection_cmd_masked

    def _get_cluster_id(self):
        """Return the Kubernetes cluster-id derived from this task's id."""
        return "{}-flink-cluster".format(self.task_id)

    def _clean_pvc(self):
        """Remove this task's staged jar directory from the shared PVC."""
        source_dir = "/opt/jar_pvc/{}".format(self.task_id)
        if os.path.exists(source_dir):
            self.log.info("clean pvc jar:{}".format(source_dir))
            shutil.rmtree(source_dir)
        else:
            self.log.info("source path is not exist:{}".format(source_dir))
