#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse
import os
import subprocess
import re
from typing import Any, Dict, Iterator, List, Optional, Union
from airflow.configuration import conf as airflow_conf
from airflow import AirflowException
from airflow.patsnap.store.resource_sync import S3Resource
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.security.kerberos import renew_from_kt

try:
    import pwd
except ImportError:
    pass


class ArgumentParserError(Exception):
    """Raised by :class:`IDataArgumentParser` instead of exiting the process."""


class IDataArgumentParser(argparse.ArgumentParser):
    """Argument parser that raises instead of calling ``sys.exit()`` on error."""

    def error(self, message):
        """Surface parse failures as :class:`ArgumentParserError`."""
        raise ArgumentParserError(message)


def default_args():
    """Return the default flink CLI arguments as a newline-separated string.

    The layout (leading newline, four-space indent per line, trailing
    indent) matches the historical triple-quoted literal exactly.
    """
    flags = [
        "",
        "-m yarn-cluster",
        "-yjm 1024",
        "-ytm 1024",
        "-ys 1",
        "-p 2",
        "",
    ]
    return "\n    ".join(flags)


def parse_args(cmd: list):
    """
    Parse a tokenized flink CLI argument list into a plain dict.

    :param cmd: tokenized flink command-line arguments
    :return: dict of recognised options. ``-yD key=value`` pairs are
        collected into a nested dict under ``'yD'``; the first positional
        token becomes ``'application'`` and any remaining positional tokens
        become ``'application_args'``. Options left at falsy values are
        omitted.
    :raises ArgumentParserError: if the arguments cannot be parsed.
    """
    parser = IDataArgumentParser(description='flink arg parser.')

    parser.add_argument('-m', '--jobmanager', nargs='?', default='yarn-cluster')
    parser.add_argument('-yjm', '--yarnjobManagerMemory', type=int, nargs='?', default=1024)
    parser.add_argument('-ytm', '--yarntaskManagerMemory', type=int, nargs='?', default=1024)
    parser.add_argument('-ys', '--yarnslots', type=int, nargs='?', default=1)
    parser.add_argument('-p', '--parallelism', type=int, nargs='?', default=2)
    parser.add_argument('-c', '--class', nargs='?')
    parser.add_argument('-ynm', '--yarnname', nargs='?')
    parser.add_argument('-yD', nargs='?', action='append')
    parser.add_argument('-yqu', '--yarnqueue', nargs='?')
    parser.add_argument('application', nargs='+')

    args = parser.parse_args(cmd)
    data = dict()
    for key, value in args.__dict__.items():
        if key == 'yD' and value:
            config = dict()
            for pair in value:
                # Split on the FIRST '=' only, so values that themselves
                # contain '=' (e.g. -yD a.b=x=y) are preserved intact.
                # The previous split('=') truncated such values.
                parts = pair.split('=', 1)
                config[parts[0]] = parts[1]
            data['yD'] = config
        elif value:
            if key == 'application':
                data['application'] = value[0]
                if len(value) > 1:
                    data['application_args'] = value[1:]
            else:
                data[key] = value

    return data


def get_user_id(name):
    """Return the uid of the first passwd entry named *name*, or ``None``."""
    matching_uids = (entry[2] for entry in pwd.getpwall() if entry[0] == name)
    return next(matching_uids, None)


def demote(uid, gid):
    """
    Build a ``preexec_fn`` callable that drops the child process to *uid*/*gid*.

    :param uid: numeric user id the subprocess should run as
    :param gid: numeric group id the subprocess should run as
    :return: zero-argument callable for ``subprocess.Popen(preexec_fn=...)``
    """
    def exe_cmd():
        # Drop the group first: after setuid() the process may no longer be
        # privileged enough to change its gid. The previous code had the
        # arguments swapped (setgid(uid) / setuid(gid)).
        os.setgid(gid)
        os.setuid(uid)

    return exe_cmd


class DataFactoryOperator(BaseOperator):
    """Submit a Flink job through the ``flink run`` CLI (YARN deployment).

    Constructor parameter names mirror the flink CLI flags handled by
    :func:`parse_args` (``-m``, ``-yjm``, ``-ytm``, ``-ys``, ``-p``, ``-c``,
    ``-ynm``, ``-yD``, ``-yqu``). The operator scans the submit process'
    output for the YARN application id so the job can be killed when the
    task is killed.
    """

    @apply_defaults
    def __init__(
            self,
            *,
            jobmanager: Optional[str] = None,
            yarnjobManagerMemory: Optional[int] = None,
            yarntaskManagerMemory: Optional[int] = None,
            yarnslots: Optional[int] = None,
            parallelism: Optional[int] = None,
            java_class: Optional[str] = None,
            yarnname: Optional[str] = None,
            yD: Optional[Dict[str, Any]] = None,
            yarnqueue: Optional[str] = None,
            jars: Optional[str] = None,
            remote_files=None,
            proxy_user: Optional[str] = None,
            flink_binary: Optional[str] = None,
            application_args: Optional[List[Any]] = None,
            application: str = '',
            keytab: Optional[str] = None,
            principal: Optional[str] = None,
            **kwargs: Any,
    ) -> None:
        """
        :param jobmanager: job manager address (``-m``); ``yarn-cluster`` is
            substituted at command-build time when omitted
        :param yarnjobManagerMemory: job manager memory (``-yjm``)
        :param yarntaskManagerMemory: task manager memory (``-ytm``)
        :param yarnslots: slots per task manager (``-ys``)
        :param parallelism: job parallelism (``-p``)
        :param java_class: entry class passed via ``-c``
        :param yarnname: YARN application name (``-ynm``)
        :param yD: dynamic config overrides emitted as ``-yD key=value`` pairs
        :param yarnqueue: YARN queue (``-yqu``)
        :param jars: extra jars passed via ``-yj``
        :param remote_files: S3 resources downloaded to the cwd before submit
        :param proxy_user: local user the submit subprocess is demoted to
        :param flink_binary: path to the ``flink`` binary
        :param application_args: arguments appended after the application
        :param application: the jar/py file to submit
        :param keytab: kerberos keytab used when killing the YARN application
        :param principal: kerberos principal matching *keytab*
        """
        super().__init__(**kwargs)
        self._master = jobmanager
        self._jobmanager_memory = yarnjobManagerMemory
        self._taskmanager_memory = yarntaskManagerMemory
        self._slots = yarnslots
        self._parallelism = parallelism
        self._java_class = java_class
        self._name = yarnname
        self._jars = jars
        self._conf = yD
        self._queue = yarnqueue
        self._remote_files = remote_files
        self._proxy_user = proxy_user
        self._flink_binary = flink_binary
        self._application_args = application_args
        self._application = application
        self._keytab = keytab
        self._principal = principal
        # Runtime state populated during submit/log parsing.
        self._is_yarn = True
        self._is_kubernetes = None
        self._submit_fl: Optional[Any] = None  # the flink-submit Popen handle
        self._yarn_application_id: Optional[str] = None
        self._should_track_driver_status = None
        self._driver_id: Optional[str] = None
        self._driver_status: Optional[str] = None
        self._flink_exit_code: Optional[int] = None

    def execute(self, context: Dict[str, Any]) -> None:
        """Download any remote S3 resources, then submit the Flink job."""
        # Dependencies must land in the working directory before the submit.
        if self._remote_files:
            resource_store = S3Resource()
            for remote_file in self._remote_files:
                resource_store.download(remote_file, './')
        self.submit(self._application)

    def on_kill(self) -> None:
        """Kill the Flink submit subprocess and its YARN application.

        When a kerberos keytab/principal pair is configured, the ticket is
        renewed (best effort) before running ``yarn application -kill``.
        """
        self.log.debug("Kill Command is being called")

        if self._submit_fl and self._submit_fl.poll() is None:
            self.log.info('Sending kill signal to the flink submit process')
            self._submit_fl.kill()
            if self._yarn_application_id:
                kill_cmd = f"yarn application -kill {self._yarn_application_id}".split()
                env = None
                if self._keytab is not None and self._principal is not None:
                    # we are ignoring renewal failures from renew_from_kt
                    # here as the failure could just be due to a non-renewable ticket,
                    # we still attempt to kill the yarn application
                    renew_from_kt(self._principal, self._keytab, exit_on_fail=False)
                    env = os.environ.copy()
                    env["KRB5CCNAME"] = airflow_conf.get('kerberos', 'ccache')
                yarn_kill = subprocess.Popen(
                    kill_cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )

                self.log.info("YARN app killed with return code: %s", yarn_kill.wait())
            else:
                # _kubernetes_driver_pod is only assigned while parsing
                # kubernetes submit logs and may not exist; use getattr so
                # this error path cannot itself raise AttributeError.
                self.log.error(
                    'Killing pod %s only support yarn',
                    getattr(self, '_kubernetes_driver_pod', None),
                )

        else:
            self.log.error("_submit_fl is None")

    def get_flink_submit_command(self):
        """Return the fully-built ``flink run`` command for this operator."""
        command = self.build_flink_submit_command(self._application)
        return command

    def update_application_id(self, app_id):
        """Propagate the YARN application id to the backing task instance."""
        if not self.task_instance:
            return
        self.task_instance.update_application_id(app_id)

    def submit(self, application: str = "", **kwargs: Any) -> None:
        """
        Remote Popen to execute the flink-submit job

        :param application: Submitted application, jar or py file
        :type application: str
        :param kwargs: extra arguments to Popen (see subprocess.Popen)
        """
        submit_command = self.build_flink_submit_command(application)

        working_dir = os.getcwd()
        # The child is always demoted; uid/gid 1000 is the fallback when no
        # (resolvable) proxy user is configured.
        proxy_uid = get_user_id(self._proxy_user)
        if self._proxy_user and proxy_uid:
            preexec = demote(proxy_uid, proxy_uid)
        else:
            preexec = demote(1000, 1000)

        self._submit_fl = subprocess.Popen(
            submit_command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            bufsize=-1,
            preexec_fn=preexec,
            universal_newlines=True,
            cwd=working_dir
        )

        self._process_flink_submit_log(iter(self._submit_fl.stdout))  # type: ignore
        returncode = self._submit_fl.wait()

        # Check flink-submit return code. In Kubernetes mode, also check the
        # exit code extracted from the log, as it may differ.
        if returncode or (self._is_kubernetes and self._flink_exit_code != 0):
            if self._is_kubernetes:
                raise AirflowException(
                    "Cannot execute: {}. Error code is: {}. Kubernetes flink exit code is: {}".format(
                        self._mask_cmd(submit_command), returncode, self._flink_exit_code
                    )
                )
            raise AirflowException(
                "Cannot execute: {}. Error code is: {}.".format(
                    self._mask_cmd(submit_command), returncode
                )
            )

        self.log.debug("Should track driver: %s", self._should_track_driver_status)

    def build_flink_submit_command(self, application: str) -> List[str]:
        """
        Construct the flink-submit command to execute.

        :param application: command to append to the flink-submit command
        :type application: str
        :return: full command to be executed
        """
        command = self._get_flink_binary_path()

        # Target job manager; yarn-cluster is the default deployment mode.
        command += ["-m", self._master if self._master else "yarn-cluster"]

        # Dynamic hadoop/yarn configuration overrides.
        if self._conf:
            for conf_key, conf_value in self._conf.items():
                command += ["-yD", "{}={}".format(conf_key, str(conf_value))]

        # Optional flags, emitted only when a value was configured.
        optional_flags = (
            ("-yjm", self._jobmanager_memory),
            ("-ytm", self._taskmanager_memory),
            ("-ys", self._slots),
            ("-p", self._parallelism),
            ("-c", self._java_class),
            ("-ynm", self._name),
            ("-yqu", self._queue),
            ("-yj", self._jars),
        )
        for flag, value in optional_flags:
            if value:
                command += [flag, str(value)]

        # The actual script/jar to execute, followed by its own arguments.
        command += [application]
        if self._application_args:
            command += self._application_args

        self.log.info("Flink-Submit cmd: %s", self._mask_cmd(command))
        return command

    def _get_flink_binary_path(self) -> List[str]:
        """Return the base command: the configured flink binary plus ``run``."""
        return [self._flink_binary, "run"]

    def _process_flink_submit_log(self, itr: Iterator[Any]) -> None:
        """
        Scan the submit process output, extracting tracking identifiers.

        In YARN mode the application id is captured (once) so the job can be
        killed if the task is stopped unexpectedly. In Kubernetes mode the
        driver pod name and the reported exit code are captured. In
        standalone mode the driver id is captured when driver tracking is
        enabled. Every line is also echoed to the task log.

        Remark: If the driver needs to be tracked for its status, the log-level
        of the flink deploy needs to be at least INFO
        (log4j.logger.org.apache.flink.deploy=INFO)

        :param itr: An iterator which iterates over the input of the subprocess
        """
        for raw_line in itr:
            log_line = raw_line.strip()

            if self._is_yarn:
                # Capture the YARN application id only once.
                app_match = re.search('(application[0-9_]+)', log_line)
                if app_match and self._yarn_application_id is None:
                    self._yarn_application_id = app_match.groups()[0]
                    self.update_application_id(self._yarn_application_id)
                    self.log.info("Identified flink driver id: %s", self._yarn_application_id)

            elif self._is_kubernetes:
                # Driver pod name, needed to kill the application later.
                pod_match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', log_line)
                if pod_match:
                    self._kubernetes_driver_pod = pod_match.groups()[0]
                    self.log.info("Identified flink driver pod: %s", self._kubernetes_driver_pod)

                # Exit code reported in the log (may differ from Popen's).
                exit_match = re.search(r'\s*[eE]xit code: (\d+)', log_line)
                if exit_match:
                    self._flink_exit_code = int(exit_match.groups()[0])

            elif self._should_track_driver_status and not self._driver_id:
                # Standalone cluster mode: the driver id enables status
                # polling and killing the driver when needed.
                driver_match = re.search(r'(driver-[0-9\-]+)', log_line)
                if driver_match:
                    self._driver_id = driver_match.groups()[0]
                    self.log.info("identified flink driver id: %s", self._driver_id)

            self.log.info(log_line)

    def _mask_cmd(self, connection_cmd: Union[str, List[str]]) -> str:
        """
        Return the command as one string with password-like values masked.

        Any ``key=value`` (or ``key value``) pair whose key contains
        "secret" or "password" (case insensitive), e.g. HivePassword='abc',
        has its value replaced with ``******``.

        :param connection_cmd: the command, either pre-joined or as a token list
        :return: the masked, space-joined command string
        """
        # Joining a plain str would space-separate every character; only
        # join when we actually received a token list.
        if isinstance(connection_cmd, str):
            cmd_text = connection_cmd
        else:
            cmd_text = ' '.join(connection_cmd)
        connection_cmd_masked = re.sub(
            r"("
            r"\S*?"  # Match all non-whitespace characters before...
            r"(?:secret|password)"  # ...literally a "secret" or "password"
            # word (not capturing them).
            r"\S*?"  # All non-whitespace characters before either...
            r"(?:=|\s+)"  # ...an equal sign or whitespace characters
            # (not capturing them).
            r"(['\"]?)"  # An optional single or double quote.
            r")"  # This is the end of the first capturing group.
            r"(?:(?!\2\s).)*"  # All characters between optional quotes
            # (matched above); if the value is quoted,
            # it may contain whitespace.
            r"(\2)",  # Optional matching quote.
            r'\1******\3',
            cmd_text,
            flags=re.I,
        )
        # The original computed the masked string but never returned it,
        # so every caller formatted None into its message.
        return connection_cmd_masked
