code_raw | code_clean | docstring
---|---|---|
def delete_function(self, name):
"""
Deletes the specified Cloud Function.
:param name: The name of the function.
:type name: str
:return: None
"""
response = self.get_conn().projects().locations().functions().delete(
name=name).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name) | def delete_function(self, name):
response = self.get_conn().projects().locations().functions().delete(
name=name).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name) | Deletes the specified Cloud Function.
:param name: The name of the function.
:type name: str
:return: None |
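A minimal usage sketch for the method above; the import path, constructor signature, connection id, and the function name are all assumptions about how this hook is typically exposed in Airflow 1.10, not something stated in the row itself.

from airflow.contrib.hooks.gcp_function_hook import GcfHook  # assumed import path

# Assumed constructor; a GCP connection named 'google_cloud_default' must exist.
hook = GcfHook(api_version='v1', gcp_conn_id='google_cloud_default')
# Fully qualified name format: projects/<project>/locations/<region>/functions/<name>
hook.delete_function(
    name='projects/my-project/locations/us-central1/functions/my-function')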
def get_dep_statuses(self, ti, session, dep_context=None):
"""
Wrapper around the private _get_dep_statuses method that contains some global
checks for all dependencies.
:param ti: the task instance to get the dependency status for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: the context for which this dependency should be evaluated
:type dep_context: DepContext
"""
# this avoids a circular dependency
from airflow.ti_deps.dep_context import DepContext
if dep_context is None:
dep_context = DepContext()
if self.IGNOREABLE and dep_context.ignore_all_deps:
yield self._passing_status(
reason="Context specified all dependencies should be ignored.")
return
if self.IS_TASK_DEP and dep_context.ignore_task_deps:
yield self._passing_status(
reason="Context specified all task dependencies should be ignored.")
return
for dep_status in self._get_dep_statuses(ti, session, dep_context):
yield dep_status | def get_dep_statuses(self, ti, session, dep_context=None):
# this avoids a circular dependency
from airflow.ti_deps.dep_context import DepContext
if dep_context is None:
dep_context = DepContext()
if self.IGNOREABLE and dep_context.ignore_all_deps:
yield self._passing_status(
reason="Context specified all dependencies should be ignored.")
return
if self.IS_TASK_DEP and dep_context.ignore_task_deps:
yield self._passing_status(
reason="Context specified all task dependencies should be ignored.")
return
for dep_status in self._get_dep_statuses(ti, session, dep_context):
yield dep_status | Wrapper around the private _get_dep_statuses method that contains some global
checks for all dependencies.
:param ti: the task instance to get the dependency status for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: the context for which this dependency should be evaluated
:type dep_context: DepContext |
def is_met(self, ti, session, dep_context=None):
"""
Returns whether or not this dependency is met for a given task instance. A
dependency is considered met if all of the dependency statuses it reports are
passing.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext
"""
return all(status.passed for status in
self.get_dep_statuses(ti, session, dep_context)) | def is_met(self, ti, session, dep_context=None):
return all(status.passed for status in
self.get_dep_statuses(ti, session, dep_context)) | Returns whether or not this dependency is met for a given task instance. A
dependency is considered met if all of the dependency statuses it reports are
passing.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext |
def get_failure_reasons(self, ti, session, dep_context=None):
"""
Returns an iterable of strings that explain why this dependency wasn't met.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext
"""
for dep_status in self.get_dep_statuses(ti, session, dep_context):
if not dep_status.passed:
yield dep_status.reason | def get_failure_reasons(self, ti, session, dep_context=None):
for dep_status in self.get_dep_statuses(ti, session, dep_context):
if not dep_status.passed:
yield dep_status.reason | Returns an iterable of strings that explain why this dependency wasn't met.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext |
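The three wrappers above (get_dep_statuses, is_met, get_failure_reasons) all delegate to a subclass's private _get_dep_statuses. A minimal sketch of a custom dependency under that contract; the BaseTIDep import path and the _failing_status helper are assumptions that mirror the _passing_status helper already used above.

from airflow.ti_deps.deps.base_ti_dep import BaseTIDep  # assumed import path


class NotOnWeekendDep(BaseTIDep):
    # Illustrative dependency: passes only when the execution date is a weekday.
    NAME = "Not On Weekend"
    IGNOREABLE = True

    def _get_dep_statuses(self, ti, session, dep_context):
        if ti.execution_date.weekday() < 5:
            yield self._passing_status(reason="Execution date is a weekday.")
        else:
            yield self._failing_status(reason="Execution date falls on a weekend.")

With this in place, is_met(ti, session) returns True exactly when the single status yielded here passes.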
def _parse_s3_config(config_file_name, config_format='boto', profile=None):
"""
Parses a config file for s3 credentials. Can currently
parse boto, s3cmd.conf and AWS SDK config formats
:param config_file_name: path to the config file
:type config_file_name: str
:param config_format: config type. One of "boto", "s3cmd" or "aws".
Defaults to "boto"
:type config_format: str
:param profile: profile name in AWS type config file
:type profile: str
"""
config = configparser.ConfigParser()
if config.read(config_file_name): # pragma: no cover
sections = config.sections()
else:
raise AirflowException("Couldn't read {0}".format(config_file_name))
# Setting option names depending on file format
if config_format is None:
config_format = 'boto'
conf_format = config_format.lower()
if conf_format == 'boto': # pragma: no cover
if profile is not None and 'profile ' + profile in sections:
cred_section = 'profile ' + profile
else:
cred_section = 'Credentials'
elif conf_format == 'aws' and profile is not None:
cred_section = profile
else:
cred_section = 'default'
# Option names
if conf_format in ('boto', 'aws'): # pragma: no cover
key_id_option = 'aws_access_key_id'
secret_key_option = 'aws_secret_access_key'
# security_token_option = 'aws_security_token'
else:
key_id_option = 'access_key'
secret_key_option = 'secret_key'
# Actual Parsing
if cred_section not in sections:
raise AirflowException("This config file format is not recognized")
else:
try:
access_key = config.get(cred_section, key_id_option)
secret_key = config.get(cred_section, secret_key_option)
except Exception:
logging.warning("Option Error in parsing s3 config file")
raise
return access_key, secret_key | def _parse_s3_config(config_file_name, config_format='boto', profile=None):
config = configparser.ConfigParser()
if config.read(config_file_name): # pragma: no cover
sections = config.sections()
else:
raise AirflowException("Couldn't read {0}".format(config_file_name))
# Setting option names depending on file format
if config_format is None:
config_format = 'boto'
conf_format = config_format.lower()
if conf_format == 'boto': # pragma: no cover
if profile is not None and 'profile ' + profile in sections:
cred_section = 'profile ' + profile
else:
cred_section = 'Credentials'
elif conf_format == 'aws' and profile is not None:
cred_section = profile
else:
cred_section = 'default'
# Option names
if conf_format in ('boto', 'aws'): # pragma: no cover
key_id_option = 'aws_access_key_id'
secret_key_option = 'aws_secret_access_key'
# security_token_option = 'aws_security_token'
else:
key_id_option = 'access_key'
secret_key_option = 'secret_key'
# Actual Parsing
if cred_section not in sections:
raise AirflowException("This config file format is not recognized")
else:
try:
access_key = config.get(cred_section, key_id_option)
secret_key = config.get(cred_section, secret_key_option)
except Exception:
logging.warning("Option Error in parsing s3 config file")
raise
return access_key, secret_key | Parses a config file for s3 credentials. Can currently
parse boto, s3cmd.conf and AWS SDK config formats
:param config_file_name: path to the config file
:type config_file_name: str
:param config_format: config type. One of "boto", "s3cmd" or "aws".
Defaults to "boto"
:type config_format: str
:param profile: profile name in AWS type config file
:type profile: str |
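For reference, a boto-format file accepted by the parser above could look like the content embedded below; the standalone sketch reproduces the same section/option lookup with configparser (file content and key values are made up).

import configparser

# Illustrative boto-style config content; _parse_s3_config reads the same
# options from the 'Credentials' section (or 'profile <name>').
BOTO_CONFIG = """
[Credentials]
aws_access_key_id = AKIAEXAMPLE
aws_secret_access_key = example-secret
"""

config = configparser.ConfigParser()
config.read_string(BOTO_CONFIG)
access_key = config.get('Credentials', 'aws_access_key_id')
secret_key = config.get('Credentials', 'aws_secret_access_key')
print(access_key, secret_key)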
def get_credentials(self, region_name=None):
"""Get the underlying `botocore.Credentials` object.
This contains the following authentication attributes: access_key, secret_key and token.
"""
session, _ = self._get_credentials(region_name)
# Credentials are refreshable, so accessing your access key and
# secret key separately can lead to a race condition.
# See https://stackoverflow.com/a/36291428/8283373
return session.get_credentials().get_frozen_credentials() | def get_credentials(self, region_name=None):
session, _ = self._get_credentials(region_name)
# Credentials are refreshable, so accessing your access key and
# secret key separately can lead to a race condition.
# See https://stackoverflow.com/a/36291428/8283373
return session.get_credentials().get_frozen_credentials() | Get the underlying `botocore.Credentials` object.
This contains the following authentication attributes: access_key, secret_key and token. |
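The frozen-credentials pattern can be reproduced outside Airflow with plain boto3; a minimal sketch, assuming boto3 is installed and default AWS credentials are configured in the environment.

import boto3

session = boto3.session.Session()
# Freezing yields an immutable snapshot, so access_key, secret_key and token
# are guaranteed to come from the same refresh cycle.
frozen = session.get_credentials().get_frozen_credentials()
print(frozen.access_key, bool(frozen.secret_key), frozen.token is not None)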
def get_conn(self):
"""
Returns a Vertica connection object
"""
conn = self.get_connection(self.vertica_conn_id)
conn_config = {
"user": conn.login,
"password": conn.password or '',
"database": conn.schema,
"host": conn.host or 'localhost'
}
if not conn.port:
conn_config["port"] = 5433
else:
conn_config["port"] = int(conn.port)
conn = connect(**conn_config)
return conn | def get_conn(self):
conn = self.get_connection(self.vertica_conn_id)
conn_config = {
"user": conn.login,
"password": conn.password or '',
"database": conn.schema,
"host": conn.host or 'localhost'
}
if not conn.port:
conn_config["port"] = 5433
else:
conn_config["port"] = int(conn.port)
conn = connect(**conn_config)
return conn | Returns a Vertica connection object |
def flush(self):
"""
Ensure all logging output has been flushed
"""
if len(self._buffer) > 0:
self.logger.log(self.level, self._buffer)
self._buffer = str() | def flush(self):
if len(self._buffer) > 0:
self.logger.log(self.level, self._buffer)
self._buffer = str() | Ensure all logging output has been flushed |
def correct_maybe_zipped(fileloc):
"""
If the path contains a folder with a .zip suffix, then
the folder is treated as a zip archive and the path to the zip is returned.
"""
_, archive, filename = re.search(
r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return archive
else:
return fileloc | def correct_maybe_zipped(fileloc):
_, archive, filename = re.search(
r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return archive
else:
return fileloc | If the path contains a folder with a .zip suffix, then
the folder is treated as a zip archive and the path to the zip is returned. |
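The regex in correct_maybe_zipped splits a path at the first component ending in .zip; a standalone sketch of just that match, using a hypothetical POSIX-style path (the real function additionally checks zipfile.is_zipfile before returning the archive).

import os
import re

fileloc = '/dags/archive.zip/my_dag.py'  # hypothetical path, POSIX separators assumed
_, archive, filename = re.search(
    r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
print(archive)   # /dags/archive.zip
print(filename)  # my_dag.py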
def list_py_file_paths(directory, safe_mode=True,
include_examples=None):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
if include_examples is None:
include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns_by_dir = {}
for root, dirs, files in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as f:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
patterns += [re.compile(p) for p in f.read().split('\n') if p]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [
d
for d in dirs
if not any(p.search(os.path.join(root, d)) for p in patterns)
]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = patterns
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as fp:
content = fp.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
log = LoggingMixin().log
log.exception("Error while examining %s", f)
if include_examples:
import airflow.example_dags
example_dag_folder = airflow.example_dags.__path__[0]
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
return file_paths | def list_py_file_paths(directory, safe_mode=True,
include_examples=None):
if include_examples is None:
include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns_by_dir = {}
for root, dirs, files in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as f:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
patterns += [re.compile(p) for p in f.read().split('\n') if p]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [
d
for d in dirs
if not any(p.search(os.path.join(root, d)) for p in patterns)
]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = patterns
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as fp:
content = fp.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
log = LoggingMixin().log
log.exception("Error while examining %s", f)
if include_examples:
import airflow.example_dags
example_dag_folder = airflow.example_dags.__path__[0]
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
return file_paths | Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode] |
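Each non-empty line of an .airflowignore file is compiled as a regular expression and matched against full paths, as in the directory-pruning step above; a standalone sketch with made-up patterns and paths.

import re

airflowignore_content = "vendor\n.*_backup\\.py\n"  # hypothetical .airflowignore lines
patterns = [re.compile(p) for p in airflowignore_content.split('\n') if p]

candidate_paths = [
    '/dags/etl_daily.py',
    '/dags/vendor/lib.py',
    '/dags/etl_daily_backup.py',
]
kept = [p for p in candidate_paths
        if not any(pat.search(p) for pat in patterns)]
print(kept)  # ['/dags/etl_daily.py']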
def construct_task_instance(self, session=None, lock_for_update=False):
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti | def construct_task_instance(self, session=None, lock_for_update=False):
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti | Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed. |
def start(self):
"""
Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
"""
self._process = self._launch_process(self._dag_directory,
self._file_paths,
self._max_runs,
self._processor_factory,
self._child_signal_conn,
self._stat_queue,
self._result_queue,
self._async_mode)
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid) | def start(self):
self._process = self._launch_process(self._dag_directory,
self._file_paths,
self._max_runs,
self._processor_factory,
self._child_signal_conn,
self._stat_queue,
self._result_queue,
self._async_mode)
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid) | Launch DagFileProcessorManager processor and start DAG parsing loop in manager. |
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
self.log.info("Sending termination message to manager.")
self._child_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER) | def terminate(self):
self.log.info("Sending termination message to manager.")
self._child_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER) | Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors. |
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK) | def _exit_gracefully(self, signum, frame):
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK) | Helper method to clean up DAG file processors to avoid leaving orphan processes. |
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
if self._async_mode:
self.log.debug("Starting DagFileProcessorManager in async mode")
self.start_in_async()
else:
self.log.debug("Starting DagFileProcessorManager in sync mode")
self.start_in_sync() | def start(self):
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
if self._async_mode:
self.log.debug("Starting DagFileProcessorManager in async mode")
self.start_in_async()
else:
self.log.debug("Starting DagFileProcessorManager in sync mode")
self.start_in_sync() | Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code. |
def start_in_async(self):
"""
Parse DAG files repeatedly in a standalone loop.
"""
while True:
loop_start_time = time.time()
if self._signal_conn.poll():
agent_signal = self._signal_conn.recv()
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._result_queue.put(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None
for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
self.get_all_pids(),
max_runs_reached,
all_files_processed,
len(simple_dags))
self._stat_queue.put(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
sleep_length = 1 - loop_duration
self.log.debug("Sleeping for %.2f seconds to prevent excessive logging", sleep_length)
time.sleep(sleep_length) | def start_in_async(self):
while True:
loop_start_time = time.time()
if self._signal_conn.poll():
agent_signal = self._signal_conn.recv()
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._result_queue.put(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None
for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
self.get_all_pids(),
max_runs_reached,
all_files_processed,
len(simple_dags))
self._stat_queue.put(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
sleep_length = 1 - loop_duration
self.log.debug("Sleeping for %.2f seconds to prevent excessive logging", sleep_length)
time.sleep(sleep_length) | Parse DAG files repeatedly in a standalone loop. |
def start_in_sync(self):
"""
Parse DAG files in a loop controlled by DagParsingSignal.
The actual DAG parsing loop runs once per agent heartbeat message and reports
done when the loop finishes.
"""
while True:
agent_signal = self._signal_conn.recv()
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._result_queue.put(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None
for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
self.get_all_pids(),
self.max_runs_reached(),
all_files_processed,
len(simple_dags))
self._stat_queue.put(dag_parsing_stat)
self.wait_until_finished()
self._signal_conn.send(DagParsingSignal.MANAGER_DONE)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
self._signal_conn.send(DagParsingSignal.MANAGER_DONE)
break | def start_in_sync(self):
while True:
agent_signal = self._signal_conn.recv()
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._result_queue.put(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None
for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
self.get_all_pids(),
self.max_runs_reached(),
all_files_processed,
len(simple_dags))
self._stat_queue.put(dag_parsing_stat)
self.wait_until_finished()
self._signal_conn.send(DagParsingSignal.MANAGER_DONE)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
self._signal_conn.send(DagParsingSignal.MANAGER_DONE)
break | Parse DAG files in a loop controlled by DagParsingSignal.
The actual DAG parsing loop runs once per agent heartbeat message and reports
done when the loop finishes. |
def _refresh_dag_dir(self):
"""
Refresh file paths from the DAG directory if it has been too long since the last refresh.
"""
elapsed_time_since_refresh = (timezone.utcnow() -
self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = timezone.utcnow()
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors") | def _refresh_dag_dir(self):
elapsed_time_since_refresh = (timezone.utcnow() -
self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = timezone.utcnow()
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors") | Refresh file paths from dag dir if we haven't done it for too long. |
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() >
self.print_stats_interval):
if len(self._file_paths) > 0:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow() | def _print_stat(self):
if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() >
self.print_stats_interval):
if len(self._file_paths) > 0:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow() | Occasionally print out stats about how fast the files are getting processed |
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit() | def clear_nonexistent_import_errors(self, session):
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit() | Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session |
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
if last_runtime:
Stats.gauge(
'dag_processing.last_runtime.{}'.format(file_name),
last_runtime
)
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((timezone.utcnow() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (timezone.utcnow() - last_run).total_seconds()
Stats.gauge(
'dag_processing.last_run.seconds_ago.{}'.format(file_name),
seconds_ago
)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str) | def _log_file_processing_stats(self, known_file_paths):
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
if last_runtime:
Stats.gauge(
'dag_processing.last_runtime.{}'.format(file_name),
last_runtime
)
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((timezone.utcnow() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (timezone.utcnow() - last_run).total_seconds()
Stats.gauge(
'dag_processing.last_run.seconds_ago.{}'.format(file_name),
seconds_ago
)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str) | Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None |
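The stats table is rendered with the third-party tabulate package; a tiny standalone sketch of the same call, with made-up rows (assumes tabulate is installed).

from tabulate import tabulate

headers = ["File Path", "PID", "Runtime", "Last Runtime", "Last Run"]
rows = [
    ("/dags/etl_daily.py", 4212, "1.03s", "0.98s", "2019-05-01T12:00:00"),
    ("/dags/reporting.py", None, None, "2.41s", "2019-05-01T11:59:30"),
]
print(tabulate(rows, headers=headers))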
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
processor.terminate()
self._processors = filtered_processors | def set_file_paths(self, new_file_paths):
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
processor.terminate()
self._processors = filtered_processors | Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None |
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1) | def wait_until_finished(self):
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1) | Sleeps until all the processors are done. |
def heartbeat(self):
"""
This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
now = timezone.utcnow()
finished_processors[file_path] = processor
self._last_runtime[file_path] = (now -
processor.start_time).total_seconds()
self._last_finish_time[file_path] = now
self._run_count[file_path] += 1
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result:
simple_dags.append(simple_dag)
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, num_runs in self._run_count.items()
if num_runs == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
self._file_path_queue.extend(files_paths_to_queue)
zombies = self._find_zombies()
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path, zombies)
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._run_count[self._heart_beat_key] += 1
return simple_dags | def heartbeat(self):
finished_processors = {}
    # :type : dict[unicode, AbstractDagFileProcessor]
running_processors = {}
    # :type : dict[unicode, AbstractDagFileProcessor]
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
now = timezone.utcnow()
finished_processors[file_path] = processor
self._last_runtime[file_path] = (now -
processor.start_time).total_seconds()
self._last_finish_time[file_path] = now
self._run_count[file_path] += 1
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result:
simple_dags.append(simple_dag)
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, num_runs in self._run_count.items()
if num_runs == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
self._file_path_queue.extend(files_paths_to_queue)
zombies = self._find_zombies()
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path, zombies)
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._run_count[self._heart_beat_key] += 1
return simple_dags | This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag] |
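The queue-refill step inside heartbeat is plain set arithmetic over file paths; a standalone sketch with hypothetical paths that mirrors the four exclusions above.

all_file_paths = {'/dags/a.py', '/dags/b.py', '/dags/c.py', '/dags/d.py'}
in_progress = {'/dags/a.py'}            # currently held by a processor
recently_processed = {'/dags/b.py'}     # finished within file_process_interval
at_run_limit = {'/dags/c.py'}           # already parsed max_runs times

to_queue = list(all_file_paths - in_progress - recently_processed - at_run_limit)
print(to_queue)  # ['/dags/d.py']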
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait() | def end(self):
pids_to_kill = self.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait() | Kill all child processes on exit since we don't want to leave
them as orphaned. |
def get_conn(self):
"""
Opens an SSH connection to the remote host.
:rtype: paramiko.client.SSHClient
"""
self.log.debug('Creating SSH client for conn_id: %s', self.ssh_conn_id)
client = paramiko.SSHClient()
if not self.allow_host_key_change:
self.log.warning('Remote Identification Change is not verified. '
'This wont protect against Man-In-The-Middle attacks')
client.load_system_host_keys()
if self.no_host_key_check:
self.log.warning('No Host Key Verification. This wont protect '
'against Man-In-The-Middle attacks')
# Default is RejectPolicy
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.password and self.password.strip():
client.connect(hostname=self.remote_host,
username=self.username,
password=self.password,
key_filename=self.key_file,
timeout=self.timeout,
compress=self.compress,
port=self.port,
sock=self.host_proxy)
else:
client.connect(hostname=self.remote_host,
username=self.username,
key_filename=self.key_file,
timeout=self.timeout,
compress=self.compress,
port=self.port,
sock=self.host_proxy)
if self.keepalive_interval:
client.get_transport().set_keepalive(self.keepalive_interval)
self.client = client
return client | def get_conn(self):
self.log.debug('Creating SSH client for conn_id: %s', self.ssh_conn_id)
client = paramiko.SSHClient()
if not self.allow_host_key_change:
self.log.warning('Remote Identification Change is not verified. '
'This wont protect against Man-In-The-Middle attacks')
client.load_system_host_keys()
if self.no_host_key_check:
self.log.warning('No Host Key Verification. This wont protect '
'against Man-In-The-Middle attacks')
# Default is RejectPolicy
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.password and self.password.strip():
client.connect(hostname=self.remote_host,
username=self.username,
password=self.password,
key_filename=self.key_file,
timeout=self.timeout,
compress=self.compress,
port=self.port,
sock=self.host_proxy)
else:
client.connect(hostname=self.remote_host,
username=self.username,
key_filename=self.key_file,
timeout=self.timeout,
compress=self.compress,
port=self.port,
sock=self.host_proxy)
if self.keepalive_interval:
client.get_transport().set_keepalive(self.keepalive_interval)
self.client = client
return client | Opens an SSH connection to the remote host.
:rtype: paramiko.client.SSHClient |
def create_transfer_job(self, body):
"""
Creates a transfer job that runs periodically.
:param body: (Required) A request body, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
:type body: dict
:return: transfer job.
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
:rtype: dict
"""
body = self._inject_project_id(body, BODY, PROJECT_ID)
return self.get_conn().transferJobs().create(body=body).execute(num_retries=self.num_retries) | def create_transfer_job(self, body):
body = self._inject_project_id(body, BODY, PROJECT_ID)
return self.get_conn().transferJobs().create(body=body).execute(num_retries=self.num_retries) | Creates a transfer job that runs periodically.
:param body: (Required) A request body, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
:type body: dict
:return: transfer job.
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
:rtype: dict |
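A request body for create_transfer_job follows the TransferJob REST resource linked in the docstring; an illustrative, non-exhaustive sketch with placeholder project and bucket names.

# Illustrative TransferJob body (all values are placeholders).
transfer_job_body = {
    "description": "nightly-gcs-copy",
    "status": "ENABLED",
    "projectId": "my-project",  # the hook injects a default project id if omitted
    "transferSpec": {
        "gcsDataSource": {"bucketName": "source-bucket"},
        "gcsDataSink": {"bucketName": "destination-bucket"},
    },
    "schedule": {
        "scheduleStartDate": {"year": 2019, "month": 5, "day": 1},
    },
}
# hook.create_transfer_job(body=transfer_job_body) would then submit it.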
def get_transfer_job(self, job_name, project_id=None):
"""
Gets the specified transfer job in Google Storage Transfer Service.
:param job_name: (Required) Name of the job to be fetched
:type job_name: str
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the GCP
connection is used.
:type project_id: str
:return: Transfer Job
:rtype: dict
"""
return (
self.get_conn()
.transferJobs()
.get(jobName=job_name, projectId=project_id)
.execute(num_retries=self.num_retries)
) | def get_transfer_job(self, job_name, project_id=None):
return (
self.get_conn()
.transferJobs()
.get(jobName=job_name, projectId=project_id)
.execute(num_retries=self.num_retries)
) | Gets the specified transfer job in Google Storage Transfer Service.
:param job_name: (Required) Name of the job to be fetched
:type job_name: str
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the GCP
connection is used.
:type project_id: str
:return: Transfer Job
:rtype: dict |
def list_transfer_job(self, filter):
"""
Lists transfer jobs in Google Storage Transfer Service that match the specified filter.
:param filter: (Required) A request filter, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter
:type filter: dict
:return: List of Transfer Jobs
:rtype: list[dict]
"""
conn = self.get_conn()
filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID)
request = conn.transferJobs().list(filter=json.dumps(filter))
jobs = []
while request is not None:
response = request.execute(num_retries=self.num_retries)
jobs.extend(response[TRANSFER_JOBS])
request = conn.transferJobs().list_next(previous_request=request, previous_response=response)
return jobs | def list_transfer_job(self, filter):
conn = self.get_conn()
filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID)
request = conn.transferJobs().list(filter=json.dumps(filter))
jobs = []
while request is not None:
response = request.execute(num_retries=self.num_retries)
jobs.extend(response[TRANSFER_JOBS])
request = conn.transferJobs().list_next(previous_request=request, previous_response=response)
return jobs | Lists transfer jobs in Google Storage Transfer Service that match the specified filter.
:param filter: (Required) A request filter, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter
:type filter: dict
:return: List of Transfer Jobs
:rtype: list[dict] |
def update_transfer_job(self, job_name, body):
"""
Updates a transfer job that runs periodically.
:param job_name: (Required) Name of the job to be updated
:type job_name: str
:param body: A request body, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
:type body: dict
:return: If successful, TransferJob.
:rtype: dict
"""
body = self._inject_project_id(body, BODY, PROJECT_ID)
return (
self.get_conn()
.transferJobs()
.patch(jobName=job_name, body=body)
.execute(num_retries=self.num_retries)
) | def update_transfer_job(self, job_name, body):
body = self._inject_project_id(body, BODY, PROJECT_ID)
return (
self.get_conn()
.transferJobs()
.patch(jobName=job_name, body=body)
.execute(num_retries=self.num_retries)
) | Updates a transfer job that runs periodically.
:param job_name: (Required) Name of the job to be updated
:type job_name: str
:param body: A request body, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
:type body: dict
:return: If successful, TransferJob.
:rtype: dict |
def delete_transfer_job(self, job_name, project_id):
"""
Deletes a transfer job. This is a soft delete. After a transfer job is
deleted, the job and all the transfer executions are subject to garbage
collection. Transfer jobs become eligible for garbage collection
30 days after soft delete.
:param job_name: (Required) Name of the job to be deleted
:type job_name: str
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the GCP
connection is used.
:type project_id: str
:rtype: None
"""
return (
self.get_conn()
.transferJobs()
.patch(
jobName=job_name,
body={
PROJECT_ID: project_id,
TRANSFER_JOB: {STATUS1: GcpTransferJobsStatus.DELETED},
TRANSFER_JOB_FIELD_MASK: STATUS1,
},
)
.execute(num_retries=self.num_retries)
) | def delete_transfer_job(self, job_name, project_id):
return (
self.get_conn()
.transferJobs()
.patch(
jobName=job_name,
body={
PROJECT_ID: project_id,
TRANSFER_JOB: {STATUS1: GcpTransferJobsStatus.DELETED},
TRANSFER_JOB_FIELD_MASK: STATUS1,
},
)
.execute(num_retries=self.num_retries)
) | Deletes a transfer job. This is a soft delete. After a transfer job is
deleted, the job and all the transfer executions are subject to garbage
collection. Transfer jobs become eligible for garbage collection
30 days after soft delete.
:param job_name: (Required) Name of the job to be deleted
:type job_name: str
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the GCP
connection is used.
:type project_id: str
:rtype: None |
def cancel_transfer_operation(self, operation_name):
"""
Cancels a transfer operation in Google Storage Transfer Service.
:param operation_name: Name of the transfer operation.
:type operation_name: str
:rtype: None
"""
self.get_conn().transferOperations().cancel(name=operation_name).execute(num_retries=self.num_retries) | def cancel_transfer_operation(self, operation_name):
self.get_conn().transferOperations().cancel(name=operation_name).execute(num_retries=self.num_retries) | Cancels a transfer operation in Google Storage Transfer Service.
:param operation_name: Name of the transfer operation.
:type operation_name: str
:rtype: None |
def pause_transfer_operation(self, operation_name):
"""
Pauses a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:rtype: None
"""
self.get_conn().transferOperations().pause(name=operation_name).execute(num_retries=self.num_retries) | def pause_transfer_operation(self, operation_name):
self.get_conn().transferOperations().pause(name=operation_name).execute(num_retries=self.num_retries) | Pauses a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:rtype: None |
def resume_transfer_operation(self, operation_name):
"""
Resumes a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:rtype: None
"""
self.get_conn().transferOperations().resume(name=operation_name).execute(num_retries=self.num_retries) | def resume_transfer_operation(self, operation_name):
self.get_conn().transferOperations().resume(name=operation_name).execute(num_retries=self.num_retries) | Resumes a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:rtype: None |
def wait_for_transfer_job(self, job, expected_statuses=(GcpTransferOperationStatus.SUCCESS,), timeout=60):
"""
Waits until the job reaches the expected state.
:param job: Transfer job
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
:type job: dict
:param expected_statuses: State that is expected
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
:type expected_statuses: set[str]
:param timeout: time, in seconds, within which the operation must complete
:type timeout: int
:rtype: None
"""
while timeout > 0:
operations = self.list_transfer_operations(
filter={FILTER_PROJECT_ID: job[PROJECT_ID], FILTER_JOB_NAMES: [job[NAME]]}
)
if GCPTransferServiceHook.operations_contain_expected_statuses(operations, expected_statuses):
return
time.sleep(TIME_TO_SLEEP_IN_SECONDS)
timeout -= TIME_TO_SLEEP_IN_SECONDS
raise AirflowException("Timeout. The operation could not be completed within the allotted time.") | def wait_for_transfer_job(self, job, expected_statuses=(GcpTransferOperationStatus.SUCCESS,), timeout=60):
while timeout > 0:
operations = self.list_transfer_operations(
filter={FILTER_PROJECT_ID: job[PROJECT_ID], FILTER_JOB_NAMES: [job[NAME]]}
)
if GCPTransferServiceHook.operations_contain_expected_statuses(operations, expected_statuses):
return
time.sleep(TIME_TO_SLEEP_IN_SECONDS)
timeout -= TIME_TO_SLEEP_IN_SECONDS
raise AirflowException("Timeout. The operation could not be completed within the allotted time.") | Waits until the job reaches the expected state.
:param job: Transfer job
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
:type job: dict
:param expected_statuses: State that is expected
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
:type expected_statuses: set[str]
:param timeout: time, in seconds, within which the operation must complete
:type timeout: int
:rtype: None |
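In practice the job dict passed to wait_for_transfer_job can simply be the dict returned by create_transfer_job; a hedged usage sketch in which `hook` is assumed to be a configured instance of the transfer-service hook shown above, and transfer_job_body is the body sketched earlier.

# Assumption: `hook` is a configured transfer-service hook instance
# (e.g. GCPTransferServiceHook); imports and connection setup are omitted.
job = hook.create_transfer_job(body=transfer_job_body)
hook.wait_for_transfer_job(job, timeout=120)  # defaults to waiting for SUCCESS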
def find_for_task_instance(task_instance, session):
"""
Returns all task reschedules for the task instance and try number,
in ascending order.
:param task_instance: the task instance to find task reschedules for
:type task_instance: airflow.models.TaskInstance
"""
TR = TaskReschedule
return (
session
.query(TR)
.filter(TR.dag_id == task_instance.dag_id,
TR.task_id == task_instance.task_id,
TR.execution_date == task_instance.execution_date,
TR.try_number == task_instance.try_number)
.order_by(asc(TR.id))
.all()
) | def find_for_task_instance(task_instance, session):
TR = TaskReschedule
return (
session
.query(TR)
.filter(TR.dag_id == task_instance.dag_id,
TR.task_id == task_instance.task_id,
TR.execution_date == task_instance.execution_date,
TR.try_number == task_instance.try_number)
.order_by(asc(TR.id))
.all()
) | Returns all task reschedules for the task instance and try number,
in ascending order.
:param task_instance: the task instance to find task reschedules for
:type task_instance: airflow.models.TaskInstance |
def open_slots(self, session):
"""
Returns the number of slots open at the moment
"""
from airflow.models.taskinstance import \
TaskInstance as TI # Avoid circular import
used_slots = session.query(func.count()).filter(TI.pool == self.pool).filter(
TI.state.in_([State.RUNNING, State.QUEUED])).scalar()
return self.slots - used_slots | def open_slots(self, session):
from airflow.models.taskinstance import \
TaskInstance as TI # Avoid circular import
used_slots = session.query(func.count()).filter(TI.pool == self.pool).filter(
TI.state.in_([State.RUNNING, State.QUEUED])).scalar()
return self.slots - used_slots | Returns the number of slots open at the moment |
def run_command(command):
"""
Runs a command and returns its stdout
"""
process = subprocess.Popen(
shlex.split(command),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
output, stderr = [stream.decode(sys.getdefaultencoding(), 'ignore')
for stream in process.communicate()]
if process.returncode != 0:
raise AirflowConfigException(
"Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
.format(command, process.returncode, output, stderr)
)
return output | def run_command(command):
process = subprocess.Popen(
shlex.split(command),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
output, stderr = [stream.decode(sys.getdefaultencoding(), 'ignore')
for stream in process.communicate()]
if process.returncode != 0:
raise AirflowConfigException(
"Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
.format(command, process.returncode, output, stderr)
)
return output | Runs command and returns stdout |
def remove_option(self, section, option, remove_default=True):
"""
Remove an option if it exists in the config loaded from a file or in the
default config. If both configs have the same option, this removes
the option from both configs unless remove_default=False.
"""
if super().has_option(section, option):
super().remove_option(section, option)
if self.airflow_defaults.has_option(section, option) and remove_default:
self.airflow_defaults.remove_option(section, option) | def remove_option(self, section, option, remove_default=True):
if super().has_option(section, option):
super().remove_option(section, option)
if self.airflow_defaults.has_option(section, option) and remove_default:
self.airflow_defaults.remove_option(section, option) | Remove an option if it exists in the config loaded from a file or in the
default config. If both configs have the same option, this removes
the option from both configs unless remove_default=False. |
def getsection(self, section):
"""
Returns the section as a dict. Values are converted to int, float, bool
as required.
:param section: section from the config
:rtype: dict
"""
if (section not in self._sections and
section not in self.airflow_defaults._sections):
return None
_section = copy.deepcopy(self.airflow_defaults._sections[section])
if section in self._sections:
_section.update(copy.deepcopy(self._sections[section]))
section_prefix = 'AIRFLOW__{S}__'.format(S=section.upper())
for env_var in sorted(os.environ.keys()):
if env_var.startswith(section_prefix):
key = env_var.replace(section_prefix, '').lower()
_section[key] = self._get_env_var_option(section, key)
for key, val in iteritems(_section):
try:
val = int(val)
except ValueError:
try:
val = float(val)
except ValueError:
if val.lower() in ('t', 'true'):
val = True
elif val.lower() in ('f', 'false'):
val = False
_section[key] = val
return _section | def getsection(self, section):
if (section not in self._sections and
section not in self.airflow_defaults._sections):
return None
_section = copy.deepcopy(self.airflow_defaults._sections[section])
if section in self._sections:
_section.update(copy.deepcopy(self._sections[section]))
section_prefix = 'AIRFLOW__{S}__'.format(S=section.upper())
for env_var in sorted(os.environ.keys()):
if env_var.startswith(section_prefix):
key = env_var.replace(section_prefix, '').lower()
_section[key] = self._get_env_var_option(section, key)
for key, val in iteritems(_section):
try:
val = int(val)
except ValueError:
try:
val = float(val)
except ValueError:
if val.lower() in ('t', 'true'):
val = True
elif val.lower() in ('f', 'false'):
val = False
_section[key] = val
return _section | Returns the section as a dict. Values are converted to int, float, bool
as required.
:param section: section from the config
:rtype: dict |
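To make the precedence and coercion above concrete: an AIRFLOW__<SECTION>__<KEY> environment variable overrides the file value for that key, and numeric or boolean strings come back as int/float/bool (the key used below is just an example)::

    import os
    from airflow.configuration import conf

    # Override one key of the [core] section via the environment.
    os.environ['AIRFLOW__CORE__PARALLELISM'] = '64'

    core = conf.getsection('core')
    # The overridden value is returned as an int, not the string '64';
    # an unknown section would return None instead of a dict.
    assert core['parallelism'] == 64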
def allocate_ids(self, partial_keys):
"""
Allocate IDs for incomplete keys.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:type partial_keys: list
:return: a list of full keys.
:rtype: list
"""
conn = self.get_conn()
resp = (conn
.projects()
.allocateIds(projectId=self.project_id, body={'keys': partial_keys})
.execute(num_retries=self.num_retries))
return resp['keys'] | def allocate_ids(self, partial_keys):
conn = self.get_conn()
resp = (conn
.projects()
.allocateIds(projectId=self.project_id, body={'keys': partial_keys})
.execute(num_retries=self.num_retries))
return resp['keys'] | Allocate IDs for incomplete keys.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:type partial_keys: list
:return: a list of full keys.
:rtype: list |
def begin_transaction(self):
"""
Begins a new transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction
:return: a transaction handle.
:rtype: str
"""
conn = self.get_conn()
resp = (conn
.projects()
.beginTransaction(projectId=self.project_id, body={})
.execute(num_retries=self.num_retries))
return resp['transaction'] | def begin_transaction(self):
conn = self.get_conn()
resp = (conn
.projects()
.beginTransaction(projectId=self.project_id, body={})
.execute(num_retries=self.num_retries))
return resp['transaction'] | Begins a new transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction
:return: a transaction handle.
:rtype: str |
def commit(self, body):
"""
Commit a transaction, optionally creating, deleting or modifying some entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit
:param body: the body of the commit request.
:type body: dict
:return: the response body of the commit request.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.commit(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | def commit(self, body):
conn = self.get_conn()
resp = (conn
.projects()
.commit(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | Commit a transaction, optionally creating, deleting or modifying some entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit
:param body: the body of the commit request.
:type body: dict
:return: the response body of the commit request.
:rtype: dict |
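For reference, a commit body pairs a mode with a list of mutations and, in TRANSACTIONAL mode, the handle returned by begin_transaction; the hook import, kind, key and property below are illustrative only::

    from airflow.contrib.hooks.datastore_hook import DatastoreHook

    hook = DatastoreHook()  # assumes a configured Google Cloud connection
    txn = hook.begin_transaction()

    body = {
        'mode': 'TRANSACTIONAL',
        'transaction': txn,
        'mutations': [{
            'insert': {
                'key': {'path': [{'kind': 'Task', 'name': 'sample-task'}]},
                'properties': {'done': {'booleanValue': False}},
            }
        }],
    }
    resp = hook.commit(body=body)  # commit response, including mutation results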
def lookup(self, keys, read_consistency=None, transaction=None):
"""
Lookup some entities by key.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup
:param keys: the keys to lookup.
:type keys: list
:param read_consistency: the read consistency to use. default, strong or eventual.
Cannot be used with a transaction.
:type read_consistency: str
:param transaction: the transaction to use, if any.
:type transaction: str
:return: the response body of the lookup request.
:rtype: dict
"""
conn = self.get_conn()
body = {'keys': keys}
if read_consistency:
body['readConsistency'] = read_consistency
if transaction:
body['transaction'] = transaction
resp = (conn
.projects()
.lookup(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | def lookup(self, keys, read_consistency=None, transaction=None):
conn = self.get_conn()
body = {'keys': keys}
if read_consistency:
body['readConsistency'] = read_consistency
if transaction:
body['transaction'] = transaction
resp = (conn
.projects()
.lookup(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | Lookup some entities by key.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup
:param keys: the keys to lookup.
:type keys: list
:param read_consistency: the read consistency to use. default, strong or eventual.
Cannot be used with a transaction.
:type read_consistency: str
:param transaction: the transaction to use, if any.
:type transaction: str
:return: the response body of the lookup request.
:rtype: dict |
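Continuing the same sketch, lookup takes keys in the Datastore REST key shape and returns found/missing entries; read_consistency and transaction are mutually exclusive, as noted above::

    keys = [{'path': [{'kind': 'Task', 'name': 'sample-task'}]}]  # placeholder key
    resp = hook.lookup(keys=keys, read_consistency='STRONG')
    found = resp.get('found', [])      # entities that exist
    missing = resp.get('missing', [])  # keys with no stored entity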
def rollback(self, transaction):
"""
Roll back a transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback
:param transaction: the transaction to roll back.
:type transaction: str
"""
conn = self.get_conn()
conn.projects().rollback(
projectId=self.project_id, body={'transaction': transaction}
).execute(num_retries=self.num_retries) | def rollback(self, transaction):
conn = self.get_conn()
conn.projects().rollback(
projectId=self.project_id, body={'transaction': transaction}
).execute(num_retries=self.num_retries) | Roll back a transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback
:param transaction: the transaction to roll back.
:type transaction: str |
def run_query(self, body):
"""
Run a query for entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery
:param body: the body of the query request.
:type body: dict
:return: the batch of query results.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.runQuery(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp['batch'] | def run_query(self, body):
conn = self.get_conn()
resp = (conn
.projects()
.runQuery(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp['batch'] | Run a query for entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery
:param body: the body of the query request.
:type body: dict
:return: the batch of query results.
:rtype: dict |
def get_operation(self, name):
"""
Gets the latest state of a long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get
:param name: the name of the operation resource.
:type name: str
:return: a resource operation instance.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.get(name=name)
.execute(num_retries=self.num_retries))
return resp | def get_operation(self, name):
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.get(name=name)
.execute(num_retries=self.num_retries))
return resp | Gets the latest state of a long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get
:param name: the name of the operation resource.
:type name: str
:return: a resource operation instance.
:rtype: dict |
def delete_operation(self, name):
"""
Deletes the long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:type name: str
:return: none if successful.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.delete(name=name)
.execute(num_retries=self.num_retries))
return resp | def delete_operation(self, name):
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.delete(name=name)
.execute(num_retries=self.num_retries))
return resp | Deletes the long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:type name: str
:return: none if successful.
:rtype: dict |
def poll_operation_until_done(self, name, polling_interval_in_seconds):
"""
Poll backup operation state until it's completed.
:param name: the name of the operation resource
:type name: str
:param polling_interval_in_seconds: The number of seconds to wait before calling another request.
:type polling_interval_in_seconds: int
:return: a resource operation instance.
:rtype: dict
"""
while True:
result = self.get_operation(name)
state = result['metadata']['common']['state']
if state == 'PROCESSING':
self.log.info('Operation is processing. Re-polling state in {} seconds'
.format(polling_interval_in_seconds))
time.sleep(polling_interval_in_seconds)
else:
return result | def poll_operation_until_done(self, name, polling_interval_in_seconds):
while True:
result = self.get_operation(name)
state = result['metadata']['common']['state']
if state == 'PROCESSING':
self.log.info('Operation is processing. Re-polling state in {} seconds'
.format(polling_interval_in_seconds))
time.sleep(polling_interval_in_seconds)
else:
return result | Poll backup operation state until it's completed.
:param name: the name of the operation resource
:type name: str
:param polling_interval_in_seconds: The number of seconds to wait before calling another request.
:type polling_interval_in_seconds: int
:return: a resource operation instance.
:rtype: dict |
def export_to_storage_bucket(self, bucket, namespace=None, entity_filter=None, labels=None):
"""
Export entities from Cloud Datastore to Cloud Storage for backup.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: Description of what data from the project is included in the export.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict
"""
admin_conn = self.get_conn()
output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace]))
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'outputUrlPrefix': output_uri_prefix,
'entityFilter': entity_filter,
'labels': labels,
}
resp = (admin_conn
.projects()
.export(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | def export_to_storage_bucket(self, bucket, namespace=None, entity_filter=None, labels=None):
admin_conn = self.get_conn()
output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace]))
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'outputUrlPrefix': output_uri_prefix,
'entityFilter': entity_filter,
'labels': labels,
}
resp = (admin_conn
.projects()
.export(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | Export entities from Cloud Datastore to Cloud Storage for backup.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: Description of what data from the project is included in the export.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict |
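To make the URI composition concrete: bucket and namespace are joined into gs://<bucket>/<namespace>, and empty filters/labels default to {}; the export runs as a long-running operation that can be polled with poll_operation_until_done (names below are placeholders)::

    operation = hook.export_to_storage_bucket(
        bucket='my-backup-bucket',
        namespace='exports/2019-01-01',     # outputUrlPrefix becomes gs://my-backup-bucket/exports/2019-01-01
        entity_filter={'kinds': ['Task']},  # export only the 'Task' kind
        labels={'triggered-by': 'airflow'},
    )
    hook.poll_operation_until_done(operation['name'], polling_interval_in_seconds=10)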
def import_from_storage_bucket(self, bucket, file, namespace=None, entity_filter=None, labels=None):
"""
Import a backup from Cloud Storage to Cloud Datastore.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param file: the metadata file written by the projects.export operation.
:type file: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: specify which kinds/namespaces are to be imported.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict
"""
admin_conn = self.get_conn()
input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file]))
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'inputUrl': input_url,
'entityFilter': entity_filter,
'labels': labels,
}
resp = (admin_conn
.projects()
.import_(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | def import_from_storage_bucket(self, bucket, file, namespace=None, entity_filter=None, labels=None):
admin_conn = self.get_conn()
input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file]))
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'inputUrl': input_url,
'entityFilter': entity_filter,
'labels': labels,
}
resp = (admin_conn
.projects()
.import_(projectId=self.project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | Import a backup from Cloud Storage to Cloud Datastore.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param file: the metadata file written by the projects.export operation.
:type file: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: specify which kinds/namespaces are to be imported.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:return: a resource operation instance.
:rtype: dict |
def publish_to_target(self, target_arn, message):
"""
Publish a message to a topic or an endpoint.
:param target_arn: either a TopicArn or an EndpointArn
:type target_arn: str
:param message: the default message you want to send
:type message: str
"""
conn = self.get_conn()
messages = {
'default': message
}
return conn.publish(
TargetArn=target_arn,
Message=json.dumps(messages),
MessageStructure='json'
) | def publish_to_target(self, target_arn, message):
conn = self.get_conn()
messages = {
'default': message
}
return conn.publish(
TargetArn=target_arn,
Message=json.dumps(messages),
MessageStructure='json'
) | Publish a message to a topic or an endpoint.
:param target_arn: either a TopicArn or an EndpointArn
:type target_arn: str
:param message: the default message you want to send
:type message: str |
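Since the hook wraps the message in a {'default': ...} structure with MessageStructure='json', subscribers without a protocol-specific message receive the default string; a short sketch with a placeholder topic ARN (the hook class path is an assumption)::

    from airflow.contrib.hooks.aws_sns_hook import AwsSnsHook

    hook = AwsSnsHook(aws_conn_id='aws_default')
    # boto3 receives Message='{"default": "deployment finished"}' under the hood.
    hook.publish_to_target(
        target_arn='arn:aws:sns:eu-west-1:123456789012:example-topic',
        message='deployment finished',
    )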
def get_hostname():
"""
Fetch the hostname using the callable from the config or using
`socket.getfqdn` as a fallback.
"""
# First we attempt to fetch the callable path from the config.
try:
callable_path = conf.get('core', 'hostname_callable')
except AirflowConfigException:
callable_path = None
# Then we handle the case when the config is missing or empty. This is the
# default behavior.
if not callable_path:
return socket.getfqdn()
# Since we have a callable path, we try to import and run it next.
module_path, attr_name = callable_path.split(':')
module = importlib.import_module(module_path)
callable = getattr(module, attr_name)
return callable() | def get_hostname():
# First we attempt to fetch the callable path from the config.
try:
callable_path = conf.get('core', 'hostname_callable')
except AirflowConfigException:
callable_path = None
# Then we handle the case when the config is missing or empty. This is the
# default behavior.
if not callable_path:
return socket.getfqdn()
# Since we have a callable path, we try to import and run it next.
module_path, attr_name = callable_path.split(':')
module = importlib.import_module(module_path)
callable = getattr(module, attr_name)
return callable() | Fetch the hostname using the callable from the config or using
`socket.getfqdn` as a fallback. |
def get_conn(self):
"""
Retrieves connection to Cloud Natural Language service.
:return: Cloud Natural Language service object
:rtype: google.cloud.language_v1.LanguageServiceClient
"""
if not self._conn:
self._conn = LanguageServiceClient(credentials=self._get_credentials())
return self._conn | def get_conn(self):
if not self._conn:
self._conn = LanguageServiceClient(credentials=self._get_credentials())
return self._conn | Retrieves connection to Cloud Natural Language service.
:return: Cloud Natural Language service object
:rtype: google.cloud.language_v1.LanguageServiceClient |
def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
"""
Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse
"""
client = self.get_conn()
return client.analyze_entities(
document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata
) | def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
client = self.get_conn()
return client.analyze_entities(
document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata
) | Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse |
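The document can be passed as a plain dict in the protobuf Document shape; a short sketch with inline text, assuming the contrib hook class name and a default GCP connection::

    from airflow.contrib.hooks.gcp_natural_language_hook import CloudNaturalLanguageHook
    from google.cloud.language_v1 import enums

    hook = CloudNaturalLanguageHook(gcp_conn_id='google_cloud_default')
    document = {'type': enums.Document.Type.PLAIN_TEXT,
                'content': 'Airflow was started at Airbnb in 2014.'}
    response = hook.analyze_entities(document=document,
                                     encoding_type=enums.EncodingType.UTF8)
    for entity in response.entities:
        print(entity.name, entity.salience)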
def annotate_text(self, document, features, encoding_type=None, retry=None, timeout=None, metadata=None):
"""
A convenience method that provides all the features that analyzeSentiment,
analyzeEntities, and analyzeSyntax provide in one call.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param features: The enabled features.
If a dict is provided, it must be of the same form as the protobuf message Features
:type features: dict or google.cloud.language_v1.enums.Features
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnnotateTextResponse
"""
client = self.get_conn()
return client.annotate_text(
document=document,
features=features,
encoding_type=encoding_type,
retry=retry,
timeout=timeout,
metadata=metadata,
) | def annotate_text(self, document, features, encoding_type=None, retry=None, timeout=None, metadata=None):
client = self.get_conn()
return client.annotate_text(
document=document,
features=features,
encoding_type=encoding_type,
retry=retry,
timeout=timeout,
metadata=metadata,
) | A convenience method that provides all the features that analyzeSentiment,
analyzeEntities, and analyzeSyntax provide in one call.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param features: The enabled features.
If a dict is provided, it must be of the same form as the protobuf message Features
:type features: dict or google.cloud.language_v1.enums.Features
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.types.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.AnnotateTextResponse |
def classify_text(self, document, retry=None, timeout=None, metadata=None):
"""
Classifies a document into categories.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.ClassifyTextResponse
"""
client = self.get_conn()
return client.classify_text(document=document, retry=retry, timeout=timeout, metadata=metadata) | def classify_text(self, document, retry=None, timeout=None, metadata=None):
client = self.get_conn()
return client.classify_text(document=document, retry=retry, timeout=timeout, metadata=metadata) | Classifies a document into categories.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or class google.cloud.language_v1.types.Document
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.language_v1.types.ClassifyTextResponse |
def get_template_field(env, fullname):
"""
Gets template fields for a specific operator class.
:param fullname: Full path to the operator class.
For example: ``airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator``
:return: List of template fields
:rtype: list[str]
"""
modname, classname = fullname.rsplit(".", 1)
try:
with mock(env.config.autodoc_mock_imports):
mod = import_module(modname)
except ImportError:
raise RoleException("Error loading %s module." % (modname, ))
clazz = getattr(mod, classname)
if not clazz:
raise RoleException("Error finding %s class in %s module." % (classname, modname))
template_fields = getattr(clazz, "template_fields")
if not template_fields:
raise RoleException(
"Could not find the template fields for %s class in %s module." % (classname, modname)
)
return list(template_fields) | def get_template_field(env, fullname):
modname, classname = fullname.rsplit(".", 1)
try:
with mock(env.config.autodoc_mock_imports):
mod = import_module(modname)
except ImportError:
raise RoleException("Error loading %s module." % (modname, ))
clazz = getattr(mod, classname)
if not clazz:
raise RoleException("Error finding %s class in %s module." % (classname, modname))
template_fields = getattr(clazz, "template_fields")
if not template_fields:
raise RoleException(
"Could not find the template fields for %s class in %s module." % (classname, modname)
)
return list(template_fields) | Gets template fields for a specific operator class.
:param fullname: Full path to the operator class.
For example: ``airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator``
:return: List of template fields
:rtype: list[str] |
def template_field_role(app, typ, rawtext, text, lineno, inliner, options={}, content=[]):
"""
A role that allows you to include a list of template fields in the middle of the text. This is especially
useful when writing guides describing how to use the operator.
The result is a list of fields where each field is rendered in a literal block.
Sample usage::
:template-fields:`airflow.contrib.operators.gcp_natural_language_operator.CloudLanguageAnalyzeSentimentOperator`
For further information look at:
* [Creating reStructuredText Interpreted Text Roles](http://docutils.sourceforge.net/docs/howto/rst-roles.html)
"""
text = utils.unescape(text)
try:
template_fields = get_template_field(app.env, text)
except RoleException as e:
msg = inliner.reporter.error("invalid class name %s \n%s" % (text, e, ), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = nodes.inline(rawtext=rawtext)
for i, field in enumerate(template_fields):
if i != 0:
node += nodes.Text(", ")
node += nodes.literal(field, "", nodes.Text(field))
return [node], [] | def template_field_role(app, typ, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text)
try:
template_fields = get_template_field(app.env, text)
except RoleException as e:
msg = inliner.reporter.error("invalid class name %s \n%s" % (text, e, ), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = nodes.inline(rawtext=rawtext)
for i, field in enumerate(template_fields):
if i != 0:
node += nodes.Text(", ")
node += nodes.literal(field, "", nodes.Text(field))
return [node], [] | A role that allows you to include a list of template fields in the middle of the text. This is especially
useful when writing guides describing how to use the operator.
The result is a list of fields where each field is rendered in a literal block.
Sample usage::
:template-fields:`airflow.contrib.operators.gcp_natural_language_operator.CloudLanguageAnalyzeSentimentOperator`
For further information look at:
* [Creating reStructuredText Interpreted Text Roles](http://docutils.sourceforge.net/docs/howto/rst-roles.html) |
def dispose_orm():
""" Properly close pooled database connections """
log.debug("Disposing DB connection pool (PID %s)", os.getpid())
global engine
global Session
if Session:
Session.remove()
Session = None
if engine:
engine.dispose()
engine = None | def dispose_orm():
log.debug("Disposing DB connection pool (PID %s)", os.getpid())
global engine
global Session
if Session:
Session.remove()
Session = None
if engine:
engine.dispose()
engine = None | Properly close pooled database connections |
def prepare_classpath():
"""
Ensures that certain subfolders of AIRFLOW_HOME are on the classpath
"""
if DAGS_FOLDER not in sys.path:
sys.path.append(DAGS_FOLDER)
# Add ./config/ for loading custom log parsers etc, or
# airflow_local_settings etc.
config_path = os.path.join(AIRFLOW_HOME, 'config')
if config_path not in sys.path:
sys.path.append(config_path)
if PLUGINS_FOLDER not in sys.path:
sys.path.append(PLUGINS_FOLDER) | def prepare_classpath():
if DAGS_FOLDER not in sys.path:
sys.path.append(DAGS_FOLDER)
# Add ./config/ for loading custom log parsers etc, or
# airflow_local_settings etc.
config_path = os.path.join(AIRFLOW_HOME, 'config')
if config_path not in sys.path:
sys.path.append(config_path)
if PLUGINS_FOLDER not in sys.path:
sys.path.append(PLUGINS_FOLDER) | Ensures that certain subfolders of AIRFLOW_HOME are on the classpath |
def _check_task_id(self, context):
"""
Gets the returned Celery result from the Airflow task
ID provided to the sensor, and returns True if the
Celery result has finished execution.
:param context: Airflow's execution context
:type context: dict
:return: True if task has been executed, otherwise False
:rtype: bool
"""
ti = context['ti']
celery_result = ti.xcom_pull(task_ids=self.target_task_id)
return celery_result.ready() | def _check_task_id(self, context):
ti = context['ti']
celery_result = ti.xcom_pull(task_ids=self.target_task_id)
return celery_result.ready() | Gets the returned Celery result from the Airflow task
ID provided to the sensor, and returns True if the
Celery result has finished execution.
:param context: Airflow's execution context
:type context: dict
:return: True if task has been executed, otherwise False
:rtype: bool |
def detect_conf_var():
"""Return true if the ticket cache contains "conf" information as is found
in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the
Sun Java Krb5LoginModule in Java6, so we need to take an action to work
around it.
"""
ticket_cache = configuration.conf.get('kerberos', 'ccache')
with open(ticket_cache, 'rb') as f:
# Note: this file is binary, so we check against a bytearray.
return b'X-CACHECONF:' in f.read() | def detect_conf_var():
ticket_cache = configuration.conf.get('kerberos', 'ccache')
with open(ticket_cache, 'rb') as f:
# Note: this file is binary, so we check against a bytearray.
return b'X-CACHECONF:' in f.read() | Return true if the ticket cache contains "conf" information as is found
in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the
Sun Java Krb5LoginModule in Java6, so we need to take an action to work
around it. |
def alchemy_to_dict(obj):
"""
Transforms a SQLAlchemy model instance into a dictionary
"""
if not obj:
return None
d = {}
for c in obj.__table__.columns:
value = getattr(obj, c.name)
if type(value) == datetime:
value = value.isoformat()
d[c.name] = value
return d | def alchemy_to_dict(obj):
if not obj:
return None
d = {}
for c in obj.__table__.columns:
value = getattr(obj, c.name)
if type(value) == datetime:
value = value.isoformat()
d[c.name] = value
return d | Transforms a SQLAlchemy model instance into a dictionary |
def chunks(items, chunk_size):
"""
Yield successive chunks of a given size from a list of items
"""
if chunk_size <= 0:
raise ValueError('Chunk size must be a positive integer')
for i in range(0, len(items), chunk_size):
yield items[i:i + chunk_size] | def chunks(items, chunk_size):
if chunk_size <= 0:
raise ValueError('Chunk size must be a positive integer')
for i in range(0, len(items), chunk_size):
yield items[i:i + chunk_size] | Yield successive chunks of a given size from a list of items |
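For example, a five-element list in chunks of two yields two full chunks and a remainder, and a chunk size of zero or less raises ValueError::

    >>> list(chunks([1, 2, 3, 4, 5], 2))
    [[1, 2], [3, 4], [5]]
    >>> list(chunks([1, 2, 3], 5))
    [[1, 2, 3]]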
def reduce_in_chunks(fn, iterable, initializer, chunk_size=0):
"""
Reduce the given list of items by splitting it into chunks
of the given size and passing each chunk through the reducer
"""
if len(iterable) == 0:
return initializer
if chunk_size == 0:
chunk_size = len(iterable)
return reduce(fn, chunks(iterable, chunk_size), initializer) | def reduce_in_chunks(fn, iterable, initializer, chunk_size=0):
if len(iterable) == 0:
return initializer
if chunk_size == 0:
chunk_size = len(iterable)
return reduce(fn, chunks(iterable, chunk_size), initializer) | Reduce the given list of items by splitting it into chunks
of the given size and passing each chunk through the reducer |
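Continuing the example above, the reducer sees whole chunks rather than single items, so summing a list in chunks of two works like this::

    >>> reduce_in_chunks(lambda acc, chunk: acc + sum(chunk), [1, 2, 3, 4, 5], 0, 2)
    15
    >>> reduce_in_chunks(lambda acc, chunk: acc + [len(chunk)], [1, 2, 3, 4, 5], [], 2)
    [2, 2, 1]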
def chain(*tasks):
"""
Given a number of tasks, builds a dependency chain.
chain(task_1, task_2, task_3, task_4)
is equivalent to
task_1.set_downstream(task_2)
task_2.set_downstream(task_3)
task_3.set_downstream(task_4)
"""
for up_task, down_task in zip(tasks[:-1], tasks[1:]):
up_task.set_downstream(down_task) | def chain(*tasks):
for up_task, down_task in zip(tasks[:-1], tasks[1:]):
up_task.set_downstream(down_task) | Given a number of tasks, builds a dependency chain.
chain(task_1, task_2, task_3, task_4)
is equivalent to
task_1.set_downstream(task_2)
task_2.set_downstream(task_3)
task_3.set_downstream(task_4) |
def pprinttable(rows):
"""Returns a pretty ascii table from tuples
If namedtuple are used, the table will have headers
"""
if not rows:
return
if hasattr(rows[0], '_fields'): # if namedtuple
headers = rows[0]._fields
else:
headers = ["col{}".format(i) for i in range(len(rows[0]))]
lens = [len(s) for s in headers]
for row in rows:
for i in range(len(rows[0])):
slenght = len("{}".format(row[i]))
if slenght > lens[i]:
lens[i] = slenght
formats = []
hformats = []
for i in range(len(rows[0])):
if isinstance(rows[0][i], int):
formats.append("%%%dd" % lens[i])
else:
formats.append("%%-%ds" % lens[i])
hformats.append("%%-%ds" % lens[i])
pattern = " | ".join(formats)
hpattern = " | ".join(hformats)
separator = "-+-".join(['-' * n for n in lens])
s = ""
s += separator + '\n'
s += (hpattern % tuple(headers)) + '\n'
s += separator + '\n'
def f(t):
return "{}".format(t) if isinstance(t, basestring) else t
for line in rows:
s += pattern % tuple(f(t) for t in line) + '\n'
s += separator + '\n'
return s | def pprinttable(rows):
if not rows:
return
if hasattr(rows[0], '_fields'): # if namedtuple
headers = rows[0]._fields
else:
headers = ["col{}".format(i) for i in range(len(rows[0]))]
lens = [len(s) for s in headers]
for row in rows:
for i in range(len(rows[0])):
slenght = len("{}".format(row[i]))
if slenght > lens[i]:
lens[i] = slenght
formats = []
hformats = []
for i in range(len(rows[0])):
if isinstance(rows[0][i], int):
formats.append("%%%dd" % lens[i])
else:
formats.append("%%-%ds" % lens[i])
hformats.append("%%-%ds" % lens[i])
pattern = " | ".join(formats)
hpattern = " | ".join(hformats)
separator = "-+-".join(['-' * n for n in lens])
s = ""
s += separator + '\n'
s += (hpattern % tuple(headers)) + '\n'
s += separator + '\n'
def f(t):
return "{}".format(t) if isinstance(t, basestring) else t
for line in rows:
s += pattern % tuple(f(t) for t in line) + '\n'
s += separator + '\n'
return s | Returns a pretty ASCII table from tuples.
If namedtuples are used, the table will have headers |
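An illustrative call with namedtuples, whose field names become the header row (output shown as comments; the helper relies on ``basestring``, so this assumes the Python 2 style environment the code targets)::

    from collections import namedtuple

    Row = namedtuple('Row', ['dag_id', 'runs'])
    print(pprinttable([Row('example_dag', 3), Row('tutorial', 12)]))
    # ------------+-----
    # dag_id      | runs
    # ------------+-----
    # example_dag |    3
    # tutorial    |   12
    # ------------+-----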
def render_log_filename(ti, try_number, filename_template):
"""
Given task instance, try_number, filename_template, return the rendered log
filename
:param ti: task instance
:param try_number: try_number of the task
:param filename_template: filename template, which can be jinja template or
python string template
"""
filename_template, filename_jinja_template = parse_template_string(filename_template)
if filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return filename_jinja_template.render(**jinja_context)
return filename_template.format(dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
try_number=try_number) | def render_log_filename(ti, try_number, filename_template):
filename_template, filename_jinja_template = parse_template_string(filename_template)
if filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return filename_jinja_template.render(**jinja_context)
return filename_template.format(dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
try_number=try_number) | Given a task instance, try_number, and filename_template, return the rendered
log filename.
:param ti: task instance
:param try_number: try_number of the task
:param filename_template: filename template, which can be a Jinja template or
a Python string template |
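The template may be a Jinja template or a str.format template; a sketch with the plain-format form, where ``ti`` is an existing TaskInstance (the template string itself is only an example)::

    template = '{dag_id}/{task_id}/{execution_date}/{try_number}.log'
    filename = render_log_filename(ti=ti, try_number=1, filename_template=template)
    # e.g. 'example_dag/print_date/2019-01-01T00:00:00+00:00/1.log'
    # A Jinja variant would instead be rendered against ti.get_template_context(),
    # e.g. '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log'.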
def get_conn(self):
"""Returns a Google Cloud Dataproc service object."""
http_authorized = self._authorize()
return build(
'dataproc', self.api_version, http=http_authorized,
cache_discovery=False) | def get_conn(self):
http_authorized = self._authorize()
return build(
'dataproc', self.api_version, http=http_authorized,
cache_discovery=False) | Returns a Google Cloud Dataproc service object. |
def wait(self, operation):
"""Awaits for Google Cloud Dataproc Operation to complete."""
submitted = _DataProcOperation(self.get_conn(), operation,
self.num_retries)
submitted.wait_for_done() | def wait(self, operation):
submitted = _DataProcOperation(self.get_conn(), operation,
self.num_retries)
submitted.wait_for_done() | Waits for a Google Cloud Dataproc Operation to complete. |
def _deep_string_coerce(content, json_path='json'):
"""
Coerces content, or all values of content if it is a dict, to a string. The
function will throw if content contains non-string or non-numeric types.
This function exists because the ``self.json`` field must be a
dict with only string values, since ``render_template`` will fail
for numerical values.
"""
c = _deep_string_coerce
if isinstance(content, six.string_types):
return content
elif isinstance(content, six.integer_types + (float,)):
# Databricks can tolerate either numeric or string types in the API backend.
return str(content)
elif isinstance(content, (list, tuple)):
return [c(e, '{0}[{1}]'.format(json_path, i)) for i, e in enumerate(content)]
elif isinstance(content, dict):
return {k: c(v, '{0}[{1}]'.format(json_path, k))
for k, v in list(content.items())}
else:
param_type = type(content)
msg = 'Type {0} used for parameter {1} is not a number or a string' \
.format(param_type, json_path)
raise AirflowException(msg) | def _deep_string_coerce(content, json_path='json'):
c = _deep_string_coerce
if isinstance(content, six.string_types):
return content
elif isinstance(content, six.integer_types + (float,)):
# Databricks can tolerate either numeric or string types in the API backend.
return str(content)
elif isinstance(content, (list, tuple)):
return [c(e, '{0}[{1}]'.format(json_path, i)) for i, e in enumerate(content)]
elif isinstance(content, dict):
return {k: c(v, '{0}[{1}]'.format(json_path, k))
for k, v in list(content.items())}
else:
param_type = type(content)
msg = 'Type {0} used for parameter {1} is not a number or a string' \
.format(param_type, json_path)
raise AirflowException(msg) | Coerces content, or all values of content if it is a dict, to a string. The
function will throw if content contains non-string or non-numeric types.
This function exists because the ``self.json`` field must be a
dict with only string values, since ``render_template`` will fail
for numerical values. |
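Concretely, numbers anywhere in the structure are stringified, containers are walked recursively, and any other type raises AirflowException naming the offending json path::

    params = {'notebook_params': {'retries': 3, 'rate': 0.5}, 'tags': ['etl', 7]}
    coerced = _deep_string_coerce(params)
    # -> {'notebook_params': {'retries': '3', 'rate': '0.5'}, 'tags': ['etl', '7']}
    # A datetime value, for instance, would raise AirflowException and point at
    # the failing path, e.g. json[notebook_params][run_date].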
def _handle_databricks_operator_execution(operator, hook, log, context):
"""
Handles the Airflow + Databricks lifecycle logic for a Databricks operator
:param operator: Databricks operator being handled
:param context: Airflow context
"""
if operator.do_xcom_push:
context['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id)
log.info('Run submitted with run_id: %s', operator.run_id)
run_page_url = hook.get_run_page_url(operator.run_id)
if operator.do_xcom_push:
context['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
while True:
run_state = hook.get_run_state(operator.run_id)
if run_state.is_terminal:
if run_state.is_successful:
log.info('%s completed successfully.', operator.task_id)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
return
else:
error_message = '{t} failed with terminal state: {s}'.format(
t=operator.task_id,
s=run_state)
raise AirflowException(error_message)
else:
log.info('%s in run state: %s', operator.task_id, run_state)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
log.info('Sleeping for %s seconds.', operator.polling_period_seconds)
time.sleep(operator.polling_period_seconds) | def _handle_databricks_operator_execution(operator, hook, log, context):
if operator.do_xcom_push:
context['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id)
log.info('Run submitted with run_id: %s', operator.run_id)
run_page_url = hook.get_run_page_url(operator.run_id)
if operator.do_xcom_push:
context['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
while True:
run_state = hook.get_run_state(operator.run_id)
if run_state.is_terminal:
if run_state.is_successful:
log.info('%s completed successfully.', operator.task_id)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
return
else:
error_message = '{t} failed with terminal state: {s}'.format(
t=operator.task_id,
s=run_state)
raise AirflowException(error_message)
else:
log.info('%s in run state: %s', operator.task_id, run_state)
log.info('View run status, Spark UI, and logs at %s', run_page_url)
log.info('Sleeping for %s seconds.', operator.polling_period_seconds)
time.sleep(operator.polling_period_seconds) | Handles the Airflow + Databricks lifecycle logic for a Databricks operator
:param operator: Databricks operator being handled
:param context: Airflow context |
def run_cli(self, pig, verbose=True):
"""
Run a pig script using the pig cli
>>> ph = PigCliHook()
>>> result = ph.run_cli("ls /;")
>>> ("hdfs://" in result)
True
"""
with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(pig.encode('utf-8'))
f.flush()
fname = f.name
pig_bin = 'pig'
cmd_extra = []
pig_cmd = [pig_bin, '-f', fname] + cmd_extra
if self.pig_properties:
pig_properties_list = self.pig_properties.split()
pig_cmd.extend(pig_properties_list)
if verbose:
self.log.info("%s", " ".join(pig_cmd))
sp = subprocess.Popen(
pig_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
for line in iter(sp.stdout.readline, b''):
stdout += line.decode('utf-8')
if verbose:
self.log.info(line.strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout | def run_cli(self, pig, verbose=True):
with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(pig.encode('utf-8'))
f.flush()
fname = f.name
pig_bin = 'pig'
cmd_extra = []
pig_cmd = [pig_bin, '-f', fname] + cmd_extra
if self.pig_properties:
pig_properties_list = self.pig_properties.split()
pig_cmd.extend(pig_properties_list)
if verbose:
self.log.info("%s", " ".join(pig_cmd))
sp = subprocess.Popen(
pig_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
for line in iter(sp.stdout.readline, b''):
stdout += line.decode('utf-8')
if verbose:
self.log.info(line.strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout | Run a pig script using the pig cli
>>> ph = PigCliHook()
>>> result = ph.run_cli("ls /;")
>>> ("hdfs://" in result)
True |
def fetch_celery_task_state(celery_task):
"""
Fetch and return the state of the given celery task. The scope of this function is
global so that it can be called by subprocesses in the pool.
:param celery_task: a tuple of the Celery task key and the async Celery object used
to fetch the task's state
:type celery_task: tuple(str, celery.result.AsyncResult)
:return: a tuple of the Celery task key and the Celery state of the task
:rtype: tuple[str, str]
"""
try:
with timeout(seconds=2):
# Accessing state property of celery task will make actual network request
# to get the current state of the task.
res = (celery_task[0], celery_task[1].state)
except Exception as e:
exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0],
traceback.format_exc())
res = ExceptionWithTraceback(e, exception_traceback)
return res | def fetch_celery_task_state(celery_task):
try:
with timeout(seconds=2):
# Accessing state property of celery task will make actual network request
# to get the current state of the task.
res = (celery_task[0], celery_task[1].state)
except Exception as e:
exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0],
traceback.format_exc())
res = ExceptionWithTraceback(e, exception_traceback)
return res | Fetch and return the state of the given celery task. The scope of this function is
global so that it can be called by subprocesses in the pool.
:param celery_task: a tuple of the Celery task key and the async Celery object used
to fetch the task's state
:type celery_task: tuple(str, celery.result.AsyncResult)
:return: a tuple of the Celery task key and the Celery state of the task
:rtype: tuple[str, str] |
def _num_tasks_per_send_process(self, to_send_count):
"""
How many Celery tasks should each worker process send.
:return: Number of tasks that should be sent per process
:rtype: int
"""
return max(1,
int(math.ceil(1.0 * to_send_count / self._sync_parallelism))) | def _num_tasks_per_send_process(self, to_send_count):
return max(1,
int(math.ceil(1.0 * to_send_count / self._sync_parallelism))) | How many Celery tasks should each worker process send.
:return: Number of tasks that should be sent per process
:rtype: int |
def _num_tasks_per_fetch_process(self):
"""
How many Celery task states each worker process should fetch.
:return: Number of tasks that should be fetched per process
:rtype: int
"""
return max(1,
int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism))) | def _num_tasks_per_fetch_process(self):
return max(1,
int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism))) | How many Celery task states each worker process should fetch.
:return: Number of tasks that should be fetched per process
:rtype: int |
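As a worked example of the ceiling division both helpers use: 10 tasks across a sync parallelism of 4 means ceil(10 / 4) = 3 tasks per process, and the max(1, ...) guard keeps the result at least 1 when there are fewer tasks than processes::

    import math

    def tasks_per_process(task_count, sync_parallelism):
        # Same arithmetic as the two executor helpers above.
        return max(1, int(math.ceil(1.0 * task_count / sync_parallelism)))

    assert tasks_per_process(10, 4) == 3
    assert tasks_per_process(2, 8) == 1  # never fewer than one per process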
def setdefault(cls, key, default, deserialize_json=False):
"""
Like a Python builtin dict object, setdefault returns the current value
for a key, and if it isn't there, stores the default value and returns it.
:param key: Dict key for this Variable
:type key: str
:param default: Default value to set and return if the variable
isn't already in the DB
:type default: Mixed
:param deserialize_json: Store this as a JSON encoded value in the DB
and un-encode it when retrieving a value
:return: Mixed
"""
obj = Variable.get(key, default_var=None,
deserialize_json=deserialize_json)
if obj is None:
if default is not None:
Variable.set(key, default, serialize_json=deserialize_json)
return default
else:
raise ValueError('Default Value must be set')
else:
return obj | def setdefault(cls, key, default, deserialize_json=False):
obj = Variable.get(key, default_var=None,
deserialize_json=deserialize_json)
if obj is None:
if default is not None:
Variable.set(key, default, serialize_json=deserialize_json)
return default
else:
raise ValueError('Default Value must be set')
else:
return obj | Like a Python builtin dict object, setdefault returns the current value
for a key, and if it isn't there, stores the default value and returns it.
:param key: Dict key for this Variable
:type key: str
:param default: Default value to set and return if the variable
isn't already in the DB
:type default: Mixed
:param deserialize_json: Store this as a JSON encoded value in the DB
and un-encode it when retrieving a value
:return: Mixed |
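A short usage sketch: the first call stores the default (JSON-encoded here), later calls return whatever is already in the DB, and a missing default raises ValueError (key and payload are examples)::

    from airflow.models import Variable

    # Stores {'bucket': 'my-bucket'} under 'etl_config' if the key is absent,
    # otherwise returns the stored, JSON-decoded value unchanged.
    config = Variable.setdefault('etl_config', {'bucket': 'my-bucket'}, deserialize_json=True)

    # Variable.setdefault('etl_config', None) would raise
    # ValueError('Default Value must be set') if the key did not exist.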
def get_conn(self):
"""
Returns a Google MLEngine service object.
"""
authed_http = self._authorize()
return build('ml', 'v1', http=authed_http, cache_discovery=False) | def get_conn(self):
authed_http = self._authorize()
return build('ml', 'v1', http=authed_http, cache_discovery=False) | Returns a Google MLEngine service object. |
def create_job(self, project_id, job, use_existing_job_fn=None):
"""
Launches an MLEngine job and waits for it to reach a terminal state.
:param project_id: The Google Cloud project id within which MLEngine
job will be launched.
:type project_id: str
:param job: MLEngine Job object that should be provided to the MLEngine
API, such as: ::
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
:type job: dict
:param use_existing_job_fn: In case an MLEngine job with the same
job_id already exists, this method (if provided) decides whether
we should use the existing job, continue waiting for it to finish,
and return the job object. It should accept an MLEngine job
object and return a boolean value indicating whether it is OK to
reuse the existing job. If 'use_existing_job_fn' is not provided,
we reuse the existing MLEngine job by default.
:type use_existing_job_fn: function
:return: The MLEngine job object if the job successfully reaches a
terminal state (which might be a FAILED or CANCELLED state).
:rtype: dict
"""
request = self._mlengine.projects().jobs().create(
parent='projects/{}'.format(project_id),
body=job)
job_id = job['jobId']
try:
request.execute()
except HttpError as e:
# 409 means there is an existing job with the same job ID.
if e.resp.status == 409:
if use_existing_job_fn is not None:
existing_job = self._get_job(project_id, job_id)
if not use_existing_job_fn(existing_job):
self.log.error(
'Job with job_id %s already exists, but it does '
'not match our expectation: %s',
job_id, existing_job
)
raise
self.log.info(
'Job with job_id %s already exists. Will wait for it to finish',
job_id
)
else:
self.log.error('Failed to create MLEngine job: {}'.format(e))
raise
return self._wait_for_job_done(project_id, job_id) | def create_job(self, project_id, job, use_existing_job_fn=None):
request = self._mlengine.projects().jobs().create(
parent='projects/{}'.format(project_id),
body=job)
job_id = job['jobId']
try:
request.execute()
except HttpError as e:
# 409 means there is an existing job with the same job ID.
if e.resp.status == 409:
if use_existing_job_fn is not None:
existing_job = self._get_job(project_id, job_id)
if not use_existing_job_fn(existing_job):
self.log.error(
'Job with job_id %s already exists, but it does '
'not match our expectation: %s',
job_id, existing_job
)
raise
self.log.info(
'Job with job_id %s already exists. Will wait for it to finish',
job_id
)
else:
self.log.error('Failed to create MLEngine job: {}'.format(e))
raise
return self._wait_for_job_done(project_id, job_id) | Launches an MLEngine job and waits for it to reach a terminal state.
:param project_id: The Google Cloud project id within which MLEngine
job will be launched.
:type project_id: str
:param job: MLEngine Job object that should be provided to the MLEngine
API, such as: ::
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
:type job: dict
:param use_existing_job_fn: In case an MLEngine job with the same
job_id already exists, this method (if provided) decides whether
we should use the existing job, continue waiting for it to finish,
and return the job object. It should accept an MLEngine job
object and return a boolean value indicating whether it is OK to
reuse the existing job. If 'use_existing_job_fn' is not provided,
we reuse the existing MLEngine job by default.
:type use_existing_job_fn: function
:return: The MLEngine job object if the job successfully reaches a
terminal state (which might be a FAILED or CANCELLED state).
:rtype: dict |
def _get_job(self, project_id, job_id):
"""
Gets an MLEngine job based on the job name.
:return: MLEngine job object if it succeeds.
:rtype: dict
Raises:
googleapiclient.errors.HttpError: if HTTP error is returned from server
"""
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = self._mlengine.projects().jobs().get(name=job_name)
while True:
try:
return request.execute()
except HttpError as e:
if e.resp.status == 429:
# polling after 30 seconds when quota failure occurs
time.sleep(30)
else:
self.log.error('Failed to get MLEngine job: {}'.format(e))
raise | def _get_job(self, project_id, job_id):
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = self._mlengine.projects().jobs().get(name=job_name)
while True:
try:
return request.execute()
except HttpError as e:
if e.resp.status == 429:
# polling after 30 seconds when quota failure occurs
time.sleep(30)
else:
self.log.error('Failed to get MLEngine job: {}'.format(e))
raise | Gets an MLEngine job based on the job name.
:return: MLEngine job object if it succeeds.
:rtype: dict
Raises:
googleapiclient.errors.HttpError: if HTTP error is returned from server |
def _wait_for_job_done(self, project_id, job_id, interval=30):
"""
Waits for the Job to reach a terminal state.
This method will periodically check the job state until the job reaches
a terminal state.
Raises:
googleapiclient.errors.HttpError: if HTTP error is returned when getting
the job
"""
if interval <= 0:
raise ValueError("Interval must be > 0")
while True:
job = self._get_job(project_id, job_id)
if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
return job
time.sleep(interval) | def _wait_for_job_done(self, project_id, job_id, interval=30):
if interval <= 0:
raise ValueError("Interval must be > 0")
while True:
job = self._get_job(project_id, job_id)
if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
return job
time.sleep(interval) | Waits for the Job to reach a terminal state.
This method will periodically check the job state until the job
reaches a terminal state.
Raises:
googleapiclient.errors.HttpError: if HTTP error is returned when getting
the job |
def create_version(self, project_id, model_name, version_spec):
"""
Creates the Version on Google Cloud ML Engine.
Returns the operation if the version was created successfully and
raises an error otherwise.
"""
parent_name = 'projects/{}/models/{}'.format(project_id, model_name)
create_request = self._mlengine.projects().models().versions().create(
parent=parent_name, body=version_spec)
response = create_request.execute()
get_request = self._mlengine.projects().operations().get(
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None) | def create_version(self, project_id, model_name, version_spec):
parent_name = 'projects/{}/models/{}'.format(project_id, model_name)
create_request = self._mlengine.projects().models().versions().create(
parent=parent_name, body=version_spec)
response = create_request.execute()
get_request = self._mlengine.projects().operations().get(
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None) | Creates the Version on Google Cloud ML Engine.
Returns the operation if the version was created successfully and
raises an error otherwise. |
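As an illustrative sketch of a version_spec accepted by create_version: the model name, SavedModel path and runtime version are assumptions, and `hook` refers to a hook instance constructed as in the earlier sketch.

# Sketch, not part of the source: all values are placeholders.
version_spec = {
    'name': 'v1',
    'deploymentUri': 'gs://my-bucket/exported_model/',
    'runtimeVersion': '1.13',
}
operation = hook.create_version(
    project_id='my-gcp-project',
    model_name='my_model',
    version_spec=version_spec)
# `operation` is the finished long-running operation once polling reports done=True.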
def set_default_version(self, project_id, model_name, version_name):
"""
Sets a version to be the default. Blocks until finished.
"""
full_version_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
request = self._mlengine.projects().models().versions().setDefault(
name=full_version_name, body={})
try:
response = request.execute()
self.log.info('Successfully set version: %s to default', response)
return response
except HttpError as e:
self.log.error('Something went wrong: %s', e)
raise | def set_default_version(self, project_id, model_name, version_name):
full_version_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
request = self._mlengine.projects().models().versions().setDefault(
name=full_version_name, body={})
try:
response = request.execute()
self.log.info('Successfully set version: %s to default', response)
return response
except HttpError as e:
self.log.error('Something went wrong: %s', e)
raise | Sets a version to be the default. Blocks until finished. |
def list_versions(self, project_id, model_name):
"""
Lists all available versions of a model. Blocks until finished.
"""
result = []
full_parent_name = 'projects/{}/models/{}'.format(
project_id, model_name)
request = self._mlengine.projects().models().versions().list(
parent=full_parent_name, pageSize=100)
response = request.execute()
next_page_token = response.get('nextPageToken', None)
result.extend(response.get('versions', []))
while next_page_token is not None:
next_request = self._mlengine.projects().models().versions().list(
parent=full_parent_name,
pageToken=next_page_token,
pageSize=100)
response = next_request.execute()
next_page_token = response.get('nextPageToken', None)
result.extend(response.get('versions', []))
time.sleep(5)
return result | def list_versions(self, project_id, model_name):
result = []
full_parent_name = 'projects/{}/models/{}'.format(
project_id, model_name)
request = self._mlengine.projects().models().versions().list(
parent=full_parent_name, pageSize=100)
response = request.execute()
next_page_token = response.get('nextPageToken', None)
result.extend(response.get('versions', []))
while next_page_token is not None:
next_request = self._mlengine.projects().models().versions().list(
parent=full_parent_name,
pageToken=next_page_token,
pageSize=100)
response = next_request.execute()
next_page_token = response.get('nextPageToken', None)
result.extend(response.get('versions', []))
time.sleep(5)
return result | Lists all available versions of a model. Blocks until finished. |
def delete_version(self, project_id, model_name, version_name):
"""
Deletes the given version of a model. Blocks until finished.
"""
full_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
delete_request = self._mlengine.projects().models().versions().delete(
name=full_name)
response = delete_request.execute()
get_request = self._mlengine.projects().operations().get(
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None) | def delete_version(self, project_id, model_name, version_name):
full_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
delete_request = self._mlengine.projects().models().versions().delete(
name=full_name)
response = delete_request.execute()
get_request = self._mlengine.projects().operations().get(
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None) | Deletes the given version of a model. Blocks until finished. |
def create_model(self, project_id, model):
"""
Create a Model. Blocks until finished.
"""
if not model['name']:
raise ValueError("Model name must be provided and "
"could not be an empty string")
project = 'projects/{}'.format(project_id)
request = self._mlengine.projects().models().create(
parent=project, body=model)
return request.execute() | def create_model(self, project_id, model):
if not model['name']:
raise ValueError("Model name must be provided and "
"could not be an empty string")
project = 'projects/{}'.format(project_id)
request = self._mlengine.projects().models().create(
parent=project, body=model)
return request.execute() | Create a Model. Blocks until finished. |
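A minimal usage sketch for create_model; only a non-empty 'name' is required by the check above, and the remaining fields, values and the `hook` instance are assumptions.

# Sketch, not part of the source: `hook` is a contrib MLEngineHook instance.
model_body = {
    'name': 'my_model',               # required, must be a non-empty string
    'description': 'Example model',   # optional
    'regions': ['us-central1'],       # optional
}
created_model = hook.create_model(project_id='my-gcp-project', model=model_body)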
def get_model(self, project_id, model_name):
"""
Gets a Model. Blocks until finished.
"""
if not model_name:
raise ValueError("Model name must be provided and "
"it could not be an empty string")
full_model_name = 'projects/{}/models/{}'.format(
project_id, model_name)
request = self._mlengine.projects().models().get(name=full_model_name)
try:
return request.execute()
except HttpError as e:
if e.resp.status == 404:
self.log.error('Model was not found: %s', e)
return None
raise | def get_model(self, project_id, model_name):
if not model_name:
raise ValueError("Model name must be provided and "
"it could not be an empty string")
full_model_name = 'projects/{}/models/{}'.format(
project_id, model_name)
request = self._mlengine.projects().models().get(name=full_model_name)
try:
return request.execute()
except HttpError as e:
if e.resp.status == 404:
self.log.error('Model was not found: %s', e)
return None
raise | Gets a Model. Blocks until finished. |
def write_batch_data(self, items):
"""
Write batch items to the DynamoDB table with provisioned throughput capacity.
"""
dynamodb_conn = self.get_conn()
try:
table = dynamodb_conn.Table(self.table_name)
with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch:
for item in items:
batch.put_item(Item=item)
return True
except Exception as general_error:
raise AirflowException(
'Failed to insert items in dynamodb, error: {error}'.format(
error=str(general_error)
)
) | def write_batch_data(self, items):
dynamodb_conn = self.get_conn()
try:
table = dynamodb_conn.Table(self.table_name)
with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch:
for item in items:
batch.put_item(Item=item)
return True
except Exception as general_error:
raise AirflowException(
'Failed to insert items in dynamodb, error: {error}'.format(
error=str(general_error)
)
) | Write batch items to the DynamoDB table with provisioned throughput capacity. |
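A hedged usage sketch for write_batch_data; the hook class path, constructor arguments and item contents are assumptions consistent with the table_name and table_keys attributes referenced above.

# Sketch, not part of the source: class path and arguments are assumed.
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook

items = [
    {'customer_id': '42', 'order_id': 'a1', 'total': 10},
    {'customer_id': '42', 'order_id': 'a2', 'total': 25},
]
hook = AwsDynamoDBHook(table_name='orders',
                       table_keys=['customer_id', 'order_id'])
hook.write_batch_data(items)   # returns True when all items were written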
def _integrate_plugins():
"""Integrate plugins to the context."""
from airflow.plugins_manager import executors_modules
for executors_module in executors_modules:
sys.modules[executors_module.__name__] = executors_module
globals()[executors_module._name] = executors_module | def _integrate_plugins():
from airflow.plugins_manager import executors_modules
for executors_module in executors_modules:
sys.modules[executors_module.__name__] = executors_module
globals()[executors_module._name] = executors_module | Integrate plugins into the context. |
def get_default_executor():
"""Creates a new instance of the configured executor if none exists and returns it"""
global DEFAULT_EXECUTOR
if DEFAULT_EXECUTOR is not None:
return DEFAULT_EXECUTOR
executor_name = configuration.conf.get('core', 'EXECUTOR')
DEFAULT_EXECUTOR = _get_executor(executor_name)
log = LoggingMixin().log
log.info("Using executor %s", executor_name)
return DEFAULT_EXECUTOR | def get_default_executor():
global DEFAULT_EXECUTOR
if DEFAULT_EXECUTOR is not None:
return DEFAULT_EXECUTOR
executor_name = configuration.conf.get('core', 'EXECUTOR')
DEFAULT_EXECUTOR = _get_executor(executor_name)
log = LoggingMixin().log
log.info("Using executor %s", executor_name)
return DEFAULT_EXECUTOR | Creates a new instance of the configured executor if none exists and returns it |
def _get_executor(executor_name):
"""
Creates a new instance of the named executor.
In case the executor name is not known in Airflow,
look for it in the plugins
"""
if executor_name == Executors.LocalExecutor:
return LocalExecutor()
elif executor_name == Executors.SequentialExecutor:
return SequentialExecutor()
elif executor_name == Executors.CeleryExecutor:
from airflow.executors.celery_executor import CeleryExecutor
return CeleryExecutor()
elif executor_name == Executors.DaskExecutor:
from airflow.executors.dask_executor import DaskExecutor
return DaskExecutor()
elif executor_name == Executors.KubernetesExecutor:
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
return KubernetesExecutor()
else:
# Loading plugins
_integrate_plugins()
executor_path = executor_name.split('.')
if len(executor_path) != 2:
raise AirflowException(
"Executor {0} not supported: "
"please specify in format plugin_module.executor".format(executor_name))
if executor_path[0] in globals():
return globals()[executor_path[0]].__dict__[executor_path[1]]()
else:
raise AirflowException("Executor {0} not supported.".format(executor_name)) | def _get_executor(executor_name):
if executor_name == Executors.LocalExecutor:
return LocalExecutor()
elif executor_name == Executors.SequentialExecutor:
return SequentialExecutor()
elif executor_name == Executors.CeleryExecutor:
from airflow.executors.celery_executor import CeleryExecutor
return CeleryExecutor()
elif executor_name == Executors.DaskExecutor:
from airflow.executors.dask_executor import DaskExecutor
return DaskExecutor()
elif executor_name == Executors.KubernetesExecutor:
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
return KubernetesExecutor()
else:
# Loading plugins
_integrate_plugins()
executor_path = executor_name.split('.')
if len(executor_path) != 2:
raise AirflowException(
"Executor {0} not supported: "
"please specify in format plugin_module.executor".format(executor_name))
if executor_path[0] in globals():
return globals()[executor_path[0]].__dict__[executor_path[1]]()
else:
raise AirflowException("Executor {0} not supported.".format(executor_name)) | Creates a new instance of the named executor.
In case the executor name is not known in Airflow,
look for it in the plugins |
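To make the plugin branch concrete, a small hedged sketch; 'my_plugin' and 'MyExecutor' are hypothetical names for an executor registered through a plugin, not names from the source.

# Sketch, not part of the source: built-in names (e.g. 'LocalExecutor') resolve
# directly; anything else must be given as 'plugin_module.executor'.
executor_name = 'my_plugin.MyExecutor'
module_name, class_name = executor_name.split('.')   # must split into exactly two parts
# After _integrate_plugins() has run, the plugin module is reachable via globals():
#     executor = globals()[module_name].__dict__[class_name]()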
def on_error(self, error, items):
"""
Handles error callbacks when using Segment with segment_debug_mode set to True
"""
self.log.error('Encountered Segment error: {segment_error} with '
'items: {with_items}'.format(segment_error=error,
with_items=items))
raise AirflowException('Segment error: {}'.format(error)) | def on_error(self, error, items):
self.log.error('Encountered Segment error: {segment_error} with '
'items: {with_items}'.format(segment_error=error,
with_items=items))
raise AirflowException('Segment error: {}'.format(error)) | Handles error callbacks when using Segment with segment_debug_mode set to True |
def get_conn(self):
"""
Returns a mssql connection object
"""
conn = self.get_connection(self.mssql_conn_id)
conn = pymssql.connect(
server=conn.host,
user=conn.login,
password=conn.password,
database=self.schema or conn.schema,
port=conn.port)
return conn | def get_conn(self):
conn = self.get_connection(self.mssql_conn_id)
conn = pymssql.connect(
server=conn.host,
user=conn.login,
password=conn.password,
database=self.schema or conn.schema,
port=conn.port)
return conn | Returns a mssql connection object |
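A short usage sketch for the mssql connection; the hook import path, connection id, schema and query are placeholder assumptions.

# Sketch, not part of the source: assumes a configured 'mssql_default' connection.
from airflow.hooks.mssql_hook import MsSqlHook

hook = MsSqlHook(mssql_conn_id='mssql_default', schema='my_database')
conn = hook.get_conn()
cursor = conn.cursor()
cursor.execute('SELECT TOP 10 * FROM my_table')
rows = cursor.fetchall()
conn.close()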
def trigger_dag(dag_id):
"""
Trigger a new dag run for a Dag with an execution date of now unless
specified in the data.
"""
data = request.get_json(force=True)
run_id = None
if 'run_id' in data:
run_id = data['run_id']
conf = None
if 'conf' in data:
conf = data['conf']
execution_date = None
if 'execution_date' in data and data['execution_date'] is not None:
execution_date = data['execution_date']
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'
.format(execution_date))
_log.info(error_message)
response = jsonify({'error': error_message})
response.status_code = 400
return response
try:
dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
if getattr(g, 'user', None):
_log.info("User %s created %s", g.user, dr)
response = jsonify(message="Created {}".format(dr))
return response | def trigger_dag(dag_id):
data = request.get_json(force=True)
run_id = None
if 'run_id' in data:
run_id = data['run_id']
conf = None
if 'conf' in data:
conf = data['conf']
execution_date = None
if 'execution_date' in data and data['execution_date'] is not None:
execution_date = data['execution_date']
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'
.format(execution_date))
_log.info(error_message)
response = jsonify({'error': error_message})
response.status_code = 400
return response
try:
dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
if getattr(g, 'user', None):
_log.info("User %s created %s", g.user, dr)
response = jsonify(message="Created {}".format(dr))
return response | Trigger a new dag run for a Dag with an execution date of now unless
specified in the data. |
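For illustration, a hedged sketch of the JSON body this view accepts; the webserver URL, dag id and field values are placeholders, and the route shown is an assumption following the experimental API convention rather than something stated above.

# Sketch, not part of the source: all values and the URL are placeholders.
import requests

payload = {
    'run_id': 'manual_run_001',                      # optional
    'conf': {'param': 'value'},                      # optional, passed to the DagRun
    'execution_date': '2015-11-16T14:34:15+00:00',   # optional, format from the error message above
}
resp = requests.post(
    'http://localhost:8080/api/experimental/dags/my_dag/dag_runs',
    json=payload)
print(resp.status_code, resp.json())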
def delete_dag(dag_id):
"""
Delete all DB records related to the specified Dag.
"""
try:
count = delete.delete_dag(dag_id)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
return jsonify(message="Removed {} record(s)".format(count), count=count) | def delete_dag(dag_id):
try:
count = delete.delete_dag(dag_id)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
return jsonify(message="Removed {} record(s)".format(count), count=count) | Delete all DB records related to the specified Dag. |
def task_info(dag_id, task_id):
"""Returns a JSON with a task's public instance variables. """
try:
info = get_task(dag_id, task_id)
except AirflowException as err:
_log.info(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
# JSONify and return.
fields = {k: str(v)
for k, v in vars(info).items()
if not k.startswith('_')}
return jsonify(fields) | def task_info(dag_id, task_id):
try:
info = get_task(dag_id, task_id)
except AirflowException as err:
_log.info(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
# JSONify and return.
fields = {k: str(v)
for k, v in vars(info).items()
if not k.startswith('_')}
return jsonify(fields) | Returns a JSON with a task's public instance variables. |
def get_pools():
"""Get all pools."""
try:
pools = pool_api.get_pools()
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
else:
return jsonify([p.to_json() for p in pools]) | def get_pools():
try:
pools = pool_api.get_pools()
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
else:
return jsonify([p.to_json() for p in pools]) | Get all pools. |
def create_pool():
"""Create a pool."""
params = request.get_json(force=True)
try:
pool = pool_api.create_pool(**params)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
else:
return jsonify(pool.to_json()) | def create_pool():
params = request.get_json(force=True)
try:
pool = pool_api.create_pool(**params)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
else:
return jsonify(pool.to_json()) | Create a pool. |
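A hedged sketch of the POST body this endpoint forwards to pool_api.create_pool as keyword arguments; the field names (name, slots, description) and the URL are assumptions, not taken from the source.

# Sketch, not part of the source: assumed keyword arguments for pool_api.create_pool.
import requests

params = {
    'name': 'backfill_pool',
    'slots': 8,
    'description': 'Pool for backfill tasks',
}
resp = requests.post('http://localhost:8080/api/experimental/pools',  # placeholder URL
                     json=params)
print(resp.json())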
def delete_pool(name):
"""Delete pool."""
try:
pool = pool_api.delete_pool(name=name)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
else:
return jsonify(pool.to_json()) | def delete_pool(name):
try:
pool = pool_api.delete_pool(name=name)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
else:
return jsonify(pool.to_json()) | Delete pool. |