INSTRUCTION | RESPONSE
---|---
Replaces invalid MLEngine job_id characters with _. | def _normalize_mlengine_job_id(job_id):
"""
Replaces invalid MLEngine job_id characters with '_'.
This also adds a leading 'z' in case job_id starts with an invalid
character.
Args:
job_id: A job_id str that may have invalid characters.
Returns:
A valid job_id representation.
"""
# Add a prefix when a job_id starts with a digit or a template
match = re.search(r'\d|\{{2}', job_id)
if match and match.start() == 0:
job = 'z_{}'.format(job_id)
else:
job = job_id
# Clean up 'bad' characters except templates
tracker = 0
cleansed_job_id = ''
for m in re.finditer(r'\{{2}.+?\}{2}', job):
cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_',
job[tracker:m.start()])
cleansed_job_id += job[m.start():m.end()]
tracker = m.end()
# Clean up last substring or the full string if no templates
cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:])
return cleansed_job_id |
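Below is a minimal usage sketch for the normalizer above. The import path is an assumption about where this helper typically lives in Airflow's contrib operators, and the expected values in the comments follow from the logic shown.

```python
# Assumed import path for the helper shown above.
from airflow.contrib.operators.mlengine_operator import _normalize_mlengine_job_id

# A job_id starting with a digit gets a 'z_' prefix; runs of bad characters become '_'.
print(_normalize_mlengine_job_id('10-my job.2018'))   # -> 'z_10_my_job_2018'

# Jinja templates ('{{ ... }}') are left intact so they can still be rendered later.
print(_normalize_mlengine_job_id('{{ ds }}-train'))   # -> 'z_{{ ds }}_train'
```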
Extract error code from ftp exception | def _get_error_code(self, e):
"""Extract error code from ftp exception"""
try:
matches = self.error_code_pattern.match(str(e))
code = int(matches.group(0))
return code
except (ValueError, AttributeError):
    # the pattern may not match (matches is None) or the match may not be an integer
return e |
Integrate plugins to the context | def _integrate_plugins():
"""Integrate plugins to the context"""
import sys
from airflow.plugins_manager import sensors_modules
for sensors_module in sensors_modules:
sys.modules[sensors_module.__name__] = sensors_module
globals()[sensors_module._name] = sensors_module |
Remove any existing DAG runs for the perf test DAGs. | def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id.in_(DAG_IDS),
).all()
for dr in drs:
logging.info('Deleting DagRun :: {}'.format(dr))
session.delete(dr) |
Remove any existing task instances for the perf test DAGs. | def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: {}'.format(ti))
session.delete(ti)
session.commit() |
Toggle the pause state of the DAGs in the test. | def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit() |
Print operational metrics for the scheduler test. | def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
successful_tis = [x for x in tis if x.state == State.SUCCESS]
ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration) for ti in successful_tis]
ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
'execution_date',
'queue_delay',
'start_delay', 'land_time',
'duration'])
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print('DAG {}'.format(dag_id))
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
columns=['dag_id', 'task_id', 'execution_date', 'state']))
session.commit() |
Override the scheduler heartbeat to determine when the test is complete | def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
# the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum([(timezone.utcnow() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(timezone.utcnow() - self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if len(successful_tis) == num_task_instances:
self.log.info("All tasks processed! Printing stats.")
else:
self.log.info("Test timeout reached. Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit() |
Invoke Lambda Function | def invoke_lambda(self, payload):
"""
Invoke Lambda Function
"""
awslambda_conn = self.get_conn()
response = awslambda_conn.invoke(
FunctionName=self.function_name,
InvocationType=self.invocation_type,
LogType=self.log_type,
Payload=payload,
Qualifier=self.qualifier
)
return response |
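A short usage sketch for the invocation above, assuming the surrounding class is Airflow's AwsLambdaHook and that its constructor accepts the settings shown; the function name and payload are placeholders, not values from the source.

```python
import json
from airflow.contrib.hooks.aws_lambda_hook import AwsLambdaHook  # assumed import path

hook = AwsLambdaHook(function_name='my-function',        # hypothetical function name
                     invocation_type='RequestResponse',
                     log_type='None',
                     aws_conn_id='aws_default')
response = hook.invoke_lambda(payload=json.dumps({'key': 'value'}))
# boto3 returns the function output as a streaming body under 'Payload'.
print(response['Payload'].read().decode('utf-8'))
```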
Return the state of the DAG run identified by the given dag_id and execution_date. | def get_dag_run_state(dag_id, execution_date):
"""Return the state of the DAG run identified by the given dag_id and execution_date."""
dagbag = DagBag()
# Check DAG exists.
if dag_id not in dagbag.dags:
error_message = "Dag id {} not found".format(dag_id)
raise DagNotFound(error_message)
# Get DAG object and check Task Exists
dag = dagbag.get_dag(dag_id)
# Get DagRun object and check that it exists
dagrun = dag.get_dagrun(execution_date=execution_date)
if not dagrun:
error_message = ('Dag Run for date {} not found in dag {}'
.format(execution_date, dag_id))
raise DagRunNotFound(error_message)
return {'state': dagrun.get_state()} |
Creates Operators needed for model evaluation and returns. | def create_evaluate_ops(task_prefix,
data_format,
input_paths,
prediction_path,
metric_fn_and_keys,
validate_fn,
batch_prediction_job_id=None,
project_id=None,
region=None,
dataflow_options=None,
model_uri=None,
model_name=None,
version_name=None,
dag=None):
"""
Creates Operators needed for model evaluation and returns.
It gets prediction over inputs via Cloud ML Engine BatchPrediction API by
calling MLEngineBatchPredictionOperator, then summarize and validate
the result via Cloud Dataflow using DataFlowPythonOperator.
For details and pricing about Batch prediction, please refer to the website
https://cloud.google.com/ml-engine/docs/how-tos/batch-predict
and for Cloud Dataflow, https://cloud.google.com/dataflow/docs/
It returns three chained operators for prediction, summary, and validation,
named as <prefix>-prediction, <prefix>-summary, and <prefix>-validation,
respectively.
(<prefix> should contain only alphanumeric characters or hyphen.)
The upstream and downstream can be set accordingly like:
pred, _, val = create_evaluate_ops(...)
pred.set_upstream(upstream_op)
...
downstream_op.set_upstream(val)
Callers will provide two python callables, metric_fn and validate_fn, in
order to customize the evaluation behavior as they wish.
- metric_fn receives a dictionary per instance derived from json in the
batch prediction result. The keys might vary depending on the model.
It should return a tuple of metrics.
- validate_fn receives a dictionary of the averaged metrics that metric_fn
generated over all instances.
The key/value of the dictionary matches to what's given by
metric_fn_and_keys arg.
The dictionary contains an additional metric, 'count' to represent the
total number of instances received for evaluation.
The function should raise an exception to mark the task as failed when the
validation result is not good enough to proceed (i.e. to set the trained
version as default).
Typical examples are like this:
def get_metric_fn_and_keys():
import math # imports should be outside of the metric_fn below.
def error_and_squared_error(inst):
label = float(inst['input_label'])
classes = float(inst['classes']) # 0 or 1
err = abs(classes-label)
squared_err = math.pow(classes-label, 2)
return (err, squared_err) # returns a tuple.
return error_and_squared_error, ['err', 'mse'] # key order must match.
def validate_err_and_count(summary):
if summary['err'] > 0.2:
raise ValueError('Too high err>0.2; summary=%s' % summary)
if summary['mse'] > 0.05:
raise ValueError('Too high mse>0.05; summary=%s' % summary)
if summary['count'] < 1000:
raise ValueError('Too few instances<1000; summary=%s' % summary)
return summary
For the details on the other BatchPrediction-related arguments (project_id,
job_id, region, data_format, input_paths, prediction_path, model_uri),
please refer to MLEngineBatchPredictionOperator too.
:param task_prefix: a prefix for the tasks. Only alphanumeric characters and
hyphen are allowed (no underscores), since this will be used as dataflow
job name, which doesn't allow other characters.
:type task_prefix: str
:param data_format: either of 'TEXT', 'TF_RECORD', 'TF_RECORD_GZIP'
:type data_format: str
:param input_paths: a list of input paths to be sent to BatchPrediction.
:type input_paths: list[str]
:param prediction_path: GCS path to put the prediction results in.
:type prediction_path: str
:param metric_fn_and_keys: a tuple of metric_fn and metric_keys:
- metric_fn is a function that accepts a dictionary (for an instance),
and returns a tuple of metric(s) that it calculates.
- metric_keys is a list of strings to denote the key of each metric.
:type metric_fn_and_keys: tuple of a function and a list[str]
:param validate_fn: a function to validate whether the averaged metric(s) is
good enough to push the model.
:type validate_fn: function
:param batch_prediction_job_id: the id to use for the Cloud ML Batch
prediction job. Passed directly to the MLEngineBatchPredictionOperator as
the job_id argument.
:type batch_prediction_job_id: str
:param project_id: the Google Cloud Platform project id in which to execute
Cloud ML Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['project_id']` will be used.
:type project_id: str
:param region: the Google Cloud Platform region in which to execute Cloud ML
Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['region']` will be used.
:type region: str
:param dataflow_options: options to run Dataflow jobs. If None, then the
`dag`'s `default_args['dataflow_default_options']` will be used.
:type dataflow_options: dictionary
:param model_uri: GCS path of the model exported by Tensorflow using
tensorflow.estimator.export_savedmodel(). It cannot be used with
model_name or version_name below. See MLEngineBatchPredictionOperator for
more detail.
:type model_uri: str
:param model_name: Used to indicate a model to use for prediction. Can be
used in combination with version_name, but cannot be used together with
model_uri. See MLEngineBatchPredictionOperator for more detail. If None,
then the `dag`'s `default_args['model_name']` will be used.
:type model_name: str
:param version_name: Used to indicate a model version to use for prediction,
in combination with model_name. Cannot be used together with model_uri.
See MLEngineBatchPredictionOperator for more detail. If None, then the
`dag`'s `default_args['version_name']` will be used.
:type version_name: str
:param dag: The `DAG` to use for all Operators.
:type dag: airflow.models.DAG
:returns: a tuple of three operators, (prediction, summary, validation)
:rtype: tuple(DataFlowPythonOperator, DataFlowPythonOperator,
PythonOperator)
"""
# Verify that task_prefix doesn't have any special characters except hyphen
# '-', which is the only allowed non-alphanumeric character by Dataflow.
if not re.match(r"^[a-zA-Z][-A-Za-z0-9]*$", task_prefix):
raise AirflowException(
"Malformed task_id for DataFlowPythonOperator (only alphanumeric "
"and hyphens are allowed but got: " + task_prefix)
metric_fn, metric_keys = metric_fn_and_keys
if not callable(metric_fn):
raise AirflowException("`metric_fn` param must be callable.")
if not callable(validate_fn):
raise AirflowException("`validate_fn` param must be callable.")
if dag is not None and dag.default_args is not None:
default_args = dag.default_args
project_id = project_id or default_args.get('project_id')
region = region or default_args.get('region')
model_name = model_name or default_args.get('model_name')
version_name = version_name or default_args.get('version_name')
dataflow_options = dataflow_options or \
default_args.get('dataflow_default_options')
evaluate_prediction = MLEngineBatchPredictionOperator(
task_id=(task_prefix + "-prediction"),
project_id=project_id,
job_id=batch_prediction_job_id,
region=region,
data_format=data_format,
input_paths=input_paths,
output_path=prediction_path,
uri=model_uri,
model_name=model_name,
version_name=version_name,
dag=dag)
metric_fn_encoded = base64.b64encode(dill.dumps(metric_fn, recurse=True))
evaluate_summary = DataFlowPythonOperator(
task_id=(task_prefix + "-summary"),
py_options=["-m"],
py_file="airflow.contrib.utils.mlengine_prediction_summary",
dataflow_default_options=dataflow_options,
options={
"prediction_path": prediction_path,
"metric_fn_encoded": metric_fn_encoded,
"metric_keys": ','.join(metric_keys)
},
dag=dag)
evaluate_summary.set_upstream(evaluate_prediction)
def apply_validate_fn(*args, **kwargs):
prediction_path = kwargs["templates_dict"]["prediction_path"]
scheme, bucket, obj, _, _ = urlsplit(prediction_path)
if scheme != "gs" or not bucket or not obj:
raise ValueError("Wrong format prediction_path: {}".format(
    prediction_path))
summary = os.path.join(obj.strip("/"),
"prediction.summary.json")
gcs_hook = GoogleCloudStorageHook()
summary = json.loads(gcs_hook.download(bucket, summary))
return validate_fn(summary)
evaluate_validation = PythonOperator(
task_id=(task_prefix + "-validation"),
python_callable=apply_validate_fn,
provide_context=True,
templates_dict={"prediction_path": prediction_path},
dag=dag)
evaluate_validation.set_upstream(evaluate_summary)
return evaluate_prediction, evaluate_summary, evaluate_validation |
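A sketch of how the three returned operators might be wired into a DAG, reusing the metric and validation callables from the docstring example above; the import path, project, bucket and model names are illustrative assumptions.

```python
from datetime import datetime
from airflow import DAG
from airflow.contrib.utils.mlengine_operator_utils import create_evaluate_ops  # assumed path

dag = DAG('example_ml_evaluate', default_args={
    'start_date': datetime(2018, 1, 1),
    'project_id': 'my-gcp-project',                          # hypothetical project
    'region': 'us-central1',
    'dataflow_default_options': {'tempLocation': 'gs://my-bucket/tmp'},
})

metric_fn_and_keys = get_metric_fn_and_keys()                # from the docstring example above
pred, summary, validation = create_evaluate_ops(
    task_prefix='eval-flights',                              # alphanumerics and hyphens only
    data_format='TEXT',
    input_paths=['gs://my-bucket/eval/input*'],
    prediction_path='gs://my-bucket/eval/output',
    metric_fn_and_keys=metric_fn_and_keys,
    validate_fn=validate_err_and_count,                      # from the docstring example above
    model_name='my_model',
    dag=dag)
```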
Creates the directory specified by path, creating intermediate directories as necessary. If the directory already exists, this is a no-op. | def mkdirs(path, mode):
"""
Creates the directory specified by path, creating intermediate directories
as necessary. If directory already exists, this is a no-op.
:param path: The directory to create
:type path: str
:param mode: The mode to give to the directory e.g. 0o755, ignores umask
:type mode: int
"""
try:
o_umask = os.umask(0)
os.makedirs(path, mode)
except OSError:
if not os.path.isdir(path):
raise
finally:
os.umask(o_umask) |
A small helper function to convert a string to a numeric value if appropriate | def _convert_to_float_if_possible(s):
"""
A small helper function to convert a string to a numeric value
if appropriate
:param s: the string to be converted
:type s: str
"""
try:
ret = float(s)
except (ValueError, TypeError):
ret = s
return ret |
Get the current date and time in UTC. | def utcnow():
"""
Get the current date and time in UTC
:return:
"""
# pendulum utcnow() is not used as that sets a TimezoneInfo object
# instead of a Timezone. This is not pickable and also creates issues
# when using replace()
d = dt.datetime.utcnow()
d = d.replace(tzinfo=utc)
return d |
Gets the epoch (1970-01-01) as a timezone-aware datetime in UTC. | def utc_epoch():
"""
Gets the epoch (1970-01-01) as a timezone-aware datetime in UTC
:return:
"""
# pendulum utcnow() is not used as that sets a TimezoneInfo object
# instead of a Timezone. This is not pickable and also creates issues
# when using replace()
d = dt.datetime(1970, 1, 1)
d = d.replace(tzinfo=utc)
return d |
Returns the datetime with the default timezone added if timezone information was not associated. | def convert_to_utc(value):
"""
Returns the datetime with the default timezone added if timezone
information was not associated
:param value: datetime
:return: datetime with tzinfo
"""
if not value:
return value
if not is_localized(value):
value = pendulum.instance(value, TIMEZONE)
return value.astimezone(utc) |
Make a naive datetime.datetime in a given time zone aware. | def make_aware(value, timezone=None):
"""
Make a naive datetime.datetime in a given time zone aware.
:param value: datetime
:param timezone: timezone
:return: localized datetime in settings.TIMEZONE or timezone
"""
if timezone is None:
timezone = TIMEZONE
# Check that we won't overwrite the timezone of an aware datetime.
if is_localized(value):
raise ValueError(
"make_aware expects a naive datetime, got %s" % value)
if hasattr(value, 'fold'):
# For Python 3.6+ we want to do the same thing that pendulum does for Python 3.5,
# i.e. if we move the clock back we want to schedule the run at the time of the
# second instance of the same clock time rather than the first one.
# Fold parameter has no impact in other cases so we can safely set it to 1 here
value = value.replace(fold=1)
if hasattr(timezone, 'localize'):
# This method is available for pytz time zones.
return timezone.localize(value)
elif hasattr(timezone, 'convert'):
# For pendulum
return timezone.convert(value)
else:
# This may be wrong around DST changes!
return value.replace(tzinfo=timezone) |
Make an aware datetime.datetime naive in a given time zone. | def make_naive(value, timezone=None):
"""
Make an aware datetime.datetime naive in a given time zone.
:param value: datetime
:param timezone: timezone
:return: naive datetime
"""
if timezone is None:
timezone = TIMEZONE
# Emulate the behavior of astimezone() on Python < 3.6.
if is_naive(value):
raise ValueError("make_naive() cannot be applied to a naive datetime")
o = value.astimezone(timezone)
# cross library compatibility
naive = dt.datetime(o.year,
o.month,
o.day,
o.hour,
o.minute,
o.second,
o.microsecond)
return naive |
Wrapper around datetime.datetime that adds settings.TIMEZONE if tzinfo not specified | def datetime(*args, **kwargs):
"""
Wrapper around datetime.datetime that adds settings.TIMEZONE if tzinfo not specified
:return: datetime.datetime
"""
if 'tzinfo' not in kwargs:
kwargs['tzinfo'] = TIMEZONE
return dt.datetime(*args, **kwargs) |
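A small sketch showing how the timezone helpers above fit together, assuming they live in airflow.utils.timezone and that pendulum is available (as the code above implies); the zone name and timestamps are arbitrary examples.

```python
import datetime as dt
import pendulum
from airflow.utils import timezone  # assumed module for the helpers above

naive = dt.datetime(2019, 8, 1, 12, 0)
ams = pendulum.timezone('Europe/Amsterdam')            # arbitrary example zone

aware = timezone.make_aware(naive, ams)                # attach a timezone to a naive datetime
in_utc = timezone.convert_to_utc(aware)                # normalize to UTC
back_to_naive = timezone.make_naive(in_utc, ams)       # strip tzinfo again, in that zone

print(aware, in_utc, back_to_naive, timezone.utcnow())
```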
Sets the environment variable GOOGLE_APPLICATION_CREDENTIALS from the connection extras, using either a keyfile path or generated keyfile JSON. | def _set_env_from_extras(self, extras):
"""
Sets the environment variable `GOOGLE_APPLICATION_CREDENTIALS` with either:
- The path to the keyfile from the specified connection id
- A generated file's path if the user specified JSON in the connection id. The
file is assumed to be deleted after the process dies due to how mkstemp()
works.
The environment variable is used inside the gcloud command to determine correct
service account to use.
"""
key_path = self._get_field(extras, 'key_path', False)
keyfile_json_str = self._get_field(extras, 'keyfile_dict', False)
if not key_path and not keyfile_json_str:
self.log.info('Using gcloud with application default credentials.')
elif key_path:
os.environ[G_APP_CRED] = key_path
else:
# Write service account JSON to secure file for gcloud to reference
service_key = tempfile.NamedTemporaryFile(delete=False)
service_key.write(keyfile_json_str)
os.environ[G_APP_CRED] = service_key.name
# Return file object to have a pointer to close after use,
# thus deleting from file system.
return service_key |
Fetches a field from extras and returns it. This is some Airflow magic. The google_cloud_platform hook type adds custom UI elements to the hook page, which allow admins to specify service_account, key_path, etc. They get formatted as shown below. | def _get_field(self, extras, field, default=None):
"""
Fetches a field from extras, and returns it. This is some Airflow
magic. The google_cloud_platform hook type adds custom UI elements
to the hook page, which allow admins to specify service_account,
key_path, etc. They get formatted as shown below.
"""
long_f = 'extra__google_cloud_platform__{}'.format(field)
if long_f in extras:
return extras[long_f]
else:
self.log.info('Field %s not found in extras.', field)
return default |
Establish a connection to druid broker. | def get_conn(self):
"""
Establish a connection to druid broker.
"""
conn = self.get_connection(self.druid_broker_conn_id)
druid_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get('endpoint', '/druid/v2/sql'),
scheme=conn.extra_dejson.get('schema', 'http')
)
self.log.info('Established a connection to the Druid broker on %s', conn.host)
return druid_broker_conn |
Returns http session for use with requests | def get_conn(self, headers=None):
"""
Returns http session for use with requests
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
"""
session = requests.Session()
if self.http_conn_id:
conn = self.get_connection(self.http_conn_id)
if "://" in conn.host:
self.base_url = conn.host
else:
# schema defaults to HTTP
schema = conn.schema if conn.schema else "http"
self.base_url = schema + "://" + conn.host
if conn.port:
self.base_url = self.base_url + ":" + str(conn.port)
if conn.login:
session.auth = (conn.login, conn.password)
if conn.extra:
try:
session.headers.update(conn.extra_dejson)
except TypeError:
self.log.warn('Connection to %s has invalid extra field.', conn.host)
if headers:
session.headers.update(headers)
return session |
Performs the request | def run(self, endpoint, data=None, headers=None, extra_options=None):
"""
Performs the request
:param endpoint: the endpoint to be called i.e. resource/v1/query?
:type endpoint: str
:param data: payload to be uploaded or request parameters
:type data: dict
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
:param extra_options: additional options to be used when executing the request
i.e. {'check_response': False} to avoid checking raising exceptions on non
2XX or 3XX status codes
:type extra_options: dict
"""
extra_options = extra_options or {}
session = self.get_conn(headers)
if self.base_url and not self.base_url.endswith('/') and \
endpoint and not endpoint.startswith('/'):
url = self.base_url + '/' + endpoint
else:
url = (self.base_url or '') + (endpoint or '')
req = None
if self.method == 'GET':
# GET uses params
req = requests.Request(self.method,
url,
params=data,
headers=headers)
elif self.method == 'HEAD':
# HEAD doesn't use params
req = requests.Request(self.method,
url,
headers=headers)
else:
# Others use data
req = requests.Request(self.method,
url,
data=data,
headers=headers)
prepped_request = session.prepare_request(req)
self.log.info("Sending '%s' to url: %s", self.method, url)
return self.run_and_check(session, prepped_request, extra_options) |
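A brief usage sketch for run(); it assumes an HTTP connection id named 'http_default' is configured, and the endpoint and payload are placeholders.

```python
import json
from airflow.hooks.http_hook import HttpHook  # assumed import path

hook = HttpHook(method='POST', http_conn_id='http_default')
response = hook.run(
    endpoint='api/v1/items',                                  # hypothetical endpoint
    data=json.dumps({'name': 'widget'}),
    headers={'Content-Type': 'application/json'},
    extra_options={'timeout': 30, 'check_response': True})
print(response.status_code, response.text)
```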
Checks the status code and raise an AirflowException exception on non 2XX or 3XX status codes | def check_response(self, response):
"""
Checks the status code and raise an AirflowException exception on non 2XX or 3XX
status codes
:param response: A requests response object
:type response: requests.response
"""
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
self.log.error("HTTP error: %s", response.reason)
if self.method not in ['GET', 'HEAD']:
self.log.error(response.text)
raise AirflowException(str(response.status_code) + ":" + response.reason) |
Grabs extra options like timeout and actually runs the request checking for the result | def run_and_check(self, session, prepped_request, extra_options):
"""
Grabs extra options like timeout and actually runs the request,
checking for the result
:param session: the session to be used to execute the request
:type session: requests.Session
:param prepped_request: the prepared request generated in run()
:type prepped_request: session.prepare_request
:param extra_options: additional options to be used when executing the request
i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX
or 3XX status codes
:type extra_options: dict
"""
extra_options = extra_options or {}
try:
response = session.send(
prepped_request,
stream=extra_options.get("stream", False),
verify=extra_options.get("verify", True),
proxies=extra_options.get("proxies", {}),
cert=extra_options.get("cert"),
timeout=extra_options.get("timeout"),
allow_redirects=extra_options.get("allow_redirects", True))
if extra_options.get('check_response', True):
self.check_response(response)
return response
except requests.exceptions.ConnectionError as ex:
self.log.warn(str(ex) + ' Tenacity will retry to execute the operation')
raise ex |
Runs Hook.run() with a Tenacity decorator attached to it. This is useful for connectors which might be disturbed by intermittent issues and should not instantly fail. | def run_with_advanced_retry(self, _retry_args, *args, **kwargs):
"""
Runs Hook.run() with a Tenacity decorator attached to it. This is useful for
connectors which might be disturbed by intermittent issues and should not
instantly fail.
:param _retry_args: Arguments which define the retry behaviour.
See Tenacity documentation at https://github.com/jd/tenacity
:type _retry_args: dict
:Example::
hook = HttpHook(http_conn_id='my_conn',method='GET')
retry_args = dict(
wait=tenacity.wait_exponential(),
stop=tenacity.stop_after_attempt(10),
retry=requests.exceptions.ConnectionError
)
hook.run_with_advanced_retry(
endpoint='v1/test',
_retry_args=retry_args
)
"""
self._retry_obj = tenacity.Retrying(
**_retry_args
)
return self._retry_obj(self.run, *args, **kwargs) |
Contextmanager that will create and teardown a session. | def create_session():
"""
Contextmanager that will create and teardown a session.
"""
session = settings.Session()
try:
yield session
session.commit()
except Exception:
session.rollback()
raise
finally:
session.close() |
Function decorator that provides a session if it isn't provided. If you want to reuse a session or run the function as part of a database transaction, you pass it to the function; if not, this wrapper will create one and close it for you. | def provide_session(func):
"""
Function decorator that provides a session if it isn't provided.
If you want to reuse a session or run the function as part of a
database transaction, you pass it to the function, if not this wrapper
will create one and close it for you.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_session = 'session'
func_params = func.__code__.co_varnames
session_in_args = arg_session in func_params and \
func_params.index(arg_session) < len(args)
session_in_kwargs = arg_session in kwargs
if session_in_kwargs or session_in_args:
return func(*args, **kwargs)
else:
with create_session() as session:
kwargs[arg_session] = session
return func(*args, **kwargs)
return wrapper |
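A sketch of how the decorator above is typically used: the decorated function declares a session keyword argument, and a session is injected only when the caller does not supply one. The import paths are assumptions.

```python
from airflow.models import TaskInstance
from airflow.utils.db import provide_session  # assumed import path
from airflow.utils.state import State

@provide_session
def count_running(dag_id, session=None):
    """Count running task instances for a DAG using the injected session."""
    return (session.query(TaskInstance)
                   .filter(TaskInstance.dag_id == dag_id,
                           TaskInstance.state == State.RUNNING)
                   .count())

# No session passed, so the wrapper opens, commits and closes one for us.
print(count_running('example_dag_id'))
```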
Clear out the database | def resetdb():
"""
Clear out the database
"""
from airflow import models
# alembic adds significant import time, so we import it lazily
from alembic.migration import MigrationContext
log.info("Dropping tables that exist")
models.base.Base.metadata.drop_all(settings.engine)
mc = MigrationContext.configure(settings.engine)
if mc._version.exists(settings.engine):
mc._version.drop(settings.engine)
from flask_appbuilder.models.sqla import Base
Base.metadata.drop_all(settings.engine)
initdb() |
Upload a file to Azure Blob Storage. | def execute(self, context):
"""Upload a file to Azure Blob Storage."""
hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
self.log.info(
    'Uploading %s to wasb://%s as %s',
    self.file_path, self.container_name, self.blob_name
)
hook.load_file(self.file_path, self.container_name,
self.blob_name, **self.load_options) |
Returns a connection object | def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(self.presto_conn_id)
reqkwargs = None
if db.password is not None:
reqkwargs = {'auth': HTTPBasicAuth(db.login, db.password)}
return presto.connect(
host=db.host,
port=db.port,
username=db.login,
source=db.extra_dejson.get('source', 'airflow'),
protocol=db.extra_dejson.get('protocol', 'http'),
catalog=db.extra_dejson.get('catalog', 'hive'),
requests_kwargs=reqkwargs,
schema=db.schema) |
Parses some DatabaseError to provide a better error message | def _get_pretty_exception_message(e):
"""
Parses some DatabaseError to provide a better error message
"""
if (hasattr(e, 'message') and
'errorName' in e.message and
'message' in e.message):
return ('{name}: {message}'.format(
name=e.message['errorName'],
message=e.message['message']))
else:
return str(e) |
Get a set of records from Presto | def get_records(self, hql, parameters=None):
"""
Get a set of records from Presto
"""
try:
return super().get_records(
self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(self._get_pretty_exception_message(e)) |
Get a pandas dataframe from a sql query. | def get_pandas_df(self, hql, parameters=None):
"""
Get a pandas dataframe from a sql query.
"""
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self._strip_sql(hql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(self._get_pretty_exception_message(e))
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame()
return df |
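A short usage sketch for the Presto hook methods above; the connection id, table and query are illustrative placeholders.

```python
from airflow.hooks.presto_hook import PrestoHook  # assumed import path

hook = PrestoHook(presto_conn_id='presto_default')

# Fetch rows as a list of tuples...
rows = hook.get_records("SELECT origin, count(*) FROM flights GROUP BY origin")

# ...or pull the same result straight into a pandas DataFrame.
df = hook.get_pandas_df("SELECT origin, count(*) AS cnt FROM flights GROUP BY origin")
print(df.head())
```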
Execute the statement against Presto. Can be used to create views. | def run(self, hql, parameters=None):
"""
Execute the statement against Presto. Can be used to create views.
"""
return super().run(self._strip_sql(hql), parameters) |
A generic way to insert a set of tuples into a table. | def insert_rows(self, table, rows, target_fields=None):
"""
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
"""
super().insert_rows(table, rows, target_fields, 0) |
Return a cosmos db client. | def get_conn(self):
"""
Return a cosmos db client.
"""
if self.cosmos_client is not None:
return self.cosmos_client
# Initialize the Python Azure Cosmos DB client
self.cosmos_client = cosmos_client.CosmosClient(self.endpoint_uri, {'masterKey': self.master_key})
return self.cosmos_client |
Checks if a collection exists in CosmosDB. | def does_collection_exist(self, collection_name, database_name=None):
"""
Checks if a collection exists in CosmosDB.
"""
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
existing_container = list(self.get_conn().QueryContainers(
get_database_link(self.__get_database_name(database_name)), {
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{"name": "@id", "value": collection_name}
]
}))
if len(existing_container) == 0:
return False
return True |
Creates a new collection in the CosmosDB database. | def create_collection(self, collection_name, database_name=None):
"""
Creates a new collection in the CosmosDB database.
"""
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
# We need to check to see if this container already exists so we don't try
# to create it twice
existing_container = list(self.get_conn().QueryContainers(
get_database_link(self.__get_database_name(database_name)), {
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{"name": "@id", "value": collection_name}
]
}))
# Only create if we did not find it already existing
if len(existing_container) == 0:
self.get_conn().CreateContainer(
get_database_link(self.__get_database_name(database_name)),
{"id": collection_name}) |
Checks if a database exists in CosmosDB. | def does_database_exist(self, database_name):
"""
Checks if a database exists in CosmosDB.
"""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
existing_database = list(self.get_conn().QueryDatabases({
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{"name": "@id", "value": database_name}
]
}))
if len(existing_database) == 0:
return False
return True |
Creates a new database in CosmosDB. | def create_database(self, database_name):
"""
Creates a new database in CosmosDB.
"""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
# We need to check to see if this database already exists so we don't try
# to create it twice
existing_database = list(self.get_conn().QueryDatabases({
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{"name": "@id", "value": database_name}
]
}))
# Only create if we did not find it already existing
if len(existing_database) == 0:
self.get_conn().CreateDatabase({"id": database_name}) |
Deletes an existing database in CosmosDB. | def delete_database(self, database_name):
"""
Deletes an existing database in CosmosDB.
"""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
self.get_conn().DeleteDatabase(get_database_link(database_name)) |
Deletes an existing collection in the CosmosDB database. | def delete_collection(self, collection_name, database_name=None):
"""
Deletes an existing collection in the CosmosDB database.
"""
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
self.get_conn().DeleteContainer(
get_collection_link(self.__get_database_name(database_name), collection_name)) |
Inserts a new document (or updates an existing one) into an existing collection in the CosmosDB database. | def upsert_document(self, document, database_name=None, collection_name=None, document_id=None):
"""
Inserts a new document (or updates an existing one) into an existing
collection in the CosmosDB database.
"""
# Assign unique ID if one isn't provided
if document_id is None:
document_id = str(uuid.uuid4())
if document is None:
raise AirflowBadRequest("You cannot insert a None document")
# Add a document id if one isn't found
if 'id' in document:
if document['id'] is None:
document['id'] = document_id
else:
document['id'] = document_id
created_document = self.get_conn().CreateItem(
get_collection_link(
self.__get_database_name(database_name),
self.__get_collection_name(collection_name)),
document)
return created_document |
Insert a list of new documents into an existing collection in the CosmosDB database. | def insert_documents(self, documents, database_name=None, collection_name=None):
"""
Insert a list of new documents into an existing collection in the CosmosDB database.
"""
if documents is None:
raise AirflowBadRequest("You cannot insert empty documents")
created_documents = []
for single_document in documents:
created_documents.append(
self.get_conn().CreateItem(
get_collection_link(
self.__get_database_name(database_name),
self.__get_collection_name(collection_name)),
single_document))
return created_documents |
Delete an existing document out of a collection in the CosmosDB database. | def delete_document(self, document_id, database_name=None, collection_name=None):
"""
Delete an existing document out of a collection in the CosmosDB database.
"""
if document_id is None:
raise AirflowBadRequest("Cannot delete a document without an id")
self.get_conn().DeleteItem(
get_document_link(
self.__get_database_name(database_name),
self.__get_collection_name(collection_name),
document_id)) |
Get a document from an existing collection in the CosmosDB database. | def get_document(self, document_id, database_name=None, collection_name=None):
"""
Get a document from an existing collection in the CosmosDB database.
"""
if document_id is None:
raise AirflowBadRequest("Cannot get a document without an id")
try:
return self.get_conn().ReadItem(
get_document_link(
self.__get_database_name(database_name),
self.__get_collection_name(collection_name),
document_id))
except HTTPFailure:
return None |
Get a list of documents from an existing collection in the CosmosDB database via SQL query. | def get_documents(self, sql_string, database_name=None, collection_name=None, partition_key=None):
"""
Get a list of documents from an existing collection in the CosmosDB database via SQL query.
"""
if sql_string is None:
raise AirflowBadRequest("SQL query string cannot be None")
# Query them in SQL
query = {'query': sql_string}
try:
result_iterable = self.get_conn().QueryItems(
get_collection_link(
self.__get_database_name(database_name),
self.__get_collection_name(collection_name)),
query,
partition_key)
return list(result_iterable)
except HTTPFailure:
return None |
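A small end-to-end sketch exercising the CosmosDB helpers above; it assumes the surrounding class is Airflow's AzureCosmosDBHook with a configured connection, and the database, collection and document values are placeholders.

```python
from airflow.contrib.hooks.azure_cosmos_hook import AzureCosmosDBHook  # assumed import path

hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_default')

if not hook.does_database_exist('airflow_example_db'):        # hypothetical names throughout
    hook.create_database('airflow_example_db')
if not hook.does_collection_exist('events', database_name='airflow_example_db'):
    hook.create_collection('events', database_name='airflow_example_db')

hook.upsert_document({'event': 'dag_started', 'dag_id': 'example'},
                     database_name='airflow_example_db',
                     collection_name='events',
                     document_id='run-0001')

docs = hook.get_documents("SELECT * FROM c WHERE c.dag_id = 'example'",
                          database_name='airflow_example_db',
                          collection_name='events')
print(docs)
```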
Return python code of a given dag_id. | def get_code(dag_id):
"""Return python code of a given dag_id."""
session = settings.Session()
DM = models.DagModel
dag = session.query(DM).filter(DM.dag_id == dag_id).first()
session.close()
# Check DAG exists.
if dag is None:
error_message = "Dag id {} not found".format(dag_id)
raise DagNotFound(error_message)
try:
with wwwutils.open_maybe_zipped(dag.fileloc, 'r') as f:
code = f.read()
return code
except IOError as e:
error_message = "Error {} while reading Dag id {} Code".format(str(e), dag_id)
raise AirflowException(error_message) |
Returns the Cloud Function with the given name. | def get_function(self, name):
"""
Returns the Cloud Function with the given name.
:param name: Name of the function.
:type name: str
:return: A Cloud Functions object representing the function.
:rtype: dict
"""
return self.get_conn().projects().locations().functions().get(
name=name).execute(num_retries=self.num_retries) |
Creates a new function in Cloud Function in the location specified in the body. | def create_new_function(self, location, body, project_id=None):
"""
Creates a new function in Cloud Function in the location specified in the body.
:param location: The location of the function.
:type location: str
:param body: The body required by the Cloud Functions insert API.
:type body: dict
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().projects().locations().functions().create(
location=self._full_location(project_id, location),
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name) |
Updates Cloud Functions according to the specified update mask. | def update_function(self, name, body, update_mask):
"""
Updates Cloud Functions according to the specified update mask.
:param name: The name of the function.
:type name: str
:param body: The body required by the cloud function patch API.
:type body: dict
:param update_mask: The update mask - array of fields that should be patched.
:type update_mask: [str]
:return: None
"""
response = self.get_conn().projects().locations().functions().patch(
updateMask=",".join(update_mask),
name=name,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name) |
Uploads zip file with sources. | def upload_function_zip(self, location, zip_path, project_id=None):
"""
Uploads zip file with sources.
:param location: The location where the function is created.
:type location: str
:param zip_path: The path of the valid .zip file to upload.
:type zip_path: str
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: The upload URL that was returned by generateUploadUrl method.
"""
response = self.get_conn().projects().locations().functions().generateUploadUrl(
parent=self._full_location(project_id, location)
).execute(num_retries=self.num_retries)
upload_url = response.get('uploadUrl')
with open(zip_path, 'rb') as fp:
requests.put(
url=upload_url,
data=fp,
# These two headers need to be specified according to:
# https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions/generateUploadUrl
# nopep8
headers={
'Content-type': 'application/zip',
'x-goog-content-length-range': '0,104857600',
}
)
return upload_url |
Deletes the specified Cloud Function. | def delete_function(self, name):
"""
Deletes the specified Cloud Function.
:param name: The name of the function.
:type name: str
:return: None
"""
response = self.get_conn().projects().locations().functions().delete(
name=name).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name) |
Waits for the named operation to complete - checks status of the asynchronous call. | def _wait_for_operation_to_complete(self, operation_name):
"""
Waits for the named operation to complete - checks status of the
asynchronous call.
:param operation_name: The name of the operation.
:type operation_name: str
:return: The response returned by the operation.
:rtype: dict
:exception: AirflowException in case error is returned.
"""
service = self.get_conn()
while True:
operation_response = service.operations().get(
name=operation_name,
).execute(num_retries=self.num_retries)
if operation_response.get("done"):
response = operation_response.get("response")
error = operation_response.get("error")
# Note, according to documentation always either response or error is
# set when "done" == True
if error:
raise AirflowException(str(error))
return response
time.sleep(TIME_TO_SLEEP_IN_SECONDS) |
Publishes messages to a Pub/Sub topic. | def publish(self, project, topic, messages):
"""Publishes messages to a Pub/Sub topic.
:param project: the GCP project ID in which to publish
:type project: str
:param topic: the Pub/Sub topic to which to publish; do not
include the ``projects/{project}/topics/`` prefix.
:type topic: str
:param messages: messages to publish; if the data field in a
message is set, it should already be base64 encoded.
:type messages: list of PubSub messages; see
http://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
"""
body = {'messages': messages}
full_topic = _format_topic(project, topic)
request = self.get_conn().projects().topics().publish(
topic=full_topic, body=body)
try:
request.execute(num_retries=self.num_retries)
except HttpError as e:
raise PubSubException(
'Error publishing to topic {}'.format(full_topic), e) |
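A short sketch of publishing with the hook above; per the docstring, the data field must already be base64 encoded. The project and topic names are placeholders.

```python
from base64 import b64encode
from airflow.contrib.hooks.gcp_pubsub_hook import PubSubHook  # assumed import path

hook = PubSubHook(gcp_conn_id='google_cloud_default')
messages = [
    {'data': b64encode(b'hello pubsub').decode('utf-8')},
    {'attributes': {'source': 'airflow'}},                 # attribute-only message
]
hook.publish('my-gcp-project', 'my-topic', messages)       # hypothetical project/topic
```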
Creates a Pub/Sub topic if it does not already exist. | def create_topic(self, project, topic, fail_if_exists=False):
"""Creates a Pub/Sub topic, if it does not already exist.
:param project: the GCP project ID in which to create
the topic
:type project: str
:param topic: the Pub/Sub topic name to create; do not
include the ``projects/{project}/topics/`` prefix.
:type topic: str
:param fail_if_exists: if set, raise an exception if the topic
already exists
:type fail_if_exists: bool
"""
service = self.get_conn()
full_topic = _format_topic(project, topic)
try:
service.projects().topics().create(
name=full_topic, body={}).execute(num_retries=self.num_retries)
except HttpError as e:
# Status code 409 indicates that the topic already exists.
if str(e.resp['status']) == '409':
message = 'Topic already exists: {}'.format(full_topic)
self.log.warning(message)
if fail_if_exists:
raise PubSubException(message)
else:
raise PubSubException(
'Error creating topic {}'.format(full_topic), e) |
Deletes a Pub/Sub topic if it exists. | def delete_topic(self, project, topic, fail_if_not_exists=False):
"""Deletes a Pub/Sub topic if it exists.
:param project: the GCP project ID in which to delete the topic
:type project: str
:param topic: the Pub/Sub topic name to delete; do not
include the ``projects/{project}/topics/`` prefix.
:type topic: str
:param fail_if_not_exists: if set, raise an exception if the topic
does not exist
:type fail_if_not_exists: bool
"""
service = self.get_conn()
full_topic = _format_topic(project, topic)
try:
service.projects().topics().delete(topic=full_topic).execute(num_retries=self.num_retries)
except HttpError as e:
# Status code 404 indicates that the topic was not found
if str(e.resp['status']) == '404':
message = 'Topic does not exist: {}'.format(full_topic)
self.log.warning(message)
if fail_if_not_exists:
raise PubSubException(message)
else:
raise PubSubException(
'Error deleting topic {}'.format(full_topic), e) |
Creates a Pub/Sub subscription if it does not already exist. | def create_subscription(self, topic_project, topic, subscription=None,
subscription_project=None, ack_deadline_secs=10,
fail_if_exists=False):
"""Creates a Pub/Sub subscription, if it does not already exist.
:param topic_project: the GCP project ID of the topic that the
subscription will be bound to.
:type topic_project: str
:param topic: the Pub/Sub topic name that the subscription will be bound
to; do not include the ``projects/{project}/topics/``
prefix.
:type topic: str
:param subscription: the Pub/Sub subscription name. If empty, a random
name will be generated using the uuid module
:type subscription: str
:param subscription_project: the GCP project ID where the subscription
will be created. If unspecified, ``topic_project`` will be used.
:type subscription_project: str
:param ack_deadline_secs: Number of seconds that a subscriber has to
acknowledge each message pulled from the subscription
:type ack_deadline_secs: int
:param fail_if_exists: if set, raise an exception if the subscription
already exists
:type fail_if_exists: bool
:return: subscription name which will be the system-generated value if
the ``subscription`` parameter is not supplied
:rtype: str
"""
service = self.get_conn()
full_topic = _format_topic(topic_project, topic)
if not subscription:
subscription = 'sub-{}'.format(uuid4())
if not subscription_project:
subscription_project = topic_project
full_subscription = _format_subscription(subscription_project,
subscription)
body = {
'topic': full_topic,
'ackDeadlineSeconds': ack_deadline_secs
}
try:
service.projects().subscriptions().create(
name=full_subscription, body=body).execute(num_retries=self.num_retries)
except HttpError as e:
# Status code 409 indicates that the subscription already exists.
if str(e.resp['status']) == '409':
message = 'Subscription already exists: {}'.format(
full_subscription)
self.log.warning(message)
if fail_if_exists:
raise PubSubException(message)
else:
raise PubSubException(
'Error creating subscription {}'.format(full_subscription),
e)
return subscription |
Deletes a Pub/Sub subscription if it exists. | def delete_subscription(self, project, subscription,
fail_if_not_exists=False):
"""Deletes a Pub/Sub subscription, if it exists.
:param project: the GCP project ID where the subscription exists
:type project: str
:param subscription: the Pub/Sub subscription name to delete; do not
include the ``projects/{project}/subscriptions/`` prefix.
:type subscription: str
:param fail_if_not_exists: if set, raise an exception if the subscription
does not exist
:type fail_if_not_exists: bool
"""
service = self.get_conn()
full_subscription = _format_subscription(project, subscription)
try:
service.projects().subscriptions().delete(
subscription=full_subscription).execute(num_retries=self.num_retries)
except HttpError as e:
# Status code 404 indicates that the subscription was not found
if str(e.resp['status']) == '404':
message = 'Subscription does not exist: {}'.format(
full_subscription)
self.log.warning(message)
if fail_if_not_exists:
raise PubSubException(message)
else:
raise PubSubException(
'Error deleting subscription {}'.format(full_subscription),
e) |
Pulls up to max_messages messages from a Pub/Sub subscription. | def pull(self, project, subscription, max_messages,
return_immediately=False):
"""Pulls up to ``max_messages`` messages from Pub/Sub subscription.
:param project: the GCP project ID where the subscription exists
:type project: str
:param subscription: the Pub/Sub subscription name to pull from; do not
include the ``projects/{project}/subscriptions/`` prefix.
:type subscription: str
:param max_messages: The maximum number of messages to return from
the Pub/Sub API.
:type max_messages: int
:param return_immediately: If set, the Pub/Sub API will immediately
return if no messages are available. Otherwise, the request will
block for an undisclosed, but bounded period of time
:type return_immediately: bool
:return: A list of Pub/Sub ReceivedMessage objects each containing
an ``ackId`` property and a ``message`` property, which includes
the base64-encoded message content. See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#ReceivedMessage
"""
service = self.get_conn()
full_subscription = _format_subscription(project, subscription)
body = {
'maxMessages': max_messages,
'returnImmediately': return_immediately
}
try:
response = service.projects().subscriptions().pull(
subscription=full_subscription, body=body).execute(num_retries=self.num_retries)
return response.get('receivedMessages', [])
except HttpError as e:
raise PubSubException(
'Error pulling messages from subscription {}'.format(
full_subscription), e) |
Acknowledges messages pulled from a Pub/Sub subscription. | def acknowledge(self, project, subscription, ack_ids):
"""Acknowledges the messages associated with the given ack_ids from a Pub/Sub subscription.
:param project: the GCP project ID in which the subscription exists
:type project: str
:param subscription: the Pub/Sub subscription name to acknowledge messages for; do not
include the ``projects/{project}/subscriptions/`` prefix.
:type subscription: str
:param ack_ids: List of ReceivedMessage ackIds from a previous pull
response
:type ack_ids: list
"""
service = self.get_conn()
full_subscription = _format_subscription(project, subscription)
try:
service.projects().subscriptions().acknowledge(
subscription=full_subscription, body={'ackIds': ack_ids}
).execute(num_retries=self.num_retries)
except HttpError as e:
raise PubSubException(
'Error acknowledging {} messages pulled from subscription {}'
.format(len(ack_ids), full_subscription), e) |
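A sketch of the usual pull-then-acknowledge loop built from the two methods above; the project and subscription names are placeholders.

```python
from airflow.contrib.hooks.gcp_pubsub_hook import PubSubHook  # assumed import path

hook = PubSubHook(gcp_conn_id='google_cloud_default')

# Pull up to 10 messages without blocking if none are available.
received = hook.pull('my-gcp-project', 'my-subscription',      # hypothetical names
                     max_messages=10, return_immediately=True)

for msg in received:
    print(msg['message'].get('data'))                          # still base64 encoded

# Acknowledge everything we pulled so it is not redelivered.
if received:
    hook.acknowledge('my-gcp-project', 'my-subscription',
                     [msg['ackId'] for msg in received])
```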
Wrapper around the private _get_dep_statuses method that contains some global checks for all dependencies. | def get_dep_statuses(self, ti, session, dep_context=None):
"""
Wrapper around the private _get_dep_statuses method that contains some global
checks for all dependencies.
:param ti: the task instance to get the dependency status for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: the context for which this dependency should be evaluated for
:type dep_context: DepContext
"""
# this avoids a circular dependency
from airflow.ti_deps.dep_context import DepContext
if dep_context is None:
dep_context = DepContext()
if self.IGNOREABLE and dep_context.ignore_all_deps:
yield self._passing_status(
reason="Context specified all dependencies should be ignored.")
return
if self.IS_TASK_DEP and dep_context.ignore_task_deps:
yield self._passing_status(
reason="Context specified all task dependencies should be ignored.")
return
for dep_status in self._get_dep_statuses(ti, session, dep_context):
yield dep_status |
Returns whether or not this dependency is met for a given task instance. A dependency is considered met if all of the dependency statuses it reports are passing. | def is_met(self, ti, session, dep_context=None):
"""
Returns whether or not this dependency is met for a given task instance. A
dependency is considered met if all of the dependency statuses it reports are
passing.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext
"""
return all(status.passed for status in
self.get_dep_statuses(ti, session, dep_context)) |
Returns an iterable of strings that explain why this dependency wasn't met. | def get_failure_reasons(self, ti, session, dep_context=None):
"""
Returns an iterable of strings that explain why this dependency wasn't met.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext
"""
for dep_status in self.get_dep_statuses(ti, session, dep_context):
if not dep_status.passed:
yield dep_status.reason |
Parses a config file for s3 credentials. Can currently parse boto, s3cmd.conf and AWS SDK config formats | def _parse_s3_config(config_file_name, config_format='boto', profile=None):
"""
Parses a config file for s3 credentials. Can currently
parse boto, s3cmd.conf and AWS SDK config formats
:param config_file_name: path to the config file
:type config_file_name: str
:param config_format: config type. One of "boto", "s3cmd" or "aws".
Defaults to "boto"
:type config_format: str
:param profile: profile name in AWS type config file
:type profile: str
"""
config = configparser.ConfigParser()
if config.read(config_file_name): # pragma: no cover
sections = config.sections()
else:
raise AirflowException("Couldn't read {0}".format(config_file_name))
# Setting option names depending on file format
if config_format is None:
config_format = 'boto'
conf_format = config_format.lower()
if conf_format == 'boto': # pragma: no cover
if profile is not None and 'profile ' + profile in sections:
cred_section = 'profile ' + profile
else:
cred_section = 'Credentials'
elif conf_format == 'aws' and profile is not None:
cred_section = profile
else:
cred_section = 'default'
# Option names
if conf_format in ('boto', 'aws'): # pragma: no cover
key_id_option = 'aws_access_key_id'
secret_key_option = 'aws_secret_access_key'
# security_token_option = 'aws_security_token'
else:
key_id_option = 'access_key'
secret_key_option = 'secret_key'
# Actual Parsing
if cred_section not in sections:
raise AirflowException("This config file format is not recognized")
else:
try:
access_key = config.get(cred_section, key_id_option)
secret_key = config.get(cred_section, secret_key_option)
except Exception:
logging.warning("Option Error in parsing s3 config file")
raise
return access_key, secret_key |
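A minimal sketch of parsing a boto-style credentials file with the helper above; the import path is an assumption and the key values are placeholders.

```python
import tempfile
import textwrap
from airflow.contrib.hooks.aws_hook import _parse_s3_config  # assumed import path

# Write a boto-style credentials file; the key values are placeholders.
config_text = textwrap.dedent("""\
    [Credentials]
    aws_access_key_id = AKIAEXAMPLEKEY
    aws_secret_access_key = example-secret-value
""")
with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as f:
    f.write(config_text)

access_key, secret_key = _parse_s3_config(f.name, config_format='boto')
print(access_key, secret_key)
```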
Get the underlying botocore.Credentials object. | def get_credentials(self, region_name=None):
"""Get the underlying `botocore.Credentials` object.
This contains the following authentication attributes: access_key, secret_key and token.
"""
session, _ = self._get_credentials(region_name)
# Credentials are refreshable, so accessing your access key and
# secret key separately can lead to a race condition.
# See https://stackoverflow.com/a/36291428/8283373
return session.get_credentials().get_frozen_credentials() |
If the IAM role is a role name, get the Amazon Resource Name (ARN) for the role. If the IAM role is already an IAM role ARN, no change is made. | def expand_role(self, role):
"""
If the IAM role is a role name, get the Amazon Resource Name (ARN) for the role.
If IAM role is already an IAM role ARN, no change is made.
:param role: IAM role name or ARN
:return: IAM role ARN
"""
if '/' in role:
return role
else:
return self.get_client_type('iam').get_role(RoleName=role)['Role']['Arn'] |
Returns a Vertica connection object | def get_conn(self):
"""
Returns a Vertica connection object
"""
conn = self.get_connection(self.vertica_conn_id)
conn_config = {
"user": conn.login,
"password": conn.password or '',
"database": conn.schema,
"host": conn.host or 'localhost'
}
if not conn.port:
conn_config["port"] = 5433
else:
conn_config["port"] = int(conn.port)
conn = connect(**conn_config)
return conn |
Walks the tree of loggers and tries to set the context for each handler. | def set_context(logger, value):
"""
Walks the tree of loggers and tries to set the context for each handler
:param logger: logger
:param value: value to set
"""
_logger = logger
while _logger:
for handler in _logger.handlers:
try:
handler.set_context(value)
except AttributeError:
# Not all handlers need to have context passed in so we ignore
# the error when handlers do not have set_context defined.
pass
if _logger.propagate is True:
_logger = _logger.parent
else:
_logger = None |
Do whatever it takes to actually log the specified logging record. | def write(self, message):
"""
Do whatever it takes to actually log the specified logging record
:param message: message to log
"""
if not message.endswith("\n"):
self._buffer += message
else:
self._buffer += message
self.logger.log(self.level, self._buffer.rstrip())
self._buffer = str() |
Ensure all logging output has been flushed | def flush(self):
"""
Ensure all logging output has been flushed
"""
if len(self._buffer) > 0:
self.logger.log(self.level, self._buffer)
self._buffer = str() |
If the path contains a folder with a .zip suffix, then the folder is treated as a zip archive and the path to the zip is returned. | def correct_maybe_zipped(fileloc):
"""
If the path contains a folder with a .zip suffix, then
the folder is treated as a zip archive and path to zip is returned.
"""
_, archive, filename = re.search(
r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return archive
else:
return fileloc |
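To make the regex above concrete, a tiny illustrative check with made-up paths (on a POSIX system, where os.sep is '/'):
import os
import re

for fileloc in ('/dags/bundle.zip/inner_dag.py', '/dags/plain_dag.py'):
    _, archive, filename = re.search(
        r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
    print(archive, filename)
# -> /dags/bundle.zip inner_dag.py
# -> None /dags/plain_dag.py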
Traverse a directory and look for Python files. | def list_py_file_paths(directory, safe_mode=True,
include_examples=None):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
if include_examples is None:
include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns_by_dir = {}
for root, dirs, files in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as f:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
                    patterns = patterns + [re.compile(p) for p in f.read().split('\n') if p]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [
d
for d in dirs
if not any(p.search(os.path.join(root, d)) for p in patterns)
]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = patterns
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as fp:
content = fp.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
log = LoggingMixin().log
log.exception("Error while examining %s", f)
if include_examples:
import airflow.example_dags
example_dag_folder = airflow.example_dags.__path__[0]
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
return file_paths |
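A small sketch of the .airflowignore semantics used above: each non-empty line is compiled as a regular expression and matched against the full path (patterns and paths here are fabricated):
import re

ignore_lines = ['.*_wip\\.py', 'experiments/']
patterns = [re.compile(p) for p in ignore_lines if p]
candidates = [
    '/dags/etl_daily.py',
    '/dags/etl_daily_wip.py',
    '/dags/experiments/try.py',
]
kept = [c for c in candidates if not any(p.search(c) for p in patterns)]
print(kept)  # ['/dags/etl_daily.py']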
Construct a TaskInstance from the database based on the primary key | def construct_task_instance(self, session=None, lock_for_update=False):
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti |
Return the SimpleDag corresponding to the given DAG ID if it exists in the bag; otherwise raise an exception. | def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id] |
Launch DagFileProcessorManager processor and start DAG parsing loop in manager. | def start(self):
"""
Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
"""
self._process = self._launch_process(self._dag_directory,
self._file_paths,
self._max_runs,
self._processor_factory,
self._child_signal_conn,
self._stat_queue,
self._result_queue,
self._async_mode)
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid) |
Harvest DAG parsing results from the result queue and sync metadata from the stat queue; returns a list of parsing results in SimpleDag format. | def harvest_simple_dags(self):
"""
Harvest DAG parsing results from result queue and sync metadata from stat queue.
:return: List of parsing result in SimpleDag format.
"""
# Metadata and results to be harvested can be inconsistent,
# but it should not be a big problem.
self._sync_metadata()
# Heartbeating after syncing metadata so we do not restart manager
# if it processed all files for max_run times and exit normally.
self._heartbeat_manager()
simple_dags = []
# multiprocessing.Queue().qsize will not work on MacOS.
if sys.platform == "darwin":
qsize = self._result_count
else:
qsize = self._result_queue.qsize()
for _ in range(qsize):
simple_dags.append(self._result_queue.get())
self._result_count = 0
return simple_dags |
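The darwin branch above exists because multiprocessing.Queue.qsize() is not implemented on macOS (it relies on sem_getvalue()), so the agent tracks a result count itself; a quick sketch of the limitation being worked around:
import multiprocessing
import sys

q = multiprocessing.Queue()
q.put("result")
if sys.platform == "darwin":
    print("qsize() raises NotImplementedError on macOS")
else:
    print(q.qsize())  # typically 1 on platforms where sem_getvalue() works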
Heartbeat the DAG file processor manager and start it if it is not alive. | def _heartbeat_manager(self):
"""
Heartbeat DAG file processor and start it if it is not alive.
:return:
"""
if self._process and not self._process.is_alive() and not self.done:
self.start() |
Sync metadata from the stat queue and only keep the latest stat. | def _sync_metadata(self):
"""
Sync metadata from stat queue and only keep the latest stat.
:return:
"""
while not self._stat_queue.empty():
stat = self._stat_queue.get()
self._file_paths = stat.file_paths
self._all_pids = stat.all_pids
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._result_count += stat.result_count |
Send termination signal to DAG parsing processor manager and expect it to terminate all DAG file processors. | def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
self.log.info("Sending termination message to manager.")
self._child_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER) |
Terminate (and then kill) the manager process launched. | def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
            self.log.warning('Ending without manager process.')
return
this_process = psutil.Process(os.getpid())
try:
manager_process = psutil.Process(self._process.pid)
except psutil.NoSuchProcess:
self.log.info("Manager process not running.")
return
# First try SIGTERM
if manager_process.is_running() \
and manager_process.pid in [x.pid for x in this_process.children()]:
self.log.info("Terminating manager process: %s", manager_process.pid)
manager_process.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %ss for manager process to exit...", timeout)
try:
psutil.wait_procs({manager_process}, timeout)
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for "
"processes to exit")
# Then SIGKILL
if manager_process.is_running() \
and manager_process.pid in [x.pid for x in this_process.children()]:
self.log.info("Killing manager process: %s", manager_process.pid)
manager_process.kill()
manager_process.wait() |
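The terminate-then-kill escalation above is the standard psutil pattern; a minimal sketch against a throwaway child process (a POSIX `sleep` binary is assumed and the timeout is illustrative):
import subprocess
import psutil

child = subprocess.Popen(["sleep", "30"])
proc = psutil.Process(child.pid)
proc.terminate()                                   # polite SIGTERM first
gone, alive = psutil.wait_procs([proc], timeout=5)
for p in alive:                                    # escalate only if needed
    p.kill()
    p.wait()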
Helper method to clean up DAG file processors to avoid leaving orphan processes. | def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK) |
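For context, a handler like this is wired up with the standard-library signal module; a hedged sketch of the registration (the exact call site in the codebase is not shown here):
import signal
import sys

def _exit_gracefully(signum, frame):
    print("Exiting gracefully upon receiving signal %s" % signum)
    sys.exit(0)

signal.signal(signal.SIGINT, _exit_gracefully)
signal.signal(signal.SIGTERM, _exit_gracefully)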
Use multiple processes to parse and generate tasks for the DAGs in parallel. By processing them in separate processes we can get parallelism and isolation from potentially harmful user code. | def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
if self._async_mode:
self.log.debug("Starting DagFileProcessorManager in async mode")
self.start_in_async()
else:
self.log.debug("Starting DagFileProcessorManager in sync mode")
self.start_in_sync() |
Parse DAG files repeatedly in a standalone loop. | def start_in_async(self):
"""
Parse DAG files repeatedly in a standalone loop.
"""
while True:
loop_start_time = time.time()
if self._signal_conn.poll():
agent_signal = self._signal_conn.recv()
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._result_queue.put(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None
for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
self.get_all_pids(),
max_runs_reached,
all_files_processed,
len(simple_dags))
self._stat_queue.put(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
sleep_length = 1 - loop_duration
self.log.debug("Sleeping for %.2f seconds to prevent excessive logging", sleep_length)
time.sleep(sleep_length) |
Parse DAG files in a loop controlled by DagParsingSignal. Actual DAG parsing loop will run once upon receiving one agent heartbeat message and will report done when finished the loop. | def start_in_sync(self):
"""
Parse DAG files in a loop controlled by DagParsingSignal.
Actual DAG parsing loop will run once upon receiving one
agent heartbeat message and will report done when finished the loop.
"""
while True:
agent_signal = self._signal_conn.recv()
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._result_queue.put(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None
for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
self.get_all_pids(),
self.max_runs_reached(),
all_files_processed,
len(simple_dags))
self._stat_queue.put(dag_parsing_stat)
self.wait_until_finished()
self._signal_conn.send(DagParsingSignal.MANAGER_DONE)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
self._signal_conn.send(DagParsingSignal.MANAGER_DONE)
break |
Refresh file paths from dag dir if we haven't done it for too long. | def _refresh_dag_dir(self):
"""
Refresh file paths from dag dir if we haven't done it for too long.
"""
elapsed_time_since_refresh = (timezone.utcnow() -
self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = timezone.utcnow()
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors") |
Occasionally print out stats about how fast the files are getting processed | def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() >
self.print_stats_interval):
if len(self._file_paths) > 0:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow() |
Clears import errors for files that no longer exist. | def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit() |
Print out stats about how files are getting processed. | def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
if last_runtime:
Stats.gauge(
'dag_processing.last_runtime.{}'.format(file_name),
last_runtime
)
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((timezone.utcnow() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (timezone.utcnow() - last_run).total_seconds()
Stats.gauge(
'dag_processing.last_run.seconds_ago.{}'.format(file_name),
seconds_ago
)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str) |
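A tiny illustration of the tabulate layout produced above, with fabricated rows:
from tabulate import tabulate

headers = ["File Path", "PID", "Runtime", "Last Runtime", "Last Run"]
rows = [
    ("/dags/etl.py", 4242, "1.20s", "0.98s", "2019-01-01T00:00:00"),
    ("/dags/report.py", None, None, "2.41s", "2019-01-01T00:00:05"),
]
print(tabulate(rows, headers=headers))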
Return the PID of the process processing the given file, or None if the specified file is not being processed. | def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None |
Return the current runtime (in seconds) of the process that's processing the specified file, or None if the file is not currently being processed. | def get_runtime(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the current runtime (in seconds) of the process that's
processing the specified file or None if the file is not currently
being processed
"""
if file_path in self._processors:
return (timezone.utcnow() - self._processors[file_path].start_time)\
.total_seconds()
return None |
Return the start time of the process that's processing the specified file, or None if the file is not currently being processed. | def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None |
Update this with a new set of paths to DAG definition files. | def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
processor.terminate()
self._processors = filtered_processors |
Sleeps until all the processors are done. | def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1) |
This should be periodically called by the manager loop. This method will kick off new processes to process DAG definition files and read the results from the finished processors. | def heartbeat(self):
"""
This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
now = timezone.utcnow()
finished_processors[file_path] = processor
self._last_runtime[file_path] = (now -
processor.start_time).total_seconds()
self._last_finish_time[file_path] = now
self._run_count[file_path] += 1
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result:
simple_dags.append(simple_dag)
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, num_runs in self._run_count.items()
if num_runs == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
self._file_path_queue.extend(files_paths_to_queue)
zombies = self._find_zombies()
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path, zombies)
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._run_count[self._heart_beat_key] += 1
return simple_dags |
Find zombie task instances, i.e. tasks that haven't heartbeated for too long; returns them in SimpleTaskInstance format. | def _find_zombies(self, session):
"""
        Find zombie task instances, which are tasks that haven't heartbeated for too long.
:return: Zombie task instances in SimpleTaskInstance format.
"""
now = timezone.utcnow()
zombies = []
if (now - self._last_zombie_query_time).total_seconds() \
> self._zombie_query_interval:
# to avoid circular imports
from airflow.jobs import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
limit_dttm = timezone.utcnow() - timedelta(
seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
tis = (
session.query(TI)
.join(LJ, TI.job_id == LJ.id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
).all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti in tis:
zombies.append(SimpleTaskInstance(ti))
return zombies |
Return whether all file paths have been processed max_runs times. | def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for file_path in self._file_paths:
if self._run_count[file_path] < self._max_runs:
return False
if self._run_count[self._heart_beat_key] < self._max_runs:
return False
return True |
Kill all child processes on exit since we don't want to leave them orphaned. | def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait() |