Dataset columns:
- repo_name: string (4 distinct values)
- method_name: string (3 to 72 characters)
- method_code: string (87 to 3.59k characters)
- method_summary: string (12 to 196 characters)
- original_method_code: string (129 to 8.98k characters)
- method_path: string (15 to 136 characters)
apache/airflow
DatastoreHook.lookup
def lookup(self, keys, read_consistency=None, transaction=None):
    conn = self.get_conn()

    body = {'keys': keys}
    if read_consistency:
        body['readConsistency'] = read_consistency
    if transaction:
        body['transaction'] = transaction
    resp = (conn
            .projects()
            .lookup(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))

    return resp

Lookup some entities by key.

def lookup(self, keys, read_consistency=None, transaction=None):
    """
    Lookup some entities by key.

    .. seealso::
        https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup

    :param keys: the keys to lookup.
    :type keys: list
    :param read_consistency: the read consistency to use. default, strong or eventual.
        Cannot be used with a transaction.
    :type read_consistency: str
    :param transaction: the transaction to use, if any.
    :type transaction: str
    :return: the response body of the lookup request.
    :rtype: dict
    """
    conn = self.get_conn()

    body = {'keys': keys}
    if read_consistency:
        body['readConsistency'] = read_consistency
    if transaction:
        body['transaction'] = transaction
    resp = (conn
            .projects()
            .lookup(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))

    return resp
airflow/contrib/hooks/datastore_hook.py
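
A hedged usage sketch for the record above; it assumes a configured GCP connection, and the key path shown is illustrative (keys follow the Datastore REST format):

    from airflow.contrib.hooks.datastore_hook import DatastoreHook

    hook = DatastoreHook(datastore_conn_id='google_cloud_datastore_default')
    keys = [{'path': [{'kind': 'Task', 'name': 'sample-task'}]}]
    resp = hook.lookup(keys=keys, read_consistency='EVENTUAL')
    print(resp.get('found', []))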
apache/airflow
DatastoreHook.rollback
def rollback(self, transaction):
    conn = self.get_conn()

    conn.projects().rollback(
        projectId=self.project_id, body={'transaction': transaction}
    ).execute(num_retries=self.num_retries)

Roll back a transaction.

def rollback(self, transaction):
    """
    Roll back a transaction.

    .. seealso::
        https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback

    :param transaction: the transaction to roll back.
    :type transaction: str
    """
    conn = self.get_conn()

    conn.projects().rollback(
        projectId=self.project_id, body={'transaction': transaction}
    ).execute(num_retries=self.num_retries)
airflow/contrib/hooks/datastore_hook.py
apache/airflow
DatastoreHook.run_query
def run_query(self, body):
    conn = self.get_conn()

    resp = (conn
            .projects()
            .runQuery(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))

    return resp['batch']

Run a query for entities.

def run_query(self, body):
    """
    Run a query for entities.

    .. seealso::
        https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery

    :param body: the body of the query request.
    :type body: dict
    :return: the batch of query results.
    :rtype: dict
    """
    conn = self.get_conn()

    resp = (conn
            .projects()
            .runQuery(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))

    return resp['batch']
airflow/contrib/hooks/datastore_hook.py
apache/airflow
DatastoreHook.get_operation
def get_operation(self, name):
    conn = self.get_conn()

    resp = (conn
            .projects()
            .operations()
            .get(name=name)
            .execute(num_retries=self.num_retries))

    return resp

Gets the latest state of a long-running operation.

def get_operation(self, name):
    """
    Gets the latest state of a long-running operation.

    .. seealso::
        https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get

    :param name: the name of the operation resource.
    :type name: str
    :return: a resource operation instance.
    :rtype: dict
    """
    conn = self.get_conn()

    resp = (conn
            .projects()
            .operations()
            .get(name=name)
            .execute(num_retries=self.num_retries))

    return resp
airflow/contrib/hooks/datastore_hook.py
apache/airflow
DatastoreHook.delete_operation
def delete_operation(self, name):
    conn = self.get_conn()

    resp = (conn
            .projects()
            .operations()
            .delete(name=name)
            .execute(num_retries=self.num_retries))

    return resp

Deletes the long-running operation.

def delete_operation(self, name):
    """
    Deletes the long-running operation.

    .. seealso::
        https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete

    :param name: the name of the operation resource.
    :type name: str
    :return: none if successful.
    :rtype: dict
    """
    conn = self.get_conn()

    resp = (conn
            .projects()
            .operations()
            .delete(name=name)
            .execute(num_retries=self.num_retries))

    return resp
airflow/contrib/hooks/datastore_hook.py
apache/airflow
DatastoreHook.poll_operation_until_done
def poll_operation_until_done(self, name, polling_interval_in_seconds):
    while True:
        result = self.get_operation(name)

        state = result['metadata']['common']['state']
        if state == 'PROCESSING':
            self.log.info('Operation is processing. Re-polling state in {} seconds'
                          .format(polling_interval_in_seconds))
            time.sleep(polling_interval_in_seconds)
        else:
            return result

Poll backup operation state until it's completed.

def poll_operation_until_done(self, name, polling_interval_in_seconds):
    """
    Poll backup operation state until it's completed.

    :param name: the name of the operation resource
    :type name: str
    :param polling_interval_in_seconds: The number of seconds to wait before
        calling another request.
    :type polling_interval_in_seconds: int
    :return: a resource operation instance.
    :rtype: dict
    """
    while True:
        result = self.get_operation(name)

        state = result['metadata']['common']['state']
        if state == 'PROCESSING':
            self.log.info('Operation is processing. Re-polling state in {} seconds'
                          .format(polling_interval_in_seconds))
            time.sleep(polling_interval_in_seconds)
        else:
            return result
airflow/contrib/hooks/datastore_hook.py
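
A hedged sketch of how this polling helper pairs with the export method shown in the next record; the bucket name and interval are illustrative, and the response shape follows the Datastore Operation resource:

    hook = DatastoreHook()
    # export_to_storage_bucket returns a long-running operation resource;
    # poll it until the state is no longer PROCESSING.
    op = hook.export_to_storage_bucket(bucket='my-backup-bucket')
    result = hook.poll_operation_until_done(op['name'],
                                            polling_interval_in_seconds=10)
    print(result['metadata']['common']['state'])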
apache/airflow
DatastoreHook.export_to_storage_bucket
def export_to_storage_bucket(self, bucket, namespace=None, entity_filter=None, labels=None):
    admin_conn = self.get_conn()

    output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace]))
    if not entity_filter:
        entity_filter = {}
    if not labels:
        labels = {}
    body = {
        'outputUrlPrefix': output_uri_prefix,
        'entityFilter': entity_filter,
        'labels': labels,
    }
    resp = (admin_conn
            .projects()
            .export(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))

    return resp

Export entities from Cloud Datastore to Cloud Storage for backup.

def export_to_storage_bucket(self, bucket, namespace=None, entity_filter=None, labels=None):
    """
    Export entities from Cloud Datastore to Cloud Storage for backup.

    .. note::
        Keep in mind that this requests the Admin API, not the Data API.

    .. seealso::
        https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export

    :param bucket: The name of the Cloud Storage bucket.
    :type bucket: str
    :param namespace: The Cloud Storage namespace path.
    :type namespace: str
    :param entity_filter: Description of what data from the project is included
        in the export.
    :type entity_filter: dict
    :param labels: Client-assigned labels.
    :type labels: dict of str
    :return: a resource operation instance.
    :rtype: dict
    """
    admin_conn = self.get_conn()

    output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace]))
    if not entity_filter:
        entity_filter = {}
    if not labels:
        labels = {}
    body = {
        'outputUrlPrefix': output_uri_prefix,
        'entityFilter': entity_filter,
        'labels': labels,
    }
    resp = (admin_conn
            .projects()
            .export(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))

    return resp
airflow/contrib/hooks/datastore_hook.py
apache/airflow
DatastoreHook.import_from_storage_bucket
def import_from_storage_bucket(self, bucket, file, namespace=None, entity_filter=None, labels=None):
    admin_conn = self.get_conn()

    input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file]))
    if not entity_filter:
        entity_filter = {}
    if not labels:
        labels = {}
    body = {
        'inputUrl': input_url,
        'entityFilter': entity_filter,
        'labels': labels,
    }
    resp = (admin_conn
            .projects()
            .import_(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))

    return resp

Import a backup from Cloud Storage to Cloud Datastore.

def import_from_storage_bucket(self, bucket, file, namespace=None, entity_filter=None, labels=None):
    """
    Import a backup from Cloud Storage to Cloud Datastore.

    .. note::
        Keep in mind that this requests the Admin API, not the Data API.

    .. seealso::
        https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import

    :param bucket: The name of the Cloud Storage bucket.
    :type bucket: str
    :param file: the metadata file written by the projects.export operation.
    :type file: str
    :param namespace: The Cloud Storage namespace path.
    :type namespace: str
    :param entity_filter: specify which kinds/namespaces are to be imported.
    :type entity_filter: dict
    :param labels: Client-assigned labels.
    :type labels: dict of str
    :return: a resource operation instance.
    :rtype: dict
    """
    admin_conn = self.get_conn()

    input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file]))
    if not entity_filter:
        entity_filter = {}
    if not labels:
        labels = {}
    body = {
        'inputUrl': input_url,
        'entityFilter': entity_filter,
        'labels': labels,
    }
    resp = (admin_conn
            .projects()
            .import_(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))

    return resp
airflow/contrib/hooks/datastore_hook.py
apache/airflow
AwsSnsHook.publish_to_target
def publish_to_target(self, target_arn, message):
    conn = self.get_conn()

    messages = {
        'default': message
    }

    return conn.publish(
        TargetArn=target_arn,
        Message=json.dumps(messages),
        MessageStructure='json'
    )

Publish a message to a topic or an endpoint.

def publish_to_target(self, target_arn, message):
    """
    Publish a message to a topic or an endpoint.

    :param target_arn: either a TopicArn or an EndpointArn
    :type target_arn: str
    :param message: the default message you want to send
    :type message: str
    """
    conn = self.get_conn()

    messages = {
        'default': message
    }

    return conn.publish(
        TargetArn=target_arn,
        Message=json.dumps(messages),
        MessageStructure='json'
    )
airflow/contrib/hooks/aws_sns_hook.py
apache/airflow
get_hostname
def get_hostname():
    try:
        callable_path = conf.get('core', 'hostname_callable')
    except AirflowConfigException:
        callable_path = None

    if not callable_path:
        return socket.getfqdn()

    module_path, attr_name = callable_path.split(':')
    module = importlib.import_module(module_path)
    callable = getattr(module, attr_name)
    return callable()

Fetch the hostname using the callable from the config or using `socket.getfqdn` as a fallback.

def get_hostname():
    """
    Fetch the hostname using the callable from the config or using
    `socket.getfqdn` as a fallback.
    """
    # First we attempt to fetch the callable path from the config.
    try:
        callable_path = conf.get('core', 'hostname_callable')
    except AirflowConfigException:
        callable_path = None

    # Then we handle the case when the config is missing or empty. This is the
    # default behavior.
    if not callable_path:
        return socket.getfqdn()

    # Since we have a callable path, we try to import and run it next.
    module_path, attr_name = callable_path.split(':')
    module = importlib.import_module(module_path)
    callable = getattr(module, attr_name)
    return callable()
airflow/utils/net.py
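
The callable path uses a `module:attribute` format, split on the colon above. A hedged illustration; the module and function names are hypothetical:

    # airflow.cfg
    # [core]
    # hostname_callable = my_company.net_utils:get_canonical_hostname

    # my_company/net_utils.py (hypothetical module)
    import socket

    def get_canonical_hostname():
        # Return whatever name your infrastructure treats as canonical.
        return socket.gethostname().lower()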
apache/airflow
CloudNaturalLanguageHook.get_conn
def get_conn(self):
    if not self._conn:
        self._conn = LanguageServiceClient(credentials=self._get_credentials())
    return self._conn

Retrieves connection to Cloud Natural Language service.

def get_conn(self):
    """
    Retrieves connection to Cloud Natural Language service.

    :return: Cloud Natural Language service object
    :rtype: google.cloud.language_v1.LanguageServiceClient
    """
    if not self._conn:
        self._conn = LanguageServiceClient(credentials=self._get_credentials())
    return self._conn
airflow/contrib/hooks/gcp_natural_language_hook.py
apache/airflow
CloudNaturalLanguageHook.analyze_entities
def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
    client = self.get_conn()

    return client.analyze_entities(
        document=document, encoding_type=encoding_type, retry=retry,
        timeout=timeout, metadata=metadata
    )

Finds named entities in the text along with entity types, salience, mentions for each entity, and other properties.

def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
    """
    Finds named entities in the text along with entity types, salience,
    mentions for each entity, and other properties.

    :param document: Input document. If a dict is provided, it must be of the
        same form as the protobuf message Document
    :type document: dict or class google.cloud.language_v1.types.Document
    :param encoding_type: The encoding type used by the API to calculate offsets.
    :type encoding_type: google.cloud.language_v1.types.EncodingType
    :param retry: A retry object used to retry requests. If None is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to
        complete. Note that if retry is specified, the timeout applies to each
        individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
    :rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse
    """
    client = self.get_conn()

    return client.analyze_entities(
        document=document, encoding_type=encoding_type, retry=retry,
        timeout=timeout, metadata=metadata
    )
airflow/contrib/hooks/gcp_natural_language_hook.py
apache/airflow
CloudNaturalLanguageHook.annotate_text
def annotate_text(self, document, features, encoding_type=None, retry=None, timeout=None, metadata=None):
    client = self.get_conn()

    return client.annotate_text(
        document=document,
        features=features,
        encoding_type=encoding_type,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

A convenience method that provides all the features that analyzeSentiment, analyzeEntities, and analyzeSyntax provide in one call.

def annotate_text(self, document, features, encoding_type=None, retry=None, timeout=None, metadata=None):
    """
    A convenience method that provides all the features that analyzeSentiment,
    analyzeEntities, and analyzeSyntax provide in one call.

    :param document: Input document. If a dict is provided, it must be of the
        same form as the protobuf message Document
    :type document: dict or google.cloud.language_v1.types.Document
    :param features: The enabled features. If a dict is provided, it must be of
        the same form as the protobuf message Features
    :type features: dict or google.cloud.language_v1.enums.Features
    :param encoding_type: The encoding type used by the API to calculate offsets.
    :type encoding_type: google.cloud.language_v1.types.EncodingType
    :param retry: A retry object used to retry requests. If None is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to
        complete. Note that if retry is specified, the timeout applies to each
        individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
    :rtype: google.cloud.language_v1.types.AnnotateTextResponse
    """
    client = self.get_conn()

    return client.annotate_text(
        document=document,
        features=features,
        encoding_type=encoding_type,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
airflow/contrib/hooks/gcp_natural_language_hook.py
apache/airflow
CloudNaturalLanguageHook.classify_text
def classify_text(self, document, retry=None, timeout=None, metadata=None):
    client = self.get_conn()

    return client.classify_text(document=document, retry=retry,
                                timeout=timeout, metadata=metadata)

Classifies a document into categories.

def classify_text(self, document, retry=None, timeout=None, metadata=None):
    """
    Classifies a document into categories.

    :param document: Input document. If a dict is provided, it must be of the
        same form as the protobuf message Document
    :type document: dict or class google.cloud.language_v1.types.Document
    :param retry: A retry object used to retry requests. If None is specified,
        requests will not be retried.
    :type retry: google.api_core.retry.Retry
    :param timeout: The amount of time, in seconds, to wait for the request to
        complete. Note that if retry is specified, the timeout applies to each
        individual attempt.
    :type timeout: float
    :param metadata: Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
    :rtype: google.cloud.language_v1.types.ClassifyTextResponse
    """
    client = self.get_conn()

    return client.classify_text(document=document, retry=retry,
                                timeout=timeout, metadata=metadata)
airflow/contrib/hooks/gcp_natural_language_hook.py
apache/airflow
get_template_field
def get_template_field(env, fullname):
    modname, classname = fullname.rsplit(".", 1)

    try:
        with mock(env.config.autodoc_mock_imports):
            mod = import_module(modname)
    except ImportError:
        raise RoleException("Error loading %s module." % (modname, ))

    clazz = getattr(mod, classname)
    if not clazz:
        raise RoleException("Error finding %s class in %s module." % (classname, modname))

    template_fields = getattr(clazz, "template_fields")

    if not template_fields:
        raise RoleException(
            "Could not find the template fields for %s class in %s module." % (classname, modname)
        )

    return list(template_fields)

Gets template fields for a specific operator class.

def get_template_field(env, fullname):
    """
    Gets template fields for a specific operator class.

    :param fullname: Full path to operator class.
        For example:
        ``airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator``
    :return: List of template fields
    :rtype: list[str]
    """
    modname, classname = fullname.rsplit(".", 1)

    try:
        with mock(env.config.autodoc_mock_imports):
            mod = import_module(modname)
    except ImportError:
        raise RoleException("Error loading %s module." % (modname, ))

    clazz = getattr(mod, classname)
    if not clazz:
        raise RoleException("Error finding %s class in %s module." % (classname, modname))

    template_fields = getattr(clazz, "template_fields")

    if not template_fields:
        raise RoleException(
            "Could not find the template fields for %s class in %s module." % (classname, modname)
        )

    return list(template_fields)
docs/exts/docroles.py
apache/airflow
dispose_orm
def dispose_orm():
    log.debug("Disposing DB connection pool (PID %s)", os.getpid())
    global engine
    global Session

    if Session:
        Session.remove()
        Session = None
    if engine:
        engine.dispose()
        engine = None

Properly close pooled database connections

def dispose_orm():
    """ Properly close pooled database connections """
    log.debug("Disposing DB connection pool (PID %s)", os.getpid())
    global engine
    global Session

    if Session:
        Session.remove()
        Session = None
    if engine:
        engine.dispose()
        engine = None
airflow/settings.py
apache/airflow
prepare_classpath
def prepare_classpath():
    if DAGS_FOLDER not in sys.path:
        sys.path.append(DAGS_FOLDER)

    config_path = os.path.join(AIRFLOW_HOME, 'config')
    if config_path not in sys.path:
        sys.path.append(config_path)

    if PLUGINS_FOLDER not in sys.path:
        sys.path.append(PLUGINS_FOLDER)

Ensures that certain subfolders of AIRFLOW_HOME are on the classpath

def prepare_classpath():
    """
    Ensures that certain subfolders of AIRFLOW_HOME are on the classpath
    """
    if DAGS_FOLDER not in sys.path:
        sys.path.append(DAGS_FOLDER)

    # Add ./config/ for loading custom log parsers etc, or
    # airflow_local_settings etc.
    config_path = os.path.join(AIRFLOW_HOME, 'config')
    if config_path not in sys.path:
        sys.path.append(config_path)

    if PLUGINS_FOLDER not in sys.path:
        sys.path.append(PLUGINS_FOLDER)
airflow/settings.py
apache/airflow
CeleryQueueSensor._check_task_id
def _check_task_id(self, context):
    ti = context['ti']
    celery_result = ti.xcom_pull(task_ids=self.target_task_id)
    return celery_result.ready()

Gets the returned Celery result from the Airflow task ID provided to the sensor, and returns True if the celery result has finished execution.

def _check_task_id(self, context):
    """
    Gets the returned Celery result from the Airflow task
    ID provided to the sensor, and returns True if the
    celery result has finished execution.

    :param context: Airflow's execution context
    :type context: dict
    :return: True if task has been executed, otherwise False
    :rtype: bool
    """
    ti = context['ti']
    celery_result = ti.xcom_pull(task_ids=self.target_task_id)
    return celery_result.ready()
airflow/contrib/sensors/celery_queue_sensor.py
apache/airflow
alchemy_to_dict
def alchemy_to_dict(obj):
    if not obj:
        return None
    d = {}
    for c in obj.__table__.columns:
        value = getattr(obj, c.name)
        if type(value) == datetime:
            value = value.isoformat()
        d[c.name] = value
    return d

Transforms a SQLAlchemy model instance into a dictionary

def alchemy_to_dict(obj):
    """
    Transforms a SQLAlchemy model instance into a dictionary
    """
    if not obj:
        return None
    d = {}
    for c in obj.__table__.columns:
        value = getattr(obj, c.name)
        if type(value) == datetime:
            value = value.isoformat()
        d[c.name] = value
    return d
airflow/utils/helpers.py
apache/airflow
chunks
def chunks(items, chunk_size):
    if chunk_size <= 0:
        raise ValueError('Chunk size must be a positive integer')
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]

Yield successive chunks of a given size from a list of items

def chunks(items, chunk_size):
    """
    Yield successive chunks of a given size from a list of items
    """
    if chunk_size <= 0:
        raise ValueError('Chunk size must be a positive integer')
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]
airflow/utils/helpers.py
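
A quick runnable check of the generator above; the import path matches where this helper lives in the source tree:

    from airflow.utils.helpers import chunks

    print(list(chunks([1, 2, 3, 4, 5], 2)))   # [[1, 2], [3, 4], [5]]
    print(list(chunks([], 3)))                # []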
apache/airflow
reduce_in_chunks
def reduce_in_chunks(fn, iterable, initializer, chunk_size=0):
    if len(iterable) == 0:
        return initializer
    if chunk_size == 0:
        chunk_size = len(iterable)
    return reduce(fn, chunks(iterable, chunk_size), initializer)

Reduce the given list of items by splitting it into chunks of the given size and passing each chunk through the reducer

def reduce_in_chunks(fn, iterable, initializer, chunk_size=0):
    """
    Reduce the given list of items by splitting it into chunks
    of the given size and passing each chunk through the reducer
    """
    if len(iterable) == 0:
        return initializer
    if chunk_size == 0:
        chunk_size = len(iterable)
    return reduce(fn, chunks(iterable, chunk_size), initializer)
airflow/utils/helpers.py
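
A runnable sketch of the reducer above, summing a list two items at a time (values are illustrative):

    from airflow.utils.helpers import reduce_in_chunks

    total = reduce_in_chunks(lambda acc, chunk: acc + sum(chunk),
                             [1, 2, 3, 4], 0, chunk_size=2)
    print(total)  # 10: (0 + (1 + 2)) + (3 + 4)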
apache/airflow
chain
def chain(*tasks):
    for up_task, down_task in zip(tasks[:-1], tasks[1:]):
        up_task.set_downstream(down_task)

Given a number of tasks, builds a dependency chain. chain(task_1, task_2, task_3, task_4) is equivalent to task_1.set_downstream(task_2) task_2.set_downstream(task_3) task_3.set_downstream(task_4)

def chain(*tasks):
    """
    Given a number of tasks, builds a dependency chain.

    chain(task_1, task_2, task_3, task_4)

    is equivalent to

    task_1.set_downstream(task_2)
    task_2.set_downstream(task_3)
    task_3.set_downstream(task_4)
    """
    for up_task, down_task in zip(tasks[:-1], tasks[1:]):
        up_task.set_downstream(down_task)
airflow/utils/helpers.py
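
A hedged usage sketch; DummyOperator and the DAG context manager are standard Airflow pieces, but the dag id and dates below are illustrative:

    from datetime import datetime

    from airflow import DAG
    from airflow.operators.dummy_operator import DummyOperator
    from airflow.utils.helpers import chain

    with DAG('chain_demo', start_date=datetime(2019, 1, 1),
             schedule_interval=None) as dag:
        t1, t2, t3 = [DummyOperator(task_id='t{}'.format(i)) for i in range(1, 4)]
        chain(t1, t2, t3)  # same as t1.set_downstream(t2); t2.set_downstream(t3)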
apache/airflow
render_log_filename
def render_log_filename(ti, try_number, filename_template):
    filename_template, filename_jinja_template = parse_template_string(filename_template)
    if filename_jinja_template:
        jinja_context = ti.get_template_context()
        jinja_context['try_number'] = try_number
        return filename_jinja_template.render(**jinja_context)

    return filename_template.format(dag_id=ti.dag_id,
                                    task_id=ti.task_id,
                                    execution_date=ti.execution_date.isoformat(),
                                    try_number=try_number)

Given task instance, try_number, filename_template, return the rendered log filename

def render_log_filename(ti, try_number, filename_template):
    """
    Given task instance, try_number, filename_template, return the rendered log filename

    :param ti: task instance
    :param try_number: try_number of the task
    :param filename_template: filename template, which can be jinja template or
        python string template
    """
    filename_template, filename_jinja_template = parse_template_string(filename_template)
    if filename_jinja_template:
        jinja_context = ti.get_template_context()
        jinja_context['try_number'] = try_number
        return filename_jinja_template.render(**jinja_context)

    return filename_template.format(dag_id=ti.dag_id,
                                    task_id=ti.task_id,
                                    execution_date=ti.execution_date.isoformat(),
                                    try_number=try_number)
airflow/utils/helpers.py
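
Both template flavors are accepted. This standalone sketch exercises the string-format branch with a stand-in object instead of a real task instance (the stand-in is illustrative only):

    from datetime import datetime
    from types import SimpleNamespace

    template = '{dag_id}/{task_id}/{execution_date}/{try_number}.log'
    ti = SimpleNamespace(dag_id='my_dag', task_id='my_task',
                         execution_date=datetime(2019, 1, 1))
    print(template.format(dag_id=ti.dag_id,
                          task_id=ti.task_id,
                          execution_date=ti.execution_date.isoformat(),
                          try_number=1))
    # my_dag/my_task/2019-01-01T00:00:00/1.log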
apache/airflow
DataProcHook.wait
def wait(self, operation):
    submitted = _DataProcOperation(self.get_conn(), operation,
                                   self.num_retries)
    submitted.wait_for_done()

Waits for a Google Cloud Dataproc Operation to complete.

def wait(self, operation):
    """Waits for a Google Cloud Dataproc Operation to complete."""
    submitted = _DataProcOperation(self.get_conn(), operation,
                                   self.num_retries)
    submitted.wait_for_done()
airflow/contrib/hooks/gcp_dataproc_hook.py
apache/airflow
_handle_databricks_operator_execution
def _handle_databricks_operator_execution(operator, hook, log, context):
    if operator.do_xcom_push:
        context['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id)
    log.info('Run submitted with run_id: %s', operator.run_id)
    run_page_url = hook.get_run_page_url(operator.run_id)
    if operator.do_xcom_push:
        context['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url)

    log.info('View run status, Spark UI, and logs at %s', run_page_url)
    while True:
        run_state = hook.get_run_state(operator.run_id)
        if run_state.is_terminal:
            if run_state.is_successful:
                log.info('%s completed successfully.', operator.task_id)
                log.info('View run status, Spark UI, and logs at %s', run_page_url)
                return
            else:
                error_message = '{t} failed with terminal state: {s}'.format(
                    t=operator.task_id,
                    s=run_state)
                raise AirflowException(error_message)
        else:
            log.info('%s in run state: %s', operator.task_id, run_state)
            log.info('View run status, Spark UI, and logs at %s', run_page_url)
            log.info('Sleeping for %s seconds.', operator.polling_period_seconds)
            time.sleep(operator.polling_period_seconds)

Handles the Airflow + Databricks lifecycle logic for a Databricks operator

def _handle_databricks_operator_execution(operator, hook, log, context):
    """
    Handles the Airflow + Databricks lifecycle logic for a Databricks operator

    :param operator: Databricks operator being handled
    :param context: Airflow context
    """
    if operator.do_xcom_push:
        context['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id)
    log.info('Run submitted with run_id: %s', operator.run_id)
    run_page_url = hook.get_run_page_url(operator.run_id)
    if operator.do_xcom_push:
        context['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url)

    log.info('View run status, Spark UI, and logs at %s', run_page_url)
    while True:
        run_state = hook.get_run_state(operator.run_id)
        if run_state.is_terminal:
            if run_state.is_successful:
                log.info('%s completed successfully.', operator.task_id)
                log.info('View run status, Spark UI, and logs at %s', run_page_url)
                return
            else:
                error_message = '{t} failed with terminal state: {s}'.format(
                    t=operator.task_id,
                    s=run_state)
                raise AirflowException(error_message)
        else:
            log.info('%s in run state: %s', operator.task_id, run_state)
            log.info('View run status, Spark UI, and logs at %s', run_page_url)
            log.info('Sleeping for %s seconds.', operator.polling_period_seconds)
            time.sleep(operator.polling_period_seconds)
airflow/contrib/operators/databricks_operator.py
apache/airflow
PigCliHook.run_cli
def run_cli(self, pig, verbose=True):
    with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir:
        with NamedTemporaryFile(dir=tmp_dir) as f:
            f.write(pig.encode('utf-8'))
            f.flush()
            fname = f.name
            pig_bin = 'pig'
            cmd_extra = []

            pig_cmd = [pig_bin, '-f', fname] + cmd_extra

            if self.pig_properties:
                pig_properties_list = self.pig_properties.split()
                pig_cmd.extend(pig_properties_list)
            if verbose:
                self.log.info("%s", " ".join(pig_cmd))
            sp = subprocess.Popen(
                pig_cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd=tmp_dir,
                close_fds=True)
            self.sp = sp
            stdout = ''
            for line in iter(sp.stdout.readline, b''):
                stdout += line.decode('utf-8')
                if verbose:
                    self.log.info(line.strip())
            sp.wait()

            if sp.returncode:
                raise AirflowException(stdout)

            return stdout

Run a pig script using the pig CLI

def run_cli(self, pig, verbose=True):
    """
    Run a pig script using the pig CLI

    >>> ph = PigCliHook()
    >>> result = ph.run_cli("ls /;")
    >>> ("hdfs://" in result)
    True
    """
    with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir:
        with NamedTemporaryFile(dir=tmp_dir) as f:
            f.write(pig.encode('utf-8'))
            f.flush()
            fname = f.name
            pig_bin = 'pig'
            cmd_extra = []

            pig_cmd = [pig_bin, '-f', fname] + cmd_extra

            if self.pig_properties:
                pig_properties_list = self.pig_properties.split()
                pig_cmd.extend(pig_properties_list)
            if verbose:
                self.log.info("%s", " ".join(pig_cmd))
            sp = subprocess.Popen(
                pig_cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd=tmp_dir,
                close_fds=True)
            self.sp = sp
            stdout = ''
            for line in iter(sp.stdout.readline, b''):
                stdout += line.decode('utf-8')
                if verbose:
                    self.log.info(line.strip())
            sp.wait()

            if sp.returncode:
                raise AirflowException(stdout)

            return stdout
airflow/hooks/pig_hook.py
apache/airflow
fetch_celery_task_state
def fetch_celery_task_state(celery_task):
    try:
        with timeout(seconds=2):
            res = (celery_task[0], celery_task[1].state)
    except Exception as e:
        exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0],
                                                              traceback.format_exc())
        res = ExceptionWithTraceback(e, exception_traceback)
    return res

Fetch and return the state of the given celery task. The scope of this function is global so that it can be called by subprocesses in the pool.

def fetch_celery_task_state(celery_task):
    """
    Fetch and return the state of the given celery task. The scope of this function is
    global so that it can be called by subprocesses in the pool.

    :param celery_task: a tuple of the Celery task key and the async Celery object used
        to fetch the task's state
    :type celery_task: tuple(str, celery.result.AsyncResult)
    :return: a tuple of the Celery task key and the Celery state of the task
    :rtype: tuple[str, str]
    """
    try:
        with timeout(seconds=2):
            # Accessing state property of celery task will make actual network request
            # to get the current state of the task.
            res = (celery_task[0], celery_task[1].state)
    except Exception as e:
        exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0],
                                                              traceback.format_exc())
        res = ExceptionWithTraceback(e, exception_traceback)
    return res
airflow/executors/celery_executor.py
apache/airflow
CeleryExecutor._num_tasks_per_send_process
def _num_tasks_per_send_process(self, to_send_count):
    return max(1,
               int(math.ceil(1.0 * to_send_count / self._sync_parallelism)))

How many Celery tasks should each worker process send.

def _num_tasks_per_send_process(self, to_send_count):
    """
    How many Celery tasks should each worker process send.

    :return: Number of tasks that should be sent per process
    :rtype: int
    """
    return max(1,
               int(math.ceil(1.0 * to_send_count / self._sync_parallelism)))
airflow/executors/celery_executor.py
apache/airflow
CeleryExecutor._num_tasks_per_fetch_process
def _num_tasks_per_fetch_process(self):
    return max(1,
               int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism)))

How many Celery tasks should be fetched by each worker process.

def _num_tasks_per_fetch_process(self):
    """
    How many Celery tasks should be fetched by each worker process.

    :return: Number of tasks that should be used per process
    :rtype: int
    """
    return max(1,
               int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism)))
airflow/executors/celery_executor.py
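
Both helpers above are plain ceiling divisions, clamped to at least one task per process. A standalone check of the arithmetic (values are illustrative):

    import math

    sync_parallelism = 4
    task_count = 10
    # ceil(10 / 4) == 3 tasks per process; the max(1, ...) guard matters
    # when task_count < sync_parallelism.
    print(max(1, int(math.ceil(1.0 * task_count / sync_parallelism))))  # 3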
apache/airflow
Variable.setdefault
def setdefault(cls, key, default, deserialize_json=False):
    obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json)
    if obj is None:
        if default is not None:
            Variable.set(key, default, serialize_json=deserialize_json)
            return default
        else:
            raise ValueError('Default Value must be set')
    else:
        return obj

Like a Python builtin dict object, setdefault returns the current value for a key, and if it isn't there, stores the default value and returns it.

def setdefault(cls, key, default, deserialize_json=False):
    """
    Like a Python builtin dict object, setdefault returns the current value
    for a key, and if it isn't there, stores the default value and returns it.

    :param key: Dict key for this Variable
    :type key: str
    :param default: Default value to set and return if the variable
        isn't already in the DB
    :type default: Mixed
    :param deserialize_json: Store this as a JSON encoded value in the DB
        and un-encode it when retrieving a value
    :return: Mixed
    """
    obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json)
    if obj is None:
        if default is not None:
            Variable.set(key, default, serialize_json=deserialize_json)
            return default
        else:
            raise ValueError('Default Value must be set')
    else:
        return obj
airflow/models/variable.py
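
A hedged usage sketch; it needs an initialized Airflow metadata database, and the key and default shown are illustrative:

    from airflow.models import Variable

    # Returns the stored value if 'feature_flags' already exists; otherwise
    # stores the default (JSON-encoded, because deserialize_json=True) and
    # returns it.
    flags = Variable.setdefault('feature_flags', {'beta': False},
                                deserialize_json=True)
    print(flags)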
apache/airflow
MLEngineHook.create_job
def create_job(self, project_id, job, use_existing_job_fn=None):
    request = self._mlengine.projects().jobs().create(
        parent='projects/{}'.format(project_id), body=job)
    job_id = job['jobId']

    try:
        request.execute()
    except HttpError as e:
        # 409 means there is an existing job with the same job ID.
        if e.resp.status == 409:
            if use_existing_job_fn is not None:
                existing_job = self._get_job(project_id, job_id)
                if not use_existing_job_fn(existing_job):
                    self.log.error(
                        'Job with job_id %s already exists, but it does '
                        'not match our expectation: %s',
                        job_id, existing_job
                    )
                    raise
            self.log.info(
                'Job with job_id %s already exists. Will wait for it to finish',
                job_id
            )
        else:
            self.log.error('Failed to create MLEngine job: {}'.format(e))
            raise

    return self._wait_for_job_done(project_id, job_id)

Launches an MLEngine job and waits for it to reach a terminal state.

def create_job(self, project_id, job, use_existing_job_fn=None):
    """
    Launches an MLEngine job and waits for it to reach a terminal state.

    :param project_id: The Google Cloud project id within which MLEngine
        job will be launched.
    :type project_id: str
    :param job: MLEngine Job object that should be provided to the MLEngine
        API, such as: ::

            {
              'jobId': 'my_job_id',
              'trainingInput': {
                'scaleTier': 'STANDARD_1',
                ...
              }
            }

    :type job: dict
    :param use_existing_job_fn: In case that a MLEngine job with the same
        job_id already exists, this method (if provided) will decide whether
        we should use this existing job, continue waiting for it to finish
        and returning the job object. It should accept a MLEngine job
        object, and returns a boolean value indicating whether it is OK to
        reuse the existing job. If 'use_existing_job_fn' is not provided,
        we by default reuse the existing MLEngine job.
    :type use_existing_job_fn: function
    :return: The MLEngine job object if the job successfully reaches a
        terminal state (which might be FAILED or CANCELLED state).
    :rtype: dict
    """
    request = self._mlengine.projects().jobs().create(
        parent='projects/{}'.format(project_id), body=job)
    job_id = job['jobId']

    try:
        request.execute()
    except HttpError as e:
        # 409 means there is an existing job with the same job ID.
        if e.resp.status == 409:
            if use_existing_job_fn is not None:
                existing_job = self._get_job(project_id, job_id)
                if not use_existing_job_fn(existing_job):
                    self.log.error(
                        'Job with job_id %s already exists, but it does '
                        'not match our expectation: %s',
                        job_id, existing_job
                    )
                    raise
            self.log.info(
                'Job with job_id %s already exists. Will wait for it to finish',
                job_id
            )
        else:
            self.log.error('Failed to create MLEngine job: {}'.format(e))
            raise

    return self._wait_for_job_done(project_id, job_id)
airflow/contrib/hooks/gcp_mlengine_hook.py
apache/airflow
MLEngineHook._get_job
def _get_job(self, project_id, job_id):
    job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
    request = self._mlengine.projects().jobs().get(name=job_name)
    while True:
        try:
            return request.execute()
        except HttpError as e:
            if e.resp.status == 429:
                # polling after 30 seconds when quota failure occurs
                time.sleep(30)
            else:
                self.log.error('Failed to get MLEngine job: {}'.format(e))
                raise

Gets an MLEngine job based on the job name.

def _get_job(self, project_id, job_id):
    """
    Gets an MLEngine job based on the job name.

    :return: MLEngine job object if succeed.
    :rtype: dict

    Raises:
        googleapiclient.errors.HttpError: if HTTP error is returned from server
    """
    job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
    request = self._mlengine.projects().jobs().get(name=job_name)
    while True:
        try:
            return request.execute()
        except HttpError as e:
            if e.resp.status == 429:
                # polling after 30 seconds when quota failure occurs
                time.sleep(30)
            else:
                self.log.error('Failed to get MLEngine job: {}'.format(e))
                raise
airflow/contrib/hooks/gcp_mlengine_hook.py
apache/airflow
MLEngineHook._wait_for_job_done
def _wait_for_job_done(self, project_id, job_id, interval=30):
    if interval <= 0:
        raise ValueError("Interval must be > 0")
    while True:
        job = self._get_job(project_id, job_id)
        if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
            return job
        time.sleep(interval)

Waits for the Job to reach a terminal state. This method will periodically check the job state until the job reaches a terminal state.

def _wait_for_job_done(self, project_id, job_id, interval=30):
    """
    Waits for the Job to reach a terminal state.

    This method will periodically check the job state until the job reaches
    a terminal state.

    Raises:
        googleapiclient.errors.HttpError: if HTTP error is returned when getting
        the job
    """
    if interval <= 0:
        raise ValueError("Interval must be > 0")
    while True:
        job = self._get_job(project_id, job_id)
        if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
            return job
        time.sleep(interval)
airflow/contrib/hooks/gcp_mlengine_hook.py
apache/airflow
MLEngineHook.create_version
def create_version(self, project_id, model_name, version_spec):
    parent_name = 'projects/{}/models/{}'.format(project_id, model_name)
    create_request = self._mlengine.projects().models().versions().create(
        parent=parent_name, body=version_spec)
    response = create_request.execute()
    get_request = self._mlengine.projects().operations().get(
        name=response['name'])

    return _poll_with_exponential_delay(
        request=get_request,
        max_n=9,
        is_done_func=lambda resp: resp.get('done', False),
        is_error_func=lambda resp: resp.get('error', None) is not None)

Creates the Version on Google Cloud ML Engine.

def create_version(self, project_id, model_name, version_spec):
    """
    Creates the Version on Google Cloud ML Engine.

    Returns the operation if the version was created successfully and
    raises an error otherwise.
    """
    parent_name = 'projects/{}/models/{}'.format(project_id, model_name)
    create_request = self._mlengine.projects().models().versions().create(
        parent=parent_name, body=version_spec)
    response = create_request.execute()
    get_request = self._mlengine.projects().operations().get(
        name=response['name'])

    return _poll_with_exponential_delay(
        request=get_request,
        max_n=9,
        is_done_func=lambda resp: resp.get('done', False),
        is_error_func=lambda resp: resp.get('error', None) is not None)
airflow/contrib/hooks/gcp_mlengine_hook.py
apache/airflow
MLEngineHook.set_default_version
def set_default_version(self, project_id, model_name, version_name):
    full_version_name = 'projects/{}/models/{}/versions/{}'.format(
        project_id, model_name, version_name)
    request = self._mlengine.projects().models().versions().setDefault(
        name=full_version_name, body={})

    try:
        response = request.execute()
        self.log.info('Successfully set version: %s to default', response)
        return response
    except HttpError as e:
        self.log.error('Something went wrong: %s', e)
        raise

Sets a version to be the default. Blocks until finished.

def set_default_version(self, project_id, model_name, version_name):
    """
    Sets a version to be the default. Blocks until finished.
    """
    full_version_name = 'projects/{}/models/{}/versions/{}'.format(
        project_id, model_name, version_name)
    request = self._mlengine.projects().models().versions().setDefault(
        name=full_version_name, body={})

    try:
        response = request.execute()
        self.log.info('Successfully set version: %s to default', response)
        return response
    except HttpError as e:
        self.log.error('Something went wrong: %s', e)
        raise
airflow/contrib/hooks/gcp_mlengine_hook.py
apache/airflow
MLEngineHook.list_versions
def list_versions(self, project_id, model_name):
    result = []
    full_parent_name = 'projects/{}/models/{}'.format(
        project_id, model_name)
    request = self._mlengine.projects().models().versions().list(
        parent=full_parent_name, pageSize=100)

    response = request.execute()
    next_page_token = response.get('nextPageToken', None)
    result.extend(response.get('versions', []))
    while next_page_token is not None:
        next_request = self._mlengine.projects().models().versions().list(
            parent=full_parent_name,
            pageToken=next_page_token,
            pageSize=100)
        response = next_request.execute()
        next_page_token = response.get('nextPageToken', None)
        result.extend(response.get('versions', []))
        time.sleep(5)
    return result

Lists all available versions of a model. Blocks until finished.

def list_versions(self, project_id, model_name):
    """
    Lists all available versions of a model. Blocks until finished.
    """
    result = []
    full_parent_name = 'projects/{}/models/{}'.format(
        project_id, model_name)
    request = self._mlengine.projects().models().versions().list(
        parent=full_parent_name, pageSize=100)

    response = request.execute()
    next_page_token = response.get('nextPageToken', None)
    result.extend(response.get('versions', []))
    while next_page_token is not None:
        next_request = self._mlengine.projects().models().versions().list(
            parent=full_parent_name,
            pageToken=next_page_token,
            pageSize=100)
        response = next_request.execute()
        next_page_token = response.get('nextPageToken', None)
        result.extend(response.get('versions', []))
        time.sleep(5)
    return result
airflow/contrib/hooks/gcp_mlengine_hook.py
apache/airflow
MLEngineHook.delete_version
def delete_version(self, project_id, model_name, version_name):
    full_name = 'projects/{}/models/{}/versions/{}'.format(
        project_id, model_name, version_name)
    delete_request = self._mlengine.projects().models().versions().delete(
        name=full_name)
    response = delete_request.execute()
    get_request = self._mlengine.projects().operations().get(
        name=response['name'])

    return _poll_with_exponential_delay(
        request=get_request,
        max_n=9,
        is_done_func=lambda resp: resp.get('done', False),
        is_error_func=lambda resp: resp.get('error', None) is not None)

Deletes the given version of a model. Blocks until finished.

def delete_version(self, project_id, model_name, version_name):
    """
    Deletes the given version of a model. Blocks until finished.
    """
    full_name = 'projects/{}/models/{}/versions/{}'.format(
        project_id, model_name, version_name)
    delete_request = self._mlengine.projects().models().versions().delete(
        name=full_name)
    response = delete_request.execute()
    get_request = self._mlengine.projects().operations().get(
        name=response['name'])

    return _poll_with_exponential_delay(
        request=get_request,
        max_n=9,
        is_done_func=lambda resp: resp.get('done', False),
        is_error_func=lambda resp: resp.get('error', None) is not None)
airflow/contrib/hooks/gcp_mlengine_hook.py
apache/airflow
MLEngineHook.create_model
def create_model(self, project_id, model):
    if not model['name']:
        raise ValueError("Model name must be provided and "
                         "cannot be an empty string")
    project = 'projects/{}'.format(project_id)

    request = self._mlengine.projects().models().create(
        parent=project, body=model)
    return request.execute()

Create a Model. Blocks until finished.

def create_model(self, project_id, model):
    """
    Create a Model. Blocks until finished.
    """
    if not model['name']:
        raise ValueError("Model name must be provided and "
                         "cannot be an empty string")
    project = 'projects/{}'.format(project_id)

    request = self._mlengine.projects().models().create(
        parent=project, body=model)
    return request.execute()
airflow/contrib/hooks/gcp_mlengine_hook.py
apache/airflow
MLEngineHook.get_model
def get_model(self, project_id, model_name):
    if not model_name:
        raise ValueError("Model name must be provided and "
                         "it cannot be an empty string")
    full_model_name = 'projects/{}/models/{}'.format(
        project_id, model_name)
    request = self._mlengine.projects().models().get(name=full_model_name)
    try:
        return request.execute()
    except HttpError as e:
        if e.resp.status == 404:
            self.log.error('Model was not found: %s', e)
            return None
        raise

Gets a Model. Blocks until finished.

def get_model(self, project_id, model_name):
    """
    Gets a Model. Blocks until finished.
    """
    if not model_name:
        raise ValueError("Model name must be provided and "
                         "it cannot be an empty string")
    full_model_name = 'projects/{}/models/{}'.format(
        project_id, model_name)
    request = self._mlengine.projects().models().get(name=full_model_name)
    try:
        return request.execute()
    except HttpError as e:
        if e.resp.status == 404:
            self.log.error('Model was not found: %s', e)
            return None
        raise
airflow/contrib/hooks/gcp_mlengine_hook.py
apache/airflow
AwsDynamoDBHook.write_batch_data
def write_batch_data(self, items):
    dynamodb_conn = self.get_conn()

    try:
        table = dynamodb_conn.Table(self.table_name)

        with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch:
            for item in items:
                batch.put_item(Item=item)
        return True
    except Exception as general_error:
        raise AirflowException(
            'Failed to insert items in dynamodb, error: {error}'.format(
                error=str(general_error)
            )
        )

Write batch items to dynamodb table with provisioned throughput capacity.

def write_batch_data(self, items):
    """
    Write batch items to dynamodb table with provisioned throughput capacity.
    """
    dynamodb_conn = self.get_conn()

    try:
        table = dynamodb_conn.Table(self.table_name)

        with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch:
            for item in items:
                batch.put_item(Item=item)
        return True
    except Exception as general_error:
        raise AirflowException(
            'Failed to insert items in dynamodb, error: {error}'.format(
                error=str(general_error)
            )
        )
airflow/contrib/hooks/aws_dynamodb_hook.py
apache/airflow
_integrate_plugins
def _integrate_plugins():
    from airflow.plugins_manager import executors_modules
    for executors_module in executors_modules:
        sys.modules[executors_module.__name__] = executors_module
        globals()[executors_module._name] = executors_module

Integrate plugins to the context.

def _integrate_plugins():
    """Integrate plugins to the context."""
    from airflow.plugins_manager import executors_modules
    for executors_module in executors_modules:
        sys.modules[executors_module.__name__] = executors_module
        globals()[executors_module._name] = executors_module
airflow/executors/__init__.py
apache/airflow
get_default_executor
def get_default_executor():
    global DEFAULT_EXECUTOR

    if DEFAULT_EXECUTOR is not None:
        return DEFAULT_EXECUTOR

    executor_name = configuration.conf.get('core', 'EXECUTOR')

    DEFAULT_EXECUTOR = _get_executor(executor_name)

    log = LoggingMixin().log
    log.info("Using executor %s", executor_name)

    return DEFAULT_EXECUTOR

Creates a new instance of the configured executor if none exists and returns it

def get_default_executor():
    """Creates a new instance of the configured executor if none exists and returns it"""
    global DEFAULT_EXECUTOR

    if DEFAULT_EXECUTOR is not None:
        return DEFAULT_EXECUTOR

    executor_name = configuration.conf.get('core', 'EXECUTOR')

    DEFAULT_EXECUTOR = _get_executor(executor_name)

    log = LoggingMixin().log
    log.info("Using executor %s", executor_name)

    return DEFAULT_EXECUTOR
airflow/executors/__init__.py
apache/airflow
_get_executor
def _get_executor(executor_name):
    if executor_name == Executors.LocalExecutor:
        return LocalExecutor()
    elif executor_name == Executors.SequentialExecutor:
        return SequentialExecutor()
    elif executor_name == Executors.CeleryExecutor:
        from airflow.executors.celery_executor import CeleryExecutor
        return CeleryExecutor()
    elif executor_name == Executors.DaskExecutor:
        from airflow.executors.dask_executor import DaskExecutor
        return DaskExecutor()
    elif executor_name == Executors.KubernetesExecutor:
        from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
        return KubernetesExecutor()
    else:
        # Loading plugins
        _integrate_plugins()
        executor_path = executor_name.split('.')
        if len(executor_path) != 2:
            raise AirflowException(
                "Executor {0} not supported: "
                "please specify in format plugin_module.executor".format(executor_name))
        if executor_path[0] in globals():
            return globals()[executor_path[0]].__dict__[executor_path[1]]()
        else:
            raise AirflowException("Executor {0} not supported.".format(executor_name))

Creates a new instance of the named executor. In case the executor name is not known in airflow, look for it in the plugins

def _get_executor(executor_name):
    """
    Creates a new instance of the named executor. In case the executor name
    is not known in airflow, look for it in the plugins
    """
    if executor_name == Executors.LocalExecutor:
        return LocalExecutor()
    elif executor_name == Executors.SequentialExecutor:
        return SequentialExecutor()
    elif executor_name == Executors.CeleryExecutor:
        from airflow.executors.celery_executor import CeleryExecutor
        return CeleryExecutor()
    elif executor_name == Executors.DaskExecutor:
        from airflow.executors.dask_executor import DaskExecutor
        return DaskExecutor()
    elif executor_name == Executors.KubernetesExecutor:
        from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
        return KubernetesExecutor()
    else:
        # Loading plugins
        _integrate_plugins()
        executor_path = executor_name.split('.')
        if len(executor_path) != 2:
            raise AirflowException(
                "Executor {0} not supported: "
                "please specify in format plugin_module.executor".format(executor_name))
        if executor_path[0] in globals():
            return globals()[executor_path[0]].__dict__[executor_path[1]]()
        else:
            raise AirflowException("Executor {0} not supported.".format(executor_name))
airflow/executors/__init__.py
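
The plugin fallback expects a two-part dotted path. A small sketch of the accepted format; the plugin module and class names are made up:

    # In airflow.cfg a plugin-provided executor is referenced as
    # plugin_module.executor:
    #
    # [core]
    # executor = my_plugin.MyCustomExecutor

    executor_name = 'my_plugin.MyCustomExecutor'
    executor_path = executor_name.split('.')
    assert len(executor_path) == 2  # anything else raises AirflowException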
apache/airflow
SegmentHook.on_error
def on_error(self, error, items):
    self.log.error('Encountered Segment error: {segment_error} with '
                   'items: {with_items}'.format(segment_error=error,
                                                with_items=items))
    raise AirflowException('Segment error: {}'.format(error))

Handles error callbacks when using Segment with segment_debug_mode set to True

def on_error(self, error, items):
    """
    Handles error callbacks when using Segment with segment_debug_mode set to True
    """
    self.log.error('Encountered Segment error: {segment_error} with '
                   'items: {with_items}'.format(segment_error=error,
                                                with_items=items))
    raise AirflowException('Segment error: {}'.format(error))
airflow/contrib/hooks/segment_hook.py
apache/airflow
trigger_dag
def trigger_dag(dag_id):
    data = request.get_json(force=True)

    run_id = None
    if 'run_id' in data:
        run_id = data['run_id']

    conf = None
    if 'conf' in data:
        conf = data['conf']

    execution_date = None
    if 'execution_date' in data and data['execution_date'] is not None:
        execution_date = data['execution_date']

        # Convert string datetime into actual datetime
        try:
            execution_date = timezone.parse(execution_date)
        except ValueError:
            error_message = (
                'Given execution date, {}, could not be identified '
                'as a date. Example date format: 2015-11-16T14:34:15+00:00'
                .format(execution_date))
            _log.info(error_message)
            response = jsonify({'error': error_message})
            response.status_code = 400

            return response

    try:
        dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response

    if getattr(g, 'user', None):
        _log.info("User %s created %s", g.user, dr)

    response = jsonify(message="Created {}".format(dr))
    return response

Trigger a new dag run for a Dag with an execution date of now unless specified in the data.

def trigger_dag(dag_id):
    """
    Trigger a new dag run for a Dag with an execution date of now unless
    specified in the data.
    """
    data = request.get_json(force=True)

    run_id = None
    if 'run_id' in data:
        run_id = data['run_id']

    conf = None
    if 'conf' in data:
        conf = data['conf']

    execution_date = None
    if 'execution_date' in data and data['execution_date'] is not None:
        execution_date = data['execution_date']

        # Convert string datetime into actual datetime
        try:
            execution_date = timezone.parse(execution_date)
        except ValueError:
            error_message = (
                'Given execution date, {}, could not be identified '
                'as a date. Example date format: 2015-11-16T14:34:15+00:00'
                .format(execution_date))
            _log.info(error_message)
            response = jsonify({'error': error_message})
            response.status_code = 400

            return response

    try:
        dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response

    if getattr(g, 'user', None):
        _log.info("User %s created %s", g.user, dr)

    response = jsonify(message="Created {}".format(dr))
    return response
airflow/www/api/experimental/endpoints.py
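
A hedged client-side sketch of calling this endpoint with the requests library; the host, port, and dag id are illustrative (in Airflow 1.10 the route is POST /api/experimental/dags/<dag_id>/dag_runs):

    import requests

    payload = {
        'run_id': 'manual_2019_01_01T00_00_00',
        'conf': {'param': 'value'},
        'execution_date': '2019-01-01T00:00:00+00:00',
    }
    resp = requests.post(
        'http://localhost:8080/api/experimental/dags/example_dag/dag_runs',
        json=payload)
    print(resp.status_code, resp.json())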
apache/airflow
delete_dag
def delete_dag(dag_id):
    try:
        count = delete.delete_dag(dag_id)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response
    return jsonify(message="Removed {} record(s)".format(count), count=count)

Delete all DB records related to the specified Dag.

def delete_dag(dag_id):
    """
    Delete all DB records related to the specified Dag.
    """
    try:
        count = delete.delete_dag(dag_id)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response
    return jsonify(message="Removed {} record(s)".format(count), count=count)
airflow/www/api/experimental/endpoints.py
apache/airflow
get_pools
def get_pools():
    try:
        pools = pool_api.get_pools()
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response
    else:
        return jsonify([p.to_json() for p in pools])

Get all pools.

def get_pools():
    """Get all pools."""
    try:
        pools = pool_api.get_pools()
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response
    else:
        return jsonify([p.to_json() for p in pools])
airflow/www/api/experimental/endpoints.py
apache/airflow
create_pool
def create_pool():
    params = request.get_json(force=True)
    try:
        pool = pool_api.create_pool(**params)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response
    else:
        return jsonify(pool.to_json())

Create a pool.

def create_pool():
    """Create a pool."""
    params = request.get_json(force=True)
    try:
        pool = pool_api.create_pool(**params)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response
    else:
        return jsonify(pool.to_json())
airflow/www/api/experimental/endpoints.py
apache/airflow
delete_pool
def delete_pool(name):
    try:
        pool = pool_api.delete_pool(name=name)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response
    else:
        return jsonify(pool.to_json())

Delete pool.

def delete_pool(name):
    """Delete pool."""
    try:
        pool = pool_api.delete_pool(name=name)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response
    else:
        return jsonify(pool.to_json())
airflow/www/api/experimental/endpoints.py
apache/airflow
AzureContainerInstanceHook.create_or_update
def create_or_update(self, resource_group, name, container_group):
    self.connection.container_groups.create_or_update(resource_group,
                                                      name,
                                                      container_group)

Create a new container group

def create_or_update(self, resource_group, name, container_group):
    """
    Create a new container group

    :param resource_group: the name of the resource group
    :type resource_group: str
    :param name: the name of the container group
    :type name: str
    :param container_group: the properties of the container group
    :type container_group: azure.mgmt.containerinstance.models.ContainerGroup
    """
    self.connection.container_groups.create_or_update(resource_group,
                                                      name,
                                                      container_group)
airflow/contrib/hooks/azure_container_instance_hook.py
apache/airflow
AzureContainerInstanceHook.get_state_exitcode_details
def get_state_exitcode_details(self, resource_group, name):
    current_state = self._get_instance_view(resource_group, name).current_state
    return (current_state.state,
            current_state.exit_code,
            current_state.detail_status)

Get the state and exitcode of a container group

def get_state_exitcode_details(self, resource_group, name):
    """
    Get the state and exitcode of a container group

    :param resource_group: the name of the resource group
    :type resource_group: str
    :param name: the name of the container group
    :type name: str
    :return: A tuple with the state, exitcode, and details.
        If the exitcode is unknown 0 is returned.
    :rtype: tuple(state,exitcode,details)
    """
    current_state = self._get_instance_view(resource_group, name).current_state
    return (current_state.state,
            current_state.exit_code,
            current_state.detail_status)
airflow/contrib/hooks/azure_container_instance_hook.py
apache/airflow
AzureContainerInstanceHook.get_messages
def get_messages(self, resource_group, name):
    instance_view = self._get_instance_view(resource_group, name)
    return [event.message for event in instance_view.events]

Get the messages of a container group

def get_messages(self, resource_group, name):
    """
    Get the messages of a container group

    :param resource_group: the name of the resource group
    :type resource_group: str
    :param name: the name of the container group
    :type name: str
    :return: A list of the event messages
    :rtype: list[str]
    """
    instance_view = self._get_instance_view(resource_group, name)
    return [event.message for event in instance_view.events]
airflow/contrib/hooks/azure_container_instance_hook.py
apache/airflow
AzureContainerInstanceHook.get_logs
def get_logs(self, resource_group, name, tail=1000): logs = self.connection.container.list_logs(resource_group, name, name, tail=tail) return logs.content.splitlines(True)
Get the tail from logs of a container group
def get_logs(self, resource_group, name, tail=1000): """ Get the tail from logs of a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :param tail: the size of the tail :type tail: int :return: A list of log messages :rtype: list[str] """ logs = self.connection.container.list_logs(resource_group, name, name, tail=tail) return logs.content.splitlines(True)
airflow/contrib/hooks/azure_container_instance_hook.py
apache/airflow
AzureContainerInstanceHook.delete
def delete(self, resource_group, name): self.connection.container_groups.delete(resource_group, name)
Delete a container group
def delete(self, resource_group, name): """ Delete a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str """ self.connection.container_groups.delete(resource_group, name)
airflow/contrib/hooks/azure_container_instance_hook.py
apache/airflow
AzureContainerInstanceHook.exists
def exists(self, resource_group, name): for container in self.connection.container_groups.list_by_resource_group(resource_group): if container.name == name: return True return False
Test if a container group exists
def exists(self, resource_group, name): """ Test if a container group exists :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str """ for container in self.connection.container_groups.list_by_resource_group(resource_group): if container.name == name: return True return False
airflow/contrib/hooks/azure_container_instance_hook.py
apache/airflow
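Taken together, the hook methods above support a simple poll-and-clean lifecycle. A minimal sketch, assuming a default Azure connection is configured for the hook and using illustrative resource names:

from airflow.contrib.hooks.azure_container_instance_hook import AzureContainerInstanceHook

hook = AzureContainerInstanceHook()  # assumes the default Azure connection id
if hook.exists('my-resource-group', 'my-container-group'):
    state, exit_code, details = hook.get_state_exitcode_details(
        'my-resource-group', 'my-container-group')
    print(state, exit_code, details)
    # fetch the last 100 log lines, then clean up the group
    for line in hook.get_logs('my-resource-group', 'my-container-group', tail=100):
        print(line, end='')
    hook.delete('my-resource-group', 'my-container-group')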
apply_defaults
def apply_defaults(func): sig_cache = signature(func) non_optional_args = { name for (name, param) in sig_cache.parameters.items() if param.default == param.empty and param.name != 'self' and param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)} @wraps(func) def wrapper(*args, **kwargs): if len(args) > 1: raise AirflowException( "Use keyword arguments when initializing operators") dag_args = {} dag_params = {} dag = kwargs.get('dag', None) or settings.CONTEXT_MANAGER_DAG if dag: dag_args = copy(dag.default_args) or {} dag_params = copy(dag.params) or {} params = {} if 'params' in kwargs: params = kwargs['params'] dag_params.update(params) default_args = {} if 'default_args' in kwargs: default_args = kwargs['default_args'] if 'params' in default_args: dag_params.update(default_args['params']) del default_args['params'] dag_args.update(default_args) default_args = dag_args for arg in sig_cache.parameters: if arg not in kwargs and arg in default_args: kwargs[arg] = default_args[arg] missing_args = list(non_optional_args - set(kwargs)) if missing_args: msg = "Argument {0} is required".format(missing_args) raise AirflowException(msg) kwargs['params'] = dag_params result = func(*args, **kwargs) return result return wrapper
Function decorator that looks for an argument named "default_args" and fills the unspecified arguments from it.
def apply_defaults(func): """ Function decorator that looks for an argument named "default_args", and fills the unspecified arguments from it. Since Python 2.* isn't clear about which arguments are missing when calling a function, and since this can be quite confusing with multi-level inheritance and argument defaults, this decorator also alerts with specific information about the missing arguments. """ # Cache inspect.signature for the wrapper closure to avoid calling it # at every decorated invocation. This is a separate sig_cache created # per decoration, i.e. each function decorated using apply_defaults will # have a different sig_cache. sig_cache = signature(func) non_optional_args = { name for (name, param) in sig_cache.parameters.items() if param.default == param.empty and param.name != 'self' and param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)} @wraps(func) def wrapper(*args, **kwargs): if len(args) > 1: raise AirflowException( "Use keyword arguments when initializing operators") dag_args = {} dag_params = {} dag = kwargs.get('dag', None) or settings.CONTEXT_MANAGER_DAG if dag: dag_args = copy(dag.default_args) or {} dag_params = copy(dag.params) or {} params = {} if 'params' in kwargs: params = kwargs['params'] dag_params.update(params) default_args = {} if 'default_args' in kwargs: default_args = kwargs['default_args'] if 'params' in default_args: dag_params.update(default_args['params']) del default_args['params'] dag_args.update(default_args) default_args = dag_args for arg in sig_cache.parameters: if arg not in kwargs and arg in default_args: kwargs[arg] = default_args[arg] missing_args = list(non_optional_args - set(kwargs)) if missing_args: msg = "Argument {0} is required".format(missing_args) raise AirflowException(msg) kwargs['params'] = dag_params result = func(*args, **kwargs) return result return wrapper
airflow/utils/decorators.py
apache/airflow
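In practice the decorator is invisible to DAG authors: any operator constructor wrapped with apply_defaults picks up unspecified arguments from the DAG's default_args. A minimal sketch of the observable behaviour, using standard Airflow usage rather than any new API:

from datetime import datetime
from airflow import DAG
from airflow.operators.bash_operator import BashOperator

default_args = {'owner': 'airflow', 'retries': 2}
dag = DAG('apply_defaults_demo', default_args=default_args,
          start_date=datetime(2019, 1, 1))

# `retries` is not passed here, so apply_defaults fills it in (2) from
# default_args; passing positional arguments would raise AirflowException.
task = BashOperator(task_id='echo', bash_command='echo hi', dag=dag)
assert task.retries == 2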
HiveToDruidTransfer.construct_ingest_query
def construct_ingest_query(self, static_path, columns): num_shards = self.num_shards target_partition_size = self.target_partition_size if self.target_partition_size == -1: if self.num_shards == -1: target_partition_size = DEFAULT_TARGET_PARTITION_SIZE else: num_shards = -1 metric_names = [m['fieldName'] for m in self.metric_spec if m['type'] != 'count'] dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim] ingest_query_dict = { "type": "index_hadoop", "spec": { "dataSchema": { "metricsSpec": self.metric_spec, "granularitySpec": { "queryGranularity": self.query_granularity, "intervals": self.intervals, "type": "uniform", "segmentGranularity": self.segment_granularity, }, "parser": { "type": "string", "parseSpec": { "columns": columns, "dimensionsSpec": { "dimensionExclusions": [], "dimensions": dimensions, "spatialDimensions": [] }, "timestampSpec": { "column": self.ts_dim, "format": "auto" }, "format": "tsv" } }, "dataSource": self.druid_datasource }, "tuningConfig": { "type": "hadoop", "jobProperties": { "mapreduce.job.user.classpath.first": "false", "mapreduce.map.output.compress": "false", "mapreduce.output.fileoutputformat.compress": "false", }, "partitionsSpec": { "type": "hashed", "targetPartitionSize": target_partition_size, "numShards": num_shards, }, }, "ioConfig": { "inputSpec": { "paths": static_path, "type": "static" }, "type": "hadoop" } } } if self.job_properties: ingest_query_dict['spec']['tuningConfig']['jobProperties'] \ .update(self.job_properties) if self.hadoop_dependency_coordinates: ingest_query_dict['hadoopDependencyCoordinates'] \ = self.hadoop_dependency_coordinates return ingest_query_dict
Builds an ingest query for an HDFS TSV load.
def construct_ingest_query(self, static_path, columns): """ Builds an ingest query for an HDFS TSV load. :param static_path: The path on hdfs where the data is :type static_path: str :param columns: List of all the columns that are available :type columns: list """ # backward compatibility for num_shards, # but target_partition_size is the default setting # and overwrites the num_shards num_shards = self.num_shards target_partition_size = self.target_partition_size if self.target_partition_size == -1: if self.num_shards == -1: target_partition_size = DEFAULT_TARGET_PARTITION_SIZE else: num_shards = -1 metric_names = [m['fieldName'] for m in self.metric_spec if m['type'] != 'count'] # Take all the columns, which are not the time dimension # or a metric, as the dimension columns dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim] ingest_query_dict = { "type": "index_hadoop", "spec": { "dataSchema": { "metricsSpec": self.metric_spec, "granularitySpec": { "queryGranularity": self.query_granularity, "intervals": self.intervals, "type": "uniform", "segmentGranularity": self.segment_granularity, }, "parser": { "type": "string", "parseSpec": { "columns": columns, "dimensionsSpec": { "dimensionExclusions": [], "dimensions": dimensions, # list of names "spatialDimensions": [] }, "timestampSpec": { "column": self.ts_dim, "format": "auto" }, "format": "tsv" } }, "dataSource": self.druid_datasource }, "tuningConfig": { "type": "hadoop", "jobProperties": { "mapreduce.job.user.classpath.first": "false", "mapreduce.map.output.compress": "false", "mapreduce.output.fileoutputformat.compress": "false", }, "partitionsSpec": { "type": "hashed", "targetPartitionSize": target_partition_size, "numShards": num_shards, }, }, "ioConfig": { "inputSpec": { "paths": static_path, "type": "static" }, "type": "hadoop" } } } if self.job_properties: ingest_query_dict['spec']['tuningConfig']['jobProperties'] \ .update(self.job_properties) if self.hadoop_dependency_coordinates: ingest_query_dict['hadoopDependencyCoordinates'] \ = self.hadoop_dependency_coordinates return ingest_query_dict
airflow/operators/hive_to_druid.py
apache/airflow
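The backward-compatibility block at the top of the method is easy to miss, so here it is extracted in isolation; -1 means "unset", and the default size used below is an assumption for illustration:

def resolve_partitioning(target_partition_size, num_shards, default_target=5000000):
    # target_partition_size wins over num_shards; a default size kicks in
    # when neither was set by the user.
    if target_partition_size == -1:
        if num_shards == -1:
            target_partition_size = default_target
    else:
        num_shards = -1
    return target_partition_size, num_shards

assert resolve_partitioning(-1, -1) == (5000000, -1)   # neither set: default size
assert resolve_partitioning(-1, 10) == (-1, 10)        # only num_shards set
assert resolve_partitioning(1000, 10) == (1000, -1)    # size overrides shards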
RedisPubSubSensor.poke
def poke(self, context): self.log.info('RedisPubSubSensor checking for message on channels: %s', self.channels) message = self.pubsub.get_message() self.log.info('Message %s from channel %s', message, self.channels) if message and message['type'] == 'message': context['ti'].xcom_push(key='message', value=message) self.pubsub.unsubscribe(self.channels) return True return False
Check for a message on the subscribed channels and write it to XCom with key ``message``. An example of a message: ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}``
def poke(self, context): """ Check for a message on the subscribed channels and write it to XCom with key ``message``. An example of a message: ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}`` :param context: the context object :type context: dict :return: ``True`` if a message (with type 'message') is available or ``False`` if not """ self.log.info('RedisPubSubSensor checking for message on channels: %s', self.channels) message = self.pubsub.get_message() self.log.info('Message %s from channel %s', message, self.channels) # Process only message types if message and message['type'] == 'message': context['ti'].xcom_push(key='message', value=message) self.pubsub.unsubscribe(self.channels) return True return False
airflow/contrib/sensors/redis_pub_sub_sensor.py
apache/airflow
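A hedged sketch of wiring the sensor into a DAG; the channel name and connection id are illustrative, and `dag` is assumed to be defined as usual:

from airflow.contrib.sensors.redis_pub_sub_sensor import RedisPubSubSensor

wait_for_message = RedisPubSubSensor(
    task_id='wait_for_message',
    channels='test',
    redis_conn_id='redis_default',
    dag=dag,
)
# Downstream tasks can then read the payload with
# ti.xcom_pull(task_ids='wait_for_message', key='message').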
DagRun.get_previous_dagrun
def get_previous_dagrun(self, session=None): return session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date < self.execution_date ).order_by( DagRun.execution_date.desc() ).first()
The previous DagRun, if there is one
def get_previous_dagrun(self, session=None): """The previous DagRun, if there is one""" return session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date < self.execution_date ).order_by( DagRun.execution_date.desc() ).first()
airflow/models/dagrun.py
apache/airflow
DagRun.get_previous_scheduled_dagrun
def get_previous_scheduled_dagrun(self, session=None): dag = self.get_dag() return session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date == dag.previous_schedule(self.execution_date) ).first()
The previous, SCHEDULED DagRun, if there is one
def get_previous_scheduled_dagrun(self, session=None): """The previous, SCHEDULED DagRun, if there is one""" dag = self.get_dag() return session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date == dag.previous_schedule(self.execution_date) ).first()
airflow/models/dagrun.py
apache/airflow
DagRun.update_state
def update_state(self, session=None): dag = self.get_dag() tis = self.get_task_instances(session=session) self.log.debug("Updating state for %s considering %s task(s)", self, len(tis)) for ti in list(tis): if ti.state == State.REMOVED: tis.remove(ti) else: ti.task = dag.get_task(ti.task_id) start_dttm = timezone.utcnow() unfinished_tasks = self.get_task_instances( state=State.unfinished(), session=session ) none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks) none_task_concurrency = all(t.task.task_concurrency is None for t in unfinished_tasks) if unfinished_tasks and none_depends_on_past and none_task_concurrency: no_dependencies_met = True for ut in unfinished_tasks: old_state = ut.state deps_met = ut.are_dependencies_met( dep_context=DepContext( flag_upstream_failed=True, ignore_in_retry_period=True, ignore_in_reschedule_period=True), session=session) if deps_met or old_state != ut.current_state(session=session): no_dependencies_met = False break duration = (timezone.utcnow() - start_dttm).total_seconds() * 1000 Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration) root_ids = [t.task_id for t in dag.roots] roots = [t for t in tis if t.task_id in root_ids] if (not unfinished_tasks and any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)): self.log.info('Marking run %s failed', self) self.set_state(State.FAILED) dag.handle_callback(self, success=False, reason='task_failure', session=session) elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED) for r in roots): self.log.info('Marking run %s successful', self) self.set_state(State.SUCCESS) dag.handle_callback(self, success=True, reason='success', session=session) elif (unfinished_tasks and none_depends_on_past and none_task_concurrency and no_dependencies_met): self.log.info('Deadlock; marking run %s failed', self) self.set_state(State.FAILED) dag.handle_callback(self, success=False, reason='all_tasks_deadlocked', session=session) else: self.set_state(State.RUNNING) self._emit_duration_stats_for_finished_state() session.merge(self) session.commit() return self.state
Determines the overall state of the DagRun based on the state of its TaskInstances.
def update_state(self, session=None): """ Determines the overall state of the DagRun based on the state of its TaskInstances. :return: State """ dag = self.get_dag() tis = self.get_task_instances(session=session) self.log.debug("Updating state for %s considering %s task(s)", self, len(tis)) for ti in list(tis): # skip in db? if ti.state == State.REMOVED: tis.remove(ti) else: ti.task = dag.get_task(ti.task_id) # pre-calculate # db is faster start_dttm = timezone.utcnow() unfinished_tasks = self.get_task_instances( state=State.unfinished(), session=session ) none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks) none_task_concurrency = all(t.task.task_concurrency is None for t in unfinished_tasks) # small speed up if unfinished_tasks and none_depends_on_past and none_task_concurrency: # todo: this can actually get pretty slow: one task costs between 0.01-0.15s no_dependencies_met = True for ut in unfinished_tasks: # We need to flag upstream and check for changes because upstream # failures/re-schedules can result in deadlock false positives old_state = ut.state deps_met = ut.are_dependencies_met( dep_context=DepContext( flag_upstream_failed=True, ignore_in_retry_period=True, ignore_in_reschedule_period=True), session=session) if deps_met or old_state != ut.current_state(session=session): no_dependencies_met = False break duration = (timezone.utcnow() - start_dttm).total_seconds() * 1000 Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration) root_ids = [t.task_id for t in dag.roots] roots = [t for t in tis if t.task_id in root_ids] # if all roots finished and at least one failed, the run failed if (not unfinished_tasks and any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)): self.log.info('Marking run %s failed', self) self.set_state(State.FAILED) dag.handle_callback(self, success=False, reason='task_failure', session=session) # if all roots succeeded and no unfinished tasks, the run succeeded elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED) for r in roots): self.log.info('Marking run %s successful', self) self.set_state(State.SUCCESS) dag.handle_callback(self, success=True, reason='success', session=session) # if *all tasks* are deadlocked, the run failed elif (unfinished_tasks and none_depends_on_past and none_task_concurrency and no_dependencies_met): self.log.info('Deadlock; marking run %s failed', self) self.set_state(State.FAILED) dag.handle_callback(self, success=False, reason='all_tasks_deadlocked', session=session) # finally, if the roots aren't done, the dag is still running else: self.set_state(State.RUNNING) self._emit_duration_stats_for_finished_state() # todo: determine if we want to use with_for_update to make sure to lock the run session.merge(self) session.commit() return self.state
airflow/models/dagrun.py
apache/airflow
DagRun.verify_integrity
def verify_integrity(self, session=None): from airflow.models.taskinstance import TaskInstance dag = self.get_dag() tis = self.get_task_instances(session=session) task_ids = [] for ti in tis: task_ids.append(ti.task_id) task = None try: task = dag.get_task(ti.task_id) except AirflowException: if ti.state == State.REMOVED: pass elif self.state is not State.RUNNING and not dag.partial: self.log.warning("Failed to get task '{}' for dag '{}'. " "Marking it as removed.".format(ti, dag)) Stats.incr( "task_removed_from_dag.{}".format(dag.dag_id), 1, 1) ti.state = State.REMOVED is_task_in_dag = task is not None should_restore_task = is_task_in_dag and ti.state == State.REMOVED if should_restore_task: self.log.info("Restoring task '{}' which was previously " "removed from DAG '{}'".format(ti, dag)) Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1) ti.state = State.NONE for task in six.itervalues(dag.task_dict): if task.start_date > self.execution_date and not self.is_backfill: continue if task.task_id not in task_ids: Stats.incr( "task_instance_created-{}".format(task.__class__.__name__), 1, 1) ti = TaskInstance(task, self.execution_date) session.add(ti) session.commit()
Verifies the DagRun by checking for removed tasks or tasks that are not in the database yet. It will set state to removed or add the task if required.
def verify_integrity(self, session=None): """ Verifies the DagRun by checking for removed tasks or tasks that are not in the database yet. It will set state to removed or add the task if required. """ from airflow.models.taskinstance import TaskInstance # Avoid circular import dag = self.get_dag() tis = self.get_task_instances(session=session) # check for removed or restored tasks task_ids = [] for ti in tis: task_ids.append(ti.task_id) task = None try: task = dag.get_task(ti.task_id) except AirflowException: if ti.state == State.REMOVED: pass # ti has already been removed, just ignore it elif self.state is not State.RUNNING and not dag.partial: self.log.warning("Failed to get task '{}' for dag '{}'. " "Marking it as removed.".format(ti, dag)) Stats.incr( "task_removed_from_dag.{}".format(dag.dag_id), 1, 1) ti.state = State.REMOVED is_task_in_dag = task is not None should_restore_task = is_task_in_dag and ti.state == State.REMOVED if should_restore_task: self.log.info("Restoring task '{}' which was previously " "removed from DAG '{}'".format(ti, dag)) Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1) ti.state = State.NONE # check for missing tasks for task in six.itervalues(dag.task_dict): if task.start_date > self.execution_date and not self.is_backfill: continue if task.task_id not in task_ids: Stats.incr( "task_instance_created-{}".format(task.__class__.__name__), 1, 1) ti = TaskInstance(task, self.execution_date) session.add(ti) session.commit()
airflow/models/dagrun.py
apache/airflow
jenkins_request_with_headers
def jenkins_request_with_headers(jenkins_server, req): try: response = jenkins_server.jenkins_request(req) response_body = response.content response_headers = response.headers if response_body is None: raise jenkins.EmptyResponseException( "Error communicating with server[%s]: " "empty response" % jenkins_server.server) return {'body': response_body.decode('utf-8'), 'headers': response_headers} except HTTPError as e: if e.code in [401, 403, 500]: raise JenkinsException( 'Error in request. ' + 'Possibly authentication failed [%s]: %s' % ( e.code, e.msg) ) elif e.code == 404: raise jenkins.NotFoundException('Requested item could not be found') else: raise except socket.timeout as e: raise jenkins.TimeoutException('Error in request: %s' % e) except URLError as e: if str(e.reason) == "timed out": raise jenkins.TimeoutException('Error in request: %s' % e.reason) raise JenkinsException('Error in request: %s' % e.reason)
We need the headers in addition to the response body in order to get the location from them. This function uses the jenkins_request method from the python-jenkins library with just the return call changed.
def jenkins_request_with_headers(jenkins_server, req): """ We need the headers in addition to the response body in order to get the location from them. This function uses the jenkins_request method from the python-jenkins library with just the return call changed :param jenkins_server: The server to query :param req: The request to execute :return: Dict containing the response body (key 'body') and the headers coming along (key 'headers') """ try: response = jenkins_server.jenkins_request(req) response_body = response.content response_headers = response.headers if response_body is None: raise jenkins.EmptyResponseException( "Error communicating with server[%s]: " "empty response" % jenkins_server.server) return {'body': response_body.decode('utf-8'), 'headers': response_headers} except HTTPError as e: # Jenkins's funky authentication means it's nigh impossible to # distinguish errors. if e.code in [401, 403, 500]: # six.moves.urllib.error.HTTPError provides a 'reason' # attribute for all Python versions except 2.6. # Falling back to HTTPError.msg since it contains the # same info as reason raise JenkinsException( 'Error in request. ' + 'Possibly authentication failed [%s]: %s' % ( e.code, e.msg) ) elif e.code == 404: raise jenkins.NotFoundException('Requested item could not be found') else: raise except socket.timeout as e: raise jenkins.TimeoutException('Error in request: %s' % e) except URLError as e: # python 2.6 compatibility to ensure same exception raised # since URLError wraps a socket timeout on python 2.6. if str(e.reason) == "timed out": raise jenkins.TimeoutException('Error in request: %s' % e.reason) raise JenkinsException('Error in request: %s' % e.reason)
airflow/contrib/operators/jenkins_job_trigger_operator.py
apache/airflow
conditionally_trigger
def conditionally_trigger(context, dag_run_obj): c_p = context['params']['condition_param'] print("Controller DAG : conditionally_trigger = {}".format(c_p)) if context['params']['condition_param']: dag_run_obj.payload = {'message': context['params']['message']} pp.pprint(dag_run_obj.payload) return dag_run_obj
This function decides whether or not to trigger the remote DAG
def conditionally_trigger(context, dag_run_obj): """This function decides whether or not to trigger the remote DAG""" c_p = context['params']['condition_param'] print("Controller DAG : conditionally_trigger = {}".format(c_p)) if context['params']['condition_param']: dag_run_obj.payload = {'message': context['params']['message']} pp.pprint(dag_run_obj.payload) return dag_run_obj
airflow/example_dags/example_trigger_controller_dag.py
apache/airflow
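The callable above is meant to be handed to a TriggerDagRunOperator, which calls it with the context and a prepared dag_run_obj; returning the object triggers the run, returning None skips it. A sketch in the style of the example controller DAG (ids illustrative, `dag` assumed):

from airflow.operators.dagrun_operator import TriggerDagRunOperator

trigger = TriggerDagRunOperator(
    task_id='test_trigger_dagrun',
    trigger_dag_id='example_trigger_target_dag',
    python_callable=conditionally_trigger,
    params={'condition_param': True, 'message': 'Hello World'},
    dag=dag,
)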
DatadogHook.send_metric
def send_metric(self, metric_name, datapoint, tags=None, type_=None, interval=None): response = api.Metric.send( metric=metric_name, points=datapoint, host=self.host, tags=tags, type=type_, interval=interval) self.validate_response(response) return response
Sends a single datapoint metric to DataDog
def send_metric(self, metric_name, datapoint, tags=None, type_=None, interval=None): """ Sends a single datapoint metric to DataDog :param metric_name: The name of the metric :type metric_name: str :param datapoint: A single integer or float related to the metric :type datapoint: int or float :param tags: A list of tags associated with the metric :type tags: list :param type_: Type of your metric: gauge, rate, or count :type type_: str :param interval: If the type of the metric is rate or count, define the corresponding interval :type interval: int """ response = api.Metric.send( metric=metric_name, points=datapoint, host=self.host, tags=tags, type=type_, interval=interval) self.validate_response(response) return response
airflow/contrib/hooks/datadog_hook.py
apache/airflow
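A minimal hedged usage of the hook; the connection id below is Airflow's conventional default, and the metric name and tags are illustrative:

from airflow.contrib.hooks.datadog_hook import DatadogHook

hook = DatadogHook(datadog_conn_id='datadog_default')
hook.send_metric(
    metric_name='airflow.example.rows_loaded',
    datapoint=1234,
    tags=['env:dev'],
    type_='gauge',
)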
DatadogHook.query_metric
def query_metric(self, query, from_seconds_ago, to_seconds_ago): now = int(time.time()) response = api.Metric.query( start=now - from_seconds_ago, end=now - to_seconds_ago, query=query) self.validate_response(response) return response
Queries Datadog for a specific metric, potentially with some function applied to it, and returns the results.
def query_metric(self, query, from_seconds_ago, to_seconds_ago): """ Queries Datadog for a specific metric, potentially with some function applied to it, and returns the results. :param query: The Datadog query to execute (see Datadog docs) :type query: str :param from_seconds_ago: How many seconds ago to start querying for. :type from_seconds_ago: int :param to_seconds_ago: Up to how many seconds ago to query for. :type to_seconds_ago: int """ now = int(time.time()) response = api.Metric.query( start=now - from_seconds_ago, end=now - to_seconds_ago, query=query) self.validate_response(response) return response
airflow/contrib/hooks/datadog_hook.py
apache/airflow
DagBag.get_dag
def get_dag(self, dag_id): from airflow.models.dag import DagModel root_dag_id = dag_id if dag_id in self.dags: dag = self.dags[dag_id] if dag.is_subdag: root_dag_id = dag.parent_dag.dag_id orm_dag = DagModel.get_current(root_dag_id) if orm_dag and ( root_dag_id not in self.dags or ( orm_dag.last_expired and dag.last_loaded < orm_dag.last_expired ) ): found_dags = self.process_file( filepath=orm_dag.fileloc, only_if_updated=False) if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]: return self.dags[dag_id] elif dag_id in self.dags: del self.dags[dag_id] return self.dags.get(dag_id)
Gets the DAG out of the dictionary, and refreshes it if expired
def get_dag(self, dag_id): """ Gets the DAG out of the dictionary, and refreshes it if expired """ from airflow.models.dag import DagModel # Avoid circular import # If asking for a known subdag, we want to refresh the parent root_dag_id = dag_id if dag_id in self.dags: dag = self.dags[dag_id] if dag.is_subdag: root_dag_id = dag.parent_dag.dag_id # If the dag corresponding to root_dag_id is absent or expired orm_dag = DagModel.get_current(root_dag_id) if orm_dag and ( root_dag_id not in self.dags or ( orm_dag.last_expired and dag.last_loaded < orm_dag.last_expired ) ): # Reprocess source file found_dags = self.process_file( filepath=orm_dag.fileloc, only_if_updated=False) # If the source file no longer exports `dag_id`, delete it from self.dags if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]: return self.dags[dag_id] elif dag_id in self.dags: del self.dags[dag_id] return self.dags.get(dag_id)
airflow/models/dagbag.py
apache/airflow
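Typical read-side usage, assuming the configured dags folder contains a DAG with the given id (the id is illustrative):

from airflow.models import DagBag

dagbag = DagBag()  # parses the configured dags folder
dag = dagbag.get_dag('example_dag')  # refreshed if the DB marks it expired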
DagBag.kill_zombies
def kill_zombies(self, zombies, session=None): from airflow.models.taskinstance import TaskInstance for zombie in zombies: if zombie.dag_id in self.dags: dag = self.dags[zombie.dag_id] if zombie.task_id in dag.task_ids: task = dag.get_task(zombie.task_id) ti = TaskInstance(task, zombie.execution_date) ti.start_date = zombie.start_date ti.end_date = zombie.end_date ti.try_number = zombie.try_number ti.state = zombie.state ti.test_mode = configuration.getboolean('core', 'unit_test_mode') ti.handle_failure("{} detected as zombie".format(ti), ti.test_mode, ti.get_template_context()) self.log.info( 'Marked zombie job %s as %s', ti, ti.state) Stats.incr('zombies_killed') session.commit()
Fail given zombie tasks, which are tasks that haven't had a heartbeat for too long, in the current DagBag.
def kill_zombies(self, zombies, session=None): """ Fail given zombie tasks, which are tasks that haven't had a heartbeat for too long, in the current DagBag. :param zombies: zombie task instances to kill. :type zombies: airflow.utils.dag_processing.SimpleTaskInstance :param session: DB session. :type session: sqlalchemy.orm.session.Session """ from airflow.models.taskinstance import TaskInstance # Avoid circular import for zombie in zombies: if zombie.dag_id in self.dags: dag = self.dags[zombie.dag_id] if zombie.task_id in dag.task_ids: task = dag.get_task(zombie.task_id) ti = TaskInstance(task, zombie.execution_date) # Get properties needed for failure handling from SimpleTaskInstance. ti.start_date = zombie.start_date ti.end_date = zombie.end_date ti.try_number = zombie.try_number ti.state = zombie.state ti.test_mode = configuration.getboolean('core', 'unit_test_mode') ti.handle_failure("{} detected as zombie".format(ti), ti.test_mode, ti.get_template_context()) self.log.info( 'Marked zombie job %s as %s', ti, ti.state) Stats.incr('zombies_killed') session.commit()
airflow/models/dagbag.py
apache/airflow
DagBag.bag_dag
def bag_dag(self, dag, parent_dag, root_dag): dag.test_cycle() dag.resolve_template_files() dag.last_loaded = timezone.utcnow() for task in dag.tasks: settings.policy(task) subdags = dag.subdags try: for subdag in subdags: subdag.full_filepath = dag.full_filepath subdag.parent_dag = dag subdag.is_subdag = True self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag) self.dags[dag.dag_id] = dag self.log.debug('Loaded DAG %s', dag) except AirflowDagCycleException as cycle_exception: self.log.exception('Exception bagging dag: %s', dag.dag_id) if dag == root_dag: for subdag in subdags: if subdag.dag_id in self.dags: del self.dags[subdag.dag_id] raise cycle_exception
Adds the DAG into the bag, recurses into sub dags. Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags
def bag_dag(self, dag, parent_dag, root_dag): """ Adds the DAG into the bag, recurses into sub dags. Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags """ dag.test_cycle() # throws if a task cycle is found dag.resolve_template_files() dag.last_loaded = timezone.utcnow() for task in dag.tasks: settings.policy(task) subdags = dag.subdags try: for subdag in subdags: subdag.full_filepath = dag.full_filepath subdag.parent_dag = dag subdag.is_subdag = True self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag) self.dags[dag.dag_id] = dag self.log.debug('Loaded DAG %s', dag) except AirflowDagCycleException as cycle_exception: # There was an error in bagging the dag. Remove it from the list of dags self.log.exception('Exception bagging dag: %s', dag.dag_id) # Only necessary at the root level since DAG.subdags automatically # performs DFS to search through all subdags if dag == root_dag: for subdag in subdags: if subdag.dag_id in self.dags: del self.dags[subdag.dag_id] raise cycle_exception
airflow/models/dagbag.py
apache/airflow
DagBag.dagbag_report
def dagbag_report(self): report = textwrap.dedent("""\n ------------------------------------------------------------------- DagBag loading stats for {dag_folder} ------------------------------------------------------------------- Number of DAGs: {dag_num} Total task number: {task_num} DagBag parsing time: {duration} {table} """) stats = self.dagbag_stats return report.format( dag_folder=self.dag_folder, duration=sum([o.duration for o in stats]), dag_num=sum([o.dag_num for o in stats]), task_num=sum([o.task_num for o in stats]), table=pprinttable(stats), )
Returns a formatted report of DagBag loading stats
def dagbag_report(self): """Returns a formatted report of DagBag loading stats""" report = textwrap.dedent("""\n ------------------------------------------------------------------- DagBag loading stats for {dag_folder} ------------------------------------------------------------------- Number of DAGs: {dag_num} Total task number: {task_num} DagBag parsing time: {duration} {table} """) stats = self.dagbag_stats return report.format( dag_folder=self.dag_folder, duration=sum([o.duration for o in stats]), dag_num=sum([o.dag_num for o in stats]), task_num=sum([o.task_num for o in stats]), table=pprinttable(stats), )
airflow/models/dagbag.py
apache/airflow
ds_add
def ds_add(ds, days): ds = datetime.strptime(ds, '%Y-%m-%d') if days: ds = ds + timedelta(days) return ds.isoformat()[:10]
Add or subtract days from a YYYY-MM-DD
def ds_add(ds, days): """ Add or subtract days from a YYYY-MM-DD :param ds: anchor date in ``YYYY-MM-DD`` format to add to :type ds: str :param days: number of days to add to the ds, you can use negative values :type days: int >>> ds_add('2015-01-01', 5) '2015-01-06' >>> ds_add('2015-01-06', -5) '2015-01-01' """ ds = datetime.strptime(ds, '%Y-%m-%d') if days: ds = ds + timedelta(days) return ds.isoformat()[:10]
airflow/macros/__init__.py
apache/airflow
ds_format
def ds_format(ds, input_format, output_format): return datetime.strptime(ds, input_format).strftime(output_format)
Takes an input string and outputs another string as specified in the output format
def ds_format(ds, input_format, output_format): """ Takes an input string and outputs another string as specified in the output format :param ds: input string which contains a date :type ds: str :param input_format: input string format. E.g. %Y-%m-%d :type input_format: str :param output_format: output string format E.g. %Y-%m-%d :type output_format: str >>> ds_format('2015-01-01', "%Y-%m-%d", "%m-%d-%y") '01-01-15' >>> ds_format('1/5/2015', "%m/%d/%Y", "%Y-%m-%d") '2015-01-05' """ return datetime.strptime(ds, input_format).strftime(output_format)
airflow/macros/__init__.py
apache/airflow
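Both macros above are more commonly invoked from Jinja-templated fields than called directly; they are exposed under the `macros.` namespace. A sketch, assuming a `dag` object defined as usual:

from airflow.operators.bash_operator import BashOperator

templated = BashOperator(
    task_id='print_shifted_date',
    bash_command="echo {{ macros.ds_add(ds, 7) }} "
                 "{{ macros.ds_format(ds, '%Y-%m-%d', '%d-%m-%Y') }}",
    dag=dag,
)
# For an execution date of 2019-01-01 this renders
# "echo 2019-01-08 01-01-2019".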
HdfsSensorRegex.poke
def poke(self, context): sb = self.hook(self.hdfs_conn_id).get_conn() self.log.info( 'Poking for %s to be a directory with files matching %s', self.filepath, self.regex.pattern ) result = [f for f in sb.ls([self.filepath], include_toplevel=False) if f['file_type'] == 'f' and self.regex.match(f['path'].replace('%s/' % self.filepath, ''))] result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying) result = self.filter_for_filesize(result, self.file_size) return bool(result)
Poke for files matching self.regex in a directory
def poke(self, context): """ Poke for files matching self.regex in a directory :return: Bool depending on the search criteria """ sb = self.hook(self.hdfs_conn_id).get_conn() self.log.info( 'Poking for %s to be a directory with files matching %s', self.filepath, self.regex.pattern ) result = [f for f in sb.ls([self.filepath], include_toplevel=False) if f['file_type'] == 'f' and self.regex.match(f['path'].replace('%s/' % self.filepath, ''))] result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying) result = self.filter_for_filesize(result, self.file_size) return bool(result)
airflow/contrib/sensors/hdfs_sensor.py
apache/airflow
HdfsSensorFolder.poke
def poke(self, context): sb = self.hook(self.hdfs_conn_id).get_conn() result = [f for f in sb.ls([self.filepath], include_toplevel=True)] result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying) result = self.filter_for_filesize(result, self.file_size) if self.be_empty: self.log.info('Poking for filepath %s to be an empty directory', self.filepath) return len(result) == 1 and result[0]['path'] == self.filepath else: self.log.info('Poking for filepath %s to be a non-empty directory', self.filepath) result.pop(0) return bool(result) and result[0]['file_type'] == 'f'
Poke for a non-empty directory
def poke(self, context): """ Poke for a non-empty directory :return: Bool depending on the search criteria """ sb = self.hook(self.hdfs_conn_id).get_conn() result = [f for f in sb.ls([self.filepath], include_toplevel=True)] result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying) result = self.filter_for_filesize(result, self.file_size) if self.be_empty: self.log.info('Poking for filepath %s to be an empty directory', self.filepath) return len(result) == 1 and result[0]['path'] == self.filepath else: self.log.info('Poking for filepath %s to be a non-empty directory', self.filepath) result.pop(0) return bool(result) and result[0]['file_type'] == 'f'
airflow/contrib/sensors/hdfs_sensor.py
apache/airflow
clear_task_instances
def clear_task_instances(tis, session, activate_dag_runs=True, dag=None, ): job_ids = [] for ti in tis: if ti.state == State.RUNNING: if ti.job_id: ti.state = State.SHUTDOWN job_ids.append(ti.job_id) else: task_id = ti.task_id if dag and dag.has_task(task_id): task = dag.get_task(task_id) task_retries = task.retries ti.max_tries = ti.try_number + task_retries - 1 else: ti.max_tries = max(ti.max_tries, ti.try_number - 1) ti.state = State.NONE session.merge(ti) if job_ids: from airflow.jobs import BaseJob as BJ for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all(): job.state = State.SHUTDOWN if activate_dag_runs and tis: from airflow.models.dagrun import DagRun drs = session.query(DagRun).filter( DagRun.dag_id.in_({ti.dag_id for ti in tis}), DagRun.execution_date.in_({ti.execution_date for ti in tis}), ).all() for dr in drs: dr.state = State.RUNNING dr.start_date = timezone.utcnow()
Clears a set of task instances, but makes sure the running ones get killed.
def clear_task_instances(tis, session, activate_dag_runs=True, dag=None, ): """ Clears a set of task instances, but makes sure the running ones get killed. :param tis: a list of task instances :param session: current session :param activate_dag_runs: flag to check for active dag run :param dag: DAG object """ job_ids = [] for ti in tis: if ti.state == State.RUNNING: if ti.job_id: ti.state = State.SHUTDOWN job_ids.append(ti.job_id) else: task_id = ti.task_id if dag and dag.has_task(task_id): task = dag.get_task(task_id) task_retries = task.retries ti.max_tries = ti.try_number + task_retries - 1 else: # Ignore errors when updating max_tries if dag is None or # task not found in dag since database records could be # outdated. We make max_tries the maximum value of its # original max_tries or the current task try number. ti.max_tries = max(ti.max_tries, ti.try_number - 1) ti.state = State.NONE session.merge(ti) if job_ids: from airflow.jobs import BaseJob as BJ for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all(): job.state = State.SHUTDOWN if activate_dag_runs and tis: from airflow.models.dagrun import DagRun # Avoid circular import drs = session.query(DagRun).filter( DagRun.dag_id.in_({ti.dag_id for ti in tis}), DagRun.execution_date.in_({ti.execution_date for ti in tis}), ).all() for dr in drs: dr.state = State.RUNNING dr.start_date = timezone.utcnow()
airflow/models/taskinstance.py
apache/airflow
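The max_tries bookkeeping is the subtle part of the function above: clearing a task instance re-bases its retry budget rather than resetting the counter. The arithmetic in isolation, with illustrative numbers:

# Mirrors `ti.max_tries = ti.try_number + task_retries - 1` above: the
# budget is re-based on tries already used, so the cleared task instance
# can be retried `task_retries` more times regardless of its history.
try_number = 3     # the task has already been attempted three times
task_retries = 2   # the task's configured `retries`
new_max_tries = try_number + task_retries - 1
assert new_max_tries == 4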
TaskInstance.try_number
def try_number(self): if self.state == State.RUNNING: return self._try_number return self._try_number + 1
Return the try number that this task instance will have when it is actually run. If the TI is currently running, this will match the column in the database; in all other cases this will be incremented.
def try_number(self): """ Return the try number that this task instance will have when it is actually run. If the TI is currently running, this will match the column in the database; in all other cases this will be incremented. """ # This is designed so that task logs end up in the right file. if self.state == State.RUNNING: return self._try_number return self._try_number + 1
airflow/models/taskinstance.py
apache/airflow
TaskInstance.generate_command
def generate_command(dag_id, task_id, execution_date, mark_success=False, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, local=False, pickle_id=None, file_path=None, raw=False, job_id=None, pool=None, cfg_path=None ): iso = execution_date.isoformat() cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)] cmd.extend(["--mark_success"]) if mark_success else None cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None cmd.extend(["--job_id", str(job_id)]) if job_id else None cmd.extend(["-A"]) if ignore_all_deps else None cmd.extend(["-i"]) if ignore_task_deps else None cmd.extend(["-I"]) if ignore_depends_on_past else None cmd.extend(["--force"]) if ignore_ti_state else None cmd.extend(["--local"]) if local else None cmd.extend(["--pool", pool]) if pool else None cmd.extend(["--raw"]) if raw else None cmd.extend(["-sd", file_path]) if file_path else None cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None return cmd
Generates the shell command required to execute this task instance.
def generate_command(dag_id, task_id, execution_date, mark_success=False, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, local=False, pickle_id=None, file_path=None, raw=False, job_id=None, pool=None, cfg_path=None ): """ Generates the shell command required to execute this task instance. :param dag_id: DAG ID :type dag_id: unicode :param task_id: Task ID :type task_id: unicode :param execution_date: Execution date for the task :type execution_date: datetime :param mark_success: Whether to mark the task as successful :type mark_success: bool :param ignore_all_deps: Ignore all ignorable dependencies. Overrides the other ignore_* parameters. :type ignore_all_deps: bool :param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs (e.g. for Backfills) :type ignore_depends_on_past: bool :param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past and trigger rule :type ignore_task_deps: bool :param ignore_ti_state: Ignore the task instance's previous failure/success :type ignore_ti_state: bool :param local: Whether to run the task locally :type local: bool :param pickle_id: If the DAG was serialized to the DB, the ID associated with the pickled DAG :type pickle_id: unicode :param file_path: path to the file containing the DAG definition :param raw: raw mode (needs more details) :param job_id: job ID (needs more details) :param pool: the Airflow pool that the task should run in :type pool: unicode :param cfg_path: the Path to the configuration file :type cfg_path: basestring :return: shell command that can be used to run the task instance """ iso = execution_date.isoformat() cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)] cmd.extend(["--mark_success"]) if mark_success else None cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None cmd.extend(["--job_id", str(job_id)]) if job_id else None cmd.extend(["-A"]) if ignore_all_deps else None cmd.extend(["-i"]) if ignore_task_deps else None cmd.extend(["-I"]) if ignore_depends_on_past else None cmd.extend(["--force"]) if ignore_ti_state else None cmd.extend(["--local"]) if local else None cmd.extend(["--pool", pool]) if pool else None cmd.extend(["--raw"]) if raw else None cmd.extend(["-sd", file_path]) if file_path else None cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None return cmd
airflow/models/taskinstance.py
apache/airflow
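Since generate_command is a static helper, its output is easy to inspect directly; the DAG and task ids below are illustrative:

from datetime import datetime
from airflow.models import TaskInstance

cmd = TaskInstance.generate_command(
    'example_dag', 'example_task', datetime(2019, 1, 1),
    local=True, pool='default_pool')
assert cmd == ['airflow', 'run', 'example_dag', 'example_task',
               '2019-01-01T00:00:00', '--local', '--pool', 'default_pool']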
TaskInstance.current_state
def current_state(self, session=None): TI = TaskInstance ti = session.query(TI).filter( TI.dag_id == self.dag_id, TI.task_id == self.task_id, TI.execution_date == self.execution_date, ).all() if ti: state = ti[0].state else: state = None return state
Get the very latest state from the database. If a session is passed, we use it and looking up the state becomes part of the session; otherwise a new session is used.
def current_state(self, session=None): """ Get the very latest state from the database. If a session is passed, we use it and looking up the state becomes part of the session; otherwise a new session is used. """ TI = TaskInstance ti = session.query(TI).filter( TI.dag_id == self.dag_id, TI.task_id == self.task_id, TI.execution_date == self.execution_date, ).all() if ti: state = ti[0].state else: state = None return state
airflow/models/taskinstance.py
apache/airflow
TaskInstance.error
def error(self, session=None): self.log.error("Recording the task instance as FAILED") self.state = State.FAILED session.merge(self) session.commit()
Forces the task instance's state to FAILED in the database.
def error(self, session=None): """ Forces the task instance's state to FAILED in the database. """ self.log.error("Recording the task instance as FAILED") self.state = State.FAILED session.merge(self) session.commit()
airflow/models/taskinstance.py
apache/airflow
TaskInstance.refresh_from_db
def refresh_from_db(self, session=None, lock_for_update=False): TI = TaskInstance qry = session.query(TI).filter( TI.dag_id == self.dag_id, TI.task_id == self.task_id, TI.execution_date == self.execution_date) if lock_for_update: ti = qry.with_for_update().first() else: ti = qry.first() if ti: self.state = ti.state self.start_date = ti.start_date self.end_date = ti.end_date self.try_number = ti._try_number self.max_tries = ti.max_tries self.hostname = ti.hostname self.pid = ti.pid self.executor_config = ti.executor_config else: self.state = None
Refreshes the task instance from the database based on the primary key
def refresh_from_db(self, session=None, lock_for_update=False): """ Refreshes the task instance from the database based on the primary key :param lock_for_update: if True, indicates that the database should lock the TaskInstance (issuing a FOR UPDATE clause) until the session is committed. """ TI = TaskInstance qry = session.query(TI).filter( TI.dag_id == self.dag_id, TI.task_id == self.task_id, TI.execution_date == self.execution_date) if lock_for_update: ti = qry.with_for_update().first() else: ti = qry.first() if ti: self.state = ti.state self.start_date = ti.start_date self.end_date = ti.end_date # Get the raw value of the try_number column; don't read through the # accessor here, otherwise it will already be incremented by one. self.try_number = ti._try_number self.max_tries = ti.max_tries self.hostname = ti.hostname self.pid = ti.pid self.executor_config = ti.executor_config else: self.state = None
airflow/models/taskinstance.py
apache/airflow
TaskInstance.clear_xcom_data
def clear_xcom_data(self, session=None): session.query(XCom).filter( XCom.dag_id == self.dag_id, XCom.task_id == self.task_id, XCom.execution_date == self.execution_date ).delete() session.commit()
Clears all XCom data from the database for the task instance
def clear_xcom_data(self, session=None): """ Clears all XCom data from the database for the task instance """ session.query(XCom).filter( XCom.dag_id == self.dag_id, XCom.task_id == self.task_id, XCom.execution_date == self.execution_date ).delete() session.commit()
airflow/models/taskinstance.py
apache/airflow
TaskInstance.next_retry_datetime
def next_retry_datetime(self): delay = self.task.retry_delay if self.task.retry_exponential_backoff: min_backoff = int(delay.total_seconds() * (2 ** (self.try_number - 2))) hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id, self.task_id, self.execution_date, self.try_number) .encode('utf-8')).hexdigest(), 16) modded_hash = min_backoff + hash % min_backoff delay_backoff_in_seconds = min( modded_hash, timedelta.max.total_seconds() - 1 ) delay = timedelta(seconds=delay_backoff_in_seconds) if self.task.max_retry_delay: delay = min(self.task.max_retry_delay, delay) return self.end_date + delay
Get datetime of the next retry if the task instance fails. For exponential backoff, retry_delay is used as base and will be converted to seconds.
def next_retry_datetime(self): """ Get datetime of the next retry if the task instance fails. For exponential backoff, retry_delay is used as base and will be converted to seconds. """ delay = self.task.retry_delay if self.task.retry_exponential_backoff: min_backoff = int(delay.total_seconds() * (2 ** (self.try_number - 2))) # deterministic per task instance hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id, self.task_id, self.execution_date, self.try_number) .encode('utf-8')).hexdigest(), 16) # between 0.5 * delay * (2^retry_number) and 1.0 * delay * (2^retry_number) modded_hash = min_backoff + hash % min_backoff # timedelta has a maximum representable value. The exponentiation # here means this value can be exceeded after a certain number # of tries (around 50 if the initial delay is 1s, even fewer if # the delay is larger). Cap the value here before creating a # timedelta object so the operation doesn't fail. delay_backoff_in_seconds = min( modded_hash, timedelta.max.total_seconds() - 1 ) delay = timedelta(seconds=delay_backoff_in_seconds) if self.task.max_retry_delay: delay = min(self.task.max_retry_delay, delay) return self.end_date + delay
airflow/models/taskinstance.py
apache/airflow
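The exponential window above is deterministic per task instance, which keeps retry timing stable across scheduler restarts. The arithmetic for one case, worked by hand:

from datetime import timedelta

delay = timedelta(minutes=5)   # the task's retry_delay
try_number = 4                 # the fourth attempt just failed
min_backoff = int(delay.total_seconds() * (2 ** (try_number - 2)))
assert min_backoff == 1200
# The hash-based jitter adds 0..min_backoff-1 seconds, so the wait before
# the next try lands between 20 and just under 40 minutes.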
TaskInstance.ready_for_retry
def ready_for_retry(self): return (self.state == State.UP_FOR_RETRY and self.next_retry_datetime() < timezone.utcnow())
Checks whether the task instance is in the right state and timeframe to be retried.
def ready_for_retry(self): """ Checks whether the task instance is in the right state and timeframe to be retried. """ return (self.state == State.UP_FOR_RETRY and self.next_retry_datetime() < timezone.utcnow())
airflow/models/taskinstance.py
apache/airflow
TaskInstance.xcom_push
def xcom_push( self, key, value, execution_date=None): if execution_date and execution_date < self.execution_date: raise ValueError( 'execution_date can not be in the past (current ' 'execution_date is {}; received {})'.format( self.execution_date, execution_date)) XCom.set( key=key, value=value, task_id=self.task_id, dag_id=self.dag_id, execution_date=execution_date or self.execution_date)
Make an XCom available for tasks to pull.
def xcom_push( self, key, value, execution_date=None): """ Make an XCom available for tasks to pull. :param key: A key for the XCom :type key: str :param value: A value for the XCom. The value is pickled and stored in the database. :type value: any pickleable object :param execution_date: if provided, the XCom will not be visible until this date. This can be used, for example, to send a message to a task on a future date without it being immediately visible. :type execution_date: datetime """ if execution_date and execution_date < self.execution_date: raise ValueError( 'execution_date can not be in the past (current ' 'execution_date is {}; received {})'.format( self.execution_date, execution_date)) XCom.set( key=key, value=value, task_id=self.task_id, dag_id=self.dag_id, execution_date=execution_date or self.execution_date)
airflow/models/taskinstance.py
apache/airflow
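A typical call site is inside a PythonOperator callable (with provide_context=True in Airflow 1.x), where the running TaskInstance arrives in the template context; the key and value below are illustrative:

def push_result(**context):
    # `ti` is the TaskInstance executing this callable
    context['ti'].xcom_push(key='row_count', value=42)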
TaskInstance.init_run_context
def init_run_context(self, raw=False): self.raw = raw self._set_context(self)
Sets the log context.
def init_run_context(self, raw=False): """ Sets the log context. """ self.raw = raw self._set_context(self)
airflow/models/taskinstance.py
apache/airflow
WasbTaskHandler.close
def close(self): if self.closed: return super().close() if not self.upload_on_close: return local_loc = os.path.join(self.local_base, self.log_relative_path) remote_loc = os.path.join(self.remote_base, self.log_relative_path) if os.path.exists(local_loc): with open(local_loc, 'r') as logfile: log = logfile.read() self.wasb_write(log, remote_loc, append=True) if self.delete_local_copy: shutil.rmtree(os.path.dirname(local_loc)) self.closed = True
Close and upload the local log file to Wasb remote storage.
def close(self): """ Close and upload the local log file to Wasb remote storage. """ # When the application exits, the system shuts down all handlers by # calling the close method. Here we check if the logger is already # closed to prevent uploading the log to remote storage multiple # times when `logging.shutdown` is called. if self.closed: return super().close() if not self.upload_on_close: return local_loc = os.path.join(self.local_base, self.log_relative_path) remote_loc = os.path.join(self.remote_base, self.log_relative_path) if os.path.exists(local_loc): # read log and remove old logs to get just the latest additions with open(local_loc, 'r') as logfile: log = logfile.read() self.wasb_write(log, remote_loc, append=True) if self.delete_local_copy: shutil.rmtree(os.path.dirname(local_loc)) # Mark closed so we don't double write if close is called twice self.closed = True
airflow/utils/log/wasb_task_handler.py
apache/airflow
GceHook.get_conn
def get_conn(self): if not self._conn: http_authorized = self._authorize() self._conn = build('compute', self.api_version, http=http_authorized, cache_discovery=False) return self._conn
Retrieves connection to Google Compute Engine.
def get_conn(self): """ Retrieves connection to Google Compute Engine. :return: Google Compute Engine services object :rtype: dict """ if not self._conn: http_authorized = self._authorize() self._conn = build('compute', self.api_version, http=http_authorized, cache_discovery=False) return self._conn
airflow/contrib/hooks/gcp_compute_hook.py
apache/airflow
GceHook.start_instance
def start_instance(self, zone, resource_id, project_id=None): response = self.get_conn().instances().start( project=project_id, zone=zone, instance=resource_id ).execute(num_retries=self.num_retries) try: operation_name = response["name"] except KeyError: raise AirflowException( "Wrong response '{}' returned - it should contain " "'name' field".format(response)) self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
Starts an existing instance defined by project_id, zone and resource_id. Must be called with keyword arguments rather than positional.
def start_instance(self, zone, resource_id, project_id=None): """ Starts an existing instance defined by project_id, zone and resource_id. Must be called with keyword arguments rather than positional. :param zone: Google Cloud Platform zone where the instance exists :type zone: str :param resource_id: Name of the Compute Engine instance resource :type resource_id: str :param project_id: Optional, Google Cloud Platform project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self.get_conn().instances().start( project=project_id, zone=zone, instance=resource_id ).execute(num_retries=self.num_retries) try: operation_name = response["name"] except KeyError: raise AirflowException( "Wrong response '{}' returned - it should contain " "'name' field".format(response)) self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
airflow/contrib/hooks/gcp_compute_hook.py
apache/airflow
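A hedged example of driving the method above; the connection id is Airflow's conventional default, and the zone, instance, and project names are illustrative:

from airflow.contrib.hooks.gcp_compute_hook import GceHook

hook = GceHook(gcp_conn_id='google_cloud_default')
hook.start_instance(zone='europe-west1-b',
                    resource_id='my-instance',
                    project_id='my-project')  # blocks until the operation completes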
GceHook.set_machine_type
def set_machine_type(self, zone, resource_id, body, project_id=None): response = self._execute_set_machine_type(zone, resource_id, body, project_id) try: operation_name = response["name"] except KeyError: raise AirflowException( "Wrong response '{}' returned - it should contain " "'name' field".format(response)) self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
Sets machine type of an instance defined by project_id, zone and resource_id. Must be called with keyword arguments rather than positional.
def set_machine_type(self, zone, resource_id, body, project_id=None): """ Sets machine type of an instance defined by project_id, zone and resource_id. Must be called with keyword arguments rather than positional. :param zone: Google Cloud Platform zone where the instance exists. :type zone: str :param resource_id: Name of the Compute Engine instance resource :type resource_id: str :param body: Body required by the Compute Engine setMachineType API, as described in https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType :type body: dict :param project_id: Optional, Google Cloud Platform project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self._execute_set_machine_type(zone, resource_id, body, project_id) try: operation_name = response["name"] except KeyError: raise AirflowException( "Wrong response '{}' returned - it should contain " "'name' field".format(response)) self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
airflow/contrib/hooks/gcp_compute_hook.py
apache/airflow
GceHook.get_instance_template
def get_instance_template(self, resource_id, project_id=None): response = self.get_conn().instanceTemplates().get( project=project_id, instanceTemplate=resource_id ).execute(num_retries=self.num_retries) return response
Retrieves instance template by project_id and resource_id. Must be called with keyword arguments rather than positional.
def get_instance_template(self, resource_id, project_id=None): """ Retrieves instance template by project_id and resource_id. Must be called with keyword arguments rather than positional. :param resource_id: Name of the instance template :type resource_id: str :param project_id: Optional, Google Cloud Platform project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: Instance template representation as object according to https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates :rtype: dict """ response = self.get_conn().instanceTemplates().get( project=project_id, instanceTemplate=resource_id ).execute(num_retries=self.num_retries) return response
airflow/contrib/hooks/gcp_compute_hook.py
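A hedged sketch of reading an instance template; the template and project names are illustrative, and the fields accessed ('name', 'properties.machineType') follow the instanceTemplates resource linked in the docstring.

# Sketch: fetch a template and inspect its machine type.
from airflow.contrib.hooks.gcp_compute_hook import GceHook

hook = GceHook()  # default GCP connection assumed
template = hook.get_instance_template(resource_id='example-template',
                                      project_id='example-project')
print(template['name'], template['properties']['machineType'])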
apache/airflow
GceHook.insert_instance_template
def insert_instance_template(self, body, request_id=None, project_id=None):
    response = self.get_conn().instanceTemplates().insert(
        project=project_id,
        body=body,
        requestId=request_id
    ).execute(num_retries=self.num_retries)
    try:
        operation_name = response["name"]
    except KeyError:
        raise AirflowException(
            "Wrong response '{}' returned - it should contain "
            "'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name)
Inserts instance template using the body specified. Must be called with keyword arguments rather than positional.
def insert_instance_template(self, body, request_id=None, project_id=None):
    """
    Inserts instance template using the body specified.
    Must be called with keyword arguments rather than positional.

    :param body: Instance template representation as object according to
        https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
    :type body: dict
    :param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example when client call times out repeating the request
        with the same request id will not create a new instance template again).
        It should be in UUID format as defined in RFC 4122.
    :type request_id: str
    :param project_id: Optional, Google Cloud Platform project ID where the
        Compute Engine Instance exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :return: None
    """
    response = self.get_conn().instanceTemplates().insert(
        project=project_id,
        body=body,
        requestId=request_id
    ).execute(num_retries=self.num_retries)
    try:
        operation_name = response["name"]
    except KeyError:
        raise AirflowException(
            "Wrong response '{}' returned - it should contain "
            "'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name)
airflow/contrib/hooks/gcp_compute_hook.py
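A hedged sketch that copies an existing template under a new name, using a UUID request_id for idempotence as the docstring suggests; all resource names are illustrative, and it is assumed the API ignores the read-only fields returned by get.

# Sketch: read a template, rename it, and insert the copy idempotently.
import uuid

from airflow.contrib.hooks.gcp_compute_hook import GceHook

hook = GceHook()  # default GCP connection assumed
template = hook.get_instance_template(resource_id='example-template',
                                      project_id='example-project')
template['name'] = 'example-template-copy'  # read-only fields assumed ignored on insert
hook.insert_instance_template(body=template,
                              request_id=str(uuid.uuid4()),
                              project_id='example-project')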
apache/airflow
GceHook.get_instance_group_manager
def get_instance_group_manager(self, zone, resource_id, project_id=None):
    response = self.get_conn().instanceGroupManagers().get(
        project=project_id,
        zone=zone,
        instanceGroupManager=resource_id
    ).execute(num_retries=self.num_retries)
    return response
Retrieves Instance Group Manager by project_id, zone and resource_id. Must be called with keyword arguments rather than positional.
def get_instance_group_manager(self, zone, resource_id, project_id=None):
    """
    Retrieves Instance Group Manager by project_id, zone and resource_id.
    Must be called with keyword arguments rather than positional.

    :param zone: Google Cloud Platform zone where the Instance Group Manager exists
    :type zone: str
    :param resource_id: Name of the Instance Group Manager
    :type resource_id: str
    :param project_id: Optional, Google Cloud Platform project ID where the
        Compute Engine Instance exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :return: Instance group manager representation as object according to
        https://cloud.google.com/compute/docs/reference/rest/beta/instanceGroupManagers
    :rtype: dict
    """
    response = self.get_conn().instanceGroupManagers().get(
        project=project_id,
        zone=zone,
        instanceGroupManager=resource_id
    ).execute(num_retries=self.num_retries)
    return response
airflow/contrib/hooks/gcp_compute_hook.py
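A hedged sketch of reading an Instance Group Manager; it assumes the hook is built against the beta Compute API (as the linked reference suggests) and that the constructor accepts an api_version argument. Names are illustrative.

# Sketch: check which template an IGM currently points at.
from airflow.contrib.hooks.gcp_compute_hook import GceHook

hook = GceHook(api_version='beta')  # beta API assumed for instanceGroupManagers
igm = hook.get_instance_group_manager(zone='europe-west1-b',
                                      resource_id='example-group-manager',
                                      project_id='example-project')
print(igm['instanceTemplate'])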
apache/airflow
GceHook.patch_instance_group_manager
def patch_instance_group_manager(self, zone, resource_id, body, request_id=None,
                                 project_id=None):
    response = self.get_conn().instanceGroupManagers().patch(
        project=project_id,
        zone=zone,
        instanceGroupManager=resource_id,
        body=body,
        requestId=request_id
    ).execute(num_retries=self.num_retries)
    try:
        operation_name = response["name"]
    except KeyError:
        raise AirflowException(
            "Wrong response '{}' returned - it should contain "
            "'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name,
                                         zone=zone)
Patches Instance Group Manager with the specified body. Must be called with keyword arguments rather than positional.
def patch_instance_group_manager(self, zone, resource_id, body, request_id=None,
                                 project_id=None):
    """
    Patches Instance Group Manager with the specified body.
    Must be called with keyword arguments rather than positional.

    :param zone: Google Cloud Platform zone where the Instance Group Manager exists
    :type zone: str
    :param resource_id: Name of the Instance Group Manager
    :type resource_id: str
    :param body: Instance Group Manager representation as json-merge-patch object
        according to
        https://cloud.google.com/compute/docs/reference/rest/beta/instanceTemplates/patch
    :type body: dict
    :param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example when client call times out repeating the request
        with the same request id will not create a new instance template again).
        It should be in UUID format as defined in RFC 4122.
    :type request_id: str
    :param project_id: Optional, Google Cloud Platform project ID where the
        Compute Engine Instance exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :return: None
    """
    response = self.get_conn().instanceGroupManagers().patch(
        project=project_id,
        zone=zone,
        instanceGroupManager=resource_id,
        body=body,
        requestId=request_id
    ).execute(num_retries=self.num_retries)
    try:
        operation_name = response["name"]
    except KeyError:
        raise AirflowException(
            "Wrong response '{}' returned - it should contain "
            "'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name,
                                         zone=zone)
airflow/contrib/hooks/gcp_compute_hook.py
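A hedged sketch of repointing an IGM at a new template with a json-merge-patch body; the partial-URL form of instanceTemplate, the api_version argument and all resource names are illustrative assumptions.

# Sketch: patch the IGM so newly created instances use the copied template.
import uuid

from airflow.contrib.hooks.gcp_compute_hook import GceHook

hook = GceHook(api_version='beta')  # beta API assumed, as in the docstring link
hook.patch_instance_group_manager(
    zone='europe-west1-b',
    resource_id='example-group-manager',
    body={'instanceTemplate': 'global/instanceTemplates/example-template-copy'},
    request_id=str(uuid.uuid4()),
    project_id='example-project')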
apache/airflow
GceHook._wait_for_operation_to_complete
def _wait_for_operation_to_complete(self, project_id, operation_name, zone=None):
    service = self.get_conn()
    while True:
        if zone is None:
            operation_response = self._check_global_operation_status(
                service, operation_name, project_id)
        else:
            operation_response = self._check_zone_operation_status(
                service, operation_name, project_id, zone, self.num_retries)
        if operation_response.get("status") == GceOperationStatus.DONE:
            error = operation_response.get("error")
            if error:
                code = operation_response.get("httpErrorStatusCode")
                msg = operation_response.get("httpErrorMessage")
                error_msg = str(error.get("errors"))[1:-1]
                raise AirflowException("{} {}: ".format(code, msg) + error_msg)
            return
        time.sleep(TIME_TO_SLEEP_IN_SECONDS)
Waits for the named operation to complete - checks status of the async call.
def _wait_for_operation_to_complete(self, project_id, operation_name, zone=None):
    """
    Waits for the named operation to complete - checks status of the async call.

    :param operation_name: name of the operation
    :type operation_name: str
    :param zone: optional region of the request (might be None for global operations)
    :type zone: str
    :return: None
    """
    service = self.get_conn()
    while True:
        if zone is None:
            # noinspection PyTypeChecker
            operation_response = self._check_global_operation_status(
                service, operation_name, project_id)
        else:
            # noinspection PyTypeChecker
            operation_response = self._check_zone_operation_status(
                service, operation_name, project_id, zone, self.num_retries)
        if operation_response.get("status") == GceOperationStatus.DONE:
            error = operation_response.get("error")
            if error:
                code = operation_response.get("httpErrorStatusCode")
                msg = operation_response.get("httpErrorMessage")
                # Extracting the errors list as string and trimming square braces
                error_msg = str(error.get("errors"))[1:-1]
                raise AirflowException("{} {}: ".format(code, msg) + error_msg)
            # No meaningful info to return from the response in case of success
            return
        time.sleep(TIME_TO_SLEEP_IN_SECONDS)
airflow/contrib/hooks/gcp_compute_hook.py
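The helper above is a plain poll loop over the Compute operations endpoints. Below is a standalone sketch of the same pattern against the public zoneOperations API; it does not use the hook's private helpers, and the error handling is simplified.

# Sketch: poll a zonal Compute Engine operation until it reports DONE.
import time

def wait_for_zone_operation(service, project_id, zone, operation_name, poll_seconds=1):
    while True:
        op = service.zoneOperations().get(
            project=project_id, zone=zone, operation=operation_name
        ).execute()
        if op.get("status") == "DONE":
            if op.get("error"):
                raise RuntimeError(
                    "Operation failed: {}".format(op["error"].get("errors")))
            return op
        time.sleep(poll_seconds)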
apache/airflow
S3Hook.check_for_bucket
def check_for_bucket(self, bucket_name):
    try:
        self.get_conn().head_bucket(Bucket=bucket_name)
        return True
    except ClientError as e:
        self.log.info(e.response["Error"]["Message"])
        return False
Check if bucket_name exists.
def check_for_bucket(self, bucket_name):
    """
    Check if bucket_name exists.

    :param bucket_name: the name of the bucket
    :type bucket_name: str
    """
    try:
        self.get_conn().head_bucket(Bucket=bucket_name)
        return True
    except ClientError as e:
        self.log.info(e.response["Error"]["Message"])
        return False
airflow/hooks/S3_hook.py
apache/airflow
S3Hook.create_bucket
def create_bucket(self, bucket_name, region_name=None):
    s3_conn = self.get_conn()
    if not region_name:
        region_name = s3_conn.meta.region_name
    if region_name == 'us-east-1':
        self.get_conn().create_bucket(Bucket=bucket_name)
    else:
        self.get_conn().create_bucket(Bucket=bucket_name,
                                      CreateBucketConfiguration={
                                          'LocationConstraint': region_name
                                      })
Creates an Amazon S3 bucket.
def create_bucket(self, bucket_name, region_name=None):
    """
    Creates an Amazon S3 bucket.

    :param bucket_name: The name of the bucket
    :type bucket_name: str
    :param region_name: The name of the aws region in which to create the bucket.
    :type region_name: str
    """
    s3_conn = self.get_conn()
    if not region_name:
        region_name = s3_conn.meta.region_name
    if region_name == 'us-east-1':
        self.get_conn().create_bucket(Bucket=bucket_name)
    else:
        self.get_conn().create_bucket(Bucket=bucket_name,
                                      CreateBucketConfiguration={
                                          'LocationConstraint': region_name
                                      })
airflow/hooks/S3_hook.py
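A hedged sketch combining check_for_bucket (above) with create_bucket so the bucket is created only when missing; the connection id, bucket name and region are illustrative.

# Sketch: create an S3 bucket if it does not already exist.
from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id='aws_default')  # assumed default AWS connection
if not hook.check_for_bucket('example-bucket'):
    hook.create_bucket(bucket_name='example-bucket', region_name='eu-west-1')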
apache/airflow
S3Hook.check_for_prefix
def check_for_prefix(self, bucket_name, prefix, delimiter):
    prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
    prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
    previous_level = prefix_split[0]
    plist = self.list_prefixes(bucket_name, previous_level, delimiter)
    return False if plist is None else prefix in plist
Checks that a prefix exists in a bucket
def check_for_prefix(self, bucket_name, prefix, delimiter):
    """
    Checks that a prefix exists in a bucket

    :param bucket_name: the name of the bucket
    :type bucket_name: str
    :param prefix: a key prefix
    :type prefix: str
    :param delimiter: the delimiter marks key hierarchy.
    :type delimiter: str
    """
    prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
    prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
    previous_level = prefix_split[0]
    plist = self.list_prefixes(bucket_name, previous_level, delimiter)
    return False if plist is None else prefix in plist
airflow/hooks/S3_hook.py
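A hedged sketch of check_for_prefix; as the code above shows, the trailing delimiter is appended automatically when missing and the check is made against the prefixes one level up. Bucket and prefix names are illustrative.

# Sketch: does the logs/2019/ "folder" exist in the bucket?
from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id='aws_default')
exists = hook.check_for_prefix(bucket_name='example-bucket',
                               prefix='logs/2019/',
                               delimiter='/')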
apache/airflow
S3Hook.list_prefixes
def list_prefixes(self, bucket_name, prefix='', delimiter='',
                  page_size=None, max_items=None):
    config = {
        'PageSize': page_size,
        'MaxItems': max_items,
    }
    paginator = self.get_conn().get_paginator('list_objects_v2')
    response = paginator.paginate(Bucket=bucket_name,
                                  Prefix=prefix,
                                  Delimiter=delimiter,
                                  PaginationConfig=config)
    has_results = False
    prefixes = []
    for page in response:
        if 'CommonPrefixes' in page:
            has_results = True
            for p in page['CommonPrefixes']:
                prefixes.append(p['Prefix'])
    if has_results:
        return prefixes
Lists prefixes in a bucket under prefix
def list_prefixes(self, bucket_name, prefix='', delimiter='',
                  page_size=None, max_items=None):
    """
    Lists prefixes in a bucket under prefix

    :param bucket_name: the name of the bucket
    :type bucket_name: str
    :param prefix: a key prefix
    :type prefix: str
    :param delimiter: the delimiter marks key hierarchy.
    :type delimiter: str
    :param page_size: pagination size
    :type page_size: int
    :param max_items: maximum items to return
    :type max_items: int
    """
    config = {
        'PageSize': page_size,
        'MaxItems': max_items,
    }
    paginator = self.get_conn().get_paginator('list_objects_v2')
    response = paginator.paginate(Bucket=bucket_name,
                                  Prefix=prefix,
                                  Delimiter=delimiter,
                                  PaginationConfig=config)
    has_results = False
    prefixes = []
    for page in response:
        if 'CommonPrefixes' in page:
            has_results = True
            for p in page['CommonPrefixes']:
                prefixes.append(p['Prefix'])
    if has_results:
        return prefixes
airflow/hooks/S3_hook.py
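A hedged sketch of list_prefixes listing the first-level "sub-folders" under a prefix; the method returns None when nothing matches, so the caller guards against that. Names are illustrative.

# Sketch: list the year "folders" under logs/.
from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id='aws_default')
years = hook.list_prefixes(bucket_name='example-bucket',
                           prefix='logs/',
                           delimiter='/') or []
for year_prefix in years:
    print(year_prefix)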
apache/airflow
S3Hook.list_keys
def list_keys(self, bucket_name, prefix='', delimiter='',
              page_size=None, max_items=None):
    config = {
        'PageSize': page_size,
        'MaxItems': max_items,
    }
    paginator = self.get_conn().get_paginator('list_objects_v2')
    response = paginator.paginate(Bucket=bucket_name,
                                  Prefix=prefix,
                                  Delimiter=delimiter,
                                  PaginationConfig=config)
    has_results = False
    keys = []
    for page in response:
        if 'Contents' in page:
            has_results = True
            for k in page['Contents']:
                keys.append(k['Key'])
    if has_results:
        return keys
Lists keys in a bucket under prefix and not containing delimiter
def list_keys(self, bucket_name, prefix='', delimiter='',
              page_size=None, max_items=None):
    """
    Lists keys in a bucket under prefix and not containing delimiter

    :param bucket_name: the name of the bucket
    :type bucket_name: str
    :param prefix: a key prefix
    :type prefix: str
    :param delimiter: the delimiter marks key hierarchy.
    :type delimiter: str
    :param page_size: pagination size
    :type page_size: int
    :param max_items: maximum items to return
    :type max_items: int
    """
    config = {
        'PageSize': page_size,
        'MaxItems': max_items,
    }
    paginator = self.get_conn().get_paginator('list_objects_v2')
    response = paginator.paginate(Bucket=bucket_name,
                                  Prefix=prefix,
                                  Delimiter=delimiter,
                                  PaginationConfig=config)
    has_results = False
    keys = []
    for page in response:
        if 'Contents' in page:
            has_results = True
            for k in page['Contents']:
                keys.append(k['Key'])
    if has_results:
        return keys
airflow/hooks/S3_hook.py
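A hedged sketch of list_keys with the pagination knobs; page_size and max_items map onto the boto3 paginator's PageSize and MaxItems settings. Names are illustrative.

# Sketch: fetch at most 1000 keys under a prefix, 100 per API page.
from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id='aws_default')
keys = hook.list_keys(bucket_name='example-bucket',
                      prefix='logs/2019/',
                      page_size=100,
                      max_items=1000) or []
print(len(keys))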
apache/airflow
S3Hook.check_for_key
def check_for_key(self, key, bucket_name=None):
    if not bucket_name:
        (bucket_name, key) = self.parse_s3_url(key)
    try:
        self.get_conn().head_object(Bucket=bucket_name, Key=key)
        return True
    except ClientError as e:
        self.log.info(e.response["Error"]["Message"])
        return False
Checks if a key exists in a bucket
def check_for_key(self, key, bucket_name=None):
    """
    Checks if a key exists in a bucket

    :param key: S3 key that will point to the file
    :type key: str
    :param bucket_name: Name of the bucket in which the file is stored
    :type bucket_name: str
    """
    if not bucket_name:
        (bucket_name, key) = self.parse_s3_url(key)
    try:
        self.get_conn().head_object(Bucket=bucket_name, Key=key)
        return True
    except ClientError as e:
        self.log.info(e.response["Error"]["Message"])
        return False
airflow/hooks/S3_hook.py
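A hedged sketch of check_for_key; as the code above shows, when bucket_name is omitted the key is parsed as a full s3:// URL. Bucket and key names are illustrative.

# Sketch: both call styles check the same object.
from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id='aws_default')
hook.check_for_key('logs/2019/01/01/run.log', bucket_name='example-bucket')
hook.check_for_key('s3://example-bucket/logs/2019/01/01/run.log')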