Dataset columns:
repo_name: stringclasses (4 values)
method_name: stringlengths (3–72)
method_code: stringlengths (87–3.59k)
method_summary: stringlengths (12–196)
original_method_code: stringlengths (129–8.98k)
method_path: stringlengths (15–136)
apache/airflow
BaseExecutor.has_task
def has_task(self, task_instance): if task_instance.key in self.queued_tasks or task_instance.key in self.running: return True
Checks if a task is either queued or running in this executor
def has_task(self, task_instance): """ Checks if a task is either queued or running in this executor :param task_instance: TaskInstance :return: True if the task is known to this executor """ if task_instance.key in self.queued_tasks or task_instance.key in self.running: return True
airflow/executors/base_executor.py
apache/airflow
SnowflakeHook._get_aws_credentials
def _get_aws_credentials(self): if self.snowflake_conn_id: connection_object = self.get_connection(self.snowflake_conn_id) if 'aws_secret_access_key' in connection_object.extra_dejson: aws_access_key_id = connection_object.extra_dejson.get( 'aws_access_key_id') aws_secret_access_key = connection_object.extra_dejson.get( 'aws_secret_access_key') return aws_access_key_id, aws_secret_access_key
returns aws_access_key_id, aws_secret_access_key from extra intended to be used by external import and export statements
def _get_aws_credentials(self): """ returns aws_access_key_id, aws_secret_access_key from extra intended to be used by external import and export statements """ if self.snowflake_conn_id: connection_object = self.get_connection(self.snowflake_conn_id) if 'aws_secret_access_key' in connection_object.extra_dejson: aws_access_key_id = connection_object.extra_dejson.get( 'aws_access_key_id') aws_secret_access_key = connection_object.extra_dejson.get( 'aws_secret_access_key') return aws_access_key_id, aws_secret_access_key
airflow/contrib/hooks/snowflake_hook.py
apache/airflow
PostgresHook.copy_expert
def copy_expert(self, sql, filename, open=open): if not os.path.isfile(filename): with open(filename, 'w'): pass with open(filename, 'r+') as f: with closing(self.get_conn()) as conn: with closing(conn.cursor()) as cur: cur.copy_expert(sql, f) f.truncate(f.tell()) conn.commit()
Executes SQL using psycopg2 copy_expert method. Necessary to execute COPY command without access to a superuser.
def copy_expert(self, sql, filename, open=open): """ Executes SQL using psycopg2 copy_expert method. Necessary to execute COPY command without access to a superuser. Note: if this method is called with a "COPY FROM" statement and the specified input file does not exist, it creates an empty file and no data is loaded, but the operation succeeds. So if users want to be aware when the input file does not exist, they have to check its existence by themselves. """ if not os.path.isfile(filename): with open(filename, 'w'): pass with open(filename, 'r+') as f: with closing(self.get_conn()) as conn: with closing(conn.cursor()) as cur: cur.copy_expert(sql, f) f.truncate(f.tell()) conn.commit()
airflow/hooks/postgres_hook.py
apache/airflow
PostgresHook.bulk_dump
def bulk_dump(self, table, tmp_file): self.copy_expert("COPY {table} TO STDOUT".format(table=table), tmp_file)
Dumps a database table into a tab-delimited file
def bulk_dump(self, table, tmp_file): """ Dumps a database table into a tab-delimited file """ self.copy_expert("COPY {table} TO STDOUT".format(table=table), tmp_file)
airflow/hooks/postgres_hook.py
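A minimal usage sketch for the two PostgresHook methods above; the connection id, table name, and file paths are illustrative assumptions.

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='postgres_default')  # assumed connection id

# Dump a whole table to a tab-delimited file (COPY ... TO STDOUT under the hood)
hook.bulk_dump('my_schema.my_table', '/tmp/my_table.tsv')

# Run an arbitrary COPY statement through copy_expert; per the docstring note,
# a missing input file is silently created empty for "COPY FROM" statements.
hook.copy_expert('COPY my_schema.my_table TO STDOUT WITH CSV HEADER',
                 '/tmp/my_table.csv')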
apache/airflow
FileToGoogleCloudStorageOperator.execute
def execute(self, context): hook = GoogleCloudStorageHook( google_cloud_storage_conn_id=self.google_cloud_storage_conn_id, delegate_to=self.delegate_to) hook.upload( bucket_name=self.bucket, object_name=self.dst, mime_type=self.mime_type, filename=self.src, gzip=self.gzip, )
Uploads the file to Google cloud storage
def execute(self, context): """ Uploads the file to Google cloud storage """ hook = GoogleCloudStorageHook( google_cloud_storage_conn_id=self.google_cloud_storage_conn_id, delegate_to=self.delegate_to) hook.upload( bucket_name=self.bucket, object_name=self.dst, mime_type=self.mime_type, filename=self.src, gzip=self.gzip, )
airflow/contrib/operators/file_to_gcs.py
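A hedged sketch of wiring the operator above into a DAG; the constructor parameters mirror the attributes used in execute() and are assumed to be accepted by the operator's __init__, and the paths and bucket name are illustrative.

from datetime import datetime
from airflow import DAG
from airflow.contrib.operators.file_to_gcs import FileToGoogleCloudStorageOperator

with DAG(dag_id='example_file_to_gcs',
         start_date=datetime(2019, 1, 1),
         schedule_interval=None) as dag:
    upload_report = FileToGoogleCloudStorageOperator(
        task_id='upload_report',
        src='/tmp/report.csv',          # illustrative source file
        dst='reports/report.csv',       # illustrative object name
        bucket='my-bucket',             # illustrative bucket
        mime_type='text/csv',
        gzip=False,
    )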
apache/airflow
max_partition
def max_partition( table, schema="default", field=None, filter_map=None, metastore_conn_id='metastore_default'): from airflow.hooks.hive_hooks import HiveMetastoreHook if '.' in table: schema, table = table.split('.') hh = HiveMetastoreHook(metastore_conn_id=metastore_conn_id) return hh.max_partition( schema=schema, table_name=table, field=field, filter_map=filter_map)
Gets the max partition for a table.
def max_partition( table, schema="default", field=None, filter_map=None, metastore_conn_id='metastore_default'): """ Gets the max partition for a table. :param schema: The hive schema the table lives in :type schema: str :param table: The hive table you are interested in, supports the dot notation as in "my_database.my_table", if a dot is found, the schema param is disregarded :type table: str :param metastore_conn_id: The hive connection you are interested in. If your default is set you don't need to use this parameter. :type metastore_conn_id: str :param filter_map: partition_key:partition_value map used for partition filtering, e.g. {'key1': 'value1', 'key2': 'value2'}. Only partitions matching all partition_key:partition_value pairs will be considered as candidates of max partition. :type filter_map: map :param field: the field to get the max value from. If there's only one partition field, this will be inferred :type field: str >>> max_partition('airflow.static_babynames_partitioned') '2015-01-01' """ from airflow.hooks.hive_hooks import HiveMetastoreHook if '.' in table: schema, table = table.split('.') hh = HiveMetastoreHook(metastore_conn_id=metastore_conn_id) return hh.max_partition( schema=schema, table_name=table, field=field, filter_map=filter_map)
airflow/macros/hive.py
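A short sketch of using the macro above, assuming a reachable Hive metastore behind the default 'metastore_default' connection; the table is the one from the docstring example.

from airflow.macros.hive import max_partition

latest_ds = max_partition('airflow.static_babynames_partitioned')

# The same macro is also reachable from templated fields (stock Airflow exposes
# the hive macros module as `macros.hive` in the Jinja context):
hql = (
    "SELECT COUNT(*) FROM airflow.static_babynames_partitioned "
    "WHERE ds = '{{ macros.hive.max_partition('airflow.static_babynames_partitioned') }}'"
)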
apache/airflow
CloudTranslateHook.get_conn
def get_conn(self): if not self._client: self._client = Client(credentials=self._get_credentials()) return self._client
Retrieves connection to Cloud Translate
def get_conn(self): """ Retrieves connection to Cloud Translate :return: Google Cloud Translate client object. :rtype: Client """ if not self._client: self._client = Client(credentials=self._get_credentials()) return self._client
airflow/contrib/hooks/gcp_translate_hook.py
apache/airflow
CloudTranslateHook.translate
def translate( self, values, target_language, format_=None, source_language=None, model=None ): client = self.get_conn() return client.translate( values=values, target_language=target_language, format_=format_, source_language=source_language, model=model, )
Translate a string or list of strings. See
def translate( self, values, target_language, format_=None, source_language=None, model=None ): """Translate a string or list of strings. See https://cloud.google.com/translate/docs/translating-text :type values: str or list :param values: String or list of strings to translate. :type target_language: str :param target_language: The language to translate results into. This is required by the API and defaults to the target language of the current instance. :type format_: str :param format_: (Optional) One of ``text`` or ``html``, to specify if the input text is plain text or HTML. :type source_language: str or None :param source_language: (Optional) The language of the text to be translated. :type model: str or None :param model: (Optional) The model used to translate the text, such as ``'base'`` or ``'nmt'``. :rtype: str or list :returns: A list of dictionaries for each queried value. Each dictionary typically contains three keys (though not all will be present in all cases) * ``detectedSourceLanguage``: The detected language (as an ISO 639-1 language code) of the text. * ``translatedText``: The translation of the text into the target language. * ``input``: The corresponding input value. * ``model``: The model used to translate the text. If only a single value is passed, then only a single dictionary will be returned. :raises: :class:`~exceptions.ValueError` if the number of values and translations differ. """ client = self.get_conn() return client.translate( values=values, target_language=target_language, format_=format_, source_language=source_language, model=model, )
airflow/contrib/hooks/gcp_translate_hook.py
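A hedged sketch of calling the translate method above; the constructor keyword is an assumption (it is not shown here), and credentials are expected to come from a configured GCP connection.

from airflow.contrib.hooks.gcp_translate_hook import CloudTranslateHook

hook = CloudTranslateHook(gcp_conn_id='google_cloud_default')  # assumed kwarg

result = hook.translate(
    values='The quick brown fox',   # a single string yields a single dict
    target_language='pl',
    format_='text',
    source_language='en',
    model='nmt',
)
print(result['translatedText'])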
apache/airflow
CloudSqlHook.get_instance
def get_instance(self, instance, project_id=None): return self.get_conn().instances().get( project=project_id, instance=instance ).execute(num_retries=self.num_retries)
Retrieves a resource containing information about a Cloud SQL instance.
def get_instance(self, instance, project_id=None): """ Retrieves a resource containing information about a Cloud SQL instance. :param instance: Database instance ID. This does not include the project ID. :type instance: str :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: A Cloud SQL instance resource. :rtype: dict """ return self.get_conn().instances().get( project=project_id, instance=instance ).execute(num_retries=self.num_retries)
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlHook.create_instance
def create_instance(self, body, project_id=None): response = self.get_conn().instances().insert( project=project_id, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
Creates a new Cloud SQL instance.
def create_instance(self, body, project_id=None): """ Creates a new Cloud SQL instance. :param body: Body required by the Cloud SQL insert API, as described in https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/insert#request-body. :type body: dict :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self.get_conn().instances().insert( project=project_id, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlHook.patch_instance
def patch_instance(self, body, instance, project_id=None): response = self.get_conn().instances().patch( project=project_id, instance=instance, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
Updates settings of a Cloud SQL instance.
def patch_instance(self, body, instance, project_id=None): """ Updates settings of a Cloud SQL instance. Caution: This is not a partial update, so you must include values for all the settings that you want to retain. :param body: Body required by the Cloud SQL patch API, as described in https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/patch#request-body. :type body: dict :param instance: Cloud SQL instance ID. This does not include the project ID. :type instance: str :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self.get_conn().instances().patch( project=project_id, instance=instance, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlHook.delete_instance
def delete_instance(self, instance, project_id=None): response = self.get_conn().instances().delete( project=project_id, instance=instance, ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
Deletes a Cloud SQL instance.
def delete_instance(self, instance, project_id=None): """ Deletes a Cloud SQL instance. :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :param instance: Cloud SQL instance ID. This does not include the project ID. :type instance: str :return: None """ response = self.get_conn().instances().delete( project=project_id, instance=instance, ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlHook.get_database
def get_database(self, instance, database, project_id=None): return self.get_conn().databases().get( project=project_id, instance=instance, database=database ).execute(num_retries=self.num_retries)
Retrieves a database resource from a Cloud SQL instance.
def get_database(self, instance, database, project_id=None): """ Retrieves a database resource from a Cloud SQL instance. :param instance: Database instance ID. This does not include the project ID. :type instance: str :param database: Name of the database in the instance. :type database: str :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: A Cloud SQL database resource, as described in https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases#resource. :rtype: dict """ return self.get_conn().databases().get( project=project_id, instance=instance, database=database ).execute(num_retries=self.num_retries)
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlHook.create_database
def create_database(self, instance, body, project_id=None): response = self.get_conn().databases().insert( project=project_id, instance=instance, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
Creates a new database inside a Cloud SQL instance.
def create_database(self, instance, body, project_id=None): """ Creates a new database inside a Cloud SQL instance. :param instance: Database instance ID. This does not include the project ID. :type instance: str :param body: The request body, as described in https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body. :type body: dict :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self.get_conn().databases().insert( project=project_id, instance=instance, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlHook.patch_database
def patch_database(self, instance, database, body, project_id=None): response = self.get_conn().databases().patch( project=project_id, instance=instance, database=database, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
Updates a database resource inside a Cloud SQL instance. This method supports patch semantics. See
def patch_database(self, instance, database, body, project_id=None): """ Updates a database resource inside a Cloud SQL instance. This method supports patch semantics. See https://cloud.google.com/sql/docs/mysql/admin-api/how-tos/performance#patch. :param instance: Database instance ID. This does not include the project ID. :type instance: str :param database: Name of the database to be updated in the instance. :type database: str :param body: The request body, as described in https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body. :type body: dict :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self.get_conn().databases().patch( project=project_id, instance=instance, database=database, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlHook.delete_database
def delete_database(self, instance, database, project_id=None): response = self.get_conn().databases().delete( project=project_id, instance=instance, database=database ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
Deletes a database from a Cloud SQL instance.
def delete_database(self, instance, database, project_id=None): """ Deletes a database from a Cloud SQL instance. :param instance: Database instance ID. This does not include the project ID. :type instance: str :param database: Name of the database to be deleted in the instance. :type database: str :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self.get_conn().databases().delete( project=project_id, instance=instance, database=database ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlHook.export_instance
def export_instance(self, instance, body, project_id=None): try: response = self.get_conn().instances().export( project=project_id, instance=instance, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name) except HttpError as ex: raise AirflowException( 'Exporting instance {} failed: {}'.format(instance, ex.content) )
Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump or CSV file.
def export_instance(self, instance, body, project_id=None): """ Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump or CSV file. :param instance: Database instance ID of the Cloud SQL instance. This does not include the project ID. :type instance: str :param body: The request body, as described in https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/export#request-body :type body: dict :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ try: response = self.get_conn().instances().export( project=project_id, instance=instance, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name) except HttpError as ex: raise AirflowException( 'Exporting instance {} failed: {}'.format(instance, ex.content) )
airflow/contrib/hooks/gcp_sql_hook.py
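A hedged lifecycle sketch for the CloudSqlHook methods above. The constructor arguments and the exact request-body fields are assumptions taken from the linked Cloud SQL API documentation, not from the code shown here.

from airflow.contrib.hooks.gcp_sql_hook import CloudSqlHook

hook = CloudSqlHook(api_version='v1beta4', gcp_conn_id='google_cloud_default')

hook.create_instance(
    project_id='my-project',
    body={'name': 'my-instance',                    # fields per the API docs
          'settings': {'tier': 'db-n1-standard-1'}},
)
hook.export_instance(
    project_id='my-project',
    instance='my-instance',
    body={'exportContext': {'fileType': 'SQL',
                            'uri': 'gs://my-bucket/my-instance-dump.sql.gz'}},
)
hook.delete_instance(project_id='my-project', instance='my-instance')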
apache/airflow
CloudSqlProxyRunner.start_proxy
def start_proxy(self): self._download_sql_proxy_if_needed() if self.sql_proxy_process: raise AirflowException("The sql proxy is already running: {}".format( self.sql_proxy_process)) else: command_to_run = [self.sql_proxy_path] command_to_run.extend(self.command_line_parameters) try: self.log.info("Creating directory %s", self.cloud_sql_proxy_socket_directory) os.makedirs(self.cloud_sql_proxy_socket_directory) except OSError: pass command_to_run.extend(self._get_credential_parameters()) self.log.info("Running the command: `%s`", " ".join(command_to_run)) self.sql_proxy_process = Popen(command_to_run, stdin=PIPE, stdout=PIPE, stderr=PIPE) self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid) while True: line = self.sql_proxy_process.stderr.readline().decode('utf-8') return_code = self.sql_proxy_process.poll() if line == '' and return_code is not None: self.sql_proxy_process = None raise AirflowException( "The cloud_sql_proxy finished early with return code {}!".format( return_code)) if line != '': self.log.info(line) if "googleapi: Error" in line or "invalid instance name:" in line: self.stop_proxy() raise AirflowException( "Error when starting the cloud_sql_proxy {}!".format( line)) if "Ready for new connections" in line: return
Starts Cloud SQL Proxy. You have to remember to stop the proxy if you started it!
def start_proxy(self): """ Starts Cloud SQL Proxy. You have to remember to stop the proxy if you started it! """ self._download_sql_proxy_if_needed() if self.sql_proxy_process: raise AirflowException("The sql proxy is already running: {}".format( self.sql_proxy_process)) else: command_to_run = [self.sql_proxy_path] command_to_run.extend(self.command_line_parameters) try: self.log.info("Creating directory %s", self.cloud_sql_proxy_socket_directory) os.makedirs(self.cloud_sql_proxy_socket_directory) except OSError: # Needed for python 2 compatibility (exists_ok missing) pass command_to_run.extend(self._get_credential_parameters()) self.log.info("Running the command: `%s`", " ".join(command_to_run)) self.sql_proxy_process = Popen(command_to_run, stdin=PIPE, stdout=PIPE, stderr=PIPE) self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid) while True: line = self.sql_proxy_process.stderr.readline().decode('utf-8') return_code = self.sql_proxy_process.poll() if line == '' and return_code is not None: self.sql_proxy_process = None raise AirflowException( "The cloud_sql_proxy finished early with return code {}!".format( return_code)) if line != '': self.log.info(line) if "googleapi: Error" in line or "invalid instance name:" in line: self.stop_proxy() raise AirflowException( "Error when starting the cloud_sql_proxy {}!".format( line)) if "Ready for new connections" in line: return
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlProxyRunner.stop_proxy
def stop_proxy(self): if not self.sql_proxy_process: raise AirflowException("The sql proxy is not started yet") else: self.log.info("Stopping the cloud_sql_proxy pid: %s", self.sql_proxy_process.pid) self.sql_proxy_process.kill() self.sql_proxy_process = None self.log.info("Removing the socket directory: %s", self.cloud_sql_proxy_socket_directory) shutil.rmtree(self.cloud_sql_proxy_socket_directory, ignore_errors=True) if self.sql_proxy_was_downloaded: self.log.info("Removing downloaded proxy: %s", self.sql_proxy_path) try: os.remove(self.sql_proxy_path) except OSError as e: if not e.errno == errno.ENOENT: raise else: self.log.info("Skipped removing proxy - it was not downloaded: %s", self.sql_proxy_path) if os.path.isfile(self.credentials_path): self.log.info("Removing generated credentials file %s", self.credentials_path) os.remove(self.credentials_path)
Stops running proxy. You should stop the proxy after you stop using it.
def stop_proxy(self): """ Stops running proxy. You should stop the proxy after you stop using it. """ if not self.sql_proxy_process: raise AirflowException("The sql proxy is not started yet") else: self.log.info("Stopping the cloud_sql_proxy pid: %s", self.sql_proxy_process.pid) self.sql_proxy_process.kill() self.sql_proxy_process = None # Cleanup! self.log.info("Removing the socket directory: %s", self.cloud_sql_proxy_socket_directory) shutil.rmtree(self.cloud_sql_proxy_socket_directory, ignore_errors=True) if self.sql_proxy_was_downloaded: self.log.info("Removing downloaded proxy: %s", self.sql_proxy_path) # Silently ignore if the file has already been removed (concurrency) try: os.remove(self.sql_proxy_path) except OSError as e: if not e.errno == errno.ENOENT: raise else: self.log.info("Skipped removing proxy - it was not downloaded: %s", self.sql_proxy_path) if os.path.isfile(self.credentials_path): self.log.info("Removing generated credentials file %s", self.credentials_path) # Here file cannot be delete by concurrent task (each task has its own copy) os.remove(self.credentials_path)
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlDatabaseHook.create_connection
def create_connection(self, session=None): connection = Connection(conn_id=self.db_conn_id) uri = self._generate_connection_uri() self.log.info("Creating connection %s", self.db_conn_id) connection.parse_from_uri(uri) session.add(connection) session.commit()
Create connection in the Connection table, according to whether it uses proxy, TCP, UNIX sockets, SSL. Connection ID will be randomly generated.
def create_connection(self, session=None): """ Create connection in the Connection table, according to whether it uses proxy, TCP, UNIX sockets, SSL. Connection ID will be randomly generated. :param session: Session of the SQL Alchemy ORM (automatically generated with decorator). """ connection = Connection(conn_id=self.db_conn_id) uri = self._generate_connection_uri() self.log.info("Creating connection %s", self.db_conn_id) connection.parse_from_uri(uri) session.add(connection) session.commit()
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlDatabaseHook.retrieve_connection
def retrieve_connection(self, session=None): self.log.info("Retrieving connection %s", self.db_conn_id) connections = session.query(Connection).filter( Connection.conn_id == self.db_conn_id) if connections.count(): return connections[0] return None
Retrieves the dynamically created connection from the Connection table.
def retrieve_connection(self, session=None): """ Retrieves the dynamically created connection from the Connection table. :param session: Session of the SQL Alchemy ORM (automatically generated with decorator). """ self.log.info("Retrieving connection %s", self.db_conn_id) connections = session.query(Connection).filter( Connection.conn_id == self.db_conn_id) if connections.count(): return connections[0] return None
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlDatabaseHook.delete_connection
def delete_connection(self, session=None): self.log.info("Deleting connection %s", self.db_conn_id) connections = session.query(Connection).filter( Connection.conn_id == self.db_conn_id) if connections.count(): connection = connections[0] session.delete(connection) session.commit() else: self.log.info("Connection was already deleted!")
Delete the dynamically created connection from the Connection table.
def delete_connection(self, session=None): """ Delete the dynamically created connection from the Connection table. :param session: Session of the SQL Alchemy ORM (automatically generated with decorator). """ self.log.info("Deleting connection %s", self.db_conn_id) connections = session.query(Connection).filter( Connection.conn_id == self.db_conn_id) if connections.count(): connection = connections[0] session.delete(connection) session.commit() else: self.log.info("Connection was already deleted!")
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlDatabaseHook.get_sqlproxy_runner
def get_sqlproxy_runner(self): if not self.use_proxy: raise AirflowException("Proxy runner can only be retrieved in case of use_proxy = True") return CloudSqlProxyRunner( path_prefix=self.sql_proxy_unique_path, instance_specification=self._get_sqlproxy_instance_specification(), project_id=self.project_id, sql_proxy_version=self.sql_proxy_version, sql_proxy_binary_path=self.sql_proxy_binary_path )
Retrieve Cloud SQL Proxy runner. It is used to manage the proxy lifecycle per task.
def get_sqlproxy_runner(self): """ Retrieve Cloud SQL Proxy runner. It is used to manage the proxy lifecycle per task. :return: The Cloud SQL Proxy runner. :rtype: CloudSqlProxyRunner """ if not self.use_proxy: raise AirflowException("Proxy runner can only be retrieved in case of use_proxy = True") return CloudSqlProxyRunner( path_prefix=self.sql_proxy_unique_path, instance_specification=self._get_sqlproxy_instance_specification(), project_id=self.project_id, sql_proxy_version=self.sql_proxy_version, sql_proxy_binary_path=self.sql_proxy_binary_path )
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlDatabaseHook.get_database_hook
def get_database_hook(self): if self.database_type == 'postgres': self.db_hook = PostgresHook(postgres_conn_id=self.db_conn_id, schema=self.database) else: self.db_hook = MySqlHook(mysql_conn_id=self.db_conn_id, schema=self.database) return self.db_hook
Retrieve database hook. This is the actual Postgres or MySQL database hook that uses proxy or connects directly to the Google Cloud SQL database.
def get_database_hook(self): """ Retrieve database hook. This is the actual Postgres or MySQL database hook that uses proxy or connects directly to the Google Cloud SQL database. """ if self.database_type == 'postgres': self.db_hook = PostgresHook(postgres_conn_id=self.db_conn_id, schema=self.database) else: self.db_hook = MySqlHook(mysql_conn_id=self.db_conn_id, schema=self.database) return self.db_hook
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
CloudSqlDatabaseHook.cleanup_database_hook
def cleanup_database_hook(self): if self.database_type == 'postgres': if hasattr(self.db_hook, 'conn') and self.db_hook.conn and self.db_hook.conn.notices: for output in self.db_hook.conn.notices: self.log.info(output)
Clean up database hook after it was used.
def cleanup_database_hook(self): """ Clean up database hook after it was used. """ if self.database_type == 'postgres': if hasattr(self.db_hook, 'conn') and self.db_hook.conn and self.db_hook.conn.notices: for output in self.db_hook.conn.notices: self.log.info(output)
airflow/contrib/hooks/gcp_sql_hook.py
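The CloudSqlDatabaseHook methods above fit together roughly as below. This is a sketch only: the hook's constructor is not shown in this section, so the function takes an already-configured instance as input.

def run_query_via_cloud_sql(db_hook, sql="SELECT 1"):
    # db_hook: an already-configured CloudSqlDatabaseHook (construction assumed)
    db_hook.create_connection()                    # register dynamic Connection
    proxy_runner = db_hook.get_sqlproxy_runner()   # requires use_proxy=True
    proxy_runner.start_proxy()                     # must be stopped explicitly
    try:
        database_hook = db_hook.get_database_hook()  # PostgresHook or MySqlHook
        return database_hook.run(sql)
    finally:
        proxy_runner.stop_proxy()
        db_hook.cleanup_database_hook()
        db_hook.delete_connection()                # drop the dynamic Connection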
apache/airflow
CloudSqlDatabaseHook.reserve_free_tcp_port
def reserve_free_tcp_port(self): self.reserved_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.reserved_tcp_socket.bind(('127.0.0.1', 0)) self.sql_proxy_tcp_port = self.reserved_tcp_socket.getsockname()[1]
Reserve free TCP port to be used by Cloud SQL Proxy
def reserve_free_tcp_port(self): """ Reserve free TCP port to be used by Cloud SQL Proxy """ self.reserved_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.reserved_tcp_socket.bind(('127.0.0.1', 0)) self.sql_proxy_tcp_port = self.reserved_tcp_socket.getsockname()[1]
airflow/contrib/hooks/gcp_sql_hook.py
apache/airflow
_normalize_mlengine_job_id
def _normalize_mlengine_job_id(job_id): match = re.search(r'\d|\{{2}', job_id) if match and match.start() == 0: job = 'z_{}'.format(job_id) else: job = job_id tracker = 0 cleansed_job_id = '' for m in re.finditer(r'\{{2}.+?\}{2}', job): cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:m.start()]) cleansed_job_id += job[m.start():m.end()] tracker = m.end() cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:]) return cleansed_job_id
Replaces invalid MLEngine job_id characters with '_'. This also adds a leading 'z' in case job_id starts with an invalid character.
def _normalize_mlengine_job_id(job_id): """ Replaces invalid MLEngine job_id characters with '_'. This also adds a leading 'z' in case job_id starts with an invalid character. Args: job_id: A job_id str that may have invalid characters. Returns: A valid job_id representation. """ # Add a prefix when a job_id starts with a digit or a template match = re.search(r'\d|\{{2}', job_id) if match and match.start() == 0: job = 'z_{}'.format(job_id) else: job = job_id # Clean up 'bad' characters except templates tracker = 0 cleansed_job_id = '' for m in re.finditer(r'\{{2}.+?\}{2}', job): cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:m.start()]) cleansed_job_id += job[m.start():m.end()] tracker = m.end() # Clean up last substring or the full string if no templates cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:]) return cleansed_job_id
airflow/contrib/operators/mlengine_operator.py
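Hand-traced examples of what the normalization above is expected to produce (illustrative, not verified test output):

_normalize_mlengine_job_id('5_training.job')     # -> 'z_5_training_job'
_normalize_mlengine_job_id('my.job.{{ ds }}')    # -> 'my_job_{{ ds }}'
_normalize_mlengine_job_id('already_valid_id')   # -> 'already_valid_id'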
apache/airflow
FTPSensor._get_error_code
def _get_error_code(self, e): try: matches = self.error_code_pattern.match(str(e)) code = int(matches.group(0)) return code except ValueError: return e
Extract error code from ftp exception
def _get_error_code(self, e): """Extract error code from ftp exception""" try: matches = self.error_code_pattern.match(str(e)) code = int(matches.group(0)) return code except ValueError: return e
airflow/contrib/sensors/ftp_sensor.py
apache/airflow
clear_dag_runs
def clear_dag_runs(): session = settings.Session() drs = session.query(DagRun).filter( DagRun.dag_id.in_(DAG_IDS), ).all() for dr in drs: logging.info('Deleting DagRun :: {}'.format(dr)) session.delete(dr)
Remove any existing DAG runs for the perf test DAGs.
def clear_dag_runs(): """ Remove any existing DAG runs for the perf test DAGs. """ session = settings.Session() drs = session.query(DagRun).filter( DagRun.dag_id.in_(DAG_IDS), ).all() for dr in drs: logging.info('Deleting DagRun :: {}'.format(dr)) session.delete(dr)
scripts/perf/scheduler_ops_metrics.py
apache/airflow
clear_dag_task_instances
def clear_dag_task_instances(): session = settings.Session() TI = TaskInstance tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .all() ) for ti in tis: logging.info('Deleting TaskInstance :: {}'.format(ti)) session.delete(ti) session.commit()
Remove any existing task instances for the perf test DAGs.
def clear_dag_task_instances(): """ Remove any existing task instances for the perf test DAGs. """ session = settings.Session() TI = TaskInstance tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .all() ) for ti in tis: logging.info('Deleting TaskInstance :: {}'.format(ti)) session.delete(ti) session.commit()
scripts/perf/scheduler_ops_metrics.py
apache/airflow
set_dags_paused_state
def set_dags_paused_state(is_paused): session = settings.Session() dms = session.query(DagModel).filter( DagModel.dag_id.in_(DAG_IDS)) for dm in dms: logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused)) dm.is_paused = is_paused session.commit()
Toggle the pause state of the DAGs in the test.
def set_dags_paused_state(is_paused): """ Toggle the pause state of the DAGs in the test. """ session = settings.Session() dms = session.query(DagModel).filter( DagModel.dag_id.in_(DAG_IDS)) for dm in dms: logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused)) dm.is_paused = is_paused session.commit()
scripts/perf/scheduler_ops_metrics.py
apache/airflow
SchedulerMetricsJob.print_stats
def print_stats(self): session = settings.Session() TI = TaskInstance tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .all() ) successful_tis = [x for x in tis if x.state == State.SUCCESS] ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date, (ti.queued_dttm - self.start_date).total_seconds(), (ti.start_date - self.start_date).total_seconds(), (ti.end_date - self.start_date).total_seconds(), ti.duration) for ti in successful_tis] ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id', 'execution_date', 'queue_delay', 'start_delay', 'land_time', 'duration']) print('Performance Results') print('###################') for dag_id in DAG_IDS: print('DAG {}'.format(dag_id)) print(ti_perf_df[ti_perf_df['dag_id'] == dag_id]) print('###################') if len(tis) > len(successful_tis): print("WARNING!! The following task instances haven't completed") print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state) for ti in filter(lambda x: x.state != State.SUCCESS, tis)], columns=['dag_id', 'task_id', 'execution_date', 'state'])) session.commit()
Print operational metrics for the scheduler test.
def print_stats(self): """ Print operational metrics for the scheduler test. """ session = settings.Session() TI = TaskInstance tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .all() ) successful_tis = [x for x in tis if x.state == State.SUCCESS] ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date, (ti.queued_dttm - self.start_date).total_seconds(), (ti.start_date - self.start_date).total_seconds(), (ti.end_date - self.start_date).total_seconds(), ti.duration) for ti in successful_tis] ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id', 'execution_date', 'queue_delay', 'start_delay', 'land_time', 'duration']) print('Performance Results') print('###################') for dag_id in DAG_IDS: print('DAG {}'.format(dag_id)) print(ti_perf_df[ti_perf_df['dag_id'] == dag_id]) print('###################') if len(tis) > len(successful_tis): print("WARNING!! The following task instances haven't completed") print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state) for ti in filter(lambda x: x.state != State.SUCCESS, tis)], columns=['dag_id', 'task_id', 'execution_date', 'state'])) session.commit()
scripts/perf/scheduler_ops_metrics.py
apache/airflow
SchedulerMetricsJob.heartbeat
def heartbeat(self): super(SchedulerMetricsJob, self).heartbeat() session = settings.Session() TI = TaskInstance successful_tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .filter(TI.state.in_([State.SUCCESS])) .all() ) session.commit() dagbag = DagBag(SUBDIR) dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS] num_task_instances = sum([(timezone.utcnow() - task.start_date).days for dag in dags for task in dag.tasks]) if (len(successful_tis) == num_task_instances or (timezone.utcnow() - self.start_date).total_seconds() > MAX_RUNTIME_SECS): if len(successful_tis) == num_task_instances: self.log.info("All tasks processed! Printing stats.") else: self.log.info("Test timeout reached. Printing available stats.") self.print_stats() set_dags_paused_state(True) sys.exit()
Override the scheduler heartbeat to determine when the test is complete
def heartbeat(self): """ Override the scheduler heartbeat to determine when the test is complete """ super(SchedulerMetricsJob, self).heartbeat() session = settings.Session() # Get all the relevant task instances TI = TaskInstance successful_tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .filter(TI.state.in_([State.SUCCESS])) .all() ) session.commit() dagbag = DagBag(SUBDIR) dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS] # the tasks in perf_dag_1 and per_dag_2 have a daily schedule interval. num_task_instances = sum([(timezone.utcnow() - task.start_date).days for dag in dags for task in dag.tasks]) if (len(successful_tis) == num_task_instances or (timezone.utcnow() - self.start_date).total_seconds() > MAX_RUNTIME_SECS): if len(successful_tis) == num_task_instances: self.log.info("All tasks processed! Printing stats.") else: self.log.info("Test timeout reached. Printing available stats.") self.print_stats() set_dags_paused_state(True) sys.exit()
scripts/perf/scheduler_ops_metrics.py
apache/airflow
AwsLambdaHook.invoke_lambda
def invoke_lambda(self, payload): awslambda_conn = self.get_conn() response = awslambda_conn.invoke( FunctionName=self.function_name, InvocationType=self.invocation_type, LogType=self.log_type, Payload=payload, Qualifier=self.qualifier ) return response
Invoke Lambda Function
def invoke_lambda(self, payload): """ Invoke Lambda Function """ awslambda_conn = self.get_conn() response = awslambda_conn.invoke( FunctionName=self.function_name, InvocationType=self.invocation_type, LogType=self.log_type, Payload=payload, Qualifier=self.qualifier ) return response
airflow/contrib/hooks/aws_lambda_hook.py
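A hedged invocation sketch for the hook above; the constructor arguments are assumptions (only the attributes used in invoke_lambda are visible here), and the response shape follows the underlying boto3 invoke call.

import json
from airflow.contrib.hooks.aws_lambda_hook import AwsLambdaHook

hook = AwsLambdaHook(function_name='my-function',        # assumed constructor
                     invocation_type='RequestResponse')
response = hook.invoke_lambda(payload=json.dumps({'key': 'value'}))
print(response['StatusCode'])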
apache/airflow
mkdirs
def mkdirs(path, mode): try: o_umask = os.umask(0) os.makedirs(path, mode) except OSError: if not os.path.isdir(path): raise finally: os.umask(o_umask)
Creates the directory specified by path, creating intermediate directories as necessary. If directory already exists, this is a no-op.
def mkdirs(path, mode): """ Creates the directory specified by path, creating intermediate directories as necessary. If directory already exists, this is a no-op. :param path: The directory to create :type path: str :param mode: The mode to give to the directory e.g. 0o755, ignores umask :type mode: int """ try: o_umask = os.umask(0) os.makedirs(path, mode) except OSError: if not os.path.isdir(path): raise finally: os.umask(o_umask)
airflow/utils/file.py
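A minimal usage example of the helper above:

from airflow.utils.file import mkdirs

# Creates the whole chain of directories with mode 0o755, ignoring the process
# umask; calling it again on an existing directory is a no-op.
mkdirs('/tmp/airflow_example/nested/dirs', 0o755)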
apache/airflow
_convert_to_float_if_possible
def _convert_to_float_if_possible(s): try: ret = float(s) except (ValueError, TypeError): ret = s return ret
A small helper function to convert a string to a numeric value if appropriate
def _convert_to_float_if_possible(s): """ A small helper function to convert a string to a numeric value if appropriate :param s: the string to be converted :type s: str """ try: ret = float(s) except (ValueError, TypeError): ret = s return ret
airflow/operators/check_operator.py
apache/airflow
make_aware
def make_aware(value, timezone=None): if timezone is None: timezone = TIMEZONE if is_localized(value): raise ValueError( "make_aware expects a naive datetime, got %s" % value) if hasattr(value, 'fold'): value = value.replace(fold=1) if hasattr(timezone, 'localize'): return timezone.localize(value) elif hasattr(timezone, 'convert'): return timezone.convert(value) else: return value.replace(tzinfo=timezone)
Make a naive datetime.datetime in a given time zone aware.
def make_aware(value, timezone=None): """ Make a naive datetime.datetime in a given time zone aware. :param value: datetime :param timezone: timezone :return: localized datetime in settings.TIMEZONE or timezone """ if timezone is None: timezone = TIMEZONE # Check that we won't overwrite the timezone of an aware datetime. if is_localized(value): raise ValueError( "make_aware expects a naive datetime, got %s" % value) if hasattr(value, 'fold'): # In case of python 3.6 we want to do the same that pendulum does for python3.5 # i.e in case we move clock back we want to schedule the run at the time of the second # instance of the same clock time rather than the first one. # Fold parameter has no impact in other cases so we can safely set it to 1 here value = value.replace(fold=1) if hasattr(timezone, 'localize'): # This method is available for pytz time zones. return timezone.localize(value) elif hasattr(timezone, 'convert'): # For pendulum return timezone.convert(value) else: # This may be wrong around DST changes! return value.replace(tzinfo=timezone)
airflow/utils/timezone.py
apache/airflow
make_naive
def make_naive(value, timezone=None): if timezone is None: timezone = TIMEZONE if is_naive(value): raise ValueError("make_naive() cannot be applied to a naive datetime") o = value.astimezone(timezone) naive = dt.datetime(o.year, o.month, o.day, o.hour, o.minute, o.second, o.microsecond) return naive
Make an aware datetime.datetime naive in a given time zone.
def make_naive(value, timezone=None): """ Make an aware datetime.datetime naive in a given time zone. :param value: datetime :param timezone: timezone :return: naive datetime """ if timezone is None: timezone = TIMEZONE # Emulate the behavior of astimezone() on Python < 3.6. if is_naive(value): raise ValueError("make_naive() cannot be applied to a naive datetime") o = value.astimezone(timezone) # cross library compatibility naive = dt.datetime(o.year, o.month, o.day, o.hour, o.minute, o.second, o.microsecond) return naive
airflow/utils/timezone.py
apache/airflow
datetime
def datetime(*args, **kwargs): if 'tzinfo' not in kwargs: kwargs['tzinfo'] = TIMEZONE return dt.datetime(*args, **kwargs)
Wrapper around datetime.datetime that adds settings.TIMEZONE if tzinfo not specified
def datetime(*args, **kwargs): """ Wrapper around datetime.datetime that adds settings.TIMEZONE if tzinfo not specified :return: datetime.datetime """ if 'tzinfo' not in kwargs: kwargs['tzinfo'] = TIMEZONE return dt.datetime(*args, **kwargs)
airflow/utils/timezone.py
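A short sketch tying together the three timezone helpers above:

import datetime as dt
from airflow.utils import timezone

naive = dt.datetime(2019, 1, 1, 12, 0)
aware = timezone.make_aware(naive)          # attaches settings.TIMEZONE
naive_again = timezone.make_naive(aware)    # converts, then drops tzinfo
tz_default = timezone.datetime(2019, 1, 1)  # aware from the start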
apache/airflow
DruidDbApiHook.get_conn
def get_conn(self): conn = self.get_connection(self.druid_broker_conn_id) druid_broker_conn = connect( host=conn.host, port=conn.port, path=conn.extra_dejson.get('endpoint', '/druid/v2/sql'), scheme=conn.extra_dejson.get('schema', 'http') ) self.log.info('Get the connection to druid broker on %s', conn.host) return druid_broker_conn
Establish a connection to druid broker.
def get_conn(self): """ Establish a connection to druid broker. """ conn = self.get_connection(self.druid_broker_conn_id) druid_broker_conn = connect( host=conn.host, port=conn.port, path=conn.extra_dejson.get('endpoint', '/druid/v2/sql'), scheme=conn.extra_dejson.get('schema', 'http') ) self.log.info('Get the connection to druid broker on %s', conn.host) return druid_broker_conn
airflow/hooks/druid_hook.py
apache/airflow
HttpHook.run
def run(self, endpoint, data=None, headers=None, extra_options=None): extra_options = extra_options or {} session = self.get_conn(headers) if self.base_url and not self.base_url.endswith('/') and \ endpoint and not endpoint.startswith('/'): url = self.base_url + '/' + endpoint else: url = (self.base_url or '') + (endpoint or '') req = None if self.method == 'GET': req = requests.Request(self.method, url, params=data, headers=headers) elif self.method == 'HEAD': req = requests.Request(self.method, url, headers=headers) else: req = requests.Request(self.method, url, data=data, headers=headers) prepped_request = session.prepare_request(req) self.log.info("Sending '%s' to url: %s", self.method, url) return self.run_and_check(session, prepped_request, extra_options)
Performs the request
def run(self, endpoint, data=None, headers=None, extra_options=None): """ Performs the request :param endpoint: the endpoint to be called i.e. resource/v1/query? :type endpoint: str :param data: payload to be uploaded or request parameters :type data: dict :param headers: additional headers to be passed through as a dictionary :type headers: dict :param extra_options: additional options to be used when executing the request i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX or 3XX status codes :type extra_options: dict """ extra_options = extra_options or {} session = self.get_conn(headers) if self.base_url and not self.base_url.endswith('/') and \ endpoint and not endpoint.startswith('/'): url = self.base_url + '/' + endpoint else: url = (self.base_url or '') + (endpoint or '') req = None if self.method == 'GET': # GET uses params req = requests.Request(self.method, url, params=data, headers=headers) elif self.method == 'HEAD': # HEAD doesn't use params req = requests.Request(self.method, url, headers=headers) else: # Others use data req = requests.Request(self.method, url, data=data, headers=headers) prepped_request = session.prepare_request(req) self.log.info("Sending '%s' to url: %s", self.method, url) return self.run_and_check(session, prepped_request, extra_options)
airflow/hooks/http_hook.py
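A usage sketch for run(); the connection id and endpoint are illustrative.

from airflow.hooks.http_hook import HttpHook

hook = HttpHook(method='GET', http_conn_id='http_default')
response = hook.run(
    endpoint='api/v1/status',
    data={'verbose': 'true'},                 # sent as query params for GET
    headers={'Accept': 'application/json'},
    extra_options={'timeout': 10, 'check_response': False},
)
print(response.status_code)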
apache/airflow
HttpHook.check_response
def check_response(self, response): try: response.raise_for_status() except requests.exceptions.HTTPError: self.log.error("HTTP error: %s", response.reason) if self.method not in ['GET', 'HEAD']: self.log.error(response.text) raise AirflowException(str(response.status_code) + ":" + response.reason)
Checks the status code and raise an AirflowException exception on non 2XX or 3XX status codes
def check_response(self, response): """ Checks the status code and raise an AirflowException exception on non 2XX or 3XX status codes :param response: A requests response object :type response: requests.response """ try: response.raise_for_status() except requests.exceptions.HTTPError: self.log.error("HTTP error: %s", response.reason) if self.method not in ['GET', 'HEAD']: self.log.error(response.text) raise AirflowException(str(response.status_code) + ":" + response.reason)
airflow/hooks/http_hook.py
apache/airflow
HttpHook.run_and_check
def run_and_check(self, session, prepped_request, extra_options): extra_options = extra_options or {} try: response = session.send( prepped_request, stream=extra_options.get("stream", False), verify=extra_options.get("verify", True), proxies=extra_options.get("proxies", {}), cert=extra_options.get("cert"), timeout=extra_options.get("timeout"), allow_redirects=extra_options.get("allow_redirects", True)) if extra_options.get('check_response', True): self.check_response(response) return response except requests.exceptions.ConnectionError as ex: self.log.warn(str(ex) + ' Tenacity will retry to execute the operation') raise ex
Grabs extra options like timeout and actually runs the request, checking for the result
def run_and_check(self, session, prepped_request, extra_options): """ Grabs extra options like timeout and actually runs the request, checking for the result :param session: the session to be used to execute the request :type session: requests.Session :param prepped_request: the prepared request generated in run() :type prepped_request: session.prepare_request :param extra_options: additional options to be used when executing the request i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX or 3XX status codes :type extra_options: dict """ extra_options = extra_options or {} try: response = session.send( prepped_request, stream=extra_options.get("stream", False), verify=extra_options.get("verify", True), proxies=extra_options.get("proxies", {}), cert=extra_options.get("cert"), timeout=extra_options.get("timeout"), allow_redirects=extra_options.get("allow_redirects", True)) if extra_options.get('check_response', True): self.check_response(response) return response except requests.exceptions.ConnectionError as ex: self.log.warn(str(ex) + ' Tenacity will retry to execute the operation') raise ex
airflow/hooks/http_hook.py
apache/airflow
create_session
def create_session(): session = settings.Session() try: yield session session.commit() except Exception: session.rollback() raise finally: session.close()
Contextmanager that will create and teardown a session.
def create_session(): """ Contextmanager that will create and teardown a session. """ session = settings.Session() try: yield session session.commit() except Exception: session.rollback() raise finally: session.close()
airflow/utils/db.py
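A usage example of the context manager above:

from airflow.models import Connection
from airflow.utils.db import create_session

# Commits on success, rolls back on any exception, always closes the session.
with create_session() as session:
    connection_count = session.query(Connection).count()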
apache/airflow
resetdb
def resetdb(): from airflow import models from alembic.migration import MigrationContext log.info("Dropping tables that exist") models.base.Base.metadata.drop_all(settings.engine) mc = MigrationContext.configure(settings.engine) if mc._version.exists(settings.engine): mc._version.drop(settings.engine) from flask_appbuilder.models.sqla import Base Base.metadata.drop_all(settings.engine) initdb()
Clear out the database
def resetdb(): """ Clear out the database """ from airflow import models # alembic adds significant import time, so we import it lazily from alembic.migration import MigrationContext log.info("Dropping tables that exist") models.base.Base.metadata.drop_all(settings.engine) mc = MigrationContext.configure(settings.engine) if mc._version.exists(settings.engine): mc._version.drop(settings.engine) from flask_appbuilder.models.sqla import Base Base.metadata.drop_all(settings.engine) initdb()
airflow/utils/db.py
apache/airflow
PrestoHook._get_pretty_exception_message
def _get_pretty_exception_message(e): if (hasattr(e, 'message') and 'errorName' in e.message and 'message' in e.message): return ('{name}: {message}'.format( name=e.message['errorName'], message=e.message['message'])) else: return str(e)
Parses some DatabaseError to provide a better error message
def _get_pretty_exception_message(e): """ Parses some DatabaseError to provide a better error message """ if (hasattr(e, 'message') and 'errorName' in e.message and 'message' in e.message): return ('{name}: {message}'.format( name=e.message['errorName'], message=e.message['message'])) else: return str(e)
airflow/hooks/presto_hook.py
apache/airflow
PrestoHook.get_records
def get_records(self, hql, parameters=None): try: return super().get_records( self._strip_sql(hql), parameters) except DatabaseError as e: raise PrestoException(self._get_pretty_exception_message(e))
Get a set of records from Presto
def get_records(self, hql, parameters=None): """ Get a set of records from Presto """ try: return super().get_records( self._strip_sql(hql), parameters) except DatabaseError as e: raise PrestoException(self._get_pretty_exception_message(e))
airflow/hooks/presto_hook.py
apache/airflow
PrestoHook.get_pandas_df
def get_pandas_df(self, hql, parameters=None): import pandas cursor = self.get_cursor() try: cursor.execute(self._strip_sql(hql), parameters) data = cursor.fetchall() except DatabaseError as e: raise PrestoException(self._get_pretty_exception_message(e)) column_descriptions = cursor.description if data: df = pandas.DataFrame(data) df.columns = [c[0] for c in column_descriptions] else: df = pandas.DataFrame() return df
Get a pandas dataframe from a sql query.
def get_pandas_df(self, hql, parameters=None): """ Get a pandas dataframe from a sql query. """ import pandas cursor = self.get_cursor() try: cursor.execute(self._strip_sql(hql), parameters) data = cursor.fetchall() except DatabaseError as e: raise PrestoException(self._get_pretty_exception_message(e)) column_descriptions = cursor.description if data: df = pandas.DataFrame(data) df.columns = [c[0] for c in column_descriptions] else: df = pandas.DataFrame() return df
airflow/hooks/presto_hook.py
apache/airflow
PrestoHook.run
def run(self, hql, parameters=None): return super().run(self._strip_sql(hql), parameters)
Execute the statement against Presto. Can be used to create views.
def run(self, hql, parameters=None): """ Execute the statement against Presto. Can be used to create views. """ return super().run(self._strip_sql(hql), parameters)
airflow/hooks/presto_hook.py
apache/airflow
PrestoHook.insert_rows
def insert_rows(self, table, rows, target_fields=None): super().insert_rows(table, rows, target_fields, 0)
A generic way to insert a set of tuples into a table.
def insert_rows(self, table, rows, target_fields=None): """ A generic way to insert a set of tuples into a table. :param table: Name of the target table :type table: str :param rows: The rows to insert into the table :type rows: iterable of tuples :param target_fields: The names of the columns to fill in the table :type target_fields: iterable of strings """ super().insert_rows(table, rows, target_fields, 0)
airflow/hooks/presto_hook.py
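A hedged sketch of the PrestoHook methods above; the connection id, target table, and the queried system table are assumptions.

from airflow.hooks.presto_hook import PrestoHook

hook = PrestoHook(presto_conn_id='presto_default')

rows = hook.get_records('SELECT node_id FROM system.runtime.nodes')
df = hook.get_pandas_df('SELECT * FROM system.runtime.nodes')
hook.insert_rows('my_schema.my_table',
                 rows=[('a', 1), ('b', 2)],
                 target_fields=['name', 'value'])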
apache/airflow
AzureCosmosDBHook.get_conn
def get_conn(self): if self.cosmos_client is not None: return self.cosmos_client self.cosmos_client = cosmos_client.CosmosClient(self.endpoint_uri, {'masterKey': self.master_key}) return self.cosmos_client
Return a cosmos db client.
def get_conn(self): """ Return a cosmos db client. """ if self.cosmos_client is not None: return self.cosmos_client # Initialize the Python Azure Cosmos DB client self.cosmos_client = cosmos_client.CosmosClient(self.endpoint_uri, {'masterKey': self.master_key}) return self.cosmos_client
airflow/contrib/hooks/azure_cosmos_hook.py
apache/airflow
AzureCosmosDBHook.does_collection_exist
def does_collection_exist(self, collection_name, database_name=None): if collection_name is None: raise AirflowBadRequest("Collection name cannot be None.") existing_container = list(self.get_conn().QueryContainers( get_database_link(self.__get_database_name(database_name)), { "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": collection_name} ] })) if len(existing_container) == 0: return False return True
Checks if a collection exists in CosmosDB.
def does_collection_exist(self, collection_name, database_name=None): """ Checks if a collection exists in CosmosDB. """ if collection_name is None: raise AirflowBadRequest("Collection name cannot be None.") existing_container = list(self.get_conn().QueryContainers( get_database_link(self.__get_database_name(database_name)), { "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": collection_name} ] })) if len(existing_container) == 0: return False return True
airflow/contrib/hooks/azure_cosmos_hook.py
apache/airflow
AzureCosmosDBHook.create_collection
def create_collection(self, collection_name, database_name=None): if collection_name is None: raise AirflowBadRequest("Collection name cannot be None.") existing_container = list(self.get_conn().QueryContainers( get_database_link(self.__get_database_name(database_name)), { "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": collection_name} ] })) if len(existing_container) == 0: self.get_conn().CreateContainer( get_database_link(self.__get_database_name(database_name)), {"id": collection_name})
Creates a new collection in the CosmosDB database.
def create_collection(self, collection_name, database_name=None): """ Creates a new collection in the CosmosDB database. """ if collection_name is None: raise AirflowBadRequest("Collection name cannot be None.") # We need to check to see if this container already exists so we don't try # to create it twice existing_container = list(self.get_conn().QueryContainers( get_database_link(self.__get_database_name(database_name)), { "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": collection_name} ] })) # Only create if we did not find it already existing if len(existing_container) == 0: self.get_conn().CreateContainer( get_database_link(self.__get_database_name(database_name)), {"id": collection_name})
airflow/contrib/hooks/azure_cosmos_hook.py
apache/airflow
AzureCosmosDBHook.does_database_exist
def does_database_exist(self, database_name): if database_name is None: raise AirflowBadRequest("Database name cannot be None.") existing_database = list(self.get_conn().QueryDatabases({ "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": database_name} ] })) if len(existing_database) == 0: return False return True
Checks if a database exists in CosmosDB.
def does_database_exist(self, database_name): """ Checks if a database exists in CosmosDB. """ if database_name is None: raise AirflowBadRequest("Database name cannot be None.") existing_database = list(self.get_conn().QueryDatabases({ "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": database_name} ] })) if len(existing_database) == 0: return False return True
airflow/contrib/hooks/azure_cosmos_hook.py
apache/airflow
AzureCosmosDBHook.create_database
def create_database(self, database_name): if database_name is None: raise AirflowBadRequest("Database name cannot be None.") existing_database = list(self.get_conn().QueryDatabases({ "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": database_name} ] })) if len(existing_database) == 0: self.get_conn().CreateDatabase({"id": database_name})
Creates a new database in CosmosDB.
def create_database(self, database_name): """ Creates a new database in CosmosDB. """ if database_name is None: raise AirflowBadRequest("Database name cannot be None.") # We need to check to see if this database already exists so we don't try # to create it twice existing_database = list(self.get_conn().QueryDatabases({ "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": database_name} ] })) # Only create if we did not find it already existing if len(existing_database) == 0: self.get_conn().CreateDatabase({"id": database_name})
airflow/contrib/hooks/azure_cosmos_hook.py
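A minimal usage sketch tying together the existence checks and the create_database/create_collection helpers documented above. The connection id 'azure_cosmos_default' and the database/collection names are illustrative assumptions, not values from the source.

from airflow.contrib.hooks.azure_cosmos_hook import AzureCosmosDBHook

# Assumed connection id; an Azure Cosmos connection with this id must be configured.
hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_default')

# create_database/create_collection already check for existence internally; the
# explicit does_* calls are shown here only to demonstrate both sets of methods.
if not hook.does_database_exist('airflow_example_db'):
    hook.create_database('airflow_example_db')
if not hook.does_collection_exist('events', database_name='airflow_example_db'):
    hook.create_collection('events', database_name='airflow_example_db')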
apache/airflow
AzureCosmosDBHook.delete_database
def delete_database(self, database_name): if database_name is None: raise AirflowBadRequest("Database name cannot be None.") self.get_conn().DeleteDatabase(get_database_link(database_name))
Deletes an existing database in CosmosDB.
def delete_database(self, database_name): """ Deletes an existing database in CosmosDB. """ if database_name is None: raise AirflowBadRequest("Database name cannot be None.") self.get_conn().DeleteDatabase(get_database_link(database_name))
airflow/contrib/hooks/azure_cosmos_hook.py
apache/airflow
AzureCosmosDBHook.delete_collection
def delete_collection(self, collection_name, database_name=None): if collection_name is None: raise AirflowBadRequest("Collection name cannot be None.") self.get_conn().DeleteContainer( get_collection_link(self.__get_database_name(database_name), collection_name))
Deletes an existing collection in the CosmosDB database.
def delete_collection(self, collection_name, database_name=None): """ Deletes an existing collection in the CosmosDB database. """ if collection_name is None: raise AirflowBadRequest("Collection name cannot be None.") self.get_conn().DeleteContainer( get_collection_link(self.__get_database_name(database_name), collection_name))
airflow/contrib/hooks/azure_cosmos_hook.py
apache/airflow
AzureCosmosDBHook.insert_documents
def insert_documents(self, documents, database_name=None, collection_name=None): if documents is None: raise AirflowBadRequest("You cannot insert empty documents") created_documents = [] for single_document in documents: created_documents.append( self.get_conn().CreateItem( get_collection_link( self.__get_database_name(database_name), self.__get_collection_name(collection_name)), single_document)) return created_documents
Insert a list of new documents into an existing collection in the CosmosDB database.
def insert_documents(self, documents, database_name=None, collection_name=None): """ Insert a list of new documents into an existing collection in the CosmosDB database. """ if documents is None: raise AirflowBadRequest("You cannot insert empty documents") created_documents = [] for single_document in documents: created_documents.append( self.get_conn().CreateItem( get_collection_link( self.__get_database_name(database_name), self.__get_collection_name(collection_name)), single_document)) return created_documents
airflow/contrib/hooks/azure_cosmos_hook.py
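A hedged sketch of insert_documents, assuming the database and collection from the previous example already exist; the connection id and the payloads are illustrative.

import uuid

from airflow.contrib.hooks.azure_cosmos_hook import AzureCosmosDBHook

hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_default')  # assumed conn id
documents = [
    {'id': str(uuid.uuid4()), 'event': 'dag_started'},
    {'id': str(uuid.uuid4()), 'event': 'dag_finished'},
]
created = hook.insert_documents(
    documents,
    database_name='airflow_example_db',
    collection_name='events',
)
print('created %d documents' % len(created))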
apache/airflow
AzureCosmosDBHook.delete_document
def delete_document(self, document_id, database_name=None, collection_name=None): if document_id is None: raise AirflowBadRequest("Cannot delete a document without an id") self.get_conn().DeleteItem( get_document_link( self.__get_database_name(database_name), self.__get_collection_name(collection_name), document_id))
Delete an existing document out of a collection in the CosmosDB database.
def delete_document(self, document_id, database_name=None, collection_name=None): """ Delete an existing document out of a collection in the CosmosDB database. """ if document_id is None: raise AirflowBadRequest("Cannot delete a document without an id") self.get_conn().DeleteItem( get_document_link( self.__get_database_name(database_name), self.__get_collection_name(collection_name), document_id))
airflow/contrib/hooks/azure_cosmos_hook.py
apache/airflow
AzureCosmosDBHook.get_document
def get_document(self, document_id, database_name=None, collection_name=None): if document_id is None: raise AirflowBadRequest("Cannot get a document without an id") try: return self.get_conn().ReadItem( get_document_link( self.__get_database_name(database_name), self.__get_collection_name(collection_name), document_id)) except HTTPFailure: return None
Get a document from an existing collection in the CosmosDB database.
def get_document(self, document_id, database_name=None, collection_name=None): """ Get a document from an existing collection in the CosmosDB database. """ if document_id is None: raise AirflowBadRequest("Cannot get a document without an id") try: return self.get_conn().ReadItem( get_document_link( self.__get_database_name(database_name), self.__get_collection_name(collection_name), document_id)) except HTTPFailure: return None
airflow/contrib/hooks/azure_cosmos_hook.py
apache/airflow
AzureCosmosDBHook.get_documents
def get_documents(self, sql_string, database_name=None, collection_name=None, partition_key=None): if sql_string is None: raise AirflowBadRequest("SQL query string cannot be None") query = {'query': sql_string} try: result_iterable = self.get_conn().QueryItems( get_collection_link( self.__get_database_name(database_name), self.__get_collection_name(collection_name)), query, partition_key) return list(result_iterable) except HTTPFailure: return None
Get a list of documents from an existing collection in the CosmosDB database via SQL query.
def get_documents(self, sql_string, database_name=None, collection_name=None, partition_key=None): """ Get a list of documents from an existing collection in the CosmosDB database via SQL query. """ if sql_string is None: raise AirflowBadRequest("SQL query string cannot be None") # Query them in SQL query = {'query': sql_string} try: result_iterable = self.get_conn().QueryItems( get_collection_link( self.__get_database_name(database_name), self.__get_collection_name(collection_name)), query, partition_key) return list(result_iterable) except HTTPFailure: return None
airflow/contrib/hooks/azure_cosmos_hook.py
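A hedged sketch of get_documents running a Cosmos SQL query against the same assumed database and collection; note that the hook returns None when the query raises HTTPFailure.

from airflow.contrib.hooks.azure_cosmos_hook import AzureCosmosDBHook

hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_default')  # assumed conn id
rows = hook.get_documents(
    "SELECT * FROM c WHERE c.event = 'dag_started'",
    database_name='airflow_example_db',
    collection_name='events',
)
if rows is None:
    print('query failed; the hook swallows HTTPFailure and returns None')
else:
    for doc in rows:
        print(doc['id'])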
apache/airflow
GcfHook.create_new_function
def create_new_function(self, location, body, project_id=None): response = self.get_conn().projects().locations().functions().create( location=self._full_location(project_id, location), body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(operation_name=operation_name)
Creates a new function in Cloud Functions in the location specified in the body.
def create_new_function(self, location, body, project_id=None): """ Creates a new function in Cloud Function in the location specified in the body. :param location: The location of the function. :type location: str :param body: The body required by the Cloud Functions insert API. :type body: dict :param project_id: Optional, Google Cloud Project project_id where the function belongs. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self.get_conn().projects().locations().functions().create( location=self._full_location(project_id, location), body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(operation_name=operation_name)
airflow/contrib/hooks/gcp_function_hook.py
apache/airflow
GcfHook.update_function
def update_function(self, name, body, update_mask): response = self.get_conn().projects().locations().functions().patch( updateMask=",".join(update_mask), name=name, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(operation_name=operation_name)
Updates Cloud Functions according to the specified update mask.
def update_function(self, name, body, update_mask): """ Updates Cloud Functions according to the specified update mask. :param name: The name of the function. :type name: str :param body: The body required by the cloud function patch API. :type body: dict :param update_mask: The update mask - array of fields that should be patched. :type update_mask: [str] :return: None """ response = self.get_conn().projects().locations().functions().patch( updateMask=",".join(update_mask), name=name, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(operation_name=operation_name)
airflow/contrib/hooks/gcp_function_hook.py
apache/airflow
GcfHook.upload_function_zip
def upload_function_zip(self, location, zip_path, project_id=None): response = self.get_conn().projects().locations().functions().generateUploadUrl( parent=self._full_location(project_id, location) ).execute(num_retries=self.num_retries) upload_url = response.get('uploadUrl') with open(zip_path, 'rb') as fp: requests.put( url=upload_url, data=fp, headers={ 'Content-type': 'application/zip', 'x-goog-content-length-range': '0,104857600', } ) return upload_url
Uploads a zip file with sources.
def upload_function_zip(self, location, zip_path, project_id=None): """ Uploads zip file with sources. :param location: The location where the function is created. :type location: str :param zip_path: The path of the valid .zip file to upload. :type zip_path: str :param project_id: Optional, Google Cloud Project project_id where the function belongs. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: The upload URL that was returned by generateUploadUrl method. """ response = self.get_conn().projects().locations().functions().generateUploadUrl( parent=self._full_location(project_id, location) ).execute(num_retries=self.num_retries) upload_url = response.get('uploadUrl') with open(zip_path, 'rb') as fp: requests.put( url=upload_url, data=fp, # Those two headers needs to be specified according to: # https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions/generateUploadUrl # nopep8 headers={ 'Content-type': 'application/zip', 'x-goog-content-length-range': '0,104857600', } ) return upload_url
airflow/contrib/hooks/gcp_function_hook.py
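A hedged sketch chaining upload_function_zip and create_new_function. The connection id, project, region and the body fields (which follow the Cloud Functions v1 REST API) are assumptions, as are the constructor arguments of the hook.

from airflow.contrib.hooks.gcp_function_hook import GcfHook

hook = GcfHook(api_version='v1', gcp_conn_id='google_cloud_default')  # assumed constructor args
location = 'europe-west1'
project_id = 'example-project'

# Upload the packaged sources first and reuse the returned signed URL in the body.
upload_url = hook.upload_function_zip(
    location=location, zip_path='/tmp/my_function.zip', project_id=project_id)

body = {
    'name': 'projects/{}/locations/{}/functions/hello_world'.format(project_id, location),
    'entryPoint': 'hello_world',
    'runtime': 'python37',
    'httpsTrigger': {},
    'sourceUploadUrl': upload_url,
}
hook.create_new_function(location=location, body=body, project_id=project_id)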
apache/airflow
GcfHook.delete_function
def delete_function(self, name): response = self.get_conn().projects().locations().functions().delete( name=name).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(operation_name=operation_name)
Deletes the specified Cloud Function.
def delete_function(self, name): """ Deletes the specified Cloud Function. :param name: The name of the function. :type name: str :return: None """ response = self.get_conn().projects().locations().functions().delete( name=name).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(operation_name=operation_name)
airflow/contrib/hooks/gcp_function_hook.py
apache/airflow
BaseTIDep.get_dep_statuses
def get_dep_statuses(self, ti, session, dep_context=None): from airflow.ti_deps.dep_context import DepContext if dep_context is None: dep_context = DepContext() if self.IGNOREABLE and dep_context.ignore_all_deps: yield self._passing_status( reason="Context specified all dependencies should be ignored.") return if self.IS_TASK_DEP and dep_context.ignore_task_deps: yield self._passing_status( reason="Context specified all task dependencies should be ignored.") return for dep_status in self._get_dep_statuses(ti, session, dep_context): yield dep_status
Wrapper around the private _get_dep_statuses method that contains some global checks for all dependencies.
def get_dep_statuses(self, ti, session, dep_context=None): """ Wrapper around the private _get_dep_statuses method that contains some global checks for all dependencies. :param ti: the task instance to get the dependency status for :type ti: airflow.models.TaskInstance :param session: database session :type session: sqlalchemy.orm.session.Session :param dep_context: the context for which this dependency should be evaluated for :type dep_context: DepContext """ # this avoids a circular dependency from airflow.ti_deps.dep_context import DepContext if dep_context is None: dep_context = DepContext() if self.IGNOREABLE and dep_context.ignore_all_deps: yield self._passing_status( reason="Context specified all dependencies should be ignored.") return if self.IS_TASK_DEP and dep_context.ignore_task_deps: yield self._passing_status( reason="Context specified all task dependencies should be ignored.") return for dep_status in self._get_dep_statuses(ti, session, dep_context): yield dep_status
airflow/ti_deps/deps/base_ti_dep.py
apache/airflow
_parse_s3_config
def _parse_s3_config(config_file_name, config_format='boto', profile=None): config = configparser.ConfigParser() if config.read(config_file_name): sections = config.sections() else: raise AirflowException("Couldn't read {0}".format(config_file_name)) if config_format is None: config_format = 'boto' conf_format = config_format.lower() if conf_format == 'boto': if profile is not None and 'profile ' + profile in sections: cred_section = 'profile ' + profile else: cred_section = 'Credentials' elif conf_format == 'aws' and profile is not None: cred_section = profile else: cred_section = 'default' if conf_format in ('boto', 'aws'): key_id_option = 'aws_access_key_id' secret_key_option = 'aws_secret_access_key' else: key_id_option = 'access_key' secret_key_option = 'secret_key' if cred_section not in sections: raise AirflowException("This config file format is not recognized") else: try: access_key = config.get(cred_section, key_id_option) secret_key = config.get(cred_section, secret_key_option) except Exception: logging.warning("Option Error in parsing s3 config file") raise return access_key, secret_key
Parses a config file for S3 credentials. Can currently parse boto, s3cmd.conf and AWS SDK config formats.
def _parse_s3_config(config_file_name, config_format='boto', profile=None): """ Parses a config file for s3 credentials. Can currently parse boto, s3cmd.conf and AWS SDK config formats :param config_file_name: path to the config file :type config_file_name: str :param config_format: config type. One of "boto", "s3cmd" or "aws". Defaults to "boto" :type config_format: str :param profile: profile name in AWS type config file :type profile: str """ config = configparser.ConfigParser() if config.read(config_file_name): # pragma: no cover sections = config.sections() else: raise AirflowException("Couldn't read {0}".format(config_file_name)) # Setting option names depending on file format if config_format is None: config_format = 'boto' conf_format = config_format.lower() if conf_format == 'boto': # pragma: no cover if profile is not None and 'profile ' + profile in sections: cred_section = 'profile ' + profile else: cred_section = 'Credentials' elif conf_format == 'aws' and profile is not None: cred_section = profile else: cred_section = 'default' # Option names if conf_format in ('boto', 'aws'): # pragma: no cover key_id_option = 'aws_access_key_id' secret_key_option = 'aws_secret_access_key' # security_token_option = 'aws_security_token' else: key_id_option = 'access_key' secret_key_option = 'secret_key' # Actual Parsing if cred_section not in sections: raise AirflowException("This config file format is not recognized") else: try: access_key = config.get(cred_section, key_id_option) secret_key = config.get(cred_section, secret_key_option) except Exception: logging.warning("Option Error in parsing s3 config file") raise return access_key, secret_key
airflow/contrib/hooks/aws_hook.py
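A self-contained sketch of _parse_s3_config against a boto-style file; the path and the dummy key values are made up for illustration.

from airflow.contrib.hooks.aws_hook import _parse_s3_config

config_path = '/tmp/example_s3_config'
with open(config_path, 'w') as f:
    f.write('[Credentials]\n'
            'aws_access_key_id = AKIAEXAMPLE\n'
            'aws_secret_access_key = not-a-real-secret\n')

# Defaults to the boto format and the [Credentials] section when no profile is given.
access_key, secret_key = _parse_s3_config(config_path, config_format='boto')
print(access_key, secret_key)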
apache/airflow
AwsHook.get_credentials
def get_credentials(self, region_name=None): session, _ = self._get_credentials(region_name) return session.get_credentials().get_frozen_credentials()
Get the underlying `botocore.Credentials` object. This contains the following authentication attributes: access_key, secret_key and token.
def get_credentials(self, region_name=None): """Get the underlying `botocore.Credentials` object. This contains the following authentication attributes: access_key, secret_key and token. """ session, _ = self._get_credentials(region_name) # Credentials are refreshable, so accessing your access key and # secret key separately can lead to a race condition. # See https://stackoverflow.com/a/36291428/8283373 return session.get_credentials().get_frozen_credentials()
airflow/contrib/hooks/aws_hook.py
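A hedged sketch of get_credentials; 'aws_default' is the conventional connection id and the region is arbitrary.

from airflow.contrib.hooks.aws_hook import AwsHook

hook = AwsHook(aws_conn_id='aws_default')
creds = hook.get_credentials(region_name='eu-west-1')
# Frozen credentials expose read-only access_key, secret_key and token attributes.
print(bool(creds.access_key), bool(creds.secret_key), creds.token is not None)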
apache/airflow
StreamLogWriter.flush
def flush(self): if len(self._buffer) > 0: self.logger.log(self.level, self._buffer) self._buffer = str()
Ensure all logging output has been flushed
def flush(self): """ Ensure all logging output has been flushed """ if len(self._buffer) > 0: self.logger.log(self.level, self._buffer) self._buffer = str()
airflow/utils/log/logging_mixin.py
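A hedged sketch that routes print() output through a logger via StreamLogWriter; the (logger, level) constructor signature is assumed from the same module and is not shown in the record above.

import logging
import sys

from airflow.utils.log.logging_mixin import StreamLogWriter

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('airflow.example')

sys.stdout = StreamLogWriter(logger, logging.INFO)  # assumed signature
print('this text is buffered by the writer')
sys.stdout.flush()           # emits any remaining buffered text via logger.log(...)
sys.stdout = sys.__stdout__  # restore the real stdout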
apache/airflow
correct_maybe_zipped
def correct_maybe_zipped(fileloc): _, archive, filename = re.search( r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups() if archive and zipfile.is_zipfile(archive): return archive else: return fileloc
If the path contains a folder with a .zip suffix, then the folder is treated as a zip archive and the path to the zip archive is returned.
def correct_maybe_zipped(fileloc): """ If the path contains a folder with a .zip suffix, then the folder is treated as a zip archive and path to zip is returned. """ _, archive, filename = re.search( r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups() if archive and zipfile.is_zipfile(archive): return archive else: return fileloc
airflow/utils/dag_processing.py
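A quick illustration of correct_maybe_zipped; the paths are hypothetical, and the second call only resolves to the archive if /opt/airflow/dags/bundle.zip actually exists and is a valid zip file.

from airflow.utils.dag_processing import correct_maybe_zipped

# A plain file path is returned unchanged.
print(correct_maybe_zipped('/opt/airflow/dags/my_dag.py'))

# A path *inside* a .zip folder resolves to the archive itself.
print(correct_maybe_zipped('/opt/airflow/dags/bundle.zip/my_dag.py'))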
apache/airflow
list_py_file_paths
def list_py_file_paths(directory, safe_mode=True, include_examples=None): if include_examples is None: include_examples = conf.getboolean('core', 'LOAD_EXAMPLES') file_paths = [] if directory is None: return [] elif os.path.isfile(directory): return [directory] elif os.path.isdir(directory): patterns_by_dir = {} for root, dirs, files in os.walk(directory, followlinks=True): patterns = patterns_by_dir.get(root, []) ignore_file = os.path.join(root, '.airflowignore') if os.path.isfile(ignore_file): with open(ignore_file, 'r') as f: patterns += [re.compile(p) for p in f.read().split('\n') if p] dirs[:] = [ d for d in dirs if not any(p.search(os.path.join(root, d)) for p in patterns) ] for d in dirs: patterns_by_dir[os.path.join(root, d)] = patterns for f in files: try: file_path = os.path.join(root, f) if not os.path.isfile(file_path): continue mod_name, file_ext = os.path.splitext( os.path.split(file_path)[-1]) if file_ext != '.py' and not zipfile.is_zipfile(file_path): continue if any([re.findall(p, file_path) for p in patterns]): continue might_contain_dag = True if safe_mode and not zipfile.is_zipfile(file_path): with open(file_path, 'rb') as fp: content = fp.read() might_contain_dag = all( [s in content for s in (b'DAG', b'airflow')]) if not might_contain_dag: continue file_paths.append(file_path) except Exception: log = LoggingMixin().log log.exception("Error while examining %s", f) if include_examples: import airflow.example_dags example_dag_folder = airflow.example_dags.__path__[0] file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False)) return file_paths
Traverse a directory and look for Python files.
def list_py_file_paths(directory, safe_mode=True, include_examples=None): """ Traverse a directory and look for Python files. :param directory: the directory to traverse :type directory: unicode :param safe_mode: whether to use a heuristic to determine whether a file contains Airflow DAG definitions :return: a list of paths to Python files in the specified directory :rtype: list[unicode] """ if include_examples is None: include_examples = conf.getboolean('core', 'LOAD_EXAMPLES') file_paths = [] if directory is None: return [] elif os.path.isfile(directory): return [directory] elif os.path.isdir(directory): patterns_by_dir = {} for root, dirs, files in os.walk(directory, followlinks=True): patterns = patterns_by_dir.get(root, []) ignore_file = os.path.join(root, '.airflowignore') if os.path.isfile(ignore_file): with open(ignore_file, 'r') as f: # If we have new patterns create a copy so we don't change # the previous list (which would affect other subdirs) patterns += [re.compile(p) for p in f.read().split('\n') if p] # If we can ignore any subdirs entirely we should - fewer paths # to walk is better. We have to modify the ``dirs`` array in # place for this to affect os.walk dirs[:] = [ d for d in dirs if not any(p.search(os.path.join(root, d)) for p in patterns) ] # We want patterns defined in a parent folder's .airflowignore to # apply to subdirs too for d in dirs: patterns_by_dir[os.path.join(root, d)] = patterns for f in files: try: file_path = os.path.join(root, f) if not os.path.isfile(file_path): continue mod_name, file_ext = os.path.splitext( os.path.split(file_path)[-1]) if file_ext != '.py' and not zipfile.is_zipfile(file_path): continue if any([re.findall(p, file_path) for p in patterns]): continue # Heuristic that guesses whether a Python file contains an # Airflow DAG definition. might_contain_dag = True if safe_mode and not zipfile.is_zipfile(file_path): with open(file_path, 'rb') as fp: content = fp.read() might_contain_dag = all( [s in content for s in (b'DAG', b'airflow')]) if not might_contain_dag: continue file_paths.append(file_path) except Exception: log = LoggingMixin().log log.exception("Error while examining %s", f) if include_examples: import airflow.example_dags example_dag_folder = airflow.example_dags.__path__[0] file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False)) return file_paths
airflow/utils/dag_processing.py
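A hedged sketch of list_py_file_paths over a hypothetical dags folder.

from airflow.utils.dag_processing import list_py_file_paths

# safe_mode keeps only files that mention both b'DAG' and b'airflow';
# include_examples=False skips the bundled example DAGs.
paths = list_py_file_paths('/opt/airflow/dags', safe_mode=True, include_examples=False)
for path in paths:
    print(path)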
apache/airflow
SimpleTaskInstance.construct_task_instance
def construct_task_instance(self, session=None, lock_for_update=False): TI = airflow.models.TaskInstance qry = session.query(TI).filter( TI.dag_id == self._dag_id, TI.task_id == self._task_id, TI.execution_date == self._execution_date) if lock_for_update: ti = qry.with_for_update().first() else: ti = qry.first() return ti
Construct a TaskInstance from the database based on the primary key
def construct_task_instance(self, session=None, lock_for_update=False): """ Construct a TaskInstance from the database based on the primary key :param session: DB session. :param lock_for_update: if True, indicates that the database should lock the TaskInstance (issuing a FOR UPDATE clause) until the session is committed. """ TI = airflow.models.TaskInstance qry = session.query(TI).filter( TI.dag_id == self._dag_id, TI.task_id == self._task_id, TI.execution_date == self._execution_date) if lock_for_update: ti = qry.with_for_update().first() else: ti = qry.first() return ti
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorAgent.start
def start(self): self._process = self._launch_process(self._dag_directory, self._file_paths, self._max_runs, self._processor_factory, self._child_signal_conn, self._stat_queue, self._result_queue, self._async_mode) self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
Launch the DagFileProcessorManager process and start the DAG parsing loop in the manager.
def start(self): """ Launch DagFileProcessorManager processor and start DAG parsing loop in manager. """ self._process = self._launch_process(self._dag_directory, self._file_paths, self._max_runs, self._processor_factory, self._child_signal_conn, self._stat_queue, self._result_queue, self._async_mode) self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorAgent.terminate
def terminate(self): self.log.info("Sending termination message to manager.") self._child_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
Send termination signal to DAG parsing processor manager and expect it to terminate all DAG file processors.
def terminate(self): """ Send termination signal to DAG parsing processor manager and expect it to terminate all DAG file processors. """ self.log.info("Sending termination message to manager.") self._child_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager._exit_gracefully
def _exit_gracefully(self, signum, frame): self.log.info("Exiting gracefully upon receiving signal %s", signum) self.terminate() self.end() self.log.debug("Finished terminating DAG processors.") sys.exit(os.EX_OK)
Helper method to clean up DAG file processors to avoid leaving orphan processes.
def _exit_gracefully(self, signum, frame): """ Helper method to clean up DAG file processors to avoid leaving orphan processes. """ self.log.info("Exiting gracefully upon receiving signal %s", signum) self.terminate() self.end() self.log.debug("Finished terminating DAG processors.") sys.exit(os.EX_OK)
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager.start
def start(self): self.log.info("Processing files using up to %s processes at a time ", self._parallelism) self.log.info("Process each file at most once every %s seconds", self._file_process_interval) self.log.info( "Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval ) if self._async_mode: self.log.debug("Starting DagFileProcessorManager in async mode") self.start_in_async() else: self.log.debug("Starting DagFileProcessorManager in sync mode") self.start_in_sync()
Use multiple processes to parse and generate tasks for the DAGs in parallel. By processing them in separate processes, we can get parallelism and isolation from potentially harmful user code.
def start(self): """ Use multiple processes to parse and generate tasks for the DAGs in parallel. By processing them in separate processes, we can get parallelism and isolation from potentially harmful user code. """ self.log.info("Processing files using up to %s processes at a time ", self._parallelism) self.log.info("Process each file at most once every %s seconds", self._file_process_interval) self.log.info( "Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval ) if self._async_mode: self.log.debug("Starting DagFileProcessorManager in async mode") self.start_in_async() else: self.log.debug("Starting DagFileProcessorManager in sync mode") self.start_in_sync()
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager.start_in_async
def start_in_async(self): while True: loop_start_time = time.time() if self._signal_conn.poll(): agent_signal = self._signal_conn.recv() if agent_signal == DagParsingSignal.TERMINATE_MANAGER: self.terminate() break elif agent_signal == DagParsingSignal.END_MANAGER: self.end() sys.exit(os.EX_OK) self._refresh_dag_dir() simple_dags = self.heartbeat() for simple_dag in simple_dags: self._result_queue.put(simple_dag) self._print_stat() all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths) max_runs_reached = self.max_runs_reached() dag_parsing_stat = DagParsingStat(self._file_paths, self.get_all_pids(), max_runs_reached, all_files_processed, len(simple_dags)) self._stat_queue.put(dag_parsing_stat) if max_runs_reached: self.log.info("Exiting dag parsing loop as all files " "have been processed %s times", self._max_runs) break loop_duration = time.time() - loop_start_time if loop_duration < 1: sleep_length = 1 - loop_duration self.log.debug("Sleeping for %.2f seconds to prevent excessive logging", sleep_length) time.sleep(sleep_length)
Parse DAG files repeatedly in a standalone loop.
def start_in_async(self): """ Parse DAG files repeatedly in a standalone loop. """ while True: loop_start_time = time.time() if self._signal_conn.poll(): agent_signal = self._signal_conn.recv() if agent_signal == DagParsingSignal.TERMINATE_MANAGER: self.terminate() break elif agent_signal == DagParsingSignal.END_MANAGER: self.end() sys.exit(os.EX_OK) self._refresh_dag_dir() simple_dags = self.heartbeat() for simple_dag in simple_dags: self._result_queue.put(simple_dag) self._print_stat() all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths) max_runs_reached = self.max_runs_reached() dag_parsing_stat = DagParsingStat(self._file_paths, self.get_all_pids(), max_runs_reached, all_files_processed, len(simple_dags)) self._stat_queue.put(dag_parsing_stat) if max_runs_reached: self.log.info("Exiting dag parsing loop as all files " "have been processed %s times", self._max_runs) break loop_duration = time.time() - loop_start_time if loop_duration < 1: sleep_length = 1 - loop_duration self.log.debug("Sleeping for %.2f seconds to prevent excessive logging", sleep_length) time.sleep(sleep_length)
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager.start_in_sync
def start_in_sync(self): while True: agent_signal = self._signal_conn.recv() if agent_signal == DagParsingSignal.TERMINATE_MANAGER: self.terminate() break elif agent_signal == DagParsingSignal.END_MANAGER: self.end() sys.exit(os.EX_OK) elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT: self._refresh_dag_dir() simple_dags = self.heartbeat() for simple_dag in simple_dags: self._result_queue.put(simple_dag) self._print_stat() all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths) max_runs_reached = self.max_runs_reached() dag_parsing_stat = DagParsingStat(self._file_paths, self.get_all_pids(), self.max_runs_reached(), all_files_processed, len(simple_dags)) self._stat_queue.put(dag_parsing_stat) self.wait_until_finished() self._signal_conn.send(DagParsingSignal.MANAGER_DONE) if max_runs_reached: self.log.info("Exiting dag parsing loop as all files " "have been processed %s times", self._max_runs) self._signal_conn.send(DagParsingSignal.MANAGER_DONE) break
Parse DAG files in a loop controlled by DagParsingSignal. The actual DAG parsing loop runs once per agent heartbeat message and reports done when the loop finishes.
def start_in_sync(self): """ Parse DAG files in a loop controlled by DagParsingSignal. Actual DAG parsing loop will run once upon receiving one agent heartbeat message and will report done when finished the loop. """ while True: agent_signal = self._signal_conn.recv() if agent_signal == DagParsingSignal.TERMINATE_MANAGER: self.terminate() break elif agent_signal == DagParsingSignal.END_MANAGER: self.end() sys.exit(os.EX_OK) elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT: self._refresh_dag_dir() simple_dags = self.heartbeat() for simple_dag in simple_dags: self._result_queue.put(simple_dag) self._print_stat() all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths) max_runs_reached = self.max_runs_reached() dag_parsing_stat = DagParsingStat(self._file_paths, self.get_all_pids(), self.max_runs_reached(), all_files_processed, len(simple_dags)) self._stat_queue.put(dag_parsing_stat) self.wait_until_finished() self._signal_conn.send(DagParsingSignal.MANAGER_DONE) if max_runs_reached: self.log.info("Exiting dag parsing loop as all files " "have been processed %s times", self._max_runs) self._signal_conn.send(DagParsingSignal.MANAGER_DONE) break
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager._refresh_dag_dir
def _refresh_dag_dir(self): elapsed_time_since_refresh = (timezone.utcnow() - self.last_dag_dir_refresh_time).total_seconds() if elapsed_time_since_refresh > self.dag_dir_list_interval: self.log.info("Searching for files in %s", self._dag_directory) self._file_paths = list_py_file_paths(self._dag_directory) self.last_dag_dir_refresh_time = timezone.utcnow() self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory) self.set_file_paths(self._file_paths) try: self.log.debug("Removing old import errors") self.clear_nonexistent_import_errors() except Exception: self.log.exception("Error removing old import errors")
Refresh file paths from the dag dir if we haven't done so recently.
def _refresh_dag_dir(self): """ Refresh file paths from dag dir if we haven't done it for too long. """ elapsed_time_since_refresh = (timezone.utcnow() - self.last_dag_dir_refresh_time).total_seconds() if elapsed_time_since_refresh > self.dag_dir_list_interval: # Build up a list of Python files that could contain DAGs self.log.info("Searching for files in %s", self._dag_directory) self._file_paths = list_py_file_paths(self._dag_directory) self.last_dag_dir_refresh_time = timezone.utcnow() self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory) self.set_file_paths(self._file_paths) try: self.log.debug("Removing old import errors") self.clear_nonexistent_import_errors() except Exception: self.log.exception("Error removing old import errors")
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager._print_stat
def _print_stat(self): if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() > self.print_stats_interval): if len(self._file_paths) > 0: self._log_file_processing_stats(self._file_paths) self.last_stat_print_time = timezone.utcnow()
Occasionally print out stats about how fast the files are getting processed.
def _print_stat(self): """ Occasionally print out stats about how fast the files are getting processed """ if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() > self.print_stats_interval): if len(self._file_paths) > 0: self._log_file_processing_stats(self._file_paths) self.last_stat_print_time = timezone.utcnow()
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager.clear_nonexistent_import_errors
def clear_nonexistent_import_errors(self, session): query = session.query(errors.ImportError) if self._file_paths: query = query.filter( ~errors.ImportError.filename.in_(self._file_paths) ) query.delete(synchronize_session='fetch') session.commit()
Clears import errors for files that no longer exist.
def clear_nonexistent_import_errors(self, session): """ Clears import errors for files that no longer exist. :param session: session for ORM operations :type session: sqlalchemy.orm.session.Session """ query = session.query(errors.ImportError) if self._file_paths: query = query.filter( ~errors.ImportError.filename.in_(self._file_paths) ) query.delete(synchronize_session='fetch') session.commit()
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager._log_file_processing_stats
def _log_file_processing_stats(self, known_file_paths): headers = ["File Path", "PID", "Runtime", "Last Runtime", "Last Run"] rows = [] for file_path in known_file_paths: last_runtime = self.get_last_runtime(file_path) file_name = os.path.basename(file_path) file_name = os.path.splitext(file_name)[0].replace(os.sep, '.') if last_runtime: Stats.gauge( 'dag_processing.last_runtime.{}'.format(file_name), last_runtime ) processor_pid = self.get_pid(file_path) processor_start_time = self.get_start_time(file_path) runtime = ((timezone.utcnow() - processor_start_time).total_seconds() if processor_start_time else None) last_run = self.get_last_finish_time(file_path) if last_run: seconds_ago = (timezone.utcnow() - last_run).total_seconds() Stats.gauge( 'dag_processing.last_run.seconds_ago.{}'.format(file_name), seconds_ago ) rows.append((file_path, processor_pid, runtime, last_runtime, last_run)) rows = sorted(rows, key=lambda x: x[3] or 0.0) formatted_rows = [] for file_path, pid, runtime, last_runtime, last_run in rows: formatted_rows.append((file_path, pid, "{:.2f}s".format(runtime) if runtime else None, "{:.2f}s".format(last_runtime) if last_runtime else None, last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None)) log_str = ("\n" + "=" * 80 + "\n" + "DAG File Processing Stats\n\n" + tabulate(formatted_rows, headers=headers) + "\n" + "=" * 80) self.log.info(log_str)
Print out stats about how files are getting processed.
def _log_file_processing_stats(self, known_file_paths): """ Print out stats about how files are getting processed. :param known_file_paths: a list of file paths that may contain Airflow DAG definitions :type known_file_paths: list[unicode] :return: None """ # File Path: Path to the file containing the DAG definition # PID: PID associated with the process that's processing the file. May # be empty. # Runtime: If the process is currently running, how long it's been # running for in seconds. # Last Runtime: If the process ran before, how long did it take to # finish in seconds # Last Run: When the file finished processing in the previous run. headers = ["File Path", "PID", "Runtime", "Last Runtime", "Last Run"] rows = [] for file_path in known_file_paths: last_runtime = self.get_last_runtime(file_path) file_name = os.path.basename(file_path) file_name = os.path.splitext(file_name)[0].replace(os.sep, '.') if last_runtime: Stats.gauge( 'dag_processing.last_runtime.{}'.format(file_name), last_runtime ) processor_pid = self.get_pid(file_path) processor_start_time = self.get_start_time(file_path) runtime = ((timezone.utcnow() - processor_start_time).total_seconds() if processor_start_time else None) last_run = self.get_last_finish_time(file_path) if last_run: seconds_ago = (timezone.utcnow() - last_run).total_seconds() Stats.gauge( 'dag_processing.last_run.seconds_ago.{}'.format(file_name), seconds_ago ) rows.append((file_path, processor_pid, runtime, last_runtime, last_run)) # Sort by longest last runtime. (Can't sort None values in python3) rows = sorted(rows, key=lambda x: x[3] or 0.0) formatted_rows = [] for file_path, pid, runtime, last_runtime, last_run in rows: formatted_rows.append((file_path, pid, "{:.2f}s".format(runtime) if runtime else None, "{:.2f}s".format(last_runtime) if last_runtime else None, last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None)) log_str = ("\n" + "=" * 80 + "\n" + "DAG File Processing Stats\n\n" + tabulate(formatted_rows, headers=headers) + "\n" + "=" * 80) self.log.info(log_str)
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager.set_file_paths
def set_file_paths(self, new_file_paths): self._file_paths = new_file_paths self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths] filtered_processors = {} for file_path, processor in self._processors.items(): if file_path in new_file_paths: filtered_processors[file_path] = processor else: self.log.warning("Stopping processor for %s", file_path) processor.terminate() self._processors = filtered_processors
Update this with a new set of paths to DAG definition files.
def set_file_paths(self, new_file_paths): """ Update this with a new set of paths to DAG definition files. :param new_file_paths: list of paths to DAG definition files :type new_file_paths: list[unicode] :return: None """ self._file_paths = new_file_paths self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths] # Stop processors that are working on deleted files filtered_processors = {} for file_path, processor in self._processors.items(): if file_path in new_file_paths: filtered_processors[file_path] = processor else: self.log.warning("Stopping processor for %s", file_path) processor.terminate() self._processors = filtered_processors
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager.wait_until_finished
def wait_until_finished(self): for file_path, processor in self._processors.items(): while not processor.done: time.sleep(0.1)
Sleeps until all the processors are done.
def wait_until_finished(self): """ Sleeps until all the processors are done. """ for file_path, processor in self._processors.items(): while not processor.done: time.sleep(0.1)
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager.heartbeat
def heartbeat(self): finished_processors = {} running_processors = {} for file_path, processor in self._processors.items(): if processor.done: self.log.debug("Processor for %s finished", file_path) now = timezone.utcnow() finished_processors[file_path] = processor self._last_runtime[file_path] = (now - processor.start_time).total_seconds() self._last_finish_time[file_path] = now self._run_count[file_path] += 1 else: running_processors[file_path] = processor self._processors = running_processors self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism) self.log.debug("%s file paths queued for processing", len(self._file_path_queue)) simple_dags = [] for file_path, processor in finished_processors.items(): if processor.result is None: self.log.warning( "Processor for %s exited with return code %s.", processor.file_path, processor.exit_code ) else: for simple_dag in processor.result: simple_dags.append(simple_dag) if len(self._file_path_queue) == 0: file_paths_in_progress = self._processors.keys() now = timezone.utcnow() file_paths_recently_processed = [] for file_path in self._file_paths: last_finish_time = self.get_last_finish_time(file_path) if (last_finish_time is not None and (now - last_finish_time).total_seconds() < self._file_process_interval): file_paths_recently_processed.append(file_path) files_paths_at_run_limit = [file_path for file_path, num_runs in self._run_count.items() if num_runs == self._max_runs] files_paths_to_queue = list(set(self._file_paths) - set(file_paths_in_progress) - set(file_paths_recently_processed) - set(files_paths_at_run_limit)) for file_path, processor in self._processors.items(): self.log.debug( "File path %s is still being processed (started: %s)", processor.file_path, processor.start_time.isoformat() ) self.log.debug( "Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue) ) self._file_path_queue.extend(files_paths_to_queue) zombies = self._find_zombies() while (self._parallelism - len(self._processors) > 0 and len(self._file_path_queue) > 0): file_path = self._file_path_queue.pop(0) processor = self._processor_factory(file_path, zombies) processor.start() self.log.debug( "Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path ) self._processors[file_path] = processor self._run_count[self._heart_beat_key] += 1 return simple_dags
This should be periodically called by the manager loop. This method will kick off new processes to process DAG definition files and read the results from the finished processors.
def heartbeat(self): """ This should be periodically called by the manager loop. This method will kick off new processes to process DAG definition files and read the results from the finished processors. :return: a list of SimpleDags that were produced by processors that have finished since the last time this was called :rtype: list[airflow.utils.dag_processing.SimpleDag] """ finished_processors = {} """:type : dict[unicode, AbstractDagFileProcessor]""" running_processors = {} """:type : dict[unicode, AbstractDagFileProcessor]""" for file_path, processor in self._processors.items(): if processor.done: self.log.debug("Processor for %s finished", file_path) now = timezone.utcnow() finished_processors[file_path] = processor self._last_runtime[file_path] = (now - processor.start_time).total_seconds() self._last_finish_time[file_path] = now self._run_count[file_path] += 1 else: running_processors[file_path] = processor self._processors = running_processors self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism) self.log.debug("%s file paths queued for processing", len(self._file_path_queue)) # Collect all the DAGs that were found in the processed files simple_dags = [] for file_path, processor in finished_processors.items(): if processor.result is None: self.log.warning( "Processor for %s exited with return code %s.", processor.file_path, processor.exit_code ) else: for simple_dag in processor.result: simple_dags.append(simple_dag) # Generate more file paths to process if we processed all the files # already. if len(self._file_path_queue) == 0: # If the file path is already being processed, or if a file was # processed recently, wait until the next batch file_paths_in_progress = self._processors.keys() now = timezone.utcnow() file_paths_recently_processed = [] for file_path in self._file_paths: last_finish_time = self.get_last_finish_time(file_path) if (last_finish_time is not None and (now - last_finish_time).total_seconds() < self._file_process_interval): file_paths_recently_processed.append(file_path) files_paths_at_run_limit = [file_path for file_path, num_runs in self._run_count.items() if num_runs == self._max_runs] files_paths_to_queue = list(set(self._file_paths) - set(file_paths_in_progress) - set(file_paths_recently_processed) - set(files_paths_at_run_limit)) for file_path, processor in self._processors.items(): self.log.debug( "File path %s is still being processed (started: %s)", processor.file_path, processor.start_time.isoformat() ) self.log.debug( "Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue) ) self._file_path_queue.extend(files_paths_to_queue) zombies = self._find_zombies() # Start more processors if we have enough slots and files to process while (self._parallelism - len(self._processors) > 0 and len(self._file_path_queue) > 0): file_path = self._file_path_queue.pop(0) processor = self._processor_factory(file_path, zombies) processor.start() self.log.debug( "Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path ) self._processors[file_path] = processor # Update heartbeat count. self._run_count[self._heart_beat_key] += 1 return simple_dags
airflow/utils/dag_processing.py
apache/airflow
DagFileProcessorManager.end
def end(self): pids_to_kill = self.get_all_pids() if len(pids_to_kill) > 0: this_process = psutil.Process(os.getpid()) child_processes = [x for x in this_process.children(recursive=True) if x.is_running() and x.pid in pids_to_kill] for child in child_processes: self.log.info("Terminating child PID: %s", child.pid) child.terminate() timeout = 5 self.log.info("Waiting up to %s seconds for processes to exit...", timeout) try: psutil.wait_procs( child_processes, timeout=timeout, callback=lambda x: self.log.info('Terminated PID %s', x.pid)) except psutil.TimeoutExpired: self.log.debug("Ran out of time while waiting for processes to exit") child_processes = [x for x in this_process.children(recursive=True) if x.is_running() and x.pid in pids_to_kill] if len(child_processes) > 0: self.log.info("SIGKILL processes that did not terminate gracefully") for child in child_processes: self.log.info("Killing child PID: %s", child.pid) child.kill() child.wait()
Kill all child processes on exit since we don't want to leave them as orphans.
def end(self): """ Kill all child processes on exit since we don't want to leave them as orphaned. """ pids_to_kill = self.get_all_pids() if len(pids_to_kill) > 0: # First try SIGTERM this_process = psutil.Process(os.getpid()) # Only check child processes to ensure that we don't have a case # where we kill the wrong process because a child process died # but the PID got reused. child_processes = [x for x in this_process.children(recursive=True) if x.is_running() and x.pid in pids_to_kill] for child in child_processes: self.log.info("Terminating child PID: %s", child.pid) child.terminate() # TODO: Remove magic number timeout = 5 self.log.info("Waiting up to %s seconds for processes to exit...", timeout) try: psutil.wait_procs( child_processes, timeout=timeout, callback=lambda x: self.log.info('Terminated PID %s', x.pid)) except psutil.TimeoutExpired: self.log.debug("Ran out of time while waiting for processes to exit") # Then SIGKILL child_processes = [x for x in this_process.children(recursive=True) if x.is_running() and x.pid in pids_to_kill] if len(child_processes) > 0: self.log.info("SIGKILL processes that did not terminate gracefully") for child in child_processes: self.log.info("Killing child PID: %s", child.pid) child.kill() child.wait()
airflow/utils/dag_processing.py
apache/airflow
SSHHook.get_conn
def get_conn(self): self.log.debug('Creating SSH client for conn_id: %s', self.ssh_conn_id) client = paramiko.SSHClient() if not self.allow_host_key_change: self.log.warning('Remote Identification Change is not verified. ' 'This wont protect against Man-In-The-Middle attacks') client.load_system_host_keys() if self.no_host_key_check: self.log.warning('No Host Key Verification. This wont protect ' 'against Man-In-The-Middle attacks') client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if self.password and self.password.strip(): client.connect(hostname=self.remote_host, username=self.username, password=self.password, key_filename=self.key_file, timeout=self.timeout, compress=self.compress, port=self.port, sock=self.host_proxy) else: client.connect(hostname=self.remote_host, username=self.username, key_filename=self.key_file, timeout=self.timeout, compress=self.compress, port=self.port, sock=self.host_proxy) if self.keepalive_interval: client.get_transport().set_keepalive(self.keepalive_interval) self.client = client return client
Opens an SSH connection to the remote host.
def get_conn(self): """ Opens a ssh connection to the remote host. :rtype: paramiko.client.SSHClient """ self.log.debug('Creating SSH client for conn_id: %s', self.ssh_conn_id) client = paramiko.SSHClient() if not self.allow_host_key_change: self.log.warning('Remote Identification Change is not verified. ' 'This wont protect against Man-In-The-Middle attacks') client.load_system_host_keys() if self.no_host_key_check: self.log.warning('No Host Key Verification. This wont protect ' 'against Man-In-The-Middle attacks') # Default is RejectPolicy client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if self.password and self.password.strip(): client.connect(hostname=self.remote_host, username=self.username, password=self.password, key_filename=self.key_file, timeout=self.timeout, compress=self.compress, port=self.port, sock=self.host_proxy) else: client.connect(hostname=self.remote_host, username=self.username, key_filename=self.key_file, timeout=self.timeout, compress=self.compress, port=self.port, sock=self.host_proxy) if self.keepalive_interval: client.get_transport().set_keepalive(self.keepalive_interval) self.client = client return client
airflow/contrib/hooks/ssh_hook.py
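A hedged sketch of SSHHook.get_conn; the connection id and the remote command are assumptions, and exec_command comes from the returned paramiko SSHClient rather than from the hook itself.

from airflow.contrib.hooks.ssh_hook import SSHHook

hook = SSHHook(ssh_conn_id='ssh_default')  # assumed conn id
client = hook.get_conn()                   # paramiko.client.SSHClient
stdin, stdout, stderr = client.exec_command('uname -a')
print(stdout.read().decode())
client.close()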
apache/airflow
GCPTransferServiceHook.create_transfer_job
def create_transfer_job(self, body): body = self._inject_project_id(body, BODY, PROJECT_ID) return self.get_conn().transferJobs().create(body=body).execute(num_retries=self.num_retries)
Creates a transfer job that runs periodically.
def create_transfer_job(self, body): """ Creates a transfer job that runs periodically. :param body: (Required) A request body, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body :type body: dict :return: transfer job. See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob :rtype: dict """ body = self._inject_project_id(body, BODY, PROJECT_ID) return self.get_conn().transferJobs().create(body=body).execute(num_retries=self.num_retries)
airflow/contrib/hooks/gcp_transfer_hook.py
apache/airflow
GCPTransferServiceHook.get_transfer_job
def get_transfer_job(self, job_name, project_id=None): return ( self.get_conn() .transferJobs() .get(jobName=job_name, projectId=project_id) .execute(num_retries=self.num_retries) )
Gets the latest state of a transfer job in Google Storage Transfer Service.
def get_transfer_job(self, job_name, project_id=None): """ Gets the latest state of a long-running operation in Google Storage Transfer Service. :param job_name: (Required) Name of the job to be fetched :type job_name: str :param project_id: (Optional) the ID of the project that owns the Transfer Job. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: Transfer Job :rtype: dict """ return ( self.get_conn() .transferJobs() .get(jobName=job_name, projectId=project_id) .execute(num_retries=self.num_retries) )
airflow/contrib/hooks/gcp_transfer_hook.py
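A hedged sketch of get_transfer_job; the connection id, job name and project id are placeholders.

from airflow.contrib.hooks.gcp_transfer_hook import GCPTransferServiceHook

hook = GCPTransferServiceHook(gcp_conn_id='google_cloud_default')  # assumed constructor arg
job = hook.get_transfer_job(
    job_name='transferJobs/1234567890',  # hypothetical job name
    project_id='example-project',
)
print(job.get('status'), job.get('description'))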
apache/airflow
GCPTransferServiceHook.list_transfer_job
def list_transfer_job(self, filter): conn = self.get_conn() filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID) request = conn.transferJobs().list(filter=json.dumps(filter)) jobs = [] while request is not None: response = request.execute(num_retries=self.num_retries) jobs.extend(response[TRANSFER_JOBS]) request = conn.transferJobs().list_next(previous_request=request, previous_response=response) return jobs
Lists long-running operations in Google Storage Transfer Service that match the specified filter.
def list_transfer_job(self, filter): """ Lists long-running operations in Google Storage Transfer Service that match the specified filter. :param filter: (Required) A request filter, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter :type filter: dict :return: List of Transfer Jobs :rtype: list[dict] """ conn = self.get_conn() filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID) request = conn.transferJobs().list(filter=json.dumps(filter)) jobs = [] while request is not None: response = request.execute(num_retries=self.num_retries) jobs.extend(response[TRANSFER_JOBS]) request = conn.transferJobs().list_next(previous_request=request, previous_response=response) return jobs
airflow/contrib/hooks/gcp_transfer_hook.py
apache/airflow
GCPTransferServiceHook.update_transfer_job
def update_transfer_job(self, job_name, body): body = self._inject_project_id(body, BODY, PROJECT_ID) return ( self.get_conn() .transferJobs() .patch(jobName=job_name, body=body) .execute(num_retries=self.num_retries) )
Updates a transfer job that runs periodically.
def update_transfer_job(self, job_name, body): """ Updates a transfer job that runs periodically. :param job_name: (Required) Name of the job to be updated :type job_name: str :param body: A request body, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body :type body: dict :return: If successful, TransferJob. :rtype: dict """ body = self._inject_project_id(body, BODY, PROJECT_ID) return ( self.get_conn() .transferJobs() .patch(jobName=job_name, body=body) .execute(num_retries=self.num_retries) )
airflow/contrib/hooks/gcp_transfer_hook.py
apache/airflow
GCPTransferServiceHook.cancel_transfer_operation
def cancel_transfer_operation(self, operation_name): self.get_conn().transferOperations().cancel(name=operation_name).execute(num_retries=self.num_retries)
Cancels a transfer operation in Google Storage Transfer Service.
def cancel_transfer_operation(self, operation_name): """ Cancels a transfer operation in Google Storage Transfer Service. :param operation_name: Name of the transfer operation. :type operation_name: str :rtype: None """ self.get_conn().transferOperations().cancel(name=operation_name).execute(num_retries=self.num_retries)
airflow/contrib/hooks/gcp_transfer_hook.py
apache/airflow
GCPTransferServiceHook.pause_transfer_operation
def pause_transfer_operation(self, operation_name): self.get_conn().transferOperations().pause(name=operation_name).execute(num_retries=self.num_retries)
Pauses a transfer operation in Google Storage Transfer Service.
def pause_transfer_operation(self, operation_name): """ Pauses a transfer operation in Google Storage Transfer Service. :param operation_name: (Required) Name of the transfer operation. :type operation_name: str :rtype: None """ self.get_conn().transferOperations().pause(name=operation_name).execute(num_retries=self.num_retries)
airflow/contrib/hooks/gcp_transfer_hook.py
apache/airflow
GCPTransferServiceHook.resume_transfer_operation
def resume_transfer_operation(self, operation_name): self.get_conn().transferOperations().resume(name=operation_name).execute(num_retries=self.num_retries)
Resumes a transfer operation in Google Storage Transfer Service.
def resume_transfer_operation(self, operation_name):
        """
        Resumes a transfer operation in Google Storage Transfer Service.

        :param operation_name: (Required) Name of the transfer operation.
        :type operation_name: str
        :rtype: None
        """
        self.get_conn().transferOperations().resume(name=operation_name).execute(num_retries=self.num_retries)
airflow/contrib/hooks/gcp_transfer_hook.py
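cancel_transfer_operation, pause_transfer_operation and resume_transfer_operation share the same calling pattern, so one hedged sketch covers all three (the operation name is a placeholder that would normally come from list_transfer_operations):

# Sketch only: pause, resume, and finally cancel a running transfer operation.
from airflow.contrib.hooks.gcp_transfer_hook import GCPTransferServiceHook

hook = GCPTransferServiceHook()
operation_name = "transferOperations/example-operation"  # placeholder

hook.pause_transfer_operation(operation_name)   # temporarily stop copying data
hook.resume_transfer_operation(operation_name)  # continue from where it stopped
hook.cancel_transfer_operation(operation_name)  # abort the operation for good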
apache/airflow
GCPTransferServiceHook.wait_for_transfer_job
def wait_for_transfer_job(self, job, expected_statuses=(GcpTransferOperationStatus.SUCCESS,), timeout=60): while timeout > 0: operations = self.list_transfer_operations( filter={FILTER_PROJECT_ID: job[PROJECT_ID], FILTER_JOB_NAMES: [job[NAME]]} ) if GCPTransferServiceHook.operations_contain_expected_statuses(operations, expected_statuses): return time.sleep(TIME_TO_SLEEP_IN_SECONDS) timeout -= TIME_TO_SLEEP_IN_SECONDS raise AirflowException("Timeout. The operation could not be completed within the allotted time.")
Waits until the job reaches the expected state.
def wait_for_transfer_job(self, job, expected_statuses=(GcpTransferOperationStatus.SUCCESS,), timeout=60):
        """
        Waits until the job reaches the expected state.

        :param job: Transfer job
            See:
            https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
        :type job: dict
        :param expected_statuses: State that is expected
            See:
            https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
        :type expected_statuses: set[str]
        :param timeout: Time in seconds within which the job must reach an expected state
        :type timeout: int
        :rtype: None
        """
        while timeout > 0:
            operations = self.list_transfer_operations(
                filter={FILTER_PROJECT_ID: job[PROJECT_ID], FILTER_JOB_NAMES: [job[NAME]]}
            )

            if GCPTransferServiceHook.operations_contain_expected_statuses(operations, expected_statuses):
                return
            time.sleep(TIME_TO_SLEEP_IN_SECONDS)
            timeout -= TIME_TO_SLEEP_IN_SECONDS
        raise AirflowException("Timeout. The operation could not be completed within the allotted time.")
airflow/contrib/hooks/gcp_transfer_hook.py
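A hedged sketch of blocking on a job until its operations succeed; the get_transfer_job lookup is an assumption (it is not part of this record), and GcpTransferOperationStatus is imported from the same module, where the method's default argument references it:

# Sketch only: fetch a job, then wait up to five minutes for it to succeed.
from airflow.contrib.hooks.gcp_transfer_hook import (
    GCPTransferServiceHook,
    GcpTransferOperationStatus,
)

hook = GCPTransferServiceHook()
job = hook.get_transfer_job(job_name="transferJobs/example-job")  # assumed helper, placeholder name
hook.wait_for_transfer_job(
    job,
    expected_statuses=(GcpTransferOperationStatus.SUCCESS,),
    timeout=300,  # seconds; an AirflowException is raised once this is exhausted
)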
apache/airflow
run_command
def run_command(command): process = subprocess.Popen( shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) output, stderr = [stream.decode(sys.getdefaultencoding(), 'ignore') for stream in process.communicate()] if process.returncode != 0: raise AirflowConfigException( "Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}" .format(command, process.returncode, output, stderr) ) return output
Runs a command and returns its stdout.
def run_command(command):
    """
    Runs a command and returns its stdout.
    """
    process = subprocess.Popen(
        shlex.split(command),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True)
    output, stderr = [stream.decode(sys.getdefaultencoding(), 'ignore')
                      for stream in process.communicate()]

    if process.returncode != 0:
        raise AirflowConfigException(
            "Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
            .format(command, process.returncode, output, stderr)
        )

    return output
airflow/configuration.py
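run_command is what the *_cmd configuration options use to resolve their values, but it can also be called directly; a small sketch (the command and file path are placeholders):

# Sketch only: resolve a secret at configuration time the way *_cmd options do.
from airflow.configuration import run_command

password = run_command("cat /run/secrets/db_password").strip()  # placeholder command
# A non-zero exit code raises AirflowConfigException with the captured stdout
# and stderr, so the caller does not need to inspect a return code.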
apache/airflow
AirflowConfigParser.remove_option
def remove_option(self, section, option, remove_default=True): if super().has_option(section, option): super().remove_option(section, option) if self.airflow_defaults.has_option(section, option) and remove_default: self.airflow_defaults.remove_option(section, option)
Remove an option if it exists in the config read from a file or in the default config. If both configs have the same option, this removes the option from both unless remove_default=False.
def remove_option(self, section, option, remove_default=True):
        """
        Remove an option if it exists in the config read from a file or in the
        default config. If both configs have the same option, this removes the
        option from both unless remove_default=False.
        """
        if super().has_option(section, option):
            super().remove_option(section, option)

        if self.airflow_defaults.has_option(section, option) and remove_default:
            self.airflow_defaults.remove_option(section, option)
airflow/configuration.py
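A short sketch of the remove_default switch, using the global conf parser from the same module (the option names are ordinary Airflow settings chosen for illustration):

# Sketch only: drop options from the loaded config and, optionally, the defaults.
from airflow.configuration import conf  # the global AirflowConfigParser instance

conf.remove_option("core", "load_examples")                      # removed from both configs
conf.remove_option("core", "dags_folder", remove_default=False)  # shipped default is kept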
apache/airflow
DatastoreHook.allocate_ids
def allocate_ids(self, partial_keys): conn = self.get_conn() resp = (conn .projects() .allocateIds(projectId=self.project_id, body={'keys': partial_keys}) .execute(num_retries=self.num_retries)) return resp['keys']
Allocate IDs for incomplete keys.
def allocate_ids(self, partial_keys):
        """
        Allocate IDs for incomplete keys.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds

        :param partial_keys: a list of partial keys.
        :type partial_keys: list
        :return: a list of full keys.
        :rtype: list
        """
        conn = self.get_conn()

        resp = (conn
                .projects()
                .allocateIds(projectId=self.project_id, body={'keys': partial_keys})
                .execute(num_retries=self.num_retries))

        return resp['keys']
airflow/contrib/hooks/datastore_hook.py
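A hedged sketch of reserving IDs for a made-up kind; the key layout follows the allocateIds reference linked in the docstring (a partial key is a path whose last element has no id or name yet):

# Sketch only: reserve two numeric IDs for entities of a hypothetical "Task" kind.
from airflow.contrib.hooks.datastore_hook import DatastoreHook

hook = DatastoreHook()  # relies on the hook's default GCP connection
partial_keys = [
    {"path": [{"kind": "Task"}]},  # no id/name in the last path element
    {"path": [{"kind": "Task"}]},
]
full_keys = hook.allocate_ids(partial_keys)
print([key["path"][-1]["id"] for key in full_keys])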
apache/airflow
DatastoreHook.begin_transaction
def begin_transaction(self): conn = self.get_conn() resp = (conn .projects() .beginTransaction(projectId=self.project_id, body={}) .execute(num_retries=self.num_retries)) return resp['transaction']
Begins a new transaction.
def begin_transaction(self):
        """
        Begins a new transaction.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction

        :return: a transaction handle.
        :rtype: str
        """
        conn = self.get_conn()

        resp = (conn
                .projects()
                .beginTransaction(projectId=self.project_id, body={})
                .execute(num_retries=self.num_retries))

        return resp['transaction']
airflow/contrib/hooks/datastore_hook.py
apache/airflow
DatastoreHook.commit
def commit(self, body): conn = self.get_conn() resp = (conn .projects() .commit(projectId=self.project_id, body=body) .execute(num_retries=self.num_retries)) return resp
Commit a transaction, optionally creating, deleting or modifying some entities.
def commit(self, body):
        """
        Commit a transaction, optionally creating, deleting or modifying some entities.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit

        :param body: the body of the commit request.
        :type body: dict
        :return: the response body of the commit request.
        :rtype: dict
        """
        conn = self.get_conn()

        resp = (conn
                .projects()
                .commit(projectId=self.project_id, body=body)
                .execute(num_retries=self.num_retries))

        return resp
airflow/contrib/hooks/datastore_hook.py
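begin_transaction and commit are usually paired; a hedged sketch that inserts one entity inside an explicit transaction (kind, entity name and property values are placeholders, and the body layout follows the commit reference linked in the docstring):

# Sketch only: insert a single entity transactionally and inspect the result.
from airflow.contrib.hooks.datastore_hook import DatastoreHook

hook = DatastoreHook()
transaction = hook.begin_transaction()
response = hook.commit(
    body={
        "mode": "TRANSACTIONAL",
        "transaction": transaction,
        "mutations": [
            {
                "insert": {
                    "key": {"path": [{"kind": "Task", "name": "example-task"}]},
                    "properties": {"state": {"stringValue": "queued"}},
                }
            }
        ],
    }
)
print(response.get("mutationResults"))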