repo_name: string (4 distinct values)
method_name: string (length 3 to 72)
method_code: string (length 87 to 3.59k)
method_summary: string (length 12 to 196)
original_method_code: string (length 129 to 8.98k)
method_path: string (length 15 to 136)
apache/airflow
DingdingHook._get_endpoint
def _get_endpoint(self): conn = self.get_connection(self.http_conn_id) token = conn.password if not token: raise AirflowException('Dingding token is requests but get nothing, ' 'check you conn_id configuration.') return 'robot/send?access_token={}'.format(token)
Get Dingding endpoint for sending message.
def _get_endpoint(self): """ Get Dingding endpoint for sending message. """ conn = self.get_connection(self.http_conn_id) token = conn.password if not token: raise AirflowException('Dingding token is requests but get nothing, ' 'check you conn_id configuration.') return 'robot/send?access_token={}'.format(token)
airflow/contrib/hooks/dingding_hook.py
apache/airflow
DingdingHook.send
def send(self): support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard'] if self.message_type not in support_type: raise ValueError('DingdingWebhookHook only support {} ' 'so far, but receive {}'.format(support_type, self.message_type)) data = self._build_message() self.log.info('Sending Dingding type %s message %s', self.message_type, data) resp = self.run(endpoint=self._get_endpoint(), data=data, headers={'Content-Type': 'application/json'}) if int(resp.json().get('errcode')) != 0: raise AirflowException('Send Dingding message failed, receive error ' 'message %s', resp.text) self.log.info('Success Send Dingding message')
Send Dingding message
def send(self): """ Send Dingding message """ support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard'] if self.message_type not in support_type: raise ValueError('DingdingWebhookHook only support {} ' 'so far, but receive {}'.format(support_type, self.message_type)) data = self._build_message() self.log.info('Sending Dingding type %s message %s', self.message_type, data) resp = self.run(endpoint=self._get_endpoint(), data=data, headers={'Content-Type': 'application/json'}) # Dingding success send message will with errcode equal to 0 if int(resp.json().get('errcode')) != 0: raise AirflowException('Send Dingding message failed, receive error ' 'message %s', resp.text) self.log.info('Success Send Dingding message')
airflow/contrib/hooks/dingding_hook.py
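A minimal usage sketch for the DingdingHook record above, assuming the default Dingding connection is configured with the robot access token stored in its password field; the message text is made up for the example.
from airflow.contrib.hooks.dingding_hook import DingdingHook

# Assumes the default Dingding connection exists and holds the robot
# access token in its password field; the message text is illustrative.
hook = DingdingHook(message_type='text', message='Airflow task finished')
hook.send()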
apache/airflow
_bind_parameters
def _bind_parameters(operation, parameters): string_parameters = {} for (name, value) in iteritems(parameters): if value is None: string_parameters[name] = 'NULL' elif isinstance(value, basestring): string_parameters[name] = "'" + _escape(value) + "'" else: string_parameters[name] = str(value) return operation % string_parameters
Helper method that binds parameters to a SQL query.
def _bind_parameters(operation, parameters): """ Helper method that binds parameters to a SQL query. """ # inspired by MySQL Python Connector (conversion.py) string_parameters = {} for (name, value) in iteritems(parameters): if value is None: string_parameters[name] = 'NULL' elif isinstance(value, basestring): string_parameters[name] = "'" + _escape(value) + "'" else: string_parameters[name] = str(value) return operation % string_parameters
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
_escape
def _escape(s): e = s e = e.replace('\\', '\\\\') e = e.replace('\n', '\\n') e = e.replace('\r', '\\r') e = e.replace("'", "\\'") e = e.replace('"', '\\"') return e
Helper method that escapes parameters to a SQL query.
def _escape(s): """ Helper method that escapes parameters to a SQL query. """ e = s e = e.replace('\\', '\\\\') e = e.replace('\n', '\\n') e = e.replace('\r', '\\r') e = e.replace("'", "\\'") e = e.replace('"', '\\"') return e
airflow/contrib/hooks/bigquery_hook.py
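For illustration, a sketch of how the two private helpers above compose; the query text and parameter values are made up, and in the real hook these functions are only called internally by the cursor.
# Values are escaped by _escape and spliced into the %(name)s placeholders.
sql = "SELECT * FROM people WHERE last_name = %(last_name)s AND age > %(age)s"
_bind_parameters(sql, {'last_name': "O'Brien", 'age': 30})
# -> "SELECT * FROM people WHERE last_name = 'O\'Brien' AND age > 30"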
apache/airflow
_bq_cast
def _bq_cast(string_field, bq_type): if string_field is None: return None elif bq_type == 'INTEGER': return int(string_field) elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP': return float(string_field) elif bq_type == 'BOOLEAN': if string_field not in ['true', 'false']: raise ValueError("{} must have value 'true' or 'false'".format( string_field)) return string_field == 'true' else: return string_field
Helper method that casts a BigQuery row to the appropriate data types. This is useful because BigQuery returns all fields as strings.
def _bq_cast(string_field, bq_type): """ Helper method that casts a BigQuery row to the appropriate data types. This is useful because BigQuery returns all fields as strings. """ if string_field is None: return None elif bq_type == 'INTEGER': return int(string_field) elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP': return float(string_field) elif bq_type == 'BOOLEAN': if string_field not in ['true', 'false']: raise ValueError("{} must have value 'true' or 'false'".format( string_field)) return string_field == 'true' else: return string_field
airflow/contrib/hooks/bigquery_hook.py
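A few illustrative calls to the casting helper above; the input strings are hypothetical field values as BigQuery would return them.
# BigQuery returns every cell as a string; _bq_cast restores native types.
_bq_cast('42', 'INTEGER')             # -> 42
_bq_cast('1.5617466e9', 'TIMESTAMP')  # -> 1561746600.0
_bq_cast('true', 'BOOLEAN')           # -> True
_bq_cast(None, 'STRING')              # -> None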
apache/airflow
_validate_value
def _validate_value(key, value, expected_type): if not isinstance(value, expected_type): raise TypeError("{} argument must have a type {} not {}".format( key, expected_type, type(value)))
Function to check the expected type and raise an error if the type is not correct
def _validate_value(key, value, expected_type): """ function to check expected type and raise error if type is not correct """ if not isinstance(value, expected_type): raise TypeError("{} argument must have a type {} not {}".format( key, expected_type, type(value)))
airflow/contrib/hooks/bigquery_hook.py
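Two illustrative calls to the type check above; the argument names are made up for the example.
_validate_value('labels', {'team': 'data-eng'}, dict)  # passes silently
_validate_value('labels', 'not-a-dict', dict)          # raises TypeError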
apache/airflow
BigQueryHook.table_exists
def table_exists(self, project_id, dataset_id, table_id): service = self.get_service() try: service.tables().get( projectId=project_id, datasetId=dataset_id, tableId=table_id).execute(num_retries=self.num_retries) return True except HttpError as e: if e.resp['status'] == '404': return False raise
Checks for the existence of a table in Google BigQuery.
def table_exists(self, project_id, dataset_id, table_id): """ Checks for the existence of a table in Google BigQuery. :param project_id: The Google cloud project in which to look for the table. The connection supplied to the hook must provide access to the specified project. :type project_id: str :param dataset_id: The name of the dataset in which to look for the table. :type dataset_id: str :param table_id: The name of the table to check the existence of. :type table_id: str """ service = self.get_service() try: service.tables().get( projectId=project_id, datasetId=dataset_id, tableId=table_id).execute(num_retries=self.num_retries) return True except HttpError as e: if e.resp['status'] == '404': return False raise
airflow/contrib/hooks/bigquery_hook.py
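A hedged usage sketch for BigQueryHook.table_exists shown above; the connection id, project, dataset and table names are placeholders, not real resources.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook

# Placeholder connection id and table coordinates.
hook = BigQueryHook(bigquery_conn_id='bigquery_default')
if not hook.table_exists(project_id='my-project',
                         dataset_id='my_dataset',
                         table_id='my_table'):
    raise ValueError('expected table my-project:my_dataset.my_table is missing')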
apache/airflow
BigQueryBaseCursor.create_empty_table
def create_empty_table(self, project_id, dataset_id, table_id, schema_fields=None, time_partitioning=None, cluster_fields=None, labels=None, view=None, num_retries=None): project_id = project_id if project_id is not None else self.project_id table_resource = { 'tableReference': { 'tableId': table_id } } if schema_fields: table_resource['schema'] = {'fields': schema_fields} if time_partitioning: table_resource['timePartitioning'] = time_partitioning if cluster_fields: table_resource['clustering'] = { 'fields': cluster_fields } if labels: table_resource['labels'] = labels if view: table_resource['view'] = view num_retries = num_retries if num_retries else self.num_retries self.log.info('Creating Table %s:%s.%s', project_id, dataset_id, table_id) try: self.service.tables().insert( projectId=project_id, datasetId=dataset_id, body=table_resource).execute(num_retries=num_retries) self.log.info('Table created successfully: %s:%s.%s', project_id, dataset_id, table_id) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content) )
Creates a new, empty table in the dataset. To create a view, which is defined by a SQL query, pass a dictionary to the 'view' kwarg
def create_empty_table(self, project_id, dataset_id, table_id, schema_fields=None, time_partitioning=None, cluster_fields=None, labels=None, view=None, num_retries=None): """ Creates a new, empty table in the dataset. To create a view, which is defined by a SQL query, parse a dictionary to 'view' kwarg :param project_id: The project to create the table into. :type project_id: str :param dataset_id: The dataset to create the table into. :type dataset_id: str :param table_id: The Name of the table to be created. :type table_id: str :param schema_fields: If set, the schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema :type schema_fields: list :param labels: a dictionary containing labels for the table, passed to BigQuery :type labels: dict **Example**: :: schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"}, {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}] :param time_partitioning: configure optional time partitioning fields i.e. partition by field, type and expiration as per API specifications. .. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning :type time_partitioning: dict :param cluster_fields: [Optional] The fields used for clustering. Must be specified with time_partitioning, data in the table will be first partitioned and subsequently clustered. https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields :type cluster_fields: list :param view: [Optional] A dictionary containing definition for the view. If set, it will create a view instead of a table: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view :type view: dict **Example**: :: view = { "query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000", "useLegacySql": False } :return: None """ project_id = project_id if project_id is not None else self.project_id table_resource = { 'tableReference': { 'tableId': table_id } } if schema_fields: table_resource['schema'] = {'fields': schema_fields} if time_partitioning: table_resource['timePartitioning'] = time_partitioning if cluster_fields: table_resource['clustering'] = { 'fields': cluster_fields } if labels: table_resource['labels'] = labels if view: table_resource['view'] = view num_retries = num_retries if num_retries else self.num_retries self.log.info('Creating Table %s:%s.%s', project_id, dataset_id, table_id) try: self.service.tables().insert( projectId=project_id, datasetId=dataset_id, body=table_resource).execute(num_retries=num_retries) self.log.info('Table created successfully: %s:%s.%s', project_id, dataset_id, table_id) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content) )
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
BigQueryBaseCursor.patch_table
def patch_table(self, dataset_id, table_id, project_id=None, description=None, expiration_time=None, external_data_configuration=None, friendly_name=None, labels=None, schema=None, time_partitioning=None, view=None, require_partition_filter=None): project_id = project_id if project_id is not None else self.project_id table_resource = {} if description is not None: table_resource['description'] = description if expiration_time is not None: table_resource['expirationTime'] = expiration_time if external_data_configuration: table_resource['externalDataConfiguration'] = external_data_configuration if friendly_name is not None: table_resource['friendlyName'] = friendly_name if labels: table_resource['labels'] = labels if schema: table_resource['schema'] = {'fields': schema} if time_partitioning: table_resource['timePartitioning'] = time_partitioning if view: table_resource['view'] = view if require_partition_filter is not None: table_resource['requirePartitionFilter'] = require_partition_filter self.log.info('Patching Table %s:%s.%s', project_id, dataset_id, table_id) try: self.service.tables().patch( projectId=project_id, datasetId=dataset_id, tableId=table_id, body=table_resource).execute(num_retries=self.num_retries) self.log.info('Table patched successfully: %s:%s.%s', project_id, dataset_id, table_id) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content) )
Patch information in an existing table. It only updates fields that are provided in the request object.
def patch_table(self, dataset_id, table_id, project_id=None, description=None, expiration_time=None, external_data_configuration=None, friendly_name=None, labels=None, schema=None, time_partitioning=None, view=None, require_partition_filter=None): """ Patch information in an existing table. It only updates fileds that are provided in the request object. Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch :param dataset_id: The dataset containing the table to be patched. :type dataset_id: str :param table_id: The Name of the table to be patched. :type table_id: str :param project_id: The project containing the table to be patched. :type project_id: str :param description: [Optional] A user-friendly description of this table. :type description: str :param expiration_time: [Optional] The time when this table expires, in milliseconds since the epoch. :type expiration_time: int :param external_data_configuration: [Optional] A dictionary containing properties of a table stored outside of BigQuery. :type external_data_configuration: dict :param friendly_name: [Optional] A descriptive name for this table. :type friendly_name: str :param labels: [Optional] A dictionary containing labels associated with this table. :type labels: dict :param schema: [Optional] If set, the schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema The supported schema modifications and unsupported schema modification are listed here: https://cloud.google.com/bigquery/docs/managing-table-schemas **Example**: :: schema=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"}, {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}] :type schema: list :param time_partitioning: [Optional] A dictionary containing time-based partitioning definition for the table. :type time_partitioning: dict :param view: [Optional] A dictionary containing definition for the view. If set, it will patch a view instead of a table: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view **Example**: :: view = { "query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500", "useLegacySql": False } :type view: dict :param require_partition_filter: [Optional] If true, queries over the this table require a partition filter. If false, queries over the table :type require_partition_filter: bool """ project_id = project_id if project_id is not None else self.project_id table_resource = {} if description is not None: table_resource['description'] = description if expiration_time is not None: table_resource['expirationTime'] = expiration_time if external_data_configuration: table_resource['externalDataConfiguration'] = external_data_configuration if friendly_name is not None: table_resource['friendlyName'] = friendly_name if labels: table_resource['labels'] = labels if schema: table_resource['schema'] = {'fields': schema} if time_partitioning: table_resource['timePartitioning'] = time_partitioning if view: table_resource['view'] = view if require_partition_filter is not None: table_resource['requirePartitionFilter'] = require_partition_filter self.log.info('Patching Table %s:%s.%s', project_id, dataset_id, table_id) try: self.service.tables().patch( projectId=project_id, datasetId=dataset_id, tableId=table_id, body=table_resource).execute(num_retries=self.num_retries) self.log.info('Table patched successfully: %s:%s.%s', project_id, dataset_id, table_id) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content) )
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
BigQueryBaseCursor.cancel_query
def cancel_query(self): jobs = self.service.jobs() if (self.running_job_id and not self.poll_job_complete(self.running_job_id)): self.log.info('Attempting to cancel job : %s, %s', self.project_id, self.running_job_id) if self.location: jobs.cancel( projectId=self.project_id, jobId=self.running_job_id, location=self.location).execute(num_retries=self.num_retries) else: jobs.cancel( projectId=self.project_id, jobId=self.running_job_id).execute(num_retries=self.num_retries) else: self.log.info('No running BigQuery jobs to cancel.') return max_polling_attempts = 12 polling_attempts = 0 job_complete = False while polling_attempts < max_polling_attempts and not job_complete: polling_attempts = polling_attempts + 1 job_complete = self.poll_job_complete(self.running_job_id) if job_complete: self.log.info('Job successfully canceled: %s, %s', self.project_id, self.running_job_id) elif polling_attempts == max_polling_attempts: self.log.info( "Stopping polling due to timeout. Job with id %s " "has not completed cancel and may or may not finish.", self.running_job_id) else: self.log.info('Waiting for canceled job with id %s to finish.', self.running_job_id) time.sleep(5)
Cancel all started queries that have not yet completed
def cancel_query(self): """ Cancel all started queries that have not yet completed """ jobs = self.service.jobs() if (self.running_job_id and not self.poll_job_complete(self.running_job_id)): self.log.info('Attempting to cancel job : %s, %s', self.project_id, self.running_job_id) if self.location: jobs.cancel( projectId=self.project_id, jobId=self.running_job_id, location=self.location).execute(num_retries=self.num_retries) else: jobs.cancel( projectId=self.project_id, jobId=self.running_job_id).execute(num_retries=self.num_retries) else: self.log.info('No running BigQuery jobs to cancel.') return # Wait for all the calls to cancel to finish max_polling_attempts = 12 polling_attempts = 0 job_complete = False while polling_attempts < max_polling_attempts and not job_complete: polling_attempts = polling_attempts + 1 job_complete = self.poll_job_complete(self.running_job_id) if job_complete: self.log.info('Job successfully canceled: %s, %s', self.project_id, self.running_job_id) elif polling_attempts == max_polling_attempts: self.log.info( "Stopping polling due to timeout. Job with id %s " "has not completed cancel and may or may not finish.", self.running_job_id) else: self.log.info('Waiting for canceled job with id %s to finish.', self.running_job_id) time.sleep(5)
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
BigQueryBaseCursor.run_table_delete
def run_table_delete(self, deletion_dataset_table, ignore_if_missing=False): deletion_project, deletion_dataset, deletion_table = \ _split_tablename(table_input=deletion_dataset_table, default_project_id=self.project_id) try: self.service.tables() \ .delete(projectId=deletion_project, datasetId=deletion_dataset, tableId=deletion_table) \ .execute(num_retries=self.num_retries) self.log.info('Deleted table %s:%s.%s.', deletion_project, deletion_dataset, deletion_table) except HttpError: if not ignore_if_missing: raise Exception('Table deletion failed. Table does not exist.') else: self.log.info('Table does not exist. Skipping.')
Delete an existing table from the dataset; If the table does not exist, return an error unless ignore_if_missing is set to True.
def run_table_delete(self, deletion_dataset_table, ignore_if_missing=False): """ Delete an existing table from the dataset; If the table does not exist, return an error unless ignore_if_missing is set to True. :param deletion_dataset_table: A dotted ``(<project>.|<project>:)<dataset>.<table>`` that indicates which table will be deleted. :type deletion_dataset_table: str :param ignore_if_missing: if True, then return success even if the requested table does not exist. :type ignore_if_missing: bool :return: """ deletion_project, deletion_dataset, deletion_table = \ _split_tablename(table_input=deletion_dataset_table, default_project_id=self.project_id) try: self.service.tables() \ .delete(projectId=deletion_project, datasetId=deletion_dataset, tableId=deletion_table) \ .execute(num_retries=self.num_retries) self.log.info('Deleted table %s:%s.%s.', deletion_project, deletion_dataset, deletion_table) except HttpError: if not ignore_if_missing: raise Exception('Table deletion failed. Table does not exist.') else: self.log.info('Table does not exist. Skipping.')
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
BigQueryBaseCursor.run_table_upsert
def run_table_upsert(self, dataset_id, table_resource, project_id=None): table_id = table_resource['tableReference']['tableId'] project_id = project_id if project_id is not None else self.project_id tables_list_resp = self.service.tables().list( projectId=project_id, datasetId=dataset_id).execute(num_retries=self.num_retries) while True: for table in tables_list_resp.get('tables', []): if table['tableReference']['tableId'] == table_id: self.log.info('Table %s:%s.%s exists, updating.', project_id, dataset_id, table_id) return self.service.tables().update( projectId=project_id, datasetId=dataset_id, tableId=table_id, body=table_resource).execute(num_retries=self.num_retries) if 'nextPageToken' in tables_list_resp: tables_list_resp = self.service.tables()\ .list(projectId=project_id, datasetId=dataset_id, pageToken=tables_list_resp['nextPageToken'])\ .execute(num_retries=self.num_retries) else: self.log.info('Table %s:%s.%s does not exist. creating.', project_id, dataset_id, table_id) return self.service.tables().insert( projectId=project_id, datasetId=dataset_id, body=table_resource).execute(num_retries=self.num_retries)
Creates a new, empty table in the dataset; if the table already exists, updates the existing table. Since BigQuery does not natively allow table upserts, this is not an atomic operation.
def run_table_upsert(self, dataset_id, table_resource, project_id=None): """ creates a new, empty table in the dataset; If the table already exists, update the existing table. Since BigQuery does not natively allow table upserts, this is not an atomic operation. :param dataset_id: the dataset to upsert the table into. :type dataset_id: str :param table_resource: a table resource. see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource :type table_resource: dict :param project_id: the project to upsert the table into. If None, project will be self.project_id. :return: """ # check to see if the table exists table_id = table_resource['tableReference']['tableId'] project_id = project_id if project_id is not None else self.project_id tables_list_resp = self.service.tables().list( projectId=project_id, datasetId=dataset_id).execute(num_retries=self.num_retries) while True: for table in tables_list_resp.get('tables', []): if table['tableReference']['tableId'] == table_id: # found the table, do update self.log.info('Table %s:%s.%s exists, updating.', project_id, dataset_id, table_id) return self.service.tables().update( projectId=project_id, datasetId=dataset_id, tableId=table_id, body=table_resource).execute(num_retries=self.num_retries) # If there is a next page, we need to check the next page. if 'nextPageToken' in tables_list_resp: tables_list_resp = self.service.tables()\ .list(projectId=project_id, datasetId=dataset_id, pageToken=tables_list_resp['nextPageToken'])\ .execute(num_retries=self.num_retries) # If there is no next page, then the table doesn't exist. else: # do insert self.log.info('Table %s:%s.%s does not exist. creating.', project_id, dataset_id, table_id) return self.service.tables().insert( projectId=project_id, datasetId=dataset_id, body=table_resource).execute(num_retries=self.num_retries)
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
BigQueryBaseCursor.get_dataset
def get_dataset(self, dataset_id, project_id=None): if not dataset_id or not isinstance(dataset_id, str): raise ValueError("dataset_id argument must be provided and has " "a type 'str'. You provided: {}".format(dataset_id)) dataset_project_id = project_id if project_id else self.project_id try: dataset_resource = self.service.datasets().get( datasetId=dataset_id, projectId=dataset_project_id).execute(num_retries=self.num_retries) self.log.info("Dataset Resource: %s", dataset_resource) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content)) return dataset_resource
Method returns dataset_resource if the dataset exists and raises a 404 error if the dataset does not exist
def get_dataset(self, dataset_id, project_id=None): """ Method returns dataset_resource if dataset exist and raised 404 error if dataset does not exist :param dataset_id: The BigQuery Dataset ID :type dataset_id: str :param project_id: The GCP Project ID :type project_id: str :return: dataset_resource .. seealso:: For more information, see Dataset Resource content: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource """ if not dataset_id or not isinstance(dataset_id, str): raise ValueError("dataset_id argument must be provided and has " "a type 'str'. You provided: {}".format(dataset_id)) dataset_project_id = project_id if project_id else self.project_id try: dataset_resource = self.service.datasets().get( datasetId=dataset_id, projectId=dataset_project_id).execute(num_retries=self.num_retries) self.log.info("Dataset Resource: %s", dataset_resource) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content)) return dataset_resource
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
BigQueryBaseCursor.get_datasets_list
def get_datasets_list(self, project_id=None): dataset_project_id = project_id if project_id else self.project_id try: datasets_list = self.service.datasets().list( projectId=dataset_project_id).execute(num_retries=self.num_retries)['datasets'] self.log.info("Datasets List: %s", datasets_list) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content)) return datasets_list
Method returns full list of BigQuery datasets in the current project
def get_datasets_list(self, project_id=None): """ Method returns full list of BigQuery datasets in the current project .. seealso:: For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list :param project_id: Google Cloud Project for which you try to get all datasets :type project_id: str :return: datasets_list Example of returned datasets_list: :: { "kind":"bigquery#dataset", "location":"US", "id":"your-project:dataset_2_test", "datasetReference":{ "projectId":"your-project", "datasetId":"dataset_2_test" } }, { "kind":"bigquery#dataset", "location":"US", "id":"your-project:dataset_1_test", "datasetReference":{ "projectId":"your-project", "datasetId":"dataset_1_test" } } ] """ dataset_project_id = project_id if project_id else self.project_id try: datasets_list = self.service.datasets().list( projectId=dataset_project_id).execute(num_retries=self.num_retries)['datasets'] self.log.info("Datasets List: %s", datasets_list) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content)) return datasets_list
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
BigQueryBaseCursor.insert_all
def insert_all(self, project_id, dataset_id, table_id, rows, ignore_unknown_values=False, skip_invalid_rows=False, fail_on_error=False): dataset_project_id = project_id if project_id else self.project_id body = { "rows": rows, "ignoreUnknownValues": ignore_unknown_values, "kind": "bigquery#tableDataInsertAllRequest", "skipInvalidRows": skip_invalid_rows, } try: self.log.info( 'Inserting %s row(s) into Table %s:%s.%s', len(rows), dataset_project_id, dataset_id, table_id ) resp = self.service.tabledata().insertAll( projectId=dataset_project_id, datasetId=dataset_id, tableId=table_id, body=body ).execute(num_retries=self.num_retries) if 'insertErrors' not in resp: self.log.info( 'All row(s) inserted successfully: %s:%s.%s', dataset_project_id, dataset_id, table_id ) else: error_msg = '{} insert error(s) occurred: {}:{}.{}. Details: {}'.format( len(resp['insertErrors']), dataset_project_id, dataset_id, table_id, resp['insertErrors']) if fail_on_error: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(error_msg) ) self.log.info(error_msg) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content) )
Method to stream data into BigQuery one record at a time without needing to run a load job
def insert_all(self, project_id, dataset_id, table_id, rows, ignore_unknown_values=False, skip_invalid_rows=False, fail_on_error=False): """ Method to stream data into BigQuery one record at a time without needing to run a load job .. seealso:: For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll :param project_id: The name of the project where we have the table :type project_id: str :param dataset_id: The name of the dataset where we have the table :type dataset_id: str :param table_id: The name of the table :type table_id: str :param rows: the rows to insert :type rows: list **Example or rows**: rows=[{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}] :param ignore_unknown_values: [Optional] Accept rows that contain values that do not match the schema. The unknown values are ignored. The default value is false, which treats unknown values as errors. :type ignore_unknown_values: bool :param skip_invalid_rows: [Optional] Insert all valid rows of a request, even if invalid rows exist. The default value is false, which causes the entire request to fail if any invalid rows exist. :type skip_invalid_rows: bool :param fail_on_error: [Optional] Force the task to fail if any errors occur. The default value is false, which indicates the task should not fail even if any insertion errors occur. :type fail_on_error: bool """ dataset_project_id = project_id if project_id else self.project_id body = { "rows": rows, "ignoreUnknownValues": ignore_unknown_values, "kind": "bigquery#tableDataInsertAllRequest", "skipInvalidRows": skip_invalid_rows, } try: self.log.info( 'Inserting %s row(s) into Table %s:%s.%s', len(rows), dataset_project_id, dataset_id, table_id ) resp = self.service.tabledata().insertAll( projectId=dataset_project_id, datasetId=dataset_id, tableId=table_id, body=body ).execute(num_retries=self.num_retries) if 'insertErrors' not in resp: self.log.info( 'All row(s) inserted successfully: %s:%s.%s', dataset_project_id, dataset_id, table_id ) else: error_msg = '{} insert error(s) occurred: {}:{}.{}. Details: {}'.format( len(resp['insertErrors']), dataset_project_id, dataset_id, table_id, resp['insertErrors']) if fail_on_error: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(error_msg) ) self.log.info(error_msg) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content) )
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
BigQueryCursor.execute
def execute(self, operation, parameters=None): sql = _bind_parameters(operation, parameters) if parameters else operation self.job_id = self.run_query(sql)
Executes a BigQuery query, and returns the job ID.
def execute(self, operation, parameters=None): """ Executes a BigQuery query, and returns the job ID. :param operation: The query to execute. :type operation: str :param parameters: Parameters to substitute into the query. :type parameters: dict """ sql = _bind_parameters(operation, parameters) if parameters else operation self.job_id = self.run_query(sql)
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
BigQueryCursor.executemany
def executemany(self, operation, seq_of_parameters): for parameters in seq_of_parameters: self.execute(operation, parameters)
Execute a BigQuery query multiple times with different parameters.
def executemany(self, operation, seq_of_parameters): """ Execute a BigQuery query multiple times with different parameters. :param operation: The query to execute. :type operation: str :param seq_of_parameters: List of dictionary parameters to substitute into the query. :type seq_of_parameters: list """ for parameters in seq_of_parameters: self.execute(operation, parameters)
airflow/contrib/hooks/bigquery_hook.py
apache/airflow
BigQueryCursor.next
def next(self): if not self.job_id: return None if len(self.buffer) == 0: if self.all_pages_loaded: return None query_results = (self.service.jobs().getQueryResults( projectId=self.project_id, jobId=self.job_id, pageToken=self.page_token).execute(num_retries=self.num_retries)) if 'rows' in query_results and query_results['rows']: self.page_token = query_results.get('pageToken') fields = query_results['schema']['fields'] col_types = [field['type'] for field in fields] rows = query_results['rows'] for dict_row in rows: typed_row = ([ _bq_cast(vs['v'], col_types[idx]) for idx, vs in enumerate(dict_row['f']) ]) self.buffer.append(typed_row) if not self.page_token: self.all_pages_loaded = True else: self.page_token = None self.job_id = None self.page_token = None return None return self.buffer.pop(0)
Helper method for fetchone, which returns the next row from a buffer. If the buffer is empty, attempts to paginate through the result set for the next page, and load it into the buffer.
def next(self): """ Helper method for fetchone, which returns the next row from a buffer. If the buffer is empty, attempts to paginate through the result set for the next page, and load it into the buffer. """ if not self.job_id: return None if len(self.buffer) == 0: if self.all_pages_loaded: return None query_results = (self.service.jobs().getQueryResults( projectId=self.project_id, jobId=self.job_id, pageToken=self.page_token).execute(num_retries=self.num_retries)) if 'rows' in query_results and query_results['rows']: self.page_token = query_results.get('pageToken') fields = query_results['schema']['fields'] col_types = [field['type'] for field in fields] rows = query_results['rows'] for dict_row in rows: typed_row = ([ _bq_cast(vs['v'], col_types[idx]) for idx, vs in enumerate(dict_row['f']) ]) self.buffer.append(typed_row) if not self.page_token: self.all_pages_loaded = True else: # Reset all state since we've exhausted the results. self.page_token = None self.job_id = None self.page_token = None return None return self.buffer.pop(0)
airflow/contrib/hooks/bigquery_hook.py
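A sketch of how the cursor methods above fit into the DB-API flow, assuming a configured BigQuery connection; the connection id and query are placeholders.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook

# Placeholder connection id and query. fetchone() pops rows from the buffer
# that next() refills one result page at a time.
hook = BigQueryHook(bigquery_conn_id='bigquery_default')
cursor = hook.get_conn().cursor()
cursor.execute('SELECT 17 AS answer')
first_row = cursor.fetchone()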
apache/airflow
PostgresToGoogleCloudStorageOperator._query_postgres
def _query_postgres(self): postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id) conn = postgres.get_conn() cursor = conn.cursor() cursor.execute(self.sql, self.parameters) return cursor
Queries Postgres and returns a cursor to the results.
def _query_postgres(self): """ Queries Postgres and returns a cursor to the results. """ postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id) conn = postgres.get_conn() cursor = conn.cursor() cursor.execute(self.sql, self.parameters) return cursor
airflow/contrib/operators/postgres_to_gcs_operator.py
apache/airflow
_make_intermediate_dirs
def _make_intermediate_dirs(sftp_client, remote_directory): if remote_directory == '/': sftp_client.chdir('/') return if remote_directory == '': return try: sftp_client.chdir(remote_directory) except IOError: dirname, basename = os.path.split(remote_directory.rstrip('/')) _make_intermediate_dirs(sftp_client, dirname) sftp_client.mkdir(basename) sftp_client.chdir(basename) return
Create all the intermediate directories in a remote host
def _make_intermediate_dirs(sftp_client, remote_directory): """ Create all the intermediate directories in a remote host :param sftp_client: A Paramiko SFTP client. :param remote_directory: Absolute Path of the directory containing the file :return: """ if remote_directory == '/': sftp_client.chdir('/') return if remote_directory == '': return try: sftp_client.chdir(remote_directory) except IOError: dirname, basename = os.path.split(remote_directory.rstrip('/')) _make_intermediate_dirs(sftp_client, dirname) sftp_client.mkdir(basename) sftp_client.chdir(basename) return
airflow/contrib/operators/sftp_operator.py
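A hedged sketch of calling the helper above with a Paramiko SFTP client; the host, credentials and paths are placeholders.
import paramiko

# Placeholder host, credentials and remote path.
transport = paramiko.Transport(('sftp.example.com', 22))
transport.connect(username='user', password='secret')
sftp_client = paramiko.SFTPClient.from_transport(transport)

# Creates /data, /data/incoming and /data/incoming/2019 as needed.
_make_intermediate_dirs(sftp_client, '/data/incoming/2019')
sftp_client.put('report.csv', '/data/incoming/2019/report.csv')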
apache/airflow
SQSHook.create_queue
def create_queue(self, queue_name, attributes=None): return self.get_conn().create_queue(QueueName=queue_name, Attributes=attributes or {})
Create queue using connection object
def create_queue(self, queue_name, attributes=None): """ Create queue using connection object :param queue_name: name of the queue. :type queue_name: str :param attributes: additional attributes for the queue (default: None) For details of the attributes parameter see :py:meth:`botocore.client.SQS.create_queue` :type attributes: dict :return: dict with the information about the queue For details of the returned value see :py:meth:`botocore.client.SQS.create_queue` :rtype: dict """ return self.get_conn().create_queue(QueueName=queue_name, Attributes=attributes or {})
airflow/contrib/hooks/aws_sqs_hook.py
apache/airflow
SQSHook.send_message
def send_message(self, queue_url, message_body, delay_seconds=0, message_attributes=None): return self.get_conn().send_message(QueueUrl=queue_url, MessageBody=message_body, DelaySeconds=delay_seconds, MessageAttributes=message_attributes or {})
Send message to the queue
def send_message(self, queue_url, message_body, delay_seconds=0, message_attributes=None): """ Send message to the queue :param queue_url: queue url :type queue_url: str :param message_body: the contents of the message :type message_body: str :param delay_seconds: seconds to delay the message :type delay_seconds: int :param message_attributes: additional attributes for the message (default: None) For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message` :type message_attributes: dict :return: dict with the information about the message sent For details of the returned value see :py:meth:`botocore.client.SQS.send_message` :rtype: dict """ return self.get_conn().send_message(QueueUrl=queue_url, MessageBody=message_body, DelaySeconds=delay_seconds, MessageAttributes=message_attributes or {})
airflow/contrib/hooks/aws_sqs_hook.py
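A usage sketch for the two SQSHook methods above, assuming the default AWS connection; the queue name and message body are placeholders.
from airflow.contrib.hooks.aws_sqs_hook import SQSHook

# Placeholder queue name and message; create_queue returns the boto3
# response dict, which includes the QueueUrl needed by send_message.
hook = SQSHook(aws_conn_id='aws_default')
queue = hook.create_queue(queue_name='example-queue')
hook.send_message(queue_url=queue['QueueUrl'],
                  message_body='hello from airflow',
                  delay_seconds=0)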
apache/airflow
BaseTaskRunner.run_command
def run_command(self, run_with=None, join_args=False): run_with = run_with or [] cmd = [" ".join(self._command)] if join_args else self._command full_cmd = run_with + cmd self.log.info('Running: %s', full_cmd) proc = subprocess.Popen( full_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, close_fds=True, env=os.environ.copy(), preexec_fn=os.setsid ) log_reader = threading.Thread( target=self._read_task_logs, args=(proc.stdout,), ) log_reader.daemon = True log_reader.start() return proc
Run the task command.
def run_command(self, run_with=None, join_args=False): """ Run the task command. :param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']`` :type run_with: list :param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run']`` vs ``['airflow run']`` :param join_args: bool :return: the process that was run :rtype: subprocess.Popen """ run_with = run_with or [] cmd = [" ".join(self._command)] if join_args else self._command full_cmd = run_with + cmd self.log.info('Running: %s', full_cmd) proc = subprocess.Popen( full_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, close_fds=True, env=os.environ.copy(), preexec_fn=os.setsid ) # Start daemon thread to read subprocess logging output log_reader = threading.Thread( target=self._read_task_logs, args=(proc.stdout,), ) log_reader.daemon = True log_reader.start() return proc
airflow/task/task_runner/base_task_runner.py
apache/airflow
BaseTaskRunner.on_finish
def on_finish(self): if self._cfg_path and os.path.isfile(self._cfg_path): if self.run_as_user: subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True) else: os.remove(self._cfg_path)
A callback that should be called when this is done running.
def on_finish(self): """ A callback that should be called when this is done running. """ if self._cfg_path and os.path.isfile(self._cfg_path): if self.run_as_user: subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True) else: os.remove(self._cfg_path)
airflow/task/task_runner/base_task_runner.py
apache/airflow
_main
def _main(): usage = "usage: nvd3.py [options]" parser = OptionParser(usage=usage, version=("python-nvd3 - Charts generator with " "nvd3.js and d3.js")) parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True, help="don't print messages to stdout") (options, args) = parser.parse_args()
Parse options and process commands
def _main(): """ Parse options and process commands """ # Parse arguments usage = "usage: nvd3.py [options]" parser = OptionParser(usage=usage, version=("python-nvd3 - Charts generator with " "nvd3.js and d3.js")) parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True, help="don't print messages to stdout") (options, args) = parser.parse_args()
airflow/_vendor/nvd3/NVD3Chart.py
apache/airflow
NVD3Chart.buildhtmlheader
def buildhtmlheader(self): self.htmlheader = '' global _js_initialized if '_js_initialized' not in globals() or not _js_initialized: for css in self.header_css: self.htmlheader += css for js in self.header_js: self.htmlheader += js
generate HTML header content
def buildhtmlheader(self): """generate HTML header content""" self.htmlheader = '' # If the JavaScript assets have already been injected, don't bother re-sourcing them. global _js_initialized if '_js_initialized' not in globals() or not _js_initialized: for css in self.header_css: self.htmlheader += css for js in self.header_js: self.htmlheader += js
airflow/_vendor/nvd3/NVD3Chart.py
apache/airflow
NVD3Chart.buildcontainer
def buildcontainer(self): if self.container: return if self.width: if self.width[-1] != '%': self.style += 'width:%spx;' % self.width else: self.style += 'width:%s;' % self.width if self.height: if self.height[-1] != '%': self.style += 'height:%spx;' % self.height else: self.style += 'height:%s;' % self.height if self.style: self.style = 'style="%s"' % self.style self.container = self.containerheader + \ '<div id="%s"><svg %s></svg></div>\n' % (self.name, self.style)
generate HTML div
def buildcontainer(self): """generate HTML div""" if self.container: return # Create SVG div with style if self.width: if self.width[-1] != '%': self.style += 'width:%spx;' % self.width else: self.style += 'width:%s;' % self.width if self.height: if self.height[-1] != '%': self.style += 'height:%spx;' % self.height else: self.style += 'height:%s;' % self.height if self.style: self.style = 'style="%s"' % self.style self.container = self.containerheader + \ '<div id="%s"><svg %s></svg></div>\n' % (self.name, self.style)
airflow/_vendor/nvd3/NVD3Chart.py
apache/airflow
NVD3Chart.buildjschart
def buildjschart(self): self.jschart = '' if self.tooltip_condition_string == '': self.tooltip_condition_string = 'var y = String(graph.point.y);\n' self.series_js = json.dumps(self.series)
generate javascript code for the chart
def buildjschart(self): """generate javascript code for the chart""" self.jschart = '' # add custom tooltip string in jschart # default condition (if build_custom_tooltip is not called explicitly with date_flag=True) if self.tooltip_condition_string == '': self.tooltip_condition_string = 'var y = String(graph.point.y);\n' # Include data self.series_js = json.dumps(self.series)
airflow/_vendor/nvd3/NVD3Chart.py
apache/airflow
NVD3Chart.create_x_axis
def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False): axis = {} if custom_format and format: axis['tickFormat'] = format elif format: if format == 'AM_PM': axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d)); }" else: axis['tickFormat'] = "d3.format(',%s')" % format if label: axis['axisLabel'] = "'" + label + "'" if date: self.dateformat = format axis['tickFormat'] = ("function(d) { return d3.time.format('%s')" "(new Date(parseInt(d))) }\n" "" % self.dateformat) if name[0] == 'x': self.x_axis_date = True self.axislist[name] = axis if name == "xAxis" and self.focus_enable: self.axislist['x2Axis'] = axis
Create X-axis
def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False): """Create X-axis""" axis = {} if custom_format and format: axis['tickFormat'] = format elif format: if format == 'AM_PM': axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d)); }" else: axis['tickFormat'] = "d3.format(',%s')" % format if label: axis['axisLabel'] = "'" + label + "'" # date format : see https://github.com/mbostock/d3/wiki/Time-Formatting if date: self.dateformat = format axis['tickFormat'] = ("function(d) { return d3.time.format('%s')" "(new Date(parseInt(d))) }\n" "" % self.dateformat) # flag is the x Axis is a date if name[0] == 'x': self.x_axis_date = True # Add new axis to list of axis self.axislist[name] = axis # Create x2Axis if focus_enable if name == "xAxis" and self.focus_enable: self.axislist['x2Axis'] = axis
airflow/_vendor/nvd3/NVD3Chart.py
apache/airflow
NVD3Chart.create_y_axis
def create_y_axis(self, name, label=None, format=None, custom_format=False): axis = {} if custom_format and format: axis['tickFormat'] = format elif format: axis['tickFormat'] = "d3.format(',%s')" % format if label: axis['axisLabel'] = "'" + label + "'" self.axislist[name] = axis
Create Y-axis
def create_y_axis(self, name, label=None, format=None, custom_format=False): """ Create Y-axis """ axis = {} if custom_format and format: axis['tickFormat'] = format elif format: axis['tickFormat'] = "d3.format(',%s')" % format if label: axis['axisLabel'] = "'" + label + "'" # Add new axis to list of axis self.axislist[name] = axis
airflow/_vendor/nvd3/NVD3Chart.py
apache/airflow
action_logging
def action_logging(f): @functools.wraps(f) def wrapper(*args, **kwargs): with create_session() as session: if g.user.is_anonymous: user = 'anonymous' else: user = g.user.username log = Log( event=f.__name__, task_instance=None, owner=user, extra=str(list(request.args.items())), task_id=request.args.get('task_id'), dag_id=request.args.get('dag_id')) if 'execution_date' in request.args: log.execution_date = pendulum.parse( request.args.get('execution_date')) session.add(log) return f(*args, **kwargs) return wrapper
Decorator to log user actions
def action_logging(f): """ Decorator to log user actions """ @functools.wraps(f) def wrapper(*args, **kwargs): with create_session() as session: if g.user.is_anonymous: user = 'anonymous' else: user = g.user.username log = Log( event=f.__name__, task_instance=None, owner=user, extra=str(list(request.args.items())), task_id=request.args.get('task_id'), dag_id=request.args.get('dag_id')) if 'execution_date' in request.args: log.execution_date = pendulum.parse( request.args.get('execution_date')) session.add(log) return f(*args, **kwargs) return wrapper
airflow/www/decorators.py
apache/airflow
gzipped
def gzipped(f): @functools.wraps(f) def view_func(*args, **kwargs): @after_this_request def zipper(response): accept_encoding = request.headers.get('Accept-Encoding', '') if 'gzip' not in accept_encoding.lower(): return response response.direct_passthrough = False if (response.status_code < 200 or response.status_code >= 300 or 'Content-Encoding' in response.headers): return response gzip_buffer = IO() gzip_file = gzip.GzipFile(mode='wb', fileobj=gzip_buffer) gzip_file.write(response.data) gzip_file.close() response.data = gzip_buffer.getvalue() response.headers['Content-Encoding'] = 'gzip' response.headers['Vary'] = 'Accept-Encoding' response.headers['Content-Length'] = len(response.data) return response return f(*args, **kwargs) return view_func
Decorator to make a view compressed
def gzipped(f): """ Decorator to make a view compressed """ @functools.wraps(f) def view_func(*args, **kwargs): @after_this_request def zipper(response): accept_encoding = request.headers.get('Accept-Encoding', '') if 'gzip' not in accept_encoding.lower(): return response response.direct_passthrough = False if (response.status_code < 200 or response.status_code >= 300 or 'Content-Encoding' in response.headers): return response gzip_buffer = IO() gzip_file = gzip.GzipFile(mode='wb', fileobj=gzip_buffer) gzip_file.write(response.data) gzip_file.close() response.data = gzip_buffer.getvalue() response.headers['Content-Encoding'] = 'gzip' response.headers['Vary'] = 'Accept-Encoding' response.headers['Content-Length'] = len(response.data) return response return f(*args, **kwargs) return view_func
airflow/www/decorators.py
apache/airflow
DagModel.create_dagrun
def create_dagrun(self, run_id, state, execution_date, start_date=None, external_trigger=False, conf=None, session=None): return self.get_dag().create_dagrun(run_id=run_id, state=state, execution_date=execution_date, start_date=start_date, external_trigger=external_trigger, conf=conf, session=session)
Creates a dag run from this dag including the tasks associated with this dag.
def create_dagrun(self, run_id, state, execution_date, start_date=None, external_trigger=False, conf=None, session=None): """ Creates a dag run from this dag including the tasks associated with this dag. Returns the dag run. :param run_id: defines the the run id for this dag run :type run_id: str :param execution_date: the execution date of this dag run :type execution_date: datetime.datetime :param state: the state of the dag run :type state: airflow.utils.state.State :param start_date: the date this dag run should be evaluated :type start_date: datetime.datetime :param external_trigger: whether this dag run is externally triggered :type external_trigger: bool :param session: database session :type session: sqlalchemy.orm.session.Session """ return self.get_dag().create_dagrun(run_id=run_id, state=state, execution_date=execution_date, start_date=start_date, external_trigger=external_trigger, conf=conf, session=session)
airflow/models/dag.py
apache/airflow
SQSPublishOperator.execute
def execute(self, context): hook = SQSHook(aws_conn_id=self.aws_conn_id) result = hook.send_message(queue_url=self.sqs_queue, message_body=self.message_content, delay_seconds=self.delay_seconds, message_attributes=self.message_attributes) self.log.info('result is send_message is %s', result) return result
Publish the message to SQS queue
def execute(self, context): """ Publish the message to SQS queue :param context: the context object :type context: dict :return: dict with information about the message sent For details of the returned dict see :py:meth:`botocore.client.SQS.send_message` :rtype: dict """ hook = SQSHook(aws_conn_id=self.aws_conn_id) result = hook.send_message(queue_url=self.sqs_queue, message_body=self.message_content, delay_seconds=self.delay_seconds, message_attributes=self.message_attributes) self.log.info('result is send_message is %s', result) return result
airflow/contrib/operators/aws_sqs_publish_operator.py
apache/airflow
json_response
def json_response(obj): return Response( response=json.dumps( obj, indent=4, cls=AirflowJsonEncoder), status=200, mimetype="application/json")
returns a json response from a json serializable python object
def json_response(obj): """ returns a json response from a json serializable python object """ return Response( response=json.dumps( obj, indent=4, cls=AirflowJsonEncoder), status=200, mimetype="application/json")
airflow/www/utils.py
apache/airflow
open_maybe_zipped
def open_maybe_zipped(f, mode='r'): _, archive, filename = ZIP_REGEX.search(f).groups() if archive and zipfile.is_zipfile(archive): return zipfile.ZipFile(archive, mode=mode).open(filename) else: return io.open(f, mode=mode)
Opens the given file. If the path contains a folder with a .zip suffix, then the folder is treated as a zip archive, opening the file inside the archive.
def open_maybe_zipped(f, mode='r'): """ Opens the given file. If the path contains a folder with a .zip suffix, then the folder is treated as a zip archive, opening the file inside the archive. :return: a file object, as in `open`, or as in `ZipFile.open`. """ _, archive, filename = ZIP_REGEX.search(f).groups() if archive and zipfile.is_zipfile(archive): return zipfile.ZipFile(archive, mode=mode).open(filename) else: return io.open(f, mode=mode)
airflow/www/utils.py
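An illustrative call to open_maybe_zipped with placeholder paths, showing that plain files and files packaged inside a .zip DAG bundle are opened the same way.
# Placeholder paths; the second one points inside a zip archive.
with open_maybe_zipped('/home/airflow/dags/my_dag.py') as f:
    plain_source = f.read()

with open_maybe_zipped('/home/airflow/dags/bundle.zip/my_dag.py') as f:
    zipped_source = f.read()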
apache/airflow
make_cache_key
def make_cache_key(*args, **kwargs): path = request.path args = str(hash(frozenset(request.args.items()))) return (path + args).encode('ascii', 'ignore')
Used by cache to get a unique key per URL
def make_cache_key(*args, **kwargs): """ Used by cache to get a unique key per URL """ path = request.path args = str(hash(frozenset(request.args.items()))) return (path + args).encode('ascii', 'ignore')
airflow/www/utils.py
apache/airflow
CloudVideoIntelligenceHook.annotate_video
def annotate_video( self, input_uri=None, input_content=None, features=None, video_context=None, output_uri=None, location=None, retry=None, timeout=None, metadata=None, ): client = self.get_conn() return client.annotate_video( input_uri=input_uri, input_content=input_content, features=features, video_context=video_context, output_uri=output_uri, location_id=location, retry=retry, timeout=timeout, metadata=metadata, )
Performs video annotation.
def annotate_video( self, input_uri=None, input_content=None, features=None, video_context=None, output_uri=None, location=None, retry=None, timeout=None, metadata=None, ): """ Performs video annotation. :param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id``. :type input_uri: str :param input_content: The video data bytes. If unset, the input video(s) should be specified via ``input_uri``. If set, ``input_uri`` should be unset. :type input_content: bytes :param features: Requested video annotation features. :type features: list[google.cloud.videointelligence_v1.VideoIntelligenceServiceClient.enums.Feature] :param output_uri: Optional, location where the output (in JSON format) should be stored. Currently, only Google Cloud Storage URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id``. :type output_uri: str :param video_context: Optional, Additional video context and/or feature-specific parameters. :type video_context: dict or google.cloud.videointelligence_v1.types.VideoContext :param location: Optional, cloud region where annotation should take place. Supported cloud regions: us-east1, us-west1, europe-west1, asia-east1. If no region is specified, a region will be determined based on video file location. :type location: str :param retry: Retry object used to determine when/if to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: Optional, The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Optional, Additional metadata that is provided to the method. :type metadata: seq[tuple[str, str]] """ client = self.get_conn() return client.annotate_video( input_uri=input_uri, input_content=input_content, features=features, video_context=video_context, output_uri=output_uri, location_id=location, retry=retry, timeout=timeout, metadata=metadata, )
airflow/contrib/hooks/gcp_video_intelligence_hook.py
apache/airflow
OpsgenieAlertHook._get_api_key
def _get_api_key(self): conn = self.get_connection(self.http_conn_id) api_key = conn.password if not api_key: raise AirflowException('Opsgenie API Key is required for this hook, ' 'please check your conn_id configuration.') return api_key
Get Opsgenie api_key for creating alert
def _get_api_key(self): """ Get Opsgenie api_key for creating alert """ conn = self.get_connection(self.http_conn_id) api_key = conn.password if not api_key: raise AirflowException('Opsgenie API Key is required for this hook, ' 'please check your conn_id configuration.') return api_key
airflow/contrib/hooks/opsgenie_alert_hook.py
apache/airflow
OpsgenieAlertHook.get_conn
def get_conn(self, headers=None): conn = self.get_connection(self.http_conn_id) self.base_url = conn.host if conn.host else 'https://api.opsgenie.com' session = requests.Session() if headers: session.headers.update(headers) return session
Override HttpHook get_conn because this hook just needs base_url and headers, and does not need generic params
def get_conn(self, headers=None): """ Overwrite HttpHook get_conn because this hook just needs base_url and headers, and does not need generic params :param headers: additional headers to be passed through as a dictionary :type headers: dict """ conn = self.get_connection(self.http_conn_id) self.base_url = conn.host if conn.host else 'https://api.opsgenie.com' session = requests.Session() if headers: session.headers.update(headers) return session
airflow/contrib/hooks/opsgenie_alert_hook.py
apache/airflow
OpsgenieAlertHook.execute
def execute(self, payload={}): api_key = self._get_api_key() return self.run(endpoint='v2/alerts', data=json.dumps(payload), headers={'Content-Type': 'application/json', 'Authorization': 'GenieKey %s' % api_key})
Execute the Opsgenie Alert call
def execute(self, payload={}): """ Execute the Opsgenie Alert call :param payload: Opsgenie API Create Alert payload values See https://docs.opsgenie.com/docs/alert-api#section-create-alert :type payload: dict """ api_key = self._get_api_key() return self.run(endpoint='v2/alerts', data=json.dumps(payload), headers={'Content-Type': 'application/json', 'Authorization': 'GenieKey %s' % api_key})
airflow/contrib/hooks/opsgenie_alert_hook.py
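A minimal usage sketch for the hook above. The connection id and payload values are illustrative; the hook reads the API key from the password of the referenced HTTP connection, and execute returns the underlying HTTP response.

    from airflow.contrib.hooks.opsgenie_alert_hook import OpsgenieAlertHook

    # Hypothetical payload; 'message' is the only field Opsgenie strictly requires
    payload = {
        'message': 'Task my_task failed on 2019-01-01',
        'priority': 'P3',
        'tags': ['airflow', 'etl'],
    }

    hook = OpsgenieAlertHook('opsgenie_default')
    response = hook.execute(payload)  # POSTs to v2/alerts with the GenieKey header
    print(response.status_code)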
apache/airflow
OpsgenieAlertOperator._build_opsgenie_payload
def _build_opsgenie_payload(self): payload = {} for key in [ "message", "alias", "description", "responders", "visibleTo", "actions", "tags", "details", "entity", "source", "priority", "user", "note" ]: val = getattr(self, key) if val: payload[key] = val return payload
Construct the Opsgenie JSON payload. All relevant parameters are combined here to a valid Opsgenie JSON payload.
def _build_opsgenie_payload(self): """ Construct the Opsgenie JSON payload. All relevant parameters are combined here to a valid Opsgenie JSON payload. :return: Opsgenie payload (dict) to send """ payload = {} for key in [ "message", "alias", "description", "responders", "visibleTo", "actions", "tags", "details", "entity", "source", "priority", "user", "note" ]: val = getattr(self, key) if val: payload[key] = val return payload
airflow/contrib/operators/opsgenie_alert_operator.py
apache/airflow
OpsgenieAlertOperator.execute
def execute(self, context): self.hook = OpsgenieAlertHook(self.opsgenie_conn_id) self.hook.execute(self._build_opsgenie_payload())
Call the OpsgenieAlertHook to post message
def execute(self, context): """ Call the OpsgenieAlertHook to post message """ self.hook = OpsgenieAlertHook(self.opsgenie_conn_id) self.hook.execute(self._build_opsgenie_payload())
airflow/contrib/operators/opsgenie_alert_operator.py
apache/airflow
AWSAthenaHook.get_conn
def get_conn(self): if not self.conn: self.conn = self.get_client_type('athena') return self.conn
Check if an AWS connection exists already; if not, create one, then return it
def get_conn(self): """ Check if an AWS connection exists already; if not, create one, then return it :return: boto3 Athena client """ if not self.conn: self.conn = self.get_client_type('athena') return self.conn
airflow/contrib/hooks/aws_athena_hook.py
apache/airflow
AWSAthenaHook.run_query
def run_query(self, query, query_context, result_configuration, client_request_token=None): response = self.conn.start_query_execution(QueryString=query, ClientRequestToken=client_request_token, QueryExecutionContext=query_context, ResultConfiguration=result_configuration) query_execution_id = response['QueryExecutionId'] return query_execution_id
Run a Presto query on Athena with the provided config and return the submitted query_execution_id
def run_query(self, query, query_context, result_configuration, client_request_token=None): """ Run a Presto query on Athena with the provided config and return the submitted query_execution_id :param query: Presto query to run :type query: str :param query_context: Context in which the query needs to be run :type query_context: dict :param result_configuration: Dict with path to store results in and config related to encryption :type result_configuration: dict :param client_request_token: Unique token created by the user to avoid multiple executions of the same query :type client_request_token: str :return: str """ response = self.conn.start_query_execution(QueryString=query, ClientRequestToken=client_request_token, QueryExecutionContext=query_context, ResultConfiguration=result_configuration) query_execution_id = response['QueryExecutionId'] return query_execution_id
airflow/contrib/hooks/aws_athena_hook.py
apache/airflow
AWSAthenaHook.check_query_status
def check_query_status(self, query_execution_id): response = self.conn.get_query_execution(QueryExecutionId=query_execution_id) state = None try: state = response['QueryExecution']['Status']['State'] except Exception as ex: self.log.error('Exception while getting query state: %s', ex) finally: return state
Fetch the status of a submitted Athena query.
def check_query_status(self, query_execution_id): """ Fetch the status of a submitted Athena query. Returns None or one of the valid query states. :param query_execution_id: Id of the submitted Athena query :type query_execution_id: str :return: str """ response = self.conn.get_query_execution(QueryExecutionId=query_execution_id) state = None try: state = response['QueryExecution']['Status']['State'] except Exception as ex: self.log.error('Exception while getting query state: %s', ex) finally: return state
airflow/contrib/hooks/aws_athena_hook.py
apache/airflow
AWSAthenaHook.poll_query_status
def poll_query_status(self, query_execution_id, max_tries=None): try_number = 1 final_query_state = None while True: query_state = self.check_query_status(query_execution_id) if query_state is None: self.log.info('Trial {try_number}: Invalid query state. Retrying again'.format( try_number=try_number)) elif query_state in self.INTERMEDIATE_STATES: self.log.info('Trial {try_number}: Query is still in an intermediate state - {state}' .format(try_number=try_number, state=query_state)) else: self.log.info('Trial {try_number}: Query execution completed. Final state is {state}' .format(try_number=try_number, state=query_state)) final_query_state = query_state break if max_tries and try_number >= max_tries: final_query_state = query_state break try_number += 1 sleep(self.sleep_time) return final_query_state
Poll the status of submitted athena query until query state reaches final state.
def poll_query_status(self, query_execution_id, max_tries=None): """ Poll the status of submitted athena query until query state reaches final state. Returns one of the final states :param query_execution_id: Id of submitted athena query :type query_execution_id: str :param max_tries: Number of times to poll for query state before function exits :type max_tries: int :return: str """ try_number = 1 final_query_state = None # Query state when query reaches final state or max_tries reached while True: query_state = self.check_query_status(query_execution_id) if query_state is None: self.log.info('Trial {try_number}: Invalid query state. Retrying again'.format( try_number=try_number)) elif query_state in self.INTERMEDIATE_STATES: self.log.info('Trial {try_number}: Query is still in an intermediate state - {state}' .format(try_number=try_number, state=query_state)) else: self.log.info('Trial {try_number}: Query execution completed. Final state is {state}' .format(try_number=try_number, state=query_state)) final_query_state = query_state break if max_tries and try_number >= max_tries: # Break loop if max_tries reached final_query_state = query_state break try_number += 1 sleep(self.sleep_time) return final_query_state
airflow/contrib/hooks/aws_athena_hook.py
apache/airflow
ZendeskHook.__handle_rate_limit_exception
def __handle_rate_limit_exception(self, rate_limit_exception): retry_after = int( rate_limit_exception.response.headers.get('Retry-After', 60)) self.log.info( "Hit Zendesk API rate limit. Pausing for %s seconds", retry_after ) time.sleep(retry_after)
Sleep for the time specified in the exception. If not specified, wait for 60 seconds.
def __handle_rate_limit_exception(self, rate_limit_exception): """ Sleep for the time specified in the exception. If not specified, wait for 60 seconds. """ retry_after = int( rate_limit_exception.response.headers.get('Retry-After', 60)) self.log.info( "Hit Zendesk API rate limit. Pausing for %s seconds", retry_after ) time.sleep(retry_after)
airflow/hooks/zendesk_hook.py
apache/airflow
ZendeskHook.call
def call(self, path, query=None, get_all_pages=True, side_loading=False): zendesk = self.get_conn() first_request_successful = False while not first_request_successful: try: results = zendesk.call(path, query) first_request_successful = True except RateLimitError as rle: self.__handle_rate_limit_exception(rle) keys = [path.split("/")[-1].split(".json")[0]] next_page = results['next_page'] if side_loading: keys += query['include'].split(',') results = {key: results[key] for key in keys} if get_all_pages: while next_page is not None: try: next_url = next_page.split(self.__url)[1] self.log.info("Calling %s", next_url) more_res = zendesk.call(next_url) for key in results: results[key].extend(more_res[key]) if next_page == more_res['next_page']: break else: next_page = more_res['next_page'] except RateLimitError as rle: self.__handle_rate_limit_exception(rle) except ZendeskError as ze: if b"Use a start_time older than 5 minutes" in ze.msg: break else: raise ze return results
Call Zendesk API and return results
def call(self, path, query=None, get_all_pages=True, side_loading=False): """ Call Zendesk API and return results :param path: The Zendesk API to call :param query: Query parameters :param get_all_pages: Accumulate results over all pages before returning. Due to strict rate limiting, this can often timeout. Waits for recommended period between tries after a timeout. :param side_loading: Retrieve related records as part of a single request. In order to enable side-loading, add an 'include' query parameter containing a comma-separated list of resources to load. For more information on side-loading see https://developer.zendesk.com/rest_api/docs/core/side_loading """ zendesk = self.get_conn() first_request_successful = False while not first_request_successful: try: results = zendesk.call(path, query) first_request_successful = True except RateLimitError as rle: self.__handle_rate_limit_exception(rle) # Find the key with the results keys = [path.split("/")[-1].split(".json")[0]] next_page = results['next_page'] if side_loading: keys += query['include'].split(',') results = {key: results[key] for key in keys} if get_all_pages: while next_page is not None: try: # Need to split because the next page URL has # `github.zendesk...` # in it, but the call function needs it removed. next_url = next_page.split(self.__url)[1] self.log.info("Calling %s", next_url) more_res = zendesk.call(next_url) for key in results: results[key].extend(more_res[key]) if next_page == more_res['next_page']: # Unfortunately zdesk doesn't always throw ZendeskError # when we are done getting all the data. Sometimes the # next just refers to the current set of results. # Hence, need to deal with this special case break else: next_page = more_res['next_page'] except RateLimitError as rle: self.__handle_rate_limit_exception(rle) except ZendeskError as ze: if b"Use a start_time older than 5 minutes" in ze.msg: # We have pretty up to date data break else: raise ze return results
airflow/hooks/zendesk_hook.py
apache/airflow
AwsGlueCatalogHook.get_partitions
def get_partitions(self, database_name, table_name, expression='', page_size=None, max_items=None): config = { 'PageSize': page_size, 'MaxItems': max_items, } paginator = self.get_conn().get_paginator('get_partitions') response = paginator.paginate( DatabaseName=database_name, TableName=table_name, Expression=expression, PaginationConfig=config ) partitions = set() for page in response: for p in page['Partitions']: partitions.add(tuple(p['Values'])) return partitions
Retrieves the partition values for a table.
def get_partitions(self, database_name, table_name, expression='', page_size=None, max_items=None): """ Retrieves the partition values for a table. :param database_name: The name of the catalog database where the partitions reside. :type database_name: str :param table_name: The name of the partitions' table. :type table_name: str :param expression: An expression filtering the partitions to be returned. Please see official AWS documentation for further information. https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions :type expression: str :param page_size: pagination size :type page_size: int :param max_items: maximum items to return :type max_items: int :return: set of partition values where each value is a tuple since a partition may be composed of multiple columns. For example: ``{('2018-01-01','1'), ('2018-01-01','2')}`` """ config = { 'PageSize': page_size, 'MaxItems': max_items, } paginator = self.get_conn().get_paginator('get_partitions') response = paginator.paginate( DatabaseName=database_name, TableName=table_name, Expression=expression, PaginationConfig=config ) partitions = set() for page in response: for p in page['Partitions']: partitions.add(tuple(p['Values'])) return partitions
airflow/contrib/hooks/aws_glue_catalog_hook.py
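An illustrative use of the partition lookup above; the connection id, database, table, and filter expression are hypothetical.

    from airflow.contrib.hooks.aws_glue_catalog_hook import AwsGlueCatalogHook

    hook = AwsGlueCatalogHook(aws_conn_id='aws_default')
    partitions = hook.get_partitions(
        database_name='example_db',
        table_name='events',
        expression="ds='2018-01-01'",  # see the AWS expression syntax linked above
        page_size=100,
    )
    # Each element is a tuple of partition values, e.g. ('2018-01-01', '1')
    for values in sorted(partitions):
        print(values)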
apache/airflow
AwsGlueCatalogHook.get_table
def get_table(self, database_name, table_name): result = self.get_conn().get_table(DatabaseName=database_name, Name=table_name) return result['Table']
Get the information of the table
def get_table(self, database_name, table_name): """ Get the information of the table :param database_name: Name of hive database (schema) @table belongs to :type database_name: str :param table_name: Name of hive table :type table_name: str :rtype: dict >>> hook = AwsGlueCatalogHook() >>> r = hook.get_table('db', 'table_foo') >>> r['Name'] == 'table_foo' """ result = self.get_conn().get_table(DatabaseName=database_name, Name=table_name) return result['Table']
airflow/contrib/hooks/aws_glue_catalog_hook.py
apache/airflow
AwsGlueCatalogHook.get_table_location
def get_table_location(self, database_name, table_name): table = self.get_table(database_name, table_name) return table['StorageDescriptor']['Location']
Get the physical location of the table
def get_table_location(self, database_name, table_name): """ Get the physical location of the table :param database_name: Name of hive database (schema) @table belongs to :type database_name: str :param table_name: Name of hive table :type table_name: str :return: str """ table = self.get_table(database_name, table_name) return table['StorageDescriptor']['Location']
airflow/contrib/hooks/aws_glue_catalog_hook.py
apache/airflow
RedshiftHook.cluster_status
def cluster_status(self, cluster_identifier): conn = self.get_conn() try: response = conn.describe_clusters( ClusterIdentifier=cluster_identifier)['Clusters'] return response[0]['ClusterStatus'] if response else None except conn.exceptions.ClusterNotFoundFault: return 'cluster_not_found'
Return status of a cluster
def cluster_status(self, cluster_identifier): """ Return status of a cluster :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str """ conn = self.get_conn() try: response = conn.describe_clusters( ClusterIdentifier=cluster_identifier)['Clusters'] return response[0]['ClusterStatus'] if response else None except conn.exceptions.ClusterNotFoundFault: return 'cluster_not_found'
airflow/contrib/hooks/redshift_hook.py
apache/airflow
RedshiftHook.delete_cluster
def delete_cluster( self, cluster_identifier, skip_final_cluster_snapshot=True, final_cluster_snapshot_identifier=''): response = self.get_conn().delete_cluster( ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=skip_final_cluster_snapshot, FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier ) return response['Cluster'] if response['Cluster'] else None
Delete a cluster and optionally create a snapshot
def delete_cluster( self, cluster_identifier, skip_final_cluster_snapshot=True, final_cluster_snapshot_identifier=''): """ Delete a cluster and optionally create a snapshot :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str :param skip_final_cluster_snapshot: determines cluster snapshot creation :type skip_final_cluster_snapshot: bool :param final_cluster_snapshot_identifier: name of final cluster snapshot :type final_cluster_snapshot_identifier: str """ response = self.get_conn().delete_cluster( ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=skip_final_cluster_snapshot, FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier ) return response['Cluster'] if response['Cluster'] else None
airflow/contrib/hooks/redshift_hook.py
apache/airflow
RedshiftHook.describe_cluster_snapshots
def describe_cluster_snapshots(self, cluster_identifier): response = self.get_conn().describe_cluster_snapshots( ClusterIdentifier=cluster_identifier ) if 'Snapshots' not in response: return None snapshots = response['Snapshots'] snapshots = [snapshot for snapshot in snapshots if snapshot['Status']] snapshots.sort(key=lambda x: x['SnapshotCreateTime'], reverse=True) return snapshots
Gets a list of snapshots for a cluster
def describe_cluster_snapshots(self, cluster_identifier): """ Gets a list of snapshots for a cluster :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str """ response = self.get_conn().describe_cluster_snapshots( ClusterIdentifier=cluster_identifier ) if 'Snapshots' not in response: return None snapshots = response['Snapshots'] # materialize the filtered list (filter() is lazy on Python 3) before sorting snapshots = [snapshot for snapshot in snapshots if snapshot['Status']] snapshots.sort(key=lambda x: x['SnapshotCreateTime'], reverse=True) return snapshots
airflow/contrib/hooks/redshift_hook.py
apache/airflow
RedshiftHook.restore_from_cluster_snapshot
def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier): response = self.get_conn().restore_from_cluster_snapshot( ClusterIdentifier=cluster_identifier, SnapshotIdentifier=snapshot_identifier ) return response['Cluster'] if response['Cluster'] else None
Restores a cluster from its snapshot
def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier): """ Restores a cluster from its snapshot :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str :param snapshot_identifier: unique identifier for a snapshot of a cluster :type snapshot_identifier: str """ response = self.get_conn().restore_from_cluster_snapshot( ClusterIdentifier=cluster_identifier, SnapshotIdentifier=snapshot_identifier ) return response['Cluster'] if response['Cluster'] else None
airflow/contrib/hooks/redshift_hook.py
apache/airflow
RedshiftHook.create_cluster_snapshot
def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier): response = self.get_conn().create_cluster_snapshot( SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier, ) return response['Snapshot'] if response['Snapshot'] else None
Creates a snapshot of a cluster
def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier): """ Creates a snapshot of a cluster :param snapshot_identifier: unique identifier for a snapshot of a cluster :type snapshot_identifier: str :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str """ response = self.get_conn().create_cluster_snapshot( SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier, ) return response['Snapshot'] if response['Snapshot'] else None
airflow/contrib/hooks/redshift_hook.py
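A brief sketch tying the Redshift helpers above together; the cluster and snapshot identifiers are placeholders.

    from airflow.contrib.hooks.redshift_hook import RedshiftHook

    hook = RedshiftHook(aws_conn_id='aws_default')

    # Snapshot a running cluster, then confirm the cluster status afterwards
    hook.create_cluster_snapshot(
        snapshot_identifier='example-snapshot-2019-01-01',
        cluster_identifier='example-cluster',
    )
    status = hook.cluster_status('example-cluster')
    print('Cluster status: %s' % status)  # e.g. 'available' or 'cluster_not_found'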
apache/airflow
SlackAPIOperator.execute
def execute(self, **kwargs): if not self.api_params: self.construct_api_call_params() slack = SlackHook(token=self.token, slack_conn_id=self.slack_conn_id) slack.call(self.method, self.api_params)
SlackAPIOperator calls will not fail even if the call is unsuccessful. It should not prevent a DAG from completing successfully
def execute(self, **kwargs): """ SlackAPIOperator calls will not fail even if the call is unsuccessful. It should not prevent a DAG from completing successfully """ if not self.api_params: self.construct_api_call_params() slack = SlackHook(token=self.token, slack_conn_id=self.slack_conn_id) slack.call(self.method, self.api_params)
airflow/operators/slack_operator.py
apache/airflow
HdfsSensor.filter_for_filesize
def filter_for_filesize(result, size=None): if size: log = LoggingMixin().log log.debug( 'Filtering for file size >= %s in files: %s', size, map(lambda x: x['path'], result) ) size *= settings.MEGABYTE result = [x for x in result if x['length'] >= size] log.debug('HdfsSensor.poke: after size filter result is %s', result) return result
Will test the filepath result and check whether its size is at least self.filesize
def filter_for_filesize(result, size=None): """ Will test the filepath result and check whether its size is at least self.filesize :param result: a list of dicts returned by Snakebite ls :param size: the file size in MB a file should be at least to trigger True :return: (list) of dicts for the files that meet the size criteria """ if size: log = LoggingMixin().log log.debug( 'Filtering for file size >= %s in files: %s', size, map(lambda x: x['path'], result) ) size *= settings.MEGABYTE result = [x for x in result if x['length'] >= size] log.debug('HdfsSensor.poke: after size filter result is %s', result) return result
airflow/sensors/hdfs_sensor.py
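Because the filter above is a static helper operating on a Snakebite-style listing, it can be exercised directly; the entries below are made up, lengths are in bytes, and size is given in MB.

    from airflow.sensors.hdfs_sensor import HdfsSensor

    listing = [
        {'path': '/data/part-00000', 'length': 5 * 1024 * 1024},  # 5 MB
        {'path': '/data/_SUCCESS', 'length': 0},
    ]
    big_enough = HdfsSensor.filter_for_filesize(listing, size=1)
    # Only /data/part-00000 survives the 1 MB threshold
    print([entry['path'] for entry in big_enough])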
apache/airflow
HdfsSensor.filter_for_ignored_ext
def filter_for_ignored_ext(result, ignored_ext, ignore_copying): if ignore_copying: log = LoggingMixin().log regex_builder = r"^.*\.(%s$)$" % '$|'.join(ignored_ext) ignored_extensions_regex = re.compile(regex_builder) log.debug( 'Filtering result for ignored extensions: %s in files %s', ignored_extensions_regex.pattern, map(lambda x: x['path'], result) ) result = [x for x in result if not ignored_extensions_regex.match(x['path'])] log.debug('HdfsSensor.poke: after ext filter result is %s', result) return result
Will filter, if instructed to do so, the result to remove entries matching the ignore criteria
def filter_for_ignored_ext(result, ignored_ext, ignore_copying): """ Will filter, if instructed to do so, the result to remove entries matching the ignore criteria :param result: list of dicts returned by Snakebite ls :type result: list[dict] :param ignored_ext: list of ignored extensions :type ignored_ext: list :param ignore_copying: whether to ignore files that are still being copied :type ignore_copying: bool :return: list of dicts which were not removed :rtype: list[dict] """ if ignore_copying: log = LoggingMixin().log regex_builder = r"^.*\.(%s$)$" % '$|'.join(ignored_ext) ignored_extensions_regex = re.compile(regex_builder) log.debug( 'Filtering result for ignored extensions: %s in files %s', ignored_extensions_regex.pattern, map(lambda x: x['path'], result) ) result = [x for x in result if not ignored_extensions_regex.match(x['path'])] log.debug('HdfsSensor.poke: after ext filter result is %s', result) return result
airflow/sensors/hdfs_sensor.py
apache/airflow
MongoToS3Operator.execute
def execute(self, context): s3_conn = S3Hook(self.s3_conn_id) if self.is_pipeline: results = MongoHook(self.mongo_conn_id).aggregate( mongo_collection=self.mongo_collection, aggregate_query=self.mongo_query, mongo_db=self.mongo_db ) else: results = MongoHook(self.mongo_conn_id).find( mongo_collection=self.mongo_collection, query=self.mongo_query, mongo_db=self.mongo_db ) docs_str = self._stringify(self.transform(results)) s3_conn.load_string( string_data=docs_str, key=self.s3_key, bucket_name=self.s3_bucket, replace=self.replace ) return True
Executed by task_instance at runtime
def execute(self, context): """ Executed by task_instance at runtime """ s3_conn = S3Hook(self.s3_conn_id) # Grab collection and execute query according to whether or not it is a pipeline if self.is_pipeline: results = MongoHook(self.mongo_conn_id).aggregate( mongo_collection=self.mongo_collection, aggregate_query=self.mongo_query, mongo_db=self.mongo_db ) else: results = MongoHook(self.mongo_conn_id).find( mongo_collection=self.mongo_collection, query=self.mongo_query, mongo_db=self.mongo_db ) # Performs transform then stringifies the docs results into json format docs_str = self._stringify(self.transform(results)) # Load Into S3 s3_conn.load_string( string_data=docs_str, key=self.s3_key, bucket_name=self.s3_bucket, replace=self.replace ) return True
airflow/contrib/operators/mongo_to_s3.py
apache/airflow
get_pool
def get_pool(name, session=None): if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") pool = session.query(Pool).filter_by(pool=name).first() if pool is None: raise PoolNotFound("Pool '%s' doesn't exist" % name) return pool
Get pool by a given name.
def get_pool(name, session=None): """Get pool by a given name.""" if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") pool = session.query(Pool).filter_by(pool=name).first() if pool is None: raise PoolNotFound("Pool '%s' doesn't exist" % name) return pool
airflow/api/common/experimental/pool.py
apache/airflow
create_pool
def create_pool(name, slots, description, session=None): if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") try: slots = int(slots) except ValueError: raise AirflowBadRequest("Bad value for `slots`: %s" % slots) session.expire_on_commit = False pool = session.query(Pool).filter_by(pool=name).first() if pool is None: pool = Pool(pool=name, slots=slots, description=description) session.add(pool) else: pool.slots = slots pool.description = description session.commit() return pool
Create a pool with a given parameters.
def create_pool(name, slots, description, session=None): """Create a pool with a given parameters.""" if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") try: slots = int(slots) except ValueError: raise AirflowBadRequest("Bad value for `slots`: %s" % slots) session.expire_on_commit = False pool = session.query(Pool).filter_by(pool=name).first() if pool is None: pool = Pool(pool=name, slots=slots, description=description) session.add(pool) else: pool.slots = slots pool.description = description session.commit() return pool
airflow/api/common/experimental/pool.py
apache/airflow
delete_pool
def delete_pool(name, session=None): if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") pool = session.query(Pool).filter_by(pool=name).first() if pool is None: raise PoolNotFound("Pool '%s' doesn't exist" % name) session.delete(pool) session.commit() return pool
Delete pool by a given name.
def delete_pool(name, session=None): """Delete pool by a given name.""" if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") pool = session.query(Pool).filter_by(pool=name).first() if pool is None: raise PoolNotFound("Pool '%s' doesn't exist" % name) session.delete(pool) session.commit() return pool
airflow/api/common/experimental/pool.py
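A quick sketch of the experimental pool helpers above. The pool name and slot count are arbitrary, and it is assumed the functions supply their own DB session when none is passed (they are normally wrapped with ``@provide_session``).

    from airflow.api.common.experimental.pool import create_pool, delete_pool, get_pool

    # Create (or update) a pool, read it back, then remove it
    create_pool(name='example_pool', slots=4, description='demo pool')
    pool = get_pool(name='example_pool')
    print(pool.pool, pool.slots)
    delete_pool(name='example_pool')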
apache/airflow
GKEClusterHook._dict_to_proto
def _dict_to_proto(py_dict, proto): dict_json_str = json.dumps(py_dict) return json_format.Parse(dict_json_str, proto)
Converts a python dictionary to the proto supplied
def _dict_to_proto(py_dict, proto): """ Converts a python dictionary to the proto supplied :param py_dict: The dictionary to convert :type py_dict: dict :param proto: The proto object to merge with dictionary :type proto: protobuf :return: A parsed python dictionary in provided proto format :raises: ParseError: On JSON parsing problems. """ dict_json_str = json.dumps(py_dict) return json_format.Parse(dict_json_str, proto)
airflow/contrib/hooks/gcp_container_hook.py
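The conversion above is a thin wrapper around ``google.protobuf.json_format``; a standalone illustration with example field values might look like this.

    import json

    from google.cloud.container_v1.types import Cluster
    from google.protobuf import json_format

    cluster_dict = {'name': 'example-cluster', 'initial_node_count': 1}
    cluster_proto = json_format.Parse(json.dumps(cluster_dict), Cluster())
    print(cluster_proto.name)  # 'example-cluster'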
apache/airflow
GKEClusterHook.wait_for_operation
def wait_for_operation(self, operation, project_id=None): self.log.info("Waiting for OPERATION_NAME %s", operation.name) time.sleep(OPERATIONAL_POLL_INTERVAL) while operation.status != Operation.Status.DONE: if operation.status == Operation.Status.RUNNING or operation.status == \ Operation.Status.PENDING: time.sleep(OPERATIONAL_POLL_INTERVAL) else: raise exceptions.GoogleCloudError( "Operation has failed with status: %s" % operation.status) operation = self.get_operation(operation.name, project_id=project_id or self.project_id) return operation
Given an operation, continuously fetches the status from Google Cloud until it either completes or an error occurs
def wait_for_operation(self, operation, project_id=None): """ Given an operation, continuously fetches the status from Google Cloud until it either completes or an error occurs :param operation: The Operation to wait for :type operation: google.cloud.container_v1.gapic.enums.Operation :param project_id: Google Cloud Platform project ID :type project_id: str :return: A new, updated operation fetched from Google Cloud """ self.log.info("Waiting for OPERATION_NAME %s", operation.name) time.sleep(OPERATIONAL_POLL_INTERVAL) while operation.status != Operation.Status.DONE: if operation.status == Operation.Status.RUNNING or operation.status == \ Operation.Status.PENDING: time.sleep(OPERATIONAL_POLL_INTERVAL) else: raise exceptions.GoogleCloudError( "Operation has failed with status: %s" % operation.status) # To update status of operation operation = self.get_operation(operation.name, project_id=project_id or self.project_id) return operation
airflow/contrib/hooks/gcp_container_hook.py
apache/airflow
GKEClusterHook.get_operation
def get_operation(self, operation_name, project_id=None): return self.get_client().get_operation(project_id=project_id or self.project_id, zone=self.location, operation_id=operation_name)
Fetches the operation from Google Cloud
def get_operation(self, operation_name, project_id=None): """ Fetches the operation from Google Cloud :param operation_name: Name of operation to fetch :type operation_name: str :param project_id: Google Cloud Platform project ID :type project_id: str :return: The new, updated operation from Google Cloud """ return self.get_client().get_operation(project_id=project_id or self.project_id, zone=self.location, operation_id=operation_name)
airflow/contrib/hooks/gcp_container_hook.py
apache/airflow
GKEClusterHook._append_label
def _append_label(cluster_proto, key, val): val = val.replace('.', '-').replace('+', '-') cluster_proto.resource_labels.update({key: val}) return cluster_proto
Append labels to the provided Cluster protobuf. Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (the current airflow version string follows the semantic versioning spec: x.y.z).
def _append_label(cluster_proto, key, val): """ Append labels to the provided Cluster protobuf. Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (the current airflow version string follows the semantic versioning spec: x.y.z). :param cluster_proto: The proto to append resource_label airflow version to :type cluster_proto: google.cloud.container_v1.types.Cluster :param key: The label key :type key: str :param val: The label value :type val: str :return: The cluster proto updated with new label """ val = val.replace('.', '-').replace('+', '-') cluster_proto.resource_labels.update({key: val}) return cluster_proto
airflow/contrib/hooks/gcp_container_hook.py
apache/airflow
GKEClusterHook.create_cluster
def create_cluster(self, cluster, project_id=None, retry=DEFAULT, timeout=DEFAULT): if isinstance(cluster, dict): cluster_proto = Cluster() cluster = self._dict_to_proto(py_dict=cluster, proto=cluster_proto) elif not isinstance(cluster, Cluster): raise AirflowException( "cluster is not instance of Cluster proto or python dict") self._append_label(cluster, 'airflow-version', 'v' + version.version) self.log.info( "Creating (project_id=%s, zone=%s, cluster_name=%s)", self.project_id, self.location, cluster.name ) try: op = self.get_client().create_cluster(project_id=project_id or self.project_id, zone=self.location, cluster=cluster, retry=retry, timeout=timeout) op = self.wait_for_operation(op) return op.target_link except AlreadyExists as error: self.log.info('Assuming Success: %s', error.message) return self.get_cluster(name=cluster.name).self_link
Creates a cluster, consisting of the specified number and type of Google Compute Engine instances.
def create_cluster(self, cluster, project_id=None, retry=DEFAULT, timeout=DEFAULT): """ Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. :param cluster: A Cluster protobuf or dict. If dict is provided, it must be of the same form as the protobuf message :class:`google.cloud.container_v1.types.Cluster` :type cluster: dict or google.cloud.container_v1.types.Cluster :param project_id: Google Cloud Platform project ID :type project_id: str :param retry: A retry object (``google.api_core.retry.Retry``) used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :return: The full url to the new, or existing, cluster :raises: ParseError: On JSON parsing problems when trying to convert dict AirflowException: cluster is not dict type nor Cluster proto type """ if isinstance(cluster, dict): cluster_proto = Cluster() cluster = self._dict_to_proto(py_dict=cluster, proto=cluster_proto) elif not isinstance(cluster, Cluster): raise AirflowException( "cluster is not instance of Cluster proto or python dict") self._append_label(cluster, 'airflow-version', 'v' + version.version) self.log.info( "Creating (project_id=%s, zone=%s, cluster_name=%s)", self.project_id, self.location, cluster.name ) try: op = self.get_client().create_cluster(project_id=project_id or self.project_id, zone=self.location, cluster=cluster, retry=retry, timeout=timeout) op = self.wait_for_operation(op) return op.target_link except AlreadyExists as error: self.log.info('Assuming Success: %s', error.message) return self.get_cluster(name=cluster.name).self_link
airflow/contrib/hooks/gcp_container_hook.py
apache/airflow
GKEClusterHook.get_cluster
def get_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT): self.log.info( "Fetching cluster (project_id=%s, zone=%s, cluster_name=%s)", project_id or self.project_id, self.location, name ) return self.get_client().get_cluster(project_id=project_id or self.project_id, zone=self.location, cluster_id=name, retry=retry, timeout=timeout).self_link
Gets details of specified cluster
def get_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT): """ Gets details of specified cluster :param name: The name of the cluster to retrieve :type name: str :param project_id: Google Cloud Platform project ID :type project_id: str :param retry: A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :return: google.cloud.container_v1.types.Cluster """ self.log.info( "Fetching cluster (project_id=%s, zone=%s, cluster_name=%s)", project_id or self.project_id, self.location, name ) return self.get_client().get_cluster(project_id=project_id or self.project_id, zone=self.location, cluster_id=name, retry=retry, timeout=timeout).self_link
airflow/contrib/hooks/gcp_container_hook.py
apache/airflow
DiscordWebhookHook._get_webhook_endpoint
def _get_webhook_endpoint(self, http_conn_id, webhook_endpoint): if webhook_endpoint: endpoint = webhook_endpoint elif http_conn_id: conn = self.get_connection(http_conn_id) extra = conn.extra_dejson endpoint = extra.get('webhook_endpoint', '') else: raise AirflowException('Cannot get webhook endpoint: No valid Discord ' 'webhook endpoint or http_conn_id supplied.') if not re.match('^webhooks/[0-9]+/[a-zA-Z0-9_-]+$', endpoint): raise AirflowException('Expected Discord webhook endpoint in the form ' 'of "webhooks/{webhook.id}/{webhook.token}".') return endpoint
Given a Discord http_conn_id, return the default webhook endpoint or override if a webhook_endpoint is manually supplied.
def _get_webhook_endpoint(self, http_conn_id, webhook_endpoint): """ Given a Discord http_conn_id, return the default webhook endpoint or override if a webhook_endpoint is manually supplied. :param http_conn_id: The provided connection ID :param webhook_endpoint: The manually provided webhook endpoint :return: Webhook endpoint (str) to use """ if webhook_endpoint: endpoint = webhook_endpoint elif http_conn_id: conn = self.get_connection(http_conn_id) extra = conn.extra_dejson endpoint = extra.get('webhook_endpoint', '') else: raise AirflowException('Cannot get webhook endpoint: No valid Discord ' 'webhook endpoint or http_conn_id supplied.') # make sure endpoint matches the expected Discord webhook format if not re.match('^webhooks/[0-9]+/[a-zA-Z0-9_-]+$', endpoint): raise AirflowException('Expected Discord webhook endpoint in the form ' 'of "webhooks/{webhook.id}/{webhook.token}".') return endpoint
airflow/contrib/hooks/discord_webhook_hook.py
apache/airflow
DiscordWebhookHook._build_discord_payload
def _build_discord_payload(self): payload = {} if self.username: payload['username'] = self.username if self.avatar_url: payload['avatar_url'] = self.avatar_url payload['tts'] = self.tts if len(self.message) <= 2000: payload['content'] = self.message else: raise AirflowException('Discord message length must be 2000 or fewer ' 'characters.') return json.dumps(payload)
Construct the Discord JSON payload. All relevant parameters are combined here to a valid Discord JSON payload.
def _build_discord_payload(self): """ Construct the Discord JSON payload. All relevant parameters are combined here to a valid Discord JSON payload. :return: Discord payload (str) to send """ payload = {} if self.username: payload['username'] = self.username if self.avatar_url: payload['avatar_url'] = self.avatar_url payload['tts'] = self.tts if len(self.message) <= 2000: payload['content'] = self.message else: raise AirflowException('Discord message length must be 2000 or fewer ' 'characters.') return json.dumps(payload)
airflow/contrib/hooks/discord_webhook_hook.py
apache/airflow
DiscordWebhookHook.execute
def execute(self): proxies = {} if self.proxy: proxies = {'https': self.proxy} discord_payload = self._build_discord_payload() self.run(endpoint=self.webhook_endpoint, data=discord_payload, headers={'Content-type': 'application/json'}, extra_options={'proxies': proxies})
Execute the Discord webhook call
def execute(self): """ Execute the Discord webhook call """ proxies = {} if self.proxy: # we only need https proxy for Discord proxies = {'https': self.proxy} discord_payload = self._build_discord_payload() self.run(endpoint=self.webhook_endpoint, data=discord_payload, headers={'Content-type': 'application/json'}, extra_options={'proxies': proxies})
airflow/contrib/hooks/discord_webhook_hook.py
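A hedged end-to-end sketch of the webhook hook above; the connection id, webhook id, and token are placeholders, and the referenced HTTP connection is assumed to point at the Discord API base URL.

    from airflow.contrib.hooks.discord_webhook_hook import DiscordWebhookHook

    hook = DiscordWebhookHook(
        http_conn_id='discord_default',
        webhook_endpoint='webhooks/11111/abc-token',  # placeholder id/token
        message='Nightly ETL finished.',
        username='airflow-bot',
    )
    hook.execute()  # builds the JSON payload and POSTs it to the endpoint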
apache/airflow
GoogleCloudKMSHook.encrypt
def encrypt(self, key_name, plaintext, authenticated_data=None): keys = self.get_conn().projects().locations().keyRings().cryptoKeys() body = {'plaintext': _b64encode(plaintext)} if authenticated_data: body['additionalAuthenticatedData'] = _b64encode(authenticated_data) request = keys.encrypt(name=key_name, body=body) response = request.execute(num_retries=self.num_retries) ciphertext = response['ciphertext'] return ciphertext
Encrypts a plaintext message using Google Cloud KMS.
def encrypt(self, key_name, plaintext, authenticated_data=None): """ Encrypts a plaintext message using Google Cloud KMS. :param key_name: The Resource Name for the key (or key version) to be used for encryption. Of the form ``projects/*/locations/*/keyRings/*/cryptoKeys/**`` :type key_name: str :param plaintext: The message to be encrypted. :type plaintext: bytes :param authenticated_data: Optional additional authenticated data that must also be provided to decrypt the message. :type authenticated_data: bytes :return: The base 64 encoded ciphertext of the original message. :rtype: str """ keys = self.get_conn().projects().locations().keyRings().cryptoKeys() body = {'plaintext': _b64encode(plaintext)} if authenticated_data: body['additionalAuthenticatedData'] = _b64encode(authenticated_data) request = keys.encrypt(name=key_name, body=body) response = request.execute(num_retries=self.num_retries) ciphertext = response['ciphertext'] return ciphertext
airflow/contrib/hooks/gcp_kms_hook.py
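Illustrative use of the KMS helper above; the project, key ring, and key names are placeholders, and the plaintext must be bytes.

    from airflow.contrib.hooks.gcp_kms_hook import GoogleCloudKMSHook

    hook = GoogleCloudKMSHook(gcp_conn_id='google_cloud_default')
    key_name = ('projects/example-project/locations/global/'
                'keyRings/example-ring/cryptoKeys/example-key')
    ciphertext = hook.encrypt(key_name, b'super secret value')
    print(ciphertext)  # base64-encoded string; decrypt with the matching KMS key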
apache/airflow
SqoopHook.import_table
def import_table(self, table, target_dir=None, append=False, file_type="text", columns=None, split_by=None, where=None, direct=False, driver=None, extra_import_options=None): cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options) cmd += ["--table", table] if columns: cmd += ["--columns", columns] if where: cmd += ["--where", where] self.Popen(cmd)
Imports table from remote location to target dir. Arguments are copies of direct sqoop command line arguments
def import_table(self, table, target_dir=None, append=False, file_type="text", columns=None, split_by=None, where=None, direct=False, driver=None, extra_import_options=None): """ Imports table from remote location to target dir. Arguments are copies of direct sqoop command line arguments :param table: Table to read :param target_dir: HDFS destination dir :param append: Append data to an existing dataset in HDFS :param file_type: "avro", "sequence", "text" or "parquet". Imports data into the specified format. Defaults to text. :param columns: <col,col,col…> Columns to import from table :param split_by: Column of the table used to split work units :param where: WHERE clause to use during import :param direct: Use direct connector if exists for the database :param driver: Manually specify JDBC driver class to use :param extra_import_options: Extra import options to pass as dict. If a key doesn't have a value, just pass an empty string to it. Don't include prefix of -- for sqoop options. """ cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options) cmd += ["--table", table] if columns: cmd += ["--columns", columns] if where: cmd += ["--where", where] self.Popen(cmd)
airflow/contrib/hooks/sqoop_hook.py
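A sketch of driving the import above; the connection id, source table, HDFS path, and extra options are made up, and the hook is assumed to accept a conn_id keyword pointing at a Sqoop connection.

    from airflow.contrib.hooks.sqoop_hook import SqoopHook

    hook = SqoopHook(conn_id='sqoop_default')
    hook.import_table(
        table='customers',                      # placeholder source table
        target_dir='/user/airflow/customers',   # placeholder HDFS destination
        file_type='parquet',
        split_by='id',
        extra_import_options={'fetch-size': '1000'},
    )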
apache/airflow
SqoopHook.import_query
def import_query(self, query, target_dir, append=False, file_type="text", split_by=None, direct=None, driver=None, extra_import_options=None): cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options) cmd += ["--query", query] self.Popen(cmd)
Imports a specific query from the rdbms to hdfs
def import_query(self, query, target_dir, append=False, file_type="text", split_by=None, direct=None, driver=None, extra_import_options=None): """ Imports a specific query from the rdbms to hdfs :param query: Free format query to run :param target_dir: HDFS destination dir :param append: Append data to an existing dataset in HDFS :param file_type: "avro", "sequence", "text" or "parquet" Imports data to hdfs into the specified format. Defaults to text. :param split_by: Column of the table used to split work units :param direct: Use direct import fast path :param driver: Manually specify JDBC driver class to use :param extra_import_options: Extra import options to pass as dict. If a key doesn't have a value, just pass an empty string to it. Don't include prefix of -- for sqoop options. """ cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options) cmd += ["--query", query] self.Popen(cmd)
airflow/contrib/hooks/sqoop_hook.py
apache/airflow
SqoopHook.export_table
def export_table(self, table, export_dir, input_null_string, input_null_non_string, staging_table, clear_staging_table, enclosed_by, escaped_by, input_fields_terminated_by, input_lines_terminated_by, input_optionally_enclosed_by, batch, relaxed_isolation, extra_export_options=None): cmd = self._export_cmd(table, export_dir, input_null_string, input_null_non_string, staging_table, clear_staging_table, enclosed_by, escaped_by, input_fields_terminated_by, input_lines_terminated_by, input_optionally_enclosed_by, batch, relaxed_isolation, extra_export_options) self.Popen(cmd)
Exports Hive table to remote location. Arguments are copies of direct sqoop command line Arguments
def export_table(self, table, export_dir, input_null_string, input_null_non_string, staging_table, clear_staging_table, enclosed_by, escaped_by, input_fields_terminated_by, input_lines_terminated_by, input_optionally_enclosed_by, batch, relaxed_isolation, extra_export_options=None): """ Exports Hive table to remote location. Arguments are copies of direct sqoop command line Arguments :param table: Table remote destination :param export_dir: Hive table to export :param input_null_string: The string to be interpreted as null for string columns :param input_null_non_string: The string to be interpreted as null for non-string columns :param staging_table: The table in which data will be staged before being inserted into the destination table :param clear_staging_table: Indicate that any data present in the staging table can be deleted :param enclosed_by: Sets a required field enclosing character :param escaped_by: Sets the escape character :param input_fields_terminated_by: Sets the field separator character :param input_lines_terminated_by: Sets the end-of-line character :param input_optionally_enclosed_by: Sets a field enclosing character :param batch: Use batch mode for underlying statement execution :param relaxed_isolation: Transaction isolation to read uncommitted for the mappers :param extra_export_options: Extra export options to pass as dict. If a key doesn't have a value, just pass an empty string to it. Don't include prefix of -- for sqoop options. """ cmd = self._export_cmd(table, export_dir, input_null_string, input_null_non_string, staging_table, clear_staging_table, enclosed_by, escaped_by, input_fields_terminated_by, input_lines_terminated_by, input_optionally_enclosed_by, batch, relaxed_isolation, extra_export_options) self.Popen(cmd)
airflow/contrib/hooks/sqoop_hook.py
apache/airflow
GCPTextToSpeechHook.get_conn
def get_conn(self): if not self._client: self._client = TextToSpeechClient(credentials=self._get_credentials()) return self._client
Retrieves connection to Cloud Text to Speech.
def get_conn(self): """ Retrieves connection to Cloud Text to Speech. :return: Google Cloud Text to Speech client object. :rtype: google.cloud.texttospeech_v1.TextToSpeechClient """ if not self._client: self._client = TextToSpeechClient(credentials=self._get_credentials()) return self._client
airflow/contrib/hooks/gcp_text_to_speech_hook.py
apache/airflow
GCPTextToSpeechHook.synthesize_speech
def synthesize_speech(self, input_data, voice, audio_config, retry=None, timeout=None): client = self.get_conn() self.log.info("Synthesizing input: %s" % input_data) return client.synthesize_speech( input_=input_data, voice=voice, audio_config=audio_config, retry=retry, timeout=timeout )
Synthesizes text input
def synthesize_speech(self, input_data, voice, audio_config, retry=None, timeout=None): """ Synthesizes text input :param input_data: text input to be synthesized. See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesisInput :type input_data: dict or google.cloud.texttospeech_v1.types.SynthesisInput :param voice: configuration of voice to be used in synthesis. See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.VoiceSelectionParams :type voice: dict or google.cloud.texttospeech_v1.types.VoiceSelectionParams :param audio_config: configuration of the synthesized audio. See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.AudioConfig :type audio_config: dict or google.cloud.texttospeech_v1.types.AudioConfig :return: SynthesizeSpeechResponse See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesizeSpeechResponse :rtype: object :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float """ client = self.get_conn() self.log.info("Synthesizing input: %s" % input_data) return client.synthesize_speech( input_=input_data, voice=voice, audio_config=audio_config, retry=retry, timeout=timeout )
airflow/contrib/hooks/gcp_text_to_speech_hook.py
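A hedged sketch of calling the synthesis helper above with plain dicts in place of the proto types; the voice and audio settings are illustrative.

    from airflow.contrib.hooks.gcp_text_to_speech_hook import GCPTextToSpeechHook

    hook = GCPTextToSpeechHook(gcp_conn_id='google_cloud_default')
    response = hook.synthesize_speech(
        input_data={'text': 'Your DAG finished successfully.'},
        voice={'language_code': 'en-US', 'ssml_gender': 'FEMALE'},
        audio_config={'audio_encoding': 'LINEAR16'},
    )
    with open('/tmp/announcement.wav', 'wb') as out:
        out.write(response.audio_content)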
apache/airflow
S3TaskHandler.close
def close(self): if self.closed: return super().close() if not self.upload_on_close: return local_loc = os.path.join(self.local_base, self.log_relative_path) remote_loc = os.path.join(self.remote_base, self.log_relative_path) if os.path.exists(local_loc): with open(local_loc, 'r') as logfile: log = logfile.read() self.s3_write(log, remote_loc) self.closed = True
Close and upload local log file to remote storage S3.
def close(self): """ Close and upload local log file to remote storage S3. """ # When application exit, system shuts down all handlers by # calling close method. Here we check if logger is already # closed to prevent uploading the log to remote storage multiple # times when `logging.shutdown` is called. if self.closed: return super().close() if not self.upload_on_close: return local_loc = os.path.join(self.local_base, self.log_relative_path) remote_loc = os.path.join(self.remote_base, self.log_relative_path) if os.path.exists(local_loc): # read log and remove old logs to get just the latest additions with open(local_loc, 'r') as logfile: log = logfile.read() self.s3_write(log, remote_loc) # Mark closed so we don't double write if close is called twice self.closed = True
airflow/utils/log/s3_task_handler.py
apache/airflow
WorkerConfiguration._get_init_containers
def _get_init_containers(self): if self.kube_config.dags_volume_claim or \ self.kube_config.dags_volume_host or self.kube_config.dags_in_image: return [] init_environment = [{ 'name': 'GIT_SYNC_REPO', 'value': self.kube_config.git_repo }, { 'name': 'GIT_SYNC_BRANCH', 'value': self.kube_config.git_branch }, { 'name': 'GIT_SYNC_ROOT', 'value': self.kube_config.git_sync_root }, { 'name': 'GIT_SYNC_DEST', 'value': self.kube_config.git_sync_dest }, { 'name': 'GIT_SYNC_DEPTH', 'value': '1' }, { 'name': 'GIT_SYNC_ONE_TIME', 'value': 'true' }] if self.kube_config.git_user: init_environment.append({ 'name': 'GIT_SYNC_USERNAME', 'value': self.kube_config.git_user }) if self.kube_config.git_password: init_environment.append({ 'name': 'GIT_SYNC_PASSWORD', 'value': self.kube_config.git_password }) volume_mounts = [{ 'mountPath': self.kube_config.git_sync_root, 'name': self.dags_volume_name, 'readOnly': False }] if self.kube_config.git_ssh_key_secret_name: volume_mounts.append({ 'name': self.git_sync_ssh_secret_volume_name, 'mountPath': '/etc/git-secret/ssh', 'subPath': 'ssh' }) init_environment.extend([ { 'name': 'GIT_SSH_KEY_FILE', 'value': '/etc/git-secret/ssh' }, { 'name': 'GIT_SYNC_SSH', 'value': 'true' }]) if self.kube_config.git_ssh_known_hosts_configmap_name: volume_mounts.append({ 'name': self.git_sync_ssh_known_hosts_volume_name, 'mountPath': '/etc/git-secret/known_hosts', 'subPath': 'known_hosts' }) init_environment.extend([ { 'name': 'GIT_KNOWN_HOSTS', 'value': 'true' }, { 'name': 'GIT_SSH_KNOWN_HOSTS_FILE', 'value': '/etc/git-secret/known_hosts' } ]) else: init_environment.append({ 'name': 'GIT_KNOWN_HOSTS', 'value': 'false' }) return [{ 'name': self.kube_config.git_sync_init_container_name, 'image': self.kube_config.git_sync_container, 'securityContext': {'runAsUser': 65533}, 'env': init_environment, 'volumeMounts': volume_mounts }]
When using git to retrieve the DAGs, use the GitSync Init Container
def _get_init_containers(self): """When using git to retrieve the DAGs, use the GitSync Init Container""" # If we're using volume claims to mount the dags, no init container is needed if self.kube_config.dags_volume_claim or \ self.kube_config.dags_volume_host or self.kube_config.dags_in_image: return [] # Otherwise, define a git-sync init container init_environment = [{ 'name': 'GIT_SYNC_REPO', 'value': self.kube_config.git_repo }, { 'name': 'GIT_SYNC_BRANCH', 'value': self.kube_config.git_branch }, { 'name': 'GIT_SYNC_ROOT', 'value': self.kube_config.git_sync_root }, { 'name': 'GIT_SYNC_DEST', 'value': self.kube_config.git_sync_dest }, { 'name': 'GIT_SYNC_DEPTH', 'value': '1' }, { 'name': 'GIT_SYNC_ONE_TIME', 'value': 'true' }] if self.kube_config.git_user: init_environment.append({ 'name': 'GIT_SYNC_USERNAME', 'value': self.kube_config.git_user }) if self.kube_config.git_password: init_environment.append({ 'name': 'GIT_SYNC_PASSWORD', 'value': self.kube_config.git_password }) volume_mounts = [{ 'mountPath': self.kube_config.git_sync_root, 'name': self.dags_volume_name, 'readOnly': False }] if self.kube_config.git_ssh_key_secret_name: volume_mounts.append({ 'name': self.git_sync_ssh_secret_volume_name, 'mountPath': '/etc/git-secret/ssh', 'subPath': 'ssh' }) init_environment.extend([ { 'name': 'GIT_SSH_KEY_FILE', 'value': '/etc/git-secret/ssh' }, { 'name': 'GIT_SYNC_SSH', 'value': 'true' }]) if self.kube_config.git_ssh_known_hosts_configmap_name: volume_mounts.append({ 'name': self.git_sync_ssh_known_hosts_volume_name, 'mountPath': '/etc/git-secret/known_hosts', 'subPath': 'known_hosts' }) init_environment.extend([ { 'name': 'GIT_KNOWN_HOSTS', 'value': 'true' }, { 'name': 'GIT_SSH_KNOWN_HOSTS_FILE', 'value': '/etc/git-secret/known_hosts' } ]) else: init_environment.append({ 'name': 'GIT_KNOWN_HOSTS', 'value': 'false' }) return [{ 'name': self.kube_config.git_sync_init_container_name, 'image': self.kube_config.git_sync_container, 'securityContext': {'runAsUser': 65533}, # git-sync user 'env': init_environment, 'volumeMounts': volume_mounts }]
airflow/contrib/kubernetes/worker_configuration.py
apache/airflow
WorkerConfiguration._get_environment
def _get_environment(self): env = {} for env_var_name, env_var_val in six.iteritems(self.kube_config.kube_env_vars): env[env_var_name] = env_var_val env["AIRFLOW__CORE__EXECUTOR"] = "LocalExecutor" if self.kube_config.airflow_configmap: env['AIRFLOW_HOME'] = self.worker_airflow_home env['AIRFLOW__CORE__DAGS_FOLDER'] = self.worker_airflow_dags if (not self.kube_config.airflow_configmap and 'AIRFLOW__CORE__SQL_ALCHEMY_CONN' not in self.kube_config.kube_secrets): env['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = conf.get("core", "SQL_ALCHEMY_CONN") if self.kube_config.git_dags_folder_mount_point: dag_volume_mount_path = os.path.join( self.kube_config.git_dags_folder_mount_point, self.kube_config.git_sync_dest, self.kube_config.git_subpath ) env['AIRFLOW__CORE__DAGS_FOLDER'] = dag_volume_mount_path return env
Defines any necessary environment variables for the pod executor
def _get_environment(self): """Defines any necessary environment variables for the pod executor""" env = {} for env_var_name, env_var_val in six.iteritems(self.kube_config.kube_env_vars): env[env_var_name] = env_var_val env["AIRFLOW__CORE__EXECUTOR"] = "LocalExecutor" if self.kube_config.airflow_configmap: env['AIRFLOW_HOME'] = self.worker_airflow_home env['AIRFLOW__CORE__DAGS_FOLDER'] = self.worker_airflow_dags if (not self.kube_config.airflow_configmap and 'AIRFLOW__CORE__SQL_ALCHEMY_CONN' not in self.kube_config.kube_secrets): env['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = conf.get("core", "SQL_ALCHEMY_CONN") if self.kube_config.git_dags_folder_mount_point: # /root/airflow/dags/repo/dags dag_volume_mount_path = os.path.join( self.kube_config.git_dags_folder_mount_point, self.kube_config.git_sync_dest, # repo self.kube_config.git_subpath # dags ) env['AIRFLOW__CORE__DAGS_FOLDER'] = dag_volume_mount_path return env
airflow/contrib/kubernetes/worker_configuration.py
apache/airflow
WorkerConfiguration._get_secrets
def _get_secrets(self): worker_secrets = [] for env_var_name, obj_key_pair in six.iteritems(self.kube_config.kube_secrets): k8s_secret_obj, k8s_secret_key = obj_key_pair.split('=') worker_secrets.append( Secret('env', env_var_name, k8s_secret_obj, k8s_secret_key) ) if self.kube_config.env_from_secret_ref: for secret_ref in self.kube_config.env_from_secret_ref.split(','): worker_secrets.append( Secret('env', None, secret_ref) ) return worker_secrets
Defines any necessary secrets for the pod executor
def _get_secrets(self): """Defines any necessary secrets for the pod executor""" worker_secrets = [] for env_var_name, obj_key_pair in six.iteritems(self.kube_config.kube_secrets): k8s_secret_obj, k8s_secret_key = obj_key_pair.split('=') worker_secrets.append( Secret('env', env_var_name, k8s_secret_obj, k8s_secret_key) ) if self.kube_config.env_from_secret_ref: for secret_ref in self.kube_config.env_from_secret_ref.split(','): worker_secrets.append( Secret('env', None, secret_ref) ) return worker_secrets
airflow/contrib/kubernetes/worker_configuration.py
apache/airflow
WorkerConfiguration._get_security_context
def _get_security_context(self): security_context = {} if self.kube_config.worker_run_as_user: security_context['runAsUser'] = self.kube_config.worker_run_as_user if self.kube_config.worker_fs_group: security_context['fsGroup'] = self.kube_config.worker_fs_group if self.kube_config.git_ssh_key_secret_name and security_context.get('fsGroup') is None: security_context['fsGroup'] = 65533 return security_context
Defines the security context
def _get_security_context(self): """Defines the security context""" security_context = {} if self.kube_config.worker_run_as_user: security_context['runAsUser'] = self.kube_config.worker_run_as_user if self.kube_config.worker_fs_group: security_context['fsGroup'] = self.kube_config.worker_fs_group # set fs_group to 65533 if not explicitly specified and using git ssh keypair auth if self.kube_config.git_ssh_key_secret_name and security_context.get('fsGroup') is None: security_context['fsGroup'] = 65533 return security_context
airflow/contrib/kubernetes/worker_configuration.py
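The fsGroup fallback matters because the mounted SSH key must be readable by the git-sync user (UID 65533). A detached sketch of the same decision logic, independent of kube_config:

def build_security_context(run_as_user=None, fs_group=None, uses_git_ssh_key=False):
    security_context = {}
    if run_as_user:
        security_context['runAsUser'] = run_as_user
    if fs_group:
        security_context['fsGroup'] = fs_group
    # default fsGroup so the git-sync user can read the mounted SSH key
    if uses_git_ssh_key and security_context.get('fsGroup') is None:
        security_context['fsGroup'] = 65533
    return security_context

print(build_security_context(run_as_user=50000, uses_git_ssh_key=True))
# {'runAsUser': 50000, 'fsGroup': 65533}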
apache/airflow
QuboleHook.get_extra_links
def get_extra_links(self, operator, dttm): conn = BaseHook.get_connection(operator.kwargs['qubole_conn_id']) if conn and conn.host: host = re.sub(r'api$', 'v2/analyze?command_id=', conn.host) else: host = 'https://api.qubole.com/v2/analyze?command_id=' ti = TaskInstance(task=operator, execution_date=dttm) qds_command_id = ti.xcom_pull(task_ids=operator.task_id, key='qbol_cmd_id') url = host + str(qds_command_id) if qds_command_id else '' return url
Get a link to the Qubole command result page.
def get_extra_links(self, operator, dttm): """ Get link to qubole command result page. :param operator: operator :param dttm: datetime :return: url link """ conn = BaseHook.get_connection(operator.kwargs['qubole_conn_id']) if conn and conn.host: host = re.sub(r'api$', 'v2/analyze?command_id=', conn.host) else: host = 'https://api.qubole.com/v2/analyze?command_id=' ti = TaskInstance(task=operator, execution_date=dttm) qds_command_id = ti.xcom_pull(task_ids=operator.task_id, key='qbol_cmd_id') url = host + str(qds_command_id) if qds_command_id else '' return url
airflow/contrib/hooks/qubole_hook.py
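The interesting part of `get_extra_links` is rewriting a connection host that ends in `api` into the analyze URL. A standalone sketch of just that rewrite (the example hosts and command id are made up):

import re

def qubole_result_url(conn_host, command_id):
    if conn_host:
        host = re.sub(r'api$', 'v2/analyze?command_id=', conn_host)
    else:
        host = 'https://api.qubole.com/v2/analyze?command_id='
    return host + str(command_id) if command_id else ''

print(qubole_result_url('https://us.qubole.com/api', 12345))
# https://us.qubole.com/v2/analyze?command_id=12345
print(qubole_result_url(None, 12345))
# https://api.qubole.com/v2/analyze?command_id=12345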
apache/airflow
DagFileProcessor._launch_process
def _launch_process(result_queue, file_path, pickle_dags, dag_id_white_list, thread_name, zombies): def helper(): log = logging.getLogger("airflow.processor") stdout = StreamLogWriter(log, logging.INFO) stderr = StreamLogWriter(log, logging.WARN) set_context(log, file_path) try: sys.stdout = stdout sys.stderr = stderr settings.configure_orm() threading.current_thread().name = thread_name start_time = time.time() log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path) scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log) result = scheduler_job.process_file(file_path, zombies, pickle_dags) result_queue.put(result) end_time = time.time() log.info( "Processing %s took %.3f seconds", file_path, end_time - start_time ) except Exception: log.exception("Got an exception! Propagating...") raise finally: sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ settings.dispose_orm() p = multiprocessing.Process(target=helper, args=(), name="{}-Process".format(thread_name)) p.start() return p
Launch a process to process the given file.
def _launch_process(result_queue, file_path, pickle_dags, dag_id_white_list, thread_name, zombies): """ Launch a process to process the given file. :param result_queue: the queue to use for passing back the result :type result_queue: multiprocessing.Queue :param file_path: the file to process :type file_path: unicode :param pickle_dags: whether to pickle the DAGs found in the file and save them to the DB :type pickle_dags: bool :param dag_id_white_list: if specified, only examine DAG ID's that are in this list :type dag_id_white_list: list[unicode] :param thread_name: the name to use for the process that is launched :type thread_name: unicode :return: the process that was launched :rtype: multiprocessing.Process :param zombies: zombie task instances to kill :type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance] """ def helper(): # This helper runs in the newly created process log = logging.getLogger("airflow.processor") stdout = StreamLogWriter(log, logging.INFO) stderr = StreamLogWriter(log, logging.WARN) set_context(log, file_path) try: # redirect stdout/stderr to log sys.stdout = stdout sys.stderr = stderr # Re-configure the ORM engine as there are issues with multiple processes settings.configure_orm() # Change the thread name to differentiate log lines. This is # really a separate process, but changing the name of the # process doesn't work, so changing the thread name instead. threading.current_thread().name = thread_name start_time = time.time() log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path) scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log) result = scheduler_job.process_file(file_path, zombies, pickle_dags) result_queue.put(result) end_time = time.time() log.info( "Processing %s took %.3f seconds", file_path, end_time - start_time ) except Exception: # Log exceptions through the logging framework. log.exception("Got an exception! Propagating...") raise finally: sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ # We re-initialized the ORM within this Process above so we need to # tear it down manually here settings.dispose_orm() p = multiprocessing.Process(target=helper, args=(), name="{}-Process".format(thread_name)) p.start() return p
airflow/jobs.py
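Stripped of the Airflow-specific pieces (StreamLogWriter, ORM re-configuration, SchedulerJob), the launch pattern above is a child process that does the work and hands its result back through a queue. A minimal, hypothetical version of that pattern:

import logging
import multiprocessing
import os
import time

logging.basicConfig(level=logging.INFO)

def helper(result_queue, file_path):
    log = logging.getLogger('example.processor')
    start_time = time.time()
    log.info('Started process (PID=%s) to work on %s', os.getpid(), file_path)
    result = ['fake-simple-dag']  # stand-in for the real process_file(...) result
    result_queue.put(result)
    log.info('Processing %s took %.3f seconds', file_path, time.time() - start_time)

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    p = multiprocessing.Process(target=helper, args=(queue, '/dags/example.py'),
                                name='DagFileProcessor0-Process')
    p.start()
    print(queue.get())  # blocks until the child pushes its result
    p.join()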
apache/airflow
DagFileProcessor.start
def start(self): self._process = DagFileProcessor._launch_process( self._result_queue, self.file_path, self._pickle_dags, self._dag_id_white_list, "DagFileProcessor{}".format(self._instance_id), self._zombies) self._start_time = timezone.utcnow()
Launch the process and start processing the DAG.
def start(self): """ Launch the process and start processing the DAG. """ self._process = DagFileProcessor._launch_process( self._result_queue, self.file_path, self._pickle_dags, self._dag_id_white_list, "DagFileProcessor{}".format(self._instance_id), self._zombies) self._start_time = timezone.utcnow()
airflow/jobs.py
apache/airflow
DagFileProcessor.done
def done(self): if self._process is None: raise AirflowException("Tried to see if it's done before starting!") if self._done: return True if self._result_queue and not self._result_queue.empty(): self._result = self._result_queue.get_nowait() self._done = True self.log.debug("Waiting for %s", self._process) self._process.join() return True if self._result_queue and not self._process.is_alive(): self._done = True if not self._result_queue.empty(): self._result = self._result_queue.get_nowait() self.log.debug("Waiting for %s", self._process) self._process.join() return True return False
Check if the process launched to process this file is done.
def done(self): """ Check if the process launched to process this file is done. :return: whether the process is finished running :rtype: bool """ if self._process is None: raise AirflowException("Tried to see if it's done before starting!") if self._done: return True # In case result queue is corrupted. if self._result_queue and not self._result_queue.empty(): self._result = self._result_queue.get_nowait() self._done = True self.log.debug("Waiting for %s", self._process) self._process.join() return True # Potential error case when process dies if self._result_queue and not self._process.is_alive(): self._done = True # Get the object from the queue or else join() can hang. if not self._result_queue.empty(): self._result = self._result_queue.get_nowait() self.log.debug("Waiting for %s", self._process) self._process.join() return True return False
airflow/jobs.py
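The `done()` poll has to drain the result queue before joining, otherwise `join()` can hang waiting on the queue's feeder thread. A generic sketch of the same check for any child process and result queue:

def poll_done(process, result_queue, state):
    # `state` is a mutable dict carrying 'done' / 'result' between polls.
    if state.get('done'):
        return True
    if not result_queue.empty():
        state['result'] = result_queue.get_nowait()
        state['done'] = True
        process.join()
        return True
    if not process.is_alive():
        state['done'] = True
        if not result_queue.empty():  # drain before join() to avoid hanging
            state['result'] = result_queue.get_nowait()
        process.join()
        return True
    return False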
apache/airflow
SchedulerJob._exit_gracefully
def _exit_gracefully(self, signum, frame): self.log.info("Exiting gracefully upon receiving signal %s", signum) if self.processor_agent: self.processor_agent.end() sys.exit(os.EX_OK)
Helper method to clean up processor_agent to avoid leaving orphan processes.
def _exit_gracefully(self, signum, frame): """ Helper method to clean up processor_agent to avoid leaving orphan processes. """ self.log.info("Exiting gracefully upon receiving signal %s", signum) if self.processor_agent: self.processor_agent.end() sys.exit(os.EX_OK)
airflow/jobs.py
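`_exit_gracefully` is only useful once it is registered as a signal handler. A small sketch of that wiring (the real scheduler also shuts down its DAG-processing agent before exiting):

import os
import signal
import sys

def _exit_gracefully(signum, frame):
    print('Exiting gracefully upon receiving signal %s' % signum)
    sys.exit(os.EX_OK)  # os.EX_OK is available on Unix

signal.signal(signal.SIGINT, _exit_gracefully)
signal.signal(signal.SIGTERM, _exit_gracefully)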
apache/airflow
SchedulerJob._process_task_instances
def _process_task_instances(self, dag, queue, session=None): dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session) active_dag_runs = [] for run in dag_runs: self.log.info("Examining DAG run %s", run) if run.execution_date > timezone.utcnow(): self.log.error( "Execution date is in future: %s", run.execution_date ) continue if len(active_dag_runs) >= dag.max_active_runs: self.log.info("Number of active dag runs reached max_active_run.") break if run.is_backfill: continue run.dag = dag run.verify_integrity(session=session) run.update_state(session=session) if run.state == State.RUNNING: make_transient(run) active_dag_runs.append(run) for run in active_dag_runs: self.log.debug("Examining active DAG run: %s", run) tis = run.get_task_instances(state=(State.NONE, State.UP_FOR_RETRY, State.UP_FOR_RESCHEDULE)) for ti in tis: task = dag.get_task(ti.task_id) ti.task = task if ti.are_dependencies_met( dep_context=DepContext(flag_upstream_failed=True), session=session): self.log.debug('Queuing task: %s', ti) queue.append(ti.key)
This method schedules the tasks for a single DAG by looking at the active DAG runs and adding task instances that should run to the queue.
def _process_task_instances(self, dag, queue, session=None): """ This method schedules the tasks for a single DAG by looking at the active DAG runs and adding task instances that should run to the queue. """ # update the state of the previously active dag runs dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session) active_dag_runs = [] for run in dag_runs: self.log.info("Examining DAG run %s", run) # don't consider runs that are executed in the future if run.execution_date > timezone.utcnow(): self.log.error( "Execution date is in future: %s", run.execution_date ) continue if len(active_dag_runs) >= dag.max_active_runs: self.log.info("Number of active dag runs reached max_active_run.") break # skip backfill dagruns for now as long as they are not really scheduled if run.is_backfill: continue # todo: run.dag is transient but needs to be set run.dag = dag # todo: preferably the integrity check happens at dag collection time run.verify_integrity(session=session) run.update_state(session=session) if run.state == State.RUNNING: make_transient(run) active_dag_runs.append(run) for run in active_dag_runs: self.log.debug("Examining active DAG run: %s", run) # this needs a fresh session sometimes tis get detached tis = run.get_task_instances(state=(State.NONE, State.UP_FOR_RETRY, State.UP_FOR_RESCHEDULE)) # this loop is quite slow as it uses are_dependencies_met for # every task (in ti.is_runnable). This is also called in # update_state above which has already checked these tasks for ti in tis: task = dag.get_task(ti.task_id) # fixme: ti.task is transient but needs to be set ti.task = task if ti.are_dependencies_met( dep_context=DepContext(flag_upstream_failed=True), session=session): self.log.debug('Queuing task: %s', ti) queue.append(ti.key)
airflow/jobs.py
apache/airflow
SchedulerJob.__get_concurrency_maps
def __get_concurrency_maps(self, states, session=None): TI = models.TaskInstance ti_concurrency_query = ( session .query(TI.task_id, TI.dag_id, func.count('*')) .filter(TI.state.in_(states)) .group_by(TI.task_id, TI.dag_id) ).all() dag_map = defaultdict(int) task_map = defaultdict(int) for result in ti_concurrency_query: task_id, dag_id, count = result dag_map[dag_id] += count task_map[(dag_id, task_id)] = count return dag_map, task_map
Get the concurrency maps.
def __get_concurrency_maps(self, states, session=None): """ Get the concurrency maps. :param states: List of states to query for :type states: list[airflow.utils.state.State] :return: A map from (dag_id, task_id) to # of task instances and a map from (dag_id, task_id) to # of task instances in the given state list :rtype: dict[tuple[str, str], int] """ TI = models.TaskInstance ti_concurrency_query = ( session .query(TI.task_id, TI.dag_id, func.count('*')) .filter(TI.state.in_(states)) .group_by(TI.task_id, TI.dag_id) ).all() dag_map = defaultdict(int) task_map = defaultdict(int) for result in ti_concurrency_query: task_id, dag_id, count = result dag_map[dag_id] += count task_map[(dag_id, task_id)] = count return dag_map, task_map
airflow/jobs.py
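The query above groups task instances by (task_id, dag_id) and counts them; the two maps are then built in Python. A sketch with fabricated rows standing in for the GROUP BY result:

from collections import defaultdict

# Fabricated (task_id, dag_id, count) rows, as the grouped query would return.
rows = [
    ('extract', 'etl_dag', 3),
    ('load', 'etl_dag', 1),
    ('train', 'ml_dag', 2),
]

dag_map = defaultdict(int)    # dag_id -> number of TIs in the given states
task_map = defaultdict(int)   # (dag_id, task_id) -> number of TIs
for task_id, dag_id, count in rows:
    dag_map[dag_id] += count
    task_map[(dag_id, task_id)] = count

print(dag_map['etl_dag'])                # 4
print(task_map[('etl_dag', 'extract')])  # 3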
apache/airflow
SchedulerJob._change_state_for_executable_task_instances
def _change_state_for_executable_task_instances(self, task_instances, acceptable_states, session=None): if len(task_instances) == 0: session.commit() return [] TI = models.TaskInstance filter_for_ti_state_change = ( [and_( TI.dag_id == ti.dag_id, TI.task_id == ti.task_id, TI.execution_date == ti.execution_date) for ti in task_instances]) ti_query = ( session .query(TI) .filter(or_(*filter_for_ti_state_change))) if None in acceptable_states: ti_query = ti_query.filter( or_(TI.state == None, TI.state.in_(acceptable_states)) ) else: ti_query = ti_query.filter(TI.state.in_(acceptable_states)) tis_to_set_to_queued = ( ti_query .with_for_update() .all()) if len(tis_to_set_to_queued) == 0: self.log.info("No tasks were able to have their state changed to queued.") session.commit() return [] for task_instance in tis_to_set_to_queued: task_instance.state = State.QUEUED task_instance.queued_dttm = (timezone.utcnow() if not task_instance.queued_dttm else task_instance.queued_dttm) session.merge(task_instance) simple_task_instances = [SimpleTaskInstance(ti) for ti in tis_to_set_to_queued] task_instance_str = "\n\t".join( [repr(x) for x in tis_to_set_to_queued]) session.commit() self.log.info("Setting the following %s tasks to queued state:\n\t%s", len(tis_to_set_to_queued), task_instance_str) return simple_task_instances
Atomically changes the state of the listed task instances that are in one of the given states to QUEUED, and returns the changed TIs in SimpleTaskInstance format.
def _change_state_for_executable_task_instances(self, task_instances, acceptable_states, session=None): """ Changes the state of task instances in the list with one of the given states to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format. :param task_instances: TaskInstances to change the state of :type task_instances: list[airflow.models.TaskInstance] :param acceptable_states: Filters the TaskInstances updated to be in these states :type acceptable_states: Iterable[State] :rtype: list[airflow.utils.dag_processing.SimpleTaskInstance] """ if len(task_instances) == 0: session.commit() return [] TI = models.TaskInstance filter_for_ti_state_change = ( [and_( TI.dag_id == ti.dag_id, TI.task_id == ti.task_id, TI.execution_date == ti.execution_date) for ti in task_instances]) ti_query = ( session .query(TI) .filter(or_(*filter_for_ti_state_change))) if None in acceptable_states: ti_query = ti_query.filter( or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa: E711 ) else: ti_query = ti_query.filter(TI.state.in_(acceptable_states)) tis_to_set_to_queued = ( ti_query .with_for_update() .all()) if len(tis_to_set_to_queued) == 0: self.log.info("No tasks were able to have their state changed to queued.") session.commit() return [] # set TIs to queued state for task_instance in tis_to_set_to_queued: task_instance.state = State.QUEUED task_instance.queued_dttm = (timezone.utcnow() if not task_instance.queued_dttm else task_instance.queued_dttm) session.merge(task_instance) # Generate a list of SimpleTaskInstance for the use of queuing # them in the executor. simple_task_instances = [SimpleTaskInstance(ti) for ti in tis_to_set_to_queued] task_instance_str = "\n\t".join( [repr(x) for x in tis_to_set_to_queued]) session.commit() self.log.info("Setting the following %s tasks to queued state:\n\t%s", len(tis_to_set_to_queued), task_instance_str) return simple_task_instances
airflow/jobs.py
apache/airflow
SchedulerJob._enqueue_task_instances_with_queued_state
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag, simple_task_instances): TI = models.TaskInstance for simple_task_instance in simple_task_instances: simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id) command = TI.generate_command( simple_task_instance.dag_id, simple_task_instance.task_id, simple_task_instance.execution_date, local=True, mark_success=False, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, pool=simple_task_instance.pool, file_path=simple_dag.full_filepath, pickle_id=simple_dag.pickle_id) priority = simple_task_instance.priority_weight queue = simple_task_instance.queue self.log.info( "Sending %s to executor with priority %s and queue %s", simple_task_instance.key, priority, queue ) self.executor.queue_command( simple_task_instance, command, priority=priority, queue=queue)
Takes task_instances, which should have been set to queued, and enqueues them with the executor.
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag, simple_task_instances): """ Takes task_instances, which should have been set to queued, and enqueues them with the executor. :param simple_task_instances: TaskInstances to enqueue :type simple_task_instances: list[SimpleTaskInstance] :param simple_dag_bag: Should contains all of the task_instances' dags :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag """ TI = models.TaskInstance # actually enqueue them for simple_task_instance in simple_task_instances: simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id) command = TI.generate_command( simple_task_instance.dag_id, simple_task_instance.task_id, simple_task_instance.execution_date, local=True, mark_success=False, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, pool=simple_task_instance.pool, file_path=simple_dag.full_filepath, pickle_id=simple_dag.pickle_id) priority = simple_task_instance.priority_weight queue = simple_task_instance.queue self.log.info( "Sending %s to executor with priority %s and queue %s", simple_task_instance.key, priority, queue ) self.executor.queue_command( simple_task_instance, command, priority=priority, queue=queue)
airflow/jobs.py
apache/airflow
SchedulerJob._execute_task_instances
def _execute_task_instances(self, simple_dag_bag, states, session=None): executable_tis = self._find_executable_task_instances(simple_dag_bag, states, session=session) def query(result, items): simple_tis_with_state_changed = \ self._change_state_for_executable_task_instances(items, states, session=session) self._enqueue_task_instances_with_queued_state( simple_dag_bag, simple_tis_with_state_changed) session.commit() return result + len(simple_tis_with_state_changed) return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
Attempts to execute TaskInstances that should be executed by the scheduler, in three steps: pick TIs by priority within state, max_active_runs, and pool constraints; atomically set them to QUEUED; and enqueue them in the executor.
def _execute_task_instances(self, simple_dag_bag, states, session=None): """ Attempts to execute TaskInstances that should be executed by the scheduler. There are three steps: 1. Pick TIs by priority with the constraint that they are in the expected states and that we do exceed max_active_runs or pool limits. 2. Change the state for the TIs above atomically. 3. Enqueue the TIs in the executor. :param simple_dag_bag: TaskInstances associated with DAGs in the simple_dag_bag will be fetched from the DB and executed :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag :param states: Execute TaskInstances in these states :type states: tuple[airflow.utils.state.State] :return: Number of task instance with state changed. """ executable_tis = self._find_executable_task_instances(simple_dag_bag, states, session=session) def query(result, items): simple_tis_with_state_changed = \ self._change_state_for_executable_task_instances(items, states, session=session) self._enqueue_task_instances_with_queued_state( simple_dag_bag, simple_tis_with_state_changed) session.commit() return result + len(simple_tis_with_state_changed) return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
airflow/jobs.py
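`max_tis_per_query` caps how many task instances each state-change/enqueue round touches, via a chunked reduce. A generic sketch of that pattern (this is not Airflow's `helpers.reduce_in_chunks`, only an illustration of the idea):

def reduce_in_chunks(fn, items, initial, chunk_size):
    # Apply fn(acc, chunk) over fixed-size slices, threading an accumulator through.
    if chunk_size <= 0:
        return fn(initial, items) if items else initial
    acc = initial
    for start in range(0, len(items), chunk_size):
        acc = fn(acc, items[start:start + chunk_size])
    return acc

# e.g. "enqueue" five items in batches of two, counting how many were handled
total = reduce_in_chunks(lambda acc, chunk: acc + len(chunk), list(range(5)), 0, 2)
print(total)  # 5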
apache/airflow
SchedulerJob._change_state_for_tasks_failed_to_execute
def _change_state_for_tasks_failed_to_execute(self, session): if self.executor.queued_tasks: TI = models.TaskInstance filter_for_ti_state_change = ( [and_( TI.dag_id == dag_id, TI.task_id == task_id, TI.execution_date == execution_date, TI._try_number == try_number - 1, TI.state == State.QUEUED) for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()]) ti_query = (session.query(TI) .filter(or_(*filter_for_ti_state_change))) tis_to_set_to_scheduled = (ti_query .with_for_update() .all()) if len(tis_to_set_to_scheduled) == 0: session.commit() return for task_instance in tis_to_set_to_scheduled: task_instance.state = State.SCHEDULED task_instance_str = "\n\t".join( [repr(x) for x in tis_to_set_to_scheduled]) session.commit() self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
If there are tasks left over in the executor's queue, we set them back to SCHEDULED to avoid creating hanging tasks.
def _change_state_for_tasks_failed_to_execute(self, session): """ If there are tasks left over in the executor, we set them back to SCHEDULED to avoid creating hanging tasks. :param session: session for ORM operations """ if self.executor.queued_tasks: TI = models.TaskInstance filter_for_ti_state_change = ( [and_( TI.dag_id == dag_id, TI.task_id == task_id, TI.execution_date == execution_date, # The TI.try_number will return raw try_number+1 since the # ti is not running. And we need to -1 to match the DB record. TI._try_number == try_number - 1, TI.state == State.QUEUED) for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()]) ti_query = (session.query(TI) .filter(or_(*filter_for_ti_state_change))) tis_to_set_to_scheduled = (ti_query .with_for_update() .all()) if len(tis_to_set_to_scheduled) == 0: session.commit() return # set TIs back to scheduled state for task_instance in tis_to_set_to_scheduled: task_instance.state = State.SCHEDULED task_instance_str = "\n\t".join( [repr(x) for x in tis_to_set_to_scheduled]) session.commit() self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
airflow/jobs.py
apache/airflow
SchedulerJob._process_executor_events
def _process_executor_events(self, simple_dag_bag, session=None): TI = models.TaskInstance for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids) .items()): dag_id, task_id, execution_date, try_number = key self.log.info( "Executor reports execution of %s.%s execution_date=%s " "exited with status %s for try_number %s", dag_id, task_id, execution_date, state, try_number ) if state == State.FAILED or state == State.SUCCESS: qry = session.query(TI).filter(TI.dag_id == dag_id, TI.task_id == task_id, TI.execution_date == execution_date) ti = qry.first() if not ti: self.log.warning("TaskInstance %s went missing from the database", ti) continue if ti.try_number == try_number and ti.state == State.QUEUED: msg = ("Executor reports task instance {} finished ({}) " "although the task says its {}. Was the task " "killed externally?".format(ti, state, ti.state)) self.log.error(msg) try: simple_dag = simple_dag_bag.get_dag(dag_id) dagbag = models.DagBag(simple_dag.full_filepath) dag = dagbag.get_dag(dag_id) ti.task = dag.get_task(task_id) ti.handle_failure(msg) except Exception: self.log.error("Cannot load the dag bag to handle failure for %s" ". Setting task to FAILED without callbacks or " "retries. Do you have enough resources?", ti) ti.state = State.FAILED session.merge(ti) session.commit()
Respond to executor events.
def _process_executor_events(self, simple_dag_bag, session=None): """ Respond to executor events. """ # TODO: this shares quite a lot of code with _manage_executor_state TI = models.TaskInstance for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids) .items()): dag_id, task_id, execution_date, try_number = key self.log.info( "Executor reports execution of %s.%s execution_date=%s " "exited with status %s for try_number %s", dag_id, task_id, execution_date, state, try_number ) if state == State.FAILED or state == State.SUCCESS: qry = session.query(TI).filter(TI.dag_id == dag_id, TI.task_id == task_id, TI.execution_date == execution_date) ti = qry.first() if not ti: self.log.warning("TaskInstance %s went missing from the database", ti) continue # TODO: should we fail RUNNING as well, as we do in Backfills? if ti.try_number == try_number and ti.state == State.QUEUED: msg = ("Executor reports task instance {} finished ({}) " "although the task says its {}. Was the task " "killed externally?".format(ti, state, ti.state)) self.log.error(msg) try: simple_dag = simple_dag_bag.get_dag(dag_id) dagbag = models.DagBag(simple_dag.full_filepath) dag = dagbag.get_dag(dag_id) ti.task = dag.get_task(task_id) ti.handle_failure(msg) except Exception: self.log.error("Cannot load the dag bag to handle failure for %s" ". Setting task to FAILED without callbacks or " "retries. Do you have enough resources?", ti) ti.state = State.FAILED session.merge(ti) session.commit()
airflow/jobs.py
apache/airflow
SchedulerJob.process_file
def process_file(self, file_path, zombies, pickle_dags=False, session=None): self.log.info("Processing file %s for tasks to queue", file_path) simple_dags = [] try: dagbag = models.DagBag(file_path, include_examples=False) except Exception: self.log.exception("Failed at reloading the DAG file %s", file_path) Stats.incr('dag_file_refresh_error', 1, 1) return [] if len(dagbag.dags) > 0: self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path) else: self.log.warning("No viable dags retrieved from %s", file_path) self.update_import_errors(session, dagbag) return [] for dag in dagbag.dags.values(): dag.sync_to_db() paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values() if dag.is_paused] for dag_id in dagbag.dags: if dag_id not in paused_dag_ids: dag = dagbag.get_dag(dag_id) pickle_id = None if pickle_dags: pickle_id = dag.pickle(session).id simple_dags.append(SimpleDag(dag, pickle_id=pickle_id)) if len(self.dag_ids) > 0: dags = [dag for dag in dagbag.dags.values() if dag.dag_id in self.dag_ids and dag.dag_id not in paused_dag_ids] else: dags = [dag for dag in dagbag.dags.values() if not dag.parent_dag and dag.dag_id not in paused_dag_ids] ti_keys_to_schedule = [] self._process_dags(dagbag, dags, ti_keys_to_schedule) for ti_key in ti_keys_to_schedule: dag = dagbag.dags[ti_key[0]] task = dag.get_task(ti_key[1]) ti = models.TaskInstance(task, ti_key[2]) ti.refresh_from_db(session=session, lock_for_update=True) dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True) if ti.are_dependencies_met( dep_context=dep_context, session=session, verbose=True): ti.state = State.SCHEDULED self.log.info("Creating / updating %s in ORM", ti) session.merge(ti) session.commit() try: self.update_import_errors(session, dagbag) except Exception: self.log.exception("Error logging import errors!") try: dagbag.kill_zombies(zombies) except Exception: self.log.exception("Error killing zombies!") return simple_dags
Process a Python file containing Airflow DAGs: execute the file to find DAG objects, pickle the DAGs if requested, create task instances for tasks that should run, record import errors in the ORM, and kill zombie task instances. Returns a list of SimpleDag objects for the DAGs found in the file.
def process_file(self, file_path, zombies, pickle_dags=False, session=None): """ Process a Python file containing Airflow DAGs. This includes: 1. Execute the file and look for DAG objects in the namespace. 2. Pickle the DAG and save it to the DB (if necessary). 3. For each DAG, see what tasks should run and create appropriate task instances in the DB. 4. Record any errors importing the file into ORM 5. Kill (in ORM) any task instances belonging to the DAGs that haven't issued a heartbeat in a while. Returns a list of SimpleDag objects that represent the DAGs found in the file :param file_path: the path to the Python file that should be executed :type file_path: unicode :param zombies: zombie task instances to kill. :type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance] :param pickle_dags: whether to serialize the DAGs found in the file and save them to the db :type pickle_dags: bool :return: a list of SimpleDags made from the Dags found in the file :rtype: list[airflow.utils.dag_processing.SimpleDag] """ self.log.info("Processing file %s for tasks to queue", file_path) # As DAGs are parsed from this file, they will be converted into SimpleDags simple_dags = [] try: dagbag = models.DagBag(file_path, include_examples=False) except Exception: self.log.exception("Failed at reloading the DAG file %s", file_path) Stats.incr('dag_file_refresh_error', 1, 1) return [] if len(dagbag.dags) > 0: self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path) else: self.log.warning("No viable dags retrieved from %s", file_path) self.update_import_errors(session, dagbag) return [] # Save individual DAGs in the ORM and update DagModel.last_scheduled_time for dag in dagbag.dags.values(): dag.sync_to_db() paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values() if dag.is_paused] # Pickle the DAGs (if necessary) and put them into a SimpleDag for dag_id in dagbag.dags: # Only return DAGs that are not paused if dag_id not in paused_dag_ids: dag = dagbag.get_dag(dag_id) pickle_id = None if pickle_dags: pickle_id = dag.pickle(session).id simple_dags.append(SimpleDag(dag, pickle_id=pickle_id)) if len(self.dag_ids) > 0: dags = [dag for dag in dagbag.dags.values() if dag.dag_id in self.dag_ids and dag.dag_id not in paused_dag_ids] else: dags = [dag for dag in dagbag.dags.values() if not dag.parent_dag and dag.dag_id not in paused_dag_ids] # Not using multiprocessing.Queue() since it's no longer a separate # process and due to some unusual behavior. (empty() incorrectly # returns true?) ti_keys_to_schedule = [] self._process_dags(dagbag, dags, ti_keys_to_schedule) for ti_key in ti_keys_to_schedule: dag = dagbag.dags[ti_key[0]] task = dag.get_task(ti_key[1]) ti = models.TaskInstance(task, ti_key[2]) ti.refresh_from_db(session=session, lock_for_update=True) # We can defer checking the task dependency checks to the worker themselves # since they can be expensive to run in the scheduler. dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True) # Only schedule tasks that have their dependencies met, e.g. to avoid # a task that recently got its state changed to RUNNING from somewhere # other than the scheduler from getting its state overwritten. # TODO(aoen): It's not great that we have to check all the task instance # dependencies twice; once to get the task scheduled, and again to actually # run the task. We should try to come up with a way to only check them once. if ti.are_dependencies_met( dep_context=dep_context, session=session, verbose=True): # Task starts out in the scheduled state. All tasks in the # scheduled state will be sent to the executor ti.state = State.SCHEDULED # Also save this task instance to the DB. self.log.info("Creating / updating %s in ORM", ti) session.merge(ti) # commit batch session.commit() # Record import errors into the ORM try: self.update_import_errors(session, dagbag) except Exception: self.log.exception("Error logging import errors!") try: dagbag.kill_zombies(zombies) except Exception: self.log.exception("Error killing zombies!") return simple_dags
airflow/jobs.py
apache/airflow
BackfillJob._update_counters
def _update_counters(self, ti_status): for key, ti in list(ti_status.running.items()): ti.refresh_from_db() if ti.state == State.SUCCESS: ti_status.succeeded.add(key) self.log.debug("Task instance %s succeeded. Don't rerun.", ti) ti_status.running.pop(key) continue elif ti.state == State.SKIPPED: ti_status.skipped.add(key) self.log.debug("Task instance %s skipped. Don't rerun.", ti) ti_status.running.pop(key) continue elif ti.state == State.FAILED: self.log.error("Task instance %s failed", ti) ti_status.failed.add(key) ti_status.running.pop(key) continue elif ti.state == State.UP_FOR_RETRY: self.log.warning("Task instance %s is up for retry", ti) ti_status.running.pop(key) ti_status.to_run[key] = ti elif ti.state == State.UP_FOR_RESCHEDULE: self.log.warning("Task instance %s is up for reschedule", ti) ti_status.running.pop(key) ti_status.to_run[key] = ti elif ti.state == State.NONE: self.log.warning( "FIXME: task instance %s state was set to none externally or " "reaching concurrency limits. Re-adding task to queue.", ti ) ti.set_state(State.SCHEDULED) ti_status.running.pop(key) ti_status.to_run[key] = ti
Updates the per-state counters for the tasks that were running, and re-adds tasks to the to-run queue where required (e.g. up for retry, up for reschedule, or state reset externally).
def _update_counters(self, ti_status): """ Updates the counters per state of the tasks that were running. Can re-add to tasks to run in case required. :param ti_status: the internal status of the backfill job tasks :type ti_status: BackfillJob._DagRunTaskStatus """ for key, ti in list(ti_status.running.items()): ti.refresh_from_db() if ti.state == State.SUCCESS: ti_status.succeeded.add(key) self.log.debug("Task instance %s succeeded. Don't rerun.", ti) ti_status.running.pop(key) continue elif ti.state == State.SKIPPED: ti_status.skipped.add(key) self.log.debug("Task instance %s skipped. Don't rerun.", ti) ti_status.running.pop(key) continue elif ti.state == State.FAILED: self.log.error("Task instance %s failed", ti) ti_status.failed.add(key) ti_status.running.pop(key) continue # special case: if the task needs to run again put it back elif ti.state == State.UP_FOR_RETRY: self.log.warning("Task instance %s is up for retry", ti) ti_status.running.pop(key) ti_status.to_run[key] = ti # special case: if the task needs to be rescheduled put it back elif ti.state == State.UP_FOR_RESCHEDULE: self.log.warning("Task instance %s is up for reschedule", ti) ti_status.running.pop(key) ti_status.to_run[key] = ti # special case: The state of the task can be set to NONE by the task itself # when it reaches concurrency limits. It could also happen when the state # is changed externally, e.g. by clearing tasks from the ui. We need to cover # for that as otherwise those tasks would fall outside of the scope of # the backfill suddenly. elif ti.state == State.NONE: self.log.warning( "FIXME: task instance %s state was set to none externally or " "reaching concurrency limits. Re-adding task to queue.", ti ) ti.set_state(State.SCHEDULED) ti_status.running.pop(key) ti_status.to_run[key] = ti
airflow/jobs.py
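The counter bookkeeping above boils down to: terminal states are recorded and dropped from `running`, while retry/reschedule/none states go back onto `to_run`. A toy sketch with plain strings standing in for TaskInstance objects and states:

def update_counters(running, latest_states, succeeded, skipped, failed, to_run):
    for key in list(running):
        state = latest_states[key]          # stands in for ti.refresh_from_db()
        if state == 'success':
            succeeded.add(key)
            running.pop(key)
        elif state == 'skipped':
            skipped.add(key)
            running.pop(key)
        elif state == 'failed':
            failed.add(key)
            running.pop(key)
        elif state in ('up_for_retry', 'up_for_reschedule', None):
            to_run[key] = running.pop(key)  # give it another turn

running = {'t1': 'ti-1', 't2': 'ti-2', 't3': 'ti-3'}
latest = {'t1': 'success', 't2': 'up_for_retry', 't3': 'failed'}
succeeded, skipped, failed, to_run = set(), set(), set(), {}
update_counters(running, latest, succeeded, skipped, failed, to_run)
print(succeeded, failed, to_run)  # {'t1'} {'t3'} {'t2': 'ti-2'}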
apache/airflow
BackfillJob._manage_executor_state
def _manage_executor_state(self, running): executor = self.executor for key, state in list(executor.get_event_buffer().items()): if key not in running: self.log.warning( "%s state %s not in running=%s", key, state, running.values() ) continue ti = running[key] ti.refresh_from_db() self.log.debug("Executor state: %s task %s", state, ti) if state == State.FAILED or state == State.SUCCESS: if ti.state == State.RUNNING or ti.state == State.QUEUED: msg = ("Executor reports task instance {} finished ({}) " "although the task says its {}. Was the task " "killed externally?".format(ti, state, ti.state)) self.log.error(msg) ti.handle_failure(msg)
Checks whether the executor agrees with the state of the task instances that are currently running.
def _manage_executor_state(self, running): """ Checks if the executor agrees with the state of task instances that are running :param running: dict of key, task to verify """ executor = self.executor for key, state in list(executor.get_event_buffer().items()): if key not in running: self.log.warning( "%s state %s not in running=%s", key, state, running.values() ) continue ti = running[key] ti.refresh_from_db() self.log.debug("Executor state: %s task %s", state, ti) if state == State.FAILED or state == State.SUCCESS: if ti.state == State.RUNNING or ti.state == State.QUEUED: msg = ("Executor reports task instance {} finished ({}) " "although the task says its {}. Was the task " "killed externally?".format(ti, state, ti.state)) self.log.error(msg) ti.handle_failure(msg)
airflow/jobs.py
apache/airflow
BackfillJob._execute_for_run_dates
def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id, start_date, session=None): for next_run_date in run_dates: dag_run = self._get_dag_run(next_run_date, session=session) tis_map = self._task_instances_for_dag_run(dag_run, session=session) if dag_run is None: continue ti_status.active_runs.append(dag_run) ti_status.to_run.update(tis_map or {}) processed_dag_run_dates = self._process_backfill_task_instances( ti_status=ti_status, executor=executor, pickle_id=pickle_id, start_date=start_date, session=session) ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
Computes the DAG runs and their respective task instances for the given run dates and executes those task instances.
def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id, start_date, session=None): """ Computes the dag runs and their respective task instances for the given run dates and executes the task instances. Returns a list of execution dates of the dag runs that were executed. :param run_dates: Execution dates for dag runs :type run_dates: list :param ti_status: internal BackfillJob status structure to tis track progress :type ti_status: BackfillJob._DagRunTaskStatus :param executor: the executor to use, it must be previously started :type executor: BaseExecutor :param pickle_id: numeric id of the pickled dag, None if not pickled :type pickle_id: int :param start_date: backfill start date :type start_date: datetime.datetime :param session: the current session object :type session: sqlalchemy.orm.session.Session """ for next_run_date in run_dates: dag_run = self._get_dag_run(next_run_date, session=session) tis_map = self._task_instances_for_dag_run(dag_run, session=session) if dag_run is None: continue ti_status.active_runs.append(dag_run) ti_status.to_run.update(tis_map or {}) processed_dag_run_dates = self._process_backfill_task_instances( ti_status=ti_status, executor=executor, pickle_id=pickle_id, start_date=start_date, session=session) ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
airflow/jobs.py