Columns: INSTRUCTION (string, lengths 1 to 8.43k); RESPONSE (string, lengths 75 to 104k)
Opens an SSH connection to the remote host.
def get_conn(self): """ Opens a ssh connection to the remote host. :rtype: paramiko.client.SSHClient """ self.log.debug('Creating SSH client for conn_id: %s', self.ssh_conn_id) client = paramiko.SSHClient() if not self.allow_host_key_change: self.log.warning('Remote Identification Change is not verified. ' 'This wont protect against Man-In-The-Middle attacks') client.load_system_host_keys() if self.no_host_key_check: self.log.warning('No Host Key Verification. This wont protect ' 'against Man-In-The-Middle attacks') # Default is RejectPolicy client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if self.password and self.password.strip(): client.connect(hostname=self.remote_host, username=self.username, password=self.password, key_filename=self.key_file, timeout=self.timeout, compress=self.compress, port=self.port, sock=self.host_proxy) else: client.connect(hostname=self.remote_host, username=self.username, key_filename=self.key_file, timeout=self.timeout, compress=self.compress, port=self.port, sock=self.host_proxy) if self.keepalive_interval: client.get_transport().set_keepalive(self.keepalive_interval) self.client = client return client
Creates a tunnel between two hosts, like ssh -L <LOCAL_PORT>:host:<REMOTE_PORT>.
def get_tunnel(self, remote_port, remote_host="localhost", local_port=None): """ Creates a tunnel between two hosts. Like ssh -L <LOCAL_PORT>:host:<REMOTE_PORT>. :param remote_port: The remote port to create a tunnel to :type remote_port: int :param remote_host: The remote host to create a tunnel to (default localhost) :type remote_host: str :param local_port: The local port to attach the tunnel to :type local_port: int :return: sshtunnel.SSHTunnelForwarder object """ if local_port: local_bind_address = ('localhost', local_port) else: local_bind_address = ('localhost',) if self.password and self.password.strip(): client = SSHTunnelForwarder(self.remote_host, ssh_port=self.port, ssh_username=self.username, ssh_password=self.password, ssh_pkey=self.key_file, ssh_proxy=self.host_proxy, local_bind_address=local_bind_address, remote_bind_address=(remote_host, remote_port), logger=self.log) else: client = SSHTunnelForwarder(self.remote_host, ssh_port=self.port, ssh_username=self.username, ssh_pkey=self.key_file, ssh_proxy=self.host_proxy, local_bind_address=local_bind_address, remote_bind_address=(remote_host, remote_port), host_pkey_directories=[], logger=self.log) return client
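A minimal usage sketch for get_tunnel (the hook object, host, and ports below are illustrative assumptions, not part of the source):

    # `ssh_hook` is assumed to be an already-configured instance of the hook above.
    tunnel = ssh_hook.get_tunnel(remote_port=5432, remote_host="db.internal", local_port=15432)
    tunnel.start()   # equivalent to: ssh -L 15432:db.internal:5432 <remote_host>
    try:
        # anything connecting to localhost:15432 is now forwarded to db.internal:5432
        print(tunnel.local_bind_port)
    finally:
        tunnel.stop()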
Creates a transfer job that runs periodically.
def create_transfer_job(self, body): """ Creates a transfer job that runs periodically. :param body: (Required) A request body, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body :type body: dict :return: transfer job. See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob :rtype: dict """ body = self._inject_project_id(body, BODY, PROJECT_ID) return self.get_conn().transferJobs().create(body=body).execute(num_retries=self.num_retries)
Gets the latest state of a long-running operation in Google Storage Transfer Service.
def get_transfer_job(self, job_name, project_id=None): """ Gets the latest state of a long-running operation in Google Storage Transfer Service. :param job_name: (Required) Name of the job to be fetched :type job_name: str :param project_id: (Optional) the ID of the project that owns the Transfer Job. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: Transfer Job :rtype: dict """ return ( self.get_conn() .transferJobs() .get(jobName=job_name, projectId=project_id) .execute(num_retries=self.num_retries) )
Lists transfer jobs in Google Storage Transfer Service that match the specified filter.
def list_transfer_job(self, filter): """ Lists long-running operations in Google Storage Transfer Service that match the specified filter. :param filter: (Required) A request filter, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter :type filter: dict :return: List of Transfer Jobs :rtype: list[dict] """ conn = self.get_conn() filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID) request = conn.transferJobs().list(filter=json.dumps(filter)) jobs = [] while request is not None: response = request.execute(num_retries=self.num_retries) jobs.extend(response[TRANSFER_JOBS]) request = conn.transferJobs().list_next(previous_request=request, previous_response=response) return jobs
Updates a transfer job that runs periodically.
def update_transfer_job(self, job_name, body): """ Updates a transfer job that runs periodically. :param job_name: (Required) Name of the job to be updated :type job_name: str :param body: A request body, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body :type body: dict :return: If successful, TransferJob. :rtype: dict """ body = self._inject_project_id(body, BODY, PROJECT_ID) return ( self.get_conn() .transferJobs() .patch(jobName=job_name, body=body) .execute(num_retries=self.num_retries) )
Deletes a transfer job. This is a soft delete. After a transfer job is deleted the job and all the transfer executions are subject to garbage collection. Transfer jobs become eligible for garbage collection 30 days after soft delete.
def delete_transfer_job(self, job_name, project_id): """ Deletes a transfer job. This is a soft delete. After a transfer job is deleted, the job and all the transfer executions are subject to garbage collection. Transfer jobs become eligible for garbage collection 30 days after soft delete. :param job_name: (Required) Name of the job to be deleted :type job_name: str :param project_id: (Optional) the ID of the project that owns the Transfer Job. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :rtype: None """ return ( self.get_conn() .transferJobs() .patch( jobName=job_name, body={ PROJECT_ID: project_id, TRANSFER_JOB: {STATUS1: GcpTransferJobsStatus.DELETED}, TRANSFER_JOB_FIELD_MASK: STATUS1, }, ) .execute(num_retries=self.num_retries) )
Cancels a transfer operation in Google Storage Transfer Service.
def cancel_transfer_operation(self, operation_name): """ Cancels a transfer operation in Google Storage Transfer Service. :param operation_name: Name of the transfer operation. :type operation_name: str :rtype: None """ self.get_conn().transferOperations().cancel(name=operation_name).execute(num_retries=self.num_retries)
Gets a transfer operation in Google Storage Transfer Service.
def get_transfer_operation(self, operation_name): """ Gets a transfer operation in Google Storage Transfer Service. :param operation_name: (Required) Name of the transfer operation. :type operation_name: str :return: transfer operation See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/Operation :rtype: dict """ return ( self.get_conn() .transferOperations() .get(name=operation_name) .execute(num_retries=self.num_retries) )
Lists transfer operations in Google Storage Transfer Service that match the specified filter.
def list_transfer_operations(self, filter): """ Lists transfer operations in Google Storage Transfer Service that match the specified filter. :param filter: (Required) A request filter, as described in https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter With one additional improvement: * project_id is optional if you have a project id defined in the connection See: :ref:`howto/connection:gcp` :type filter: dict :return: transfer operations :rtype: list[dict] """ conn = self.get_conn() filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID) operations = [] request = conn.transferOperations().list(name=TRANSFER_OPERATIONS, filter=json.dumps(filter)) while request is not None: response = request.execute(num_retries=self.num_retries) if OPERATIONS in response: operations.extend(response[OPERATIONS]) request = conn.transferOperations().list_next( previous_request=request, previous_response=response ) return operations
Pauses a transfer operation in Google Storage Transfer Service.
def pause_transfer_operation(self, operation_name): """ Pauses a transfer operation in Google Storage Transfer Service. :param operation_name: (Required) Name of the transfer operation. :type operation_name: str :rtype: None """ self.get_conn().transferOperations().pause(name=operation_name).execute(num_retries=self.num_retries)
Resumes a transfer operation in Google Storage Transfer Service.
def resume_transfer_operation(self, operation_name): """ Resumes a transfer operation in Google Storage Transfer Service. :param operation_name: (Required) Name of the transfer operation. :type operation_name: str :rtype: None """ self.get_conn().transferOperations().resume(name=operation_name).execute(num_retries=self.num_retries)
Waits until the job reaches the expected state.
def wait_for_transfer_job(self, job, expected_statuses=(GcpTransferOperationStatus.SUCCESS,), timeout=60): """ Waits until the job reaches the expected state. :param job: Transfer job See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob :type job: dict :param expected_statuses: State that is expected See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status :type expected_statuses: set[str] :param timeout: :type timeout: time in which the operation must end in seconds :rtype: None """ while timeout > 0: operations = self.list_transfer_operations( filter={FILTER_PROJECT_ID: job[PROJECT_ID], FILTER_JOB_NAMES: [job[NAME]]} ) if GCPTransferServiceHook.operations_contain_expected_statuses(operations, expected_statuses): return time.sleep(TIME_TO_SLEEP_IN_SECONDS) timeout -= TIME_TO_SLEEP_IN_SECONDS raise AirflowException("Timeout. The operation could not be completed within the allotted time.")
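A sketch of how create_transfer_job and wait_for_transfer_job might be combined (the hook instance and job body are assumptions for illustration):

    # `transfer_hook` is assumed to be a configured GCPTransferServiceHook;
    # `transfer_job_body` is a TransferJob request body as described in the docstrings above.
    job = transfer_hook.create_transfer_job(body=transfer_job_body)
    # Block until an operation for this job reaches SUCCESS, polling every TIME_TO_SLEEP_IN_SECONDS,
    # or raise AirflowException after roughly 300 seconds.
    transfer_hook.wait_for_transfer_job(job, expected_statuses=(GcpTransferOperationStatus.SUCCESS,), timeout=300)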
Checks whether the operation list has an operation with the expected status and returns True if it does. If it encounters operations in the FAILED or ABORTED state, raises airflow.exceptions.AirflowException.
def operations_contain_expected_statuses(operations, expected_statuses): """ Checks whether the operation list has an operation with the expected status, then returns true If it encounters operations in FAILED or ABORTED state throw :class:`airflow.exceptions.AirflowException`. :param operations: (Required) List of transfer operations to check. :type operations: list[dict] :param expected_statuses: (Required) status that is expected See: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status :type expected_statuses: set[str] :return: If there is an operation with the expected state in the operation list, returns true, :raises: airflow.exceptions.AirflowException If it encounters operations with a state in the list, :rtype: bool """ expected_statuses = ( {expected_statuses} if isinstance(expected_statuses, six.string_types) else set(expected_statuses) ) if len(operations) == 0: return False current_statuses = {operation[METADATA][STATUS] for operation in operations} if len(current_statuses - set(expected_statuses)) != len(current_statuses): return True if len(NEGATIVE_STATUSES - current_statuses) != len(NEGATIVE_STATUSES): raise AirflowException( 'An unexpected operation status was encountered. Expected: {}'.format( ", ".join(expected_statuses) ) ) return False
Returns all task reschedules for the task instance and try number in ascending order.
def find_for_task_instance(task_instance, session): """ Returns all task reschedules for the task instance and try number, in ascending order. :param task_instance: the task instance to find task reschedules for :type task_instance: airflow.models.TaskInstance """ TR = TaskReschedule return ( session .query(TR) .filter(TR.dag_id == task_instance.dag_id, TR.task_id == task_instance.task_id, TR.execution_date == task_instance.execution_date, TR.try_number == task_instance.try_number) .order_by(asc(TR.id)) .all() )
Kubernetes only supports lowercase alphanumeric characters and "-" and "." in the pod name. However, there are special rules about how "-" and "." can be used, so let's only keep alphanumeric chars. See here for details: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
def _strip_unsafe_kubernetes_special_chars(string): """ Kubernetes only supports lowercase alphanumeric characters and "-" and "." in the pod name However, there are special rules about how "-" and "." can be used so let's only keep alphanumeric chars see here for detail: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/ :param string: The requested Pod name :return: ``str`` Pod name stripped of any unsafe characters """ return ''.join(ch.lower() for ind, ch in enumerate(string) if ch.isalnum())
Kubernetes pod names must be <= 253 chars and must pass the following regex for validation: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
def _make_safe_pod_id(safe_dag_id, safe_task_id, safe_uuid): """ Kubernetes pod names must be <= 253 chars and must pass the following regex for validation "^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" :param safe_dag_id: a dag_id with only alphanumeric characters :param safe_task_id: a task_id with only alphanumeric characters :param random_uuid: a uuid :return: ``str`` valid Pod name of appropriate length """ MAX_POD_ID_LEN = 253 safe_key = safe_dag_id + safe_task_id safe_pod_id = safe_key[:MAX_POD_ID_LEN - len(safe_uuid) - 1] + "-" + safe_uuid return safe_pod_id
Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]), with dashes (-), underscores (_), dots (.), and alphanumerics between.
def _make_safe_label_value(string): """ Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. If the label value is then greater than 63 chars once made safe, or differs in any way from the original value sent to this function, then we need to truncate to 53chars, and append it with a unique hash. """ MAX_LABEL_LEN = 63 safe_label = re.sub(r'^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$', '', string) if len(safe_label) > MAX_LABEL_LEN or string != safe_label: safe_hash = hashlib.md5(string.encode()).hexdigest()[:9] safe_label = safe_label[:MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash return safe_label
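To make the truncate-and-hash rule concrete, a small illustration (the inputs are made up; the hash suffix depends on the exact value):

    # Already a valid label value and under 63 chars: returned unchanged.
    _make_safe_label_value("my_dag-v1.0")      # -> "my_dag-v1.0"
    # Longer than 63 chars: truncated to 53 chars, then "-" plus the first 9 hex chars
    # of the md5 digest of the original value is appended (63 chars total).
    _make_safe_label_value("a" * 80)           # -> 53 "a" characters + "-" + 9-char hash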
If the airflow scheduler restarts with pending "Queued" tasks, the tasks may or may not have been launched. Thus, on starting up the scheduler, let's check every "Queued" task to see if it has been launched (i.e. if there is a corresponding pod on kubernetes).
def clear_not_launched_queued_tasks(self, session=None): """ If the airflow scheduler restarts with pending "Queued" tasks, the tasks may or may not have been launched Thus, on starting up the scheduler let's check every "Queued" task to see if it has been launched (ie: if there is a corresponding pod on kubernetes) If it has been launched then do nothing, otherwise reset the state to "None" so the task will be rescheduled This will not be necessary in a future version of airflow in which there is proper support for State.LAUNCHED """ queued_tasks = session\ .query(TaskInstance)\ .filter(TaskInstance.state == State.QUEUED).all() self.log.info( 'When executor started up, found %s queued task instances', len(queued_tasks) ) for task in queued_tasks: dict_string = ( "dag_id={},task_id={},execution_date={},airflow-worker={}".format( AirflowKubernetesScheduler._make_safe_label_value(task.dag_id), AirflowKubernetesScheduler._make_safe_label_value(task.task_id), AirflowKubernetesScheduler._datetime_to_label_safe_datestring( task.execution_date ), self.worker_uuid ) ) kwargs = dict(label_selector=dict_string) pod_list = self.kube_client.list_namespaced_pod( self.kube_config.kube_namespace, **kwargs) if len(pod_list.items) == 0: self.log.info( 'TaskInstance: %s found in queued state but was not launched, ' 'rescheduling', task ) session.query(TaskInstance).filter( TaskInstance.dag_id == task.dag_id, TaskInstance.task_id == task.task_id, TaskInstance.execution_date == task.execution_date ).update({TaskInstance.state: State.NONE})
Returns the number of slots open at the moment
def open_slots(self, session): """ Returns the number of slots open at the moment """ from airflow.models.taskinstance import \ TaskInstance as TI # Avoid circular import used_slots = session.query(func.count()).filter(TI.pool == self.pool).filter( TI.state.in_([State.RUNNING, State.QUEUED])).scalar() return self.slots - used_slots
Expands (potentially nested) env vars by repeatedly applying expandvars and expanduser until interpolation stops having any effect.
def expand_env_var(env_var): """ Expands (potentially nested) env vars by repeatedly applying `expandvars` and `expanduser` until interpolation stops having any effect. """ if not env_var: return env_var while True: interpolated = os.path.expanduser(os.path.expandvars(str(env_var))) if interpolated == env_var: return interpolated else: env_var = interpolated
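A quick illustration of the nested expansion (the environment values here are invented):

    import os
    os.environ["AIRFLOW_BASE"] = "/opt/airflow"
    os.environ["AIRFLOW_LOGS"] = "$AIRFLOW_BASE/logs"
    # The first pass expands $AIRFLOW_LOGS, the second pass expands the nested $AIRFLOW_BASE,
    # and the loop stops once another pass changes nothing.
    expand_env_var("$AIRFLOW_LOGS/scheduler")   # -> "/opt/airflow/logs/scheduler"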
Runs command and returns stdout
def run_command(command): """ Runs command and returns stdout """ process = subprocess.Popen( shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) output, stderr = [stream.decode(sys.getdefaultencoding(), 'ignore') for stream in process.communicate()] if process.returncode != 0: raise AirflowConfigException( "Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}" .format(command, process.returncode, output, stderr) ) return output
Generates a configuration from the provided template plus variables defined in the current scope; the template is config content templated with {{variables}}.
def parameterized_config(template): """ Generates a configuration from the provided template + variables defined in current scope :param template: a config content templated with {{variables}} """ all_vars = {k: v for d in [globals(), locals()] for k, v in d.items()} return template.format(**all_vars)
Removes an option if it exists in the config loaded from a file or in the default config. If both configs have the same option, this removes the option from both unless remove_default=False.
def remove_option(self, section, option, remove_default=True): """ Remove an option if it exists in config from a file or default config. If both of config have the same option, this removes the option in both configs unless remove_default=False. """ if super().has_option(section, option): super().remove_option(section, option) if self.airflow_defaults.has_option(section, option) and remove_default: self.airflow_defaults.remove_option(section, option)
Returns the section as a dict. Values are converted to int, float, or bool as required.
def getsection(self, section): """ Returns the section as a dict. Values are converted to int, float, bool as required. :param section: section from the config :rtype: dict """ if (section not in self._sections and section not in self.airflow_defaults._sections): return None _section = copy.deepcopy(self.airflow_defaults._sections[section]) if section in self._sections: _section.update(copy.deepcopy(self._sections[section])) section_prefix = 'AIRFLOW__{S}__'.format(S=section.upper()) for env_var in sorted(os.environ.keys()): if env_var.startswith(section_prefix): key = env_var.replace(section_prefix, '').lower() _section[key] = self._get_env_var_option(section, key) for key, val in iteritems(_section): try: val = int(val) except ValueError: try: val = float(val) except ValueError: if val.lower() in ('t', 'true'): val = True elif val.lower() in ('f', 'false'): val = False _section[key] = val return _section
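Given the precedence implemented above, an environment variable overrides the file value and is still type-coerced. A sketch (the option and value are placeholders; `conf` is assumed to be the loaded AirflowConfigParser instance):

    import os
    os.environ["AIRFLOW__CORE__PARALLELISM"] = "64"   # AIRFLOW__{SECTION}__{KEY}
    core = conf.getsection("core")
    core["parallelism"]                               # -> 64 (an int, not the string "64")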
Returns the current configuration as an OrderedDict of OrderedDicts. If display_source is True, each value is returned as a tuple of (option_value, source), where source is 'airflow.cfg', 'default', 'env var', or 'cmd'; display_sensitive controls whether values set by env vars and bash commands are shown or masked as '< hidden >'; raw controls whether values are output interpolated or in the raw form that can be fed back in to ConfigParser.
def as_dict( self, display_source=False, display_sensitive=False, raw=False): """ Returns the current configuration as an OrderedDict of OrderedDicts. :param display_source: If False, the option value is returned. If True, a tuple of (option_value, source) is returned. Source is either 'airflow.cfg', 'default', 'env var', or 'cmd'. :type display_source: bool :param display_sensitive: If True, the values of options set by env vars and bash commands will be displayed. If False, those options are shown as '< hidden >' :type display_sensitive: bool :param raw: Should the values be output as interpolated values, or the "raw" form that can be fed back in to ConfigParser :type raw: bool """ cfg = {} configs = [ ('default', self.airflow_defaults), ('airflow.cfg', self), ] for (source_name, config) in configs: for section in config.sections(): sect = cfg.setdefault(section, OrderedDict()) for (k, val) in config.items(section=section, raw=raw): if display_source: val = (val, source_name) sect[k] = val # add env vars and overwrite because they have priority for ev in [ev for ev in os.environ if ev.startswith('AIRFLOW__')]: try: _, section, key = ev.split('__') opt = self._get_env_var_option(section, key) except ValueError: continue if not display_sensitive and ev != 'AIRFLOW__CORE__UNIT_TEST_MODE': opt = '< hidden >' elif raw: opt = opt.replace('%', '%%') if display_source: opt = (opt, 'env var') cfg.setdefault(section.lower(), OrderedDict()).update( {key.lower(): opt}) # add bash commands for (section, key) in self.as_command_stdout: opt = self._get_cmd_option(section, key) if opt: if not display_sensitive: opt = '< hidden >' if display_source: opt = (opt, 'cmd') elif raw: opt = opt.replace('%', '%%') cfg.setdefault(section, OrderedDict()).update({key: opt}) del cfg[section][key + '_cmd'] return cfg
Allocate IDs for incomplete keys.
def allocate_ids(self, partial_keys): """ Allocate IDs for incomplete keys. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds :param partial_keys: a list of partial keys. :type partial_keys: list :return: a list of full keys. :rtype: list """ conn = self.get_conn() resp = (conn .projects() .allocateIds(projectId=self.project_id, body={'keys': partial_keys}) .execute(num_retries=self.num_retries)) return resp['keys']
Begins a new transaction.
def begin_transaction(self): """ Begins a new transaction. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction :return: a transaction handle. :rtype: str """ conn = self.get_conn() resp = (conn .projects() .beginTransaction(projectId=self.project_id, body={}) .execute(num_retries=self.num_retries)) return resp['transaction']
Commit a transaction, optionally creating, deleting or modifying some entities.
def commit(self, body): """ Commit a transaction, optionally creating, deleting or modifying some entities. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit :param body: the body of the commit request. :type body: dict :return: the response body of the commit request. :rtype: dict """ conn = self.get_conn() resp = (conn .projects() .commit(projectId=self.project_id, body=body) .execute(num_retries=self.num_retries)) return resp
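A sketch of a commit body in the shape the Datastore v1 REST API expects, combined with begin_transaction (the kind, property names, and hook variable are illustrative assumptions):

    # `datastore_hook` is assumed to be a configured DatastoreHook instance.
    txn = datastore_hook.begin_transaction()
    body = {
        "mode": "TRANSACTIONAL",
        "transaction": txn,
        "mutations": [{
            "upsert": {
                "key": {"path": [{"kind": "Task", "name": "sample_task"}]},
                "properties": {"done": {"booleanValue": False}},
            }
        }],
    }
    datastore_hook.commit(body=body)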
Lookup some entities by key.
def lookup(self, keys, read_consistency=None, transaction=None): """ Lookup some entities by key. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup :param keys: the keys to lookup. :type keys: list :param read_consistency: the read consistency to use. default, strong or eventual. Cannot be used with a transaction. :type read_consistency: str :param transaction: the transaction to use, if any. :type transaction: str :return: the response body of the lookup request. :rtype: dict """ conn = self.get_conn() body = {'keys': keys} if read_consistency: body['readConsistency'] = read_consistency if transaction: body['transaction'] = transaction resp = (conn .projects() .lookup(projectId=self.project_id, body=body) .execute(num_retries=self.num_retries)) return resp
Roll back a transaction.
def rollback(self, transaction): """ Roll back a transaction. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback :param transaction: the transaction to roll back. :type transaction: str """ conn = self.get_conn() conn.projects().rollback( projectId=self.project_id, body={'transaction': transaction} ).execute(num_retries=self.num_retries)
Run a query for entities.
def run_query(self, body): """ Run a query for entities. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery :param body: the body of the query request. :type body: dict :return: the batch of query results. :rtype: dict """ conn = self.get_conn() resp = (conn .projects() .runQuery(projectId=self.project_id, body=body) .execute(num_retries=self.num_retries)) return resp['batch']
Gets the latest state of a long-running operation.
def get_operation(self, name): """ Gets the latest state of a long-running operation. .. seealso:: https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get :param name: the name of the operation resource. :type name: str :return: a resource operation instance. :rtype: dict """ conn = self.get_conn() resp = (conn .projects() .operations() .get(name=name) .execute(num_retries=self.num_retries)) return resp
Deletes the long-running operation.
def delete_operation(self, name): """ Deletes the long-running operation. .. seealso:: https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete :param name: the name of the operation resource. :type name: str :return: none if successful. :rtype: dict """ conn = self.get_conn() resp = (conn .projects() .operations() .delete(name=name) .execute(num_retries=self.num_retries)) return resp
Poll backup operation state until it's completed.
def poll_operation_until_done(self, name, polling_interval_in_seconds): """ Poll backup operation state until it's completed. :param name: the name of the operation resource :type name: str :param polling_interval_in_seconds: The number of seconds to wait before calling another request. :type polling_interval_in_seconds: int :return: a resource operation instance. :rtype: dict """ while True: result = self.get_operation(name) state = result['metadata']['common']['state'] if state == 'PROCESSING': self.log.info('Operation is processing. Re-polling state in {} seconds' .format(polling_interval_in_seconds)) time.sleep(polling_interval_in_seconds) else: return result
Export entities from Cloud Datastore to Cloud Storage for backup.
def export_to_storage_bucket(self, bucket, namespace=None, entity_filter=None, labels=None): """ Export entities from Cloud Datastore to Cloud Storage for backup. .. note:: Keep in mind that this requests the Admin API not the Data API. .. seealso:: https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export :param bucket: The name of the Cloud Storage bucket. :type bucket: str :param namespace: The Cloud Storage namespace path. :type namespace: str :param entity_filter: Description of what data from the project is included in the export. :type entity_filter: dict :param labels: Client-assigned labels. :type labels: dict of str :return: a resource operation instance. :rtype: dict """ admin_conn = self.get_conn() output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace])) if not entity_filter: entity_filter = {} if not labels: labels = {} body = { 'outputUrlPrefix': output_uri_prefix, 'entityFilter': entity_filter, 'labels': labels, } resp = (admin_conn .projects() .export(projectId=self.project_id, body=body) .execute(num_retries=self.num_retries)) return resp
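Since export_to_storage_bucket returns a long-running operation, a typical pattern is to chain it with poll_operation_until_done (the bucket name, namespace, and interval are placeholders):

    op = datastore_hook.export_to_storage_bucket(bucket="my-backup-bucket", namespace="2019-01-01")
    # The Admin API returns an operation resource; poll its name until the state leaves PROCESSING.
    result = datastore_hook.poll_operation_until_done(op["name"], polling_interval_in_seconds=10)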
Import a backup from Cloud Storage to Cloud Datastore.
def import_from_storage_bucket(self, bucket, file, namespace=None, entity_filter=None, labels=None): """ Import a backup from Cloud Storage to Cloud Datastore. .. note:: Keep in mind that this requests the Admin API not the Data API. .. seealso:: https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import :param bucket: The name of the Cloud Storage bucket. :type bucket: str :param file: the metadata file written by the projects.export operation. :type file: str :param namespace: The Cloud Storage namespace path. :type namespace: str :param entity_filter: specify which kinds/namespaces are to be imported. :type entity_filter: dict :param labels: Client-assigned labels. :type labels: dict of str :return: a resource operation instance. :rtype: dict """ admin_conn = self.get_conn() input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file])) if not entity_filter: entity_filter = {} if not labels: labels = {} body = { 'inputUrl': input_url, 'entityFilter': entity_filter, 'labels': labels, } resp = (admin_conn .projects() .import_(projectId=self.project_id, body=body) .execute(num_retries=self.num_retries)) return resp
Publish a message to a topic or an endpoint.
def publish_to_target(self, target_arn, message): """ Publish a message to a topic or an endpoint. :param target_arn: either a TopicArn or an EndpointArn :type target_arn: str :param message: the default message you want to send :param message: str """ conn = self.get_conn() messages = { 'default': message } return conn.publish( TargetArn=target_arn, Message=json.dumps(messages), MessageStructure='json' )
Fetch the hostname using the callable from the config or using socket.getfqdn as a fallback.
def get_hostname(): """ Fetch the hostname using the callable from the config or using `socket.getfqdn` as a fallback. """ # First we attempt to fetch the callable path from the config. try: callable_path = conf.get('core', 'hostname_callable') except AirflowConfigException: callable_path = None # Then we handle the case when the config is missing or empty. This is the # default behavior. if not callable_path: return socket.getfqdn() # Since we have a callable path, we try to import and run it next. module_path, attr_name = callable_path.split(':') module = importlib.import_module(module_path) callable = getattr(module, attr_name) return callable()
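As the code shows, the callable path uses a module:attribute format. A sketch of wiring a custom resolver (the module name and function are assumptions):

    # my_company/hostname.py
    import socket

    def resolve_hostname():
        # return whatever identifier Airflow should record for this machine
        return socket.gethostname()

    # airflow.cfg
    # [core]
    # hostname_callable = my_company.hostname:resolve_hostname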
Retrieves connection to Cloud Natural Language service.
def get_conn(self): """ Retrieves connection to Cloud Natural Language service. :return: Cloud Natural Language service object :rtype: google.cloud.language_v1.LanguageServiceClient """ if not self._conn: self._conn = LanguageServiceClient(credentials=self._get_credentials()) return self._conn
Finds named entities in the text along with entity types, salience, mentions for each entity, and other properties.
def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None): """ Finds named entities in the text along with entity types, salience, mentions for each entity, and other properties. :param document: Input document. If a dict is provided, it must be of the same form as the protobuf message Document :type document: dict or class google.cloud.language_v1.types.Document :param encoding_type: The encoding type used by the API to calculate offsets. :type encoding_type: google.cloud.language_v1.types.EncodingType :param retry: A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. :type metadata: sequence[tuple[str, str]]] :rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse """ client = self.get_conn() return client.analyze_entities( document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata )
A convenience method that provides all the features that analyzeSentiment, analyzeEntities, and analyzeSyntax provide in one call.
def annotate_text(self, document, features, encoding_type=None, retry=None, timeout=None, metadata=None): """ A convenience method that provides all the features that analyzeSentiment, analyzeEntities, and analyzeSyntax provide in one call. :param document: Input document. If a dict is provided, it must be of the same form as the protobuf message Document :type document: dict or google.cloud.language_v1.types.Document :param features: The enabled features. If a dict is provided, it must be of the same form as the protobuf message Features :type features: dict or google.cloud.language_v1.enums.Features :param encoding_type: The encoding type used by the API to calculate offsets. :type encoding_type: google.cloud.language_v1.types.EncodingType :param retry: A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. :type metadata: sequence[tuple[str, str]]] :rtype: google.cloud.language_v1.types.AnnotateTextResponse """ client = self.get_conn() return client.annotate_text( document=document, features=features, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata, )
Classifies a document into categories.
def classify_text(self, document, retry=None, timeout=None, metadata=None): """ Classifies a document into categories. :param document: Input document. If a dict is provided, it must be of the same form as the protobuf message Document :type document: dict or class google.cloud.language_v1.types.Document :param retry: A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. :type metadata: sequence[tuple[str, str]]] :rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse """ client = self.get_conn() return client.classify_text(document=document, retry=retry, timeout=timeout, metadata=metadata)
Return the task object identified by the given dag_id and task_id.
def get_task(dag_id, task_id): """Return the task object identified by the given dag_id and task_id.""" dagbag = DagBag() # Check DAG exists. if dag_id not in dagbag.dags: error_message = "Dag id {} not found".format(dag_id) raise DagNotFound(error_message) # Get DAG object and check Task Exists dag = dagbag.get_dag(dag_id) if not dag.has_task(task_id): error_message = 'Task {} not found in dag {}'.format(task_id, dag_id) raise TaskNotFound(error_message) # Return the task. return dag.get_task(task_id)
Gets template fields for a specific operator class.
def get_template_field(env, fullname): """ Gets template fields for specific operator class. :param fullname: Full path to operator class. For example: ``airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator`` :return: List of template field :rtype: list[str] """ modname, classname = fullname.rsplit(".", 1) try: with mock(env.config.autodoc_mock_imports): mod = import_module(modname) except ImportError: raise RoleException("Error loading %s module." % (modname, )) clazz = getattr(mod, classname) if not clazz: raise RoleException("Error finding %s class in %s module." % (classname, modname)) template_fields = getattr(clazz, "template_fields") if not template_fields: raise RoleException( "Could not find the template fields for %s class in %s module." % (classname, modname) ) return list(template_fields)
A role that allows you to include a list of template fields in the middle of the text. This is especially useful when writing guides describing how to use the operator. The result is a list of fields where each field is shown in the literal block.
def template_field_role(app, typ, rawtext, text, lineno, inliner, options={}, content=[]): """ A role that allows you to include a list of template fields in the middle of the text. This is especially useful when writing guides describing how to use the operator. The result is a list of fields where each field is shorted in the literal block. Sample usage:: :template-fields:`airflow.contrib.operators.gcp_natural_language_operator.CloudLanguageAnalyzeSentimentOperator` For further information look at: * [http://docutils.sourceforge.net/docs/howto/rst-roles.html](Creating reStructuredText Interpreted Text Roles) """ text = utils.unescape(text) try: template_fields = get_template_field(app.env, text) except RoleException as e: msg = inliner.reporter.error("invalid class name %s \n%s" % (text, e, ), line=lineno) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] node = nodes.inline(rawtext=rawtext) for i, field in enumerate(template_fields): if i != 0: node += nodes.Text(", ") node += nodes.literal(field, "", nodes.Text(field)) return [node], []
Properly close pooled database connections
def dispose_orm(): """ Properly close pooled database connections """ log.debug("Disposing DB connection pool (PID %s)", os.getpid()) global engine global Session if Session: Session.remove() Session = None if engine: engine.dispose() engine = None
Ensures that certain subfolders of AIRFLOW_HOME are on the classpath
def prepare_classpath(): """ Ensures that certain subfolders of AIRFLOW_HOME are on the classpath """ if DAGS_FOLDER not in sys.path: sys.path.append(DAGS_FOLDER) # Add ./config/ for loading custom log parsers etc, or # airflow_local_settings etc. config_path = os.path.join(AIRFLOW_HOME, 'config') if config_path not in sys.path: sys.path.append(config_path) if PLUGINS_FOLDER not in sys.path: sys.path.append(PLUGINS_FOLDER)
Gets the returned Celery result from the Airflow task ID provided to the sensor, and returns True if the Celery result has finished execution.
def _check_task_id(self, context): """ Gets the returned Celery result from the Airflow task ID provided to the sensor, and returns True if the celery result has been finished execution. :param context: Airflow's execution context :type context: dict :return: True if task has been executed, otherwise False :rtype: bool """ ti = context['ti'] celery_result = ti.xcom_pull(task_ids=self.target_task_id) return celery_result.ready()
Return true if the ticket cache contains "conf" information as is found in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the Sun Java Krb5LoginModule in Java 6, so we need to take an action to work around it.
def detect_conf_var(): """Return true if the ticket cache contains "conf" information as is found in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the Sun Java Krb5LoginModule in Java6, so we need to take an action to work around it. """ ticket_cache = configuration.conf.get('kerberos', 'ccache') with open(ticket_cache, 'rb') as f: # Note: this file is binary, so we check against a bytearray. return b'X-CACHECONF:' in f.read()
Transforms a SQLAlchemy model instance into a dictionary
def alchemy_to_dict(obj): """ Transforms a SQLAlchemy model instance into a dictionary """ if not obj: return None d = {} for c in obj.__table__.columns: value = getattr(obj, c.name) if type(value) == datetime: value = value.isoformat() d[c.name] = value return d
Yield successive chunks of a given size from a list of items
def chunks(items, chunk_size): """ Yield successive chunks of a given size from a list of items """ if chunk_size <= 0: raise ValueError('Chunk size must be a positive integer') for i in range(0, len(items), chunk_size): yield items[i:i + chunk_size]
Reduce the given list of items by splitting it into chunks of the given size and passing each chunk through the reducer
def reduce_in_chunks(fn, iterable, initializer, chunk_size=0): """ Reduce the given list of items by splitting it into chunks of the given size and passing each chunk through the reducer """ if len(iterable) == 0: return initializer if chunk_size == 0: chunk_size = len(iterable) return reduce(fn, chunks(iterable, chunk_size), initializer)
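A small, self-contained illustration of the two helpers above:

    list(chunks([1, 2, 3, 4, 5], 2))   # -> [[1, 2], [3, 4], [5]]
    # Sum the list two items at a time: 0 + (1 + 2) + (3 + 4) + 5
    reduce_in_chunks(lambda acc, chunk: acc + sum(chunk), [1, 2, 3, 4, 5], 0, chunk_size=2)   # -> 15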
Given a number of tasks, builds a dependency chain.
def chain(*tasks): """ Given a number of tasks, builds a dependency chain. chain(task_1, task_2, task_3, task_4) is equivalent to task_1.set_downstream(task_2) task_2.set_downstream(task_3) task_3.set_downstream(task_4) """ for up_task, down_task in zip(tasks[:-1], tasks[1:]): up_task.set_downstream(down_task)
Returns a pretty ascii table from tuples
def pprinttable(rows): """Returns a pretty ascii table from tuples If namedtuple are used, the table will have headers """ if not rows: return if hasattr(rows[0], '_fields'): # if namedtuple headers = rows[0]._fields else: headers = ["col{}".format(i) for i in range(len(rows[0]))] lens = [len(s) for s in headers] for row in rows: for i in range(len(rows[0])): slenght = len("{}".format(row[i])) if slenght > lens[i]: lens[i] = slenght formats = [] hformats = [] for i in range(len(rows[0])): if isinstance(rows[0][i], int): formats.append("%%%dd" % lens[i]) else: formats.append("%%-%ds" % lens[i]) hformats.append("%%-%ds" % lens[i]) pattern = " | ".join(formats) hpattern = " | ".join(hformats) separator = "-+-".join(['-' * n for n in lens]) s = "" s += separator + '\n' s += (hpattern % tuple(headers)) + '\n' s += separator + '\n' def f(t): return "{}".format(t) if isinstance(t, basestring) else t for line in rows: s += pattern % tuple(f(t) for t in line) + '\n' s += separator + '\n' return s
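A quick usage sketch; with namedtuples the field names become the header row (the rows below are made up):

    from collections import namedtuple

    Row = namedtuple("Row", ["dag_id", "state", "count"])
    rows = [Row("example_dag", "success", 42), Row("other_dag", "failed", 3)]
    print(pprinttable(rows))   # ascii table with "dag_id | state | count" as the header row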
Tries really hard to terminate all children (including grandchildren). Will send sig (SIGTERM) to the process group of pid. If any process is alive after timeout, a SIGKILL will be sent.
def reap_process_group(pid, log, sig=signal.SIGTERM, timeout=DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM): """ Tries really hard to terminate all children (including grandchildren). Will send sig (SIGTERM) to the process group of pid. If any process is alive after timeout a SIGKILL will be send. :param log: log handler :param pid: pid to kill :param sig: signal type :param timeout: how much time a process has to terminate """ def on_terminate(p): log.info("Process %s (%s) terminated with exit code %s", p, p.pid, p.returncode) if pid == os.getpid(): raise RuntimeError("I refuse to kill myself") parent = psutil.Process(pid) children = parent.children(recursive=True) children.append(parent) try: pg = os.getpgid(pid) except OSError as err: # Skip if not such process - we experience a race and it just terminated if err.errno == errno.ESRCH: return raise log.info("Sending %s to GPID %s", sig, pg) os.killpg(os.getpgid(pid), sig) gone, alive = psutil.wait_procs(children, timeout=timeout, callback=on_terminate) if alive: for p in alive: log.warn("process %s (%s) did not respond to SIGTERM. Trying SIGKILL", p, pid) os.killpg(os.getpgid(pid), signal.SIGKILL) gone, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate) if alive: for p in alive: log.error("Process %s (%s) could not be killed. Giving up.", p, p.pid)
Given task instance, try_number, filename_template, return the rendered log filename
def render_log_filename(ti, try_number, filename_template): """ Given task instance, try_number, filename_template, return the rendered log filename :param ti: task instance :param try_number: try_number of the task :param filename_template: filename template, which can be jinja template or python string template """ filename_template, filename_jinja_template = parse_template_string(filename_template) if filename_jinja_template: jinja_context = ti.get_template_context() jinja_context['try_number'] = try_number return filename_jinja_template.render(**jinja_context) return filename_template.format(dag_id=ti.dag_id, task_id=ti.task_id, execution_date=ti.execution_date.isoformat(), try_number=try_number)
Return the task instance identified by the given dag_id, task_id and execution_date.
def get_task_instance(dag_id, task_id, execution_date): """Return the task object identified by the given dag_id and task_id.""" dagbag = DagBag() # Check DAG exists. if dag_id not in dagbag.dags: error_message = "Dag id {} not found".format(dag_id) raise DagNotFound(error_message) # Get DAG object and check Task Exists dag = dagbag.get_dag(dag_id) if not dag.has_task(task_id): error_message = 'Task {} not found in dag {}'.format(task_id, dag_id) raise TaskNotFound(error_message) # Get DagRun object and check that it exists dagrun = dag.get_dagrun(execution_date=execution_date) if not dagrun: error_message = ('Dag Run for date {} not found in dag {}' .format(execution_date, dag_id)) raise DagRunNotFound(error_message) # Get task instance object and check that it exists task_instance = dagrun.get_task_instance(task_id) if not task_instance: error_message = ('Task {} instance for date {} not found' .format(task_id, execution_date)) raise TaskInstanceNotFound(error_message) return task_instance
Integrate plugins to the context
def _integrate_plugins(): """Integrate plugins to the context""" import sys from airflow.plugins_manager import operators_modules for operators_module in operators_modules: sys.modules[operators_module.__name__] = operators_module globals()[operators_module._name] = operators_module
Returns a Google Cloud Dataproc service object.
def get_conn(self): """Returns a Google Cloud Dataproc service object.""" http_authorized = self._authorize() return build( 'dataproc', self.api_version, http=http_authorized, cache_discovery=False)
Waits for a Google Cloud Dataproc Operation to complete.
def wait(self, operation): """Awaits for Google Cloud Dataproc Operation to complete.""" submitted = _DataProcOperation(self.get_conn(), operation, self.num_retries) submitted.wait_for_done()
Coerces content, or all values of content if it is a dict, to a string. The function will throw if content contains non-string or non-numeric types.
def _deep_string_coerce(content, json_path='json'): """ Coerces content or all values of content if it is a dict to a string. The function will throw if content contains non-string or non-numeric types. The reason why we have this function is because the ``self.json`` field must be a dict with only string values. This is because ``render_template`` will fail for numerical values. """ c = _deep_string_coerce if isinstance(content, six.string_types): return content elif isinstance(content, six.integer_types + (float,)): # Databricks can tolerate either numeric or string types in the API backend. return str(content) elif isinstance(content, (list, tuple)): return [c(e, '{0}[{1}]'.format(json_path, i)) for i, e in enumerate(content)] elif isinstance(content, dict): return {k: c(v, '{0}[{1}]'.format(json_path, k)) for k, v in list(content.items())} else: param_type = type(content) msg = 'Type {0} used for parameter {1} is not a number or a string' \ .format(param_type, json_path) raise AirflowException(msg)
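A short illustration of the coercion rules (the input dict is made up):

    _deep_string_coerce({
        "notebook_params": {"retries": 3, "threshold": 0.5},
        "timeouts": [30, 60],
    })
    # -> {'notebook_params': {'retries': '3', 'threshold': '0.5'}, 'timeouts': ['30', '60']}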
Handles the Airflow + Databricks lifecycle logic for a Databricks operator
def _handle_databricks_operator_execution(operator, hook, log, context): """ Handles the Airflow + Databricks lifecycle logic for a Databricks operator :param operator: Databricks operator being handled :param context: Airflow context """ if operator.do_xcom_push: context['ti'].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id) log.info('Run submitted with run_id: %s', operator.run_id) run_page_url = hook.get_run_page_url(operator.run_id) if operator.do_xcom_push: context['ti'].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url) log.info('View run status, Spark UI, and logs at %s', run_page_url) while True: run_state = hook.get_run_state(operator.run_id) if run_state.is_terminal: if run_state.is_successful: log.info('%s completed successfully.', operator.task_id) log.info('View run status, Spark UI, and logs at %s', run_page_url) return else: error_message = '{t} failed with terminal state: {s}'.format( t=operator.task_id, s=run_state) raise AirflowException(error_message) else: log.info('%s in run state: %s', operator.task_id, run_state) log.info('View run status, Spark UI, and logs at %s', run_page_url) log.info('Sleeping for %s seconds.', operator.polling_period_seconds) time.sleep(operator.polling_period_seconds)
Run a pig script using the pig cli
def run_cli(self, pig, verbose=True): """ Run an pig script using the pig cli >>> ph = PigCliHook() >>> result = ph.run_cli("ls /;") >>> ("hdfs://" in result) True """ with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir: with NamedTemporaryFile(dir=tmp_dir) as f: f.write(pig.encode('utf-8')) f.flush() fname = f.name pig_bin = 'pig' cmd_extra = [] pig_cmd = [pig_bin, '-f', fname] + cmd_extra if self.pig_properties: pig_properties_list = self.pig_properties.split() pig_cmd.extend(pig_properties_list) if verbose: self.log.info("%s", " ".join(pig_cmd)) sp = subprocess.Popen( pig_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmp_dir, close_fds=True) self.sp = sp stdout = '' for line in iter(sp.stdout.readline, b''): stdout += line.decode('utf-8') if verbose: self.log.info(line.strip()) sp.wait() if sp.returncode: raise AirflowException(stdout) return stdout
Fetch and return the state of the given celery task. The scope of this function is global so that it can be called by subprocesses in the pool.
def fetch_celery_task_state(celery_task): """ Fetch and return the state of the given celery task. The scope of this function is global so that it can be called by subprocesses in the pool. :param celery_task: a tuple of the Celery task key and the async Celery object used to fetch the task's state :type celery_task: tuple(str, celery.result.AsyncResult) :return: a tuple of the Celery task key and the Celery state of the task :rtype: tuple[str, str] """ try: with timeout(seconds=2): # Accessing state property of celery task will make actual network request # to get the current state of the task. res = (celery_task[0], celery_task[1].state) except Exception as e: exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0], traceback.format_exc()) res = ExceptionWithTraceback(e, exception_traceback) return res
How many Celery tasks should each worker process send.
def _num_tasks_per_send_process(self, to_send_count): """ How many Celery tasks should each worker process send. :return: Number of tasks that should be sent per process :rtype: int """ return max(1, int(math.ceil(1.0 * to_send_count / self._sync_parallelism)))
How many Celery task states should be fetched by each worker process.
def _num_tasks_per_fetch_process(self): """ How many Celery tasks should be sent to each worker process. :return: Number of tasks that should be used per process :rtype: int """ return max(1, int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism)))
Like a Python builtin dict object, setdefault returns the current value for a key, and if it isn't there, stores the default value and returns it.
def setdefault(cls, key, default, deserialize_json=False): """ Like a Python builtin dict object, setdefault returns the current value for a key, and if it isn't there, stores the default value and returns it. :param key: Dict key for this Variable :type key: str :param default: Default value to set and return if the variable isn't already in the DB :type default: Mixed :param deserialize_json: Store this as a JSON encoded value in the DB and un-encode it when retrieving a value :return: Mixed """ obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json) if obj is None: if default is not None: Variable.set(key, default, serialize_json=deserialize_json) return default else: raise ValueError('Default Value must be set') else: return obj
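Typical usage inside a DAG file might look like this (the key and default value are placeholders):

    from airflow.models import Variable

    # Returns the stored value if "feature_flags" already exists,
    # otherwise stores the default as JSON and returns it.
    flags = Variable.setdefault("feature_flags", {"use_new_path": False}, deserialize_json=True)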
Returns a Google MLEngine service object.
def get_conn(self): """ Returns a Google MLEngine service object. """ authed_http = self._authorize() return build('ml', 'v1', http=authed_http, cache_discovery=False)
Launches an MLEngine job and waits for it to reach a terminal state.
def create_job(self, project_id, job, use_existing_job_fn=None): """ Launches a MLEngine job and wait for it to reach a terminal state. :param project_id: The Google Cloud project id within which MLEngine job will be launched. :type project_id: str :param job: MLEngine Job object that should be provided to the MLEngine API, such as: :: { 'jobId': 'my_job_id', 'trainingInput': { 'scaleTier': 'STANDARD_1', ... } } :type job: dict :param use_existing_job_fn: In case that a MLEngine job with the same job_id already exist, this method (if provided) will decide whether we should use this existing job, continue waiting for it to finish and returning the job object. It should accepts a MLEngine job object, and returns a boolean value indicating whether it is OK to reuse the existing job. If 'use_existing_job_fn' is not provided, we by default reuse the existing MLEngine job. :type use_existing_job_fn: function :return: The MLEngine job object if the job successfully reach a terminal state (which might be FAILED or CANCELLED state). :rtype: dict """ request = self._mlengine.projects().jobs().create( parent='projects/{}'.format(project_id), body=job) job_id = job['jobId'] try: request.execute() except HttpError as e: # 409 means there is an existing job with the same job ID. if e.resp.status == 409: if use_existing_job_fn is not None: existing_job = self._get_job(project_id, job_id) if not use_existing_job_fn(existing_job): self.log.error( 'Job with job_id %s already exist, but it does ' 'not match our expectation: %s', job_id, existing_job ) raise self.log.info( 'Job with job_id %s already exist. Will waiting for it to finish', job_id ) else: self.log.error('Failed to create MLEngine job: {}'.format(e)) raise return self._wait_for_job_done(project_id, job_id)
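A sketch of the use_existing_job_fn callback described above: it receives the existing MLEngine job and decides whether it can be reused (all names and the job dict are illustrative):

    def same_training_input(existing_job):
        # Reuse the existing job only if it was submitted with the same training input.
        return existing_job.get("trainingInput") == my_job["trainingInput"]

    # `mlengine_hook` is assumed to be a configured MLEngineHook; `my_job` follows the
    # job dict shown in the docstring (with 'jobId' and 'trainingInput').
    finished_job = mlengine_hook.create_job(
        project_id="my-gcp-project",
        job=my_job,
        use_existing_job_fn=same_training_input,
    )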
Gets an MLEngine job based on the job name.
def _get_job(self, project_id, job_id): """ Gets a MLEngine job based on the job name. :return: MLEngine job object if succeed. :rtype: dict Raises: googleapiclient.errors.HttpError: if HTTP error is returned from server """ job_name = 'projects/{}/jobs/{}'.format(project_id, job_id) request = self._mlengine.projects().jobs().get(name=job_name) while True: try: return request.execute() except HttpError as e: if e.resp.status == 429: # polling after 30 seconds when quota failure occurs time.sleep(30) else: self.log.error('Failed to get MLEngine job: {}'.format(e)) raise
Waits for the Job to reach a terminal state.
def _wait_for_job_done(self, project_id, job_id, interval=30): """ Waits for the Job to reach a terminal state. This method will periodically check the job state until the job reach a terminal state. Raises: googleapiclient.errors.HttpError: if HTTP error is returned when getting the job """ if interval <= 0: raise ValueError("Interval must be > 0") while True: job = self._get_job(project_id, job_id) if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']: return job time.sleep(interval)
Creates the Version on Google Cloud ML Engine.
def create_version(self, project_id, model_name, version_spec): """ Creates the Version on Google Cloud ML Engine. Returns the operation if the version was created successfully and raises an error otherwise. """ parent_name = 'projects/{}/models/{}'.format(project_id, model_name) create_request = self._mlengine.projects().models().versions().create( parent=parent_name, body=version_spec) response = create_request.execute() get_request = self._mlengine.projects().operations().get( name=response['name']) return _poll_with_exponential_delay( request=get_request, max_n=9, is_done_func=lambda resp: resp.get('done', False), is_error_func=lambda resp: resp.get('error', None) is not None)
Sets a version to be the default. Blocks until finished.
def set_default_version(self, project_id, model_name, version_name): """ Sets a version to be the default. Blocks until finished. """ full_version_name = 'projects/{}/models/{}/versions/{}'.format( project_id, model_name, version_name) request = self._mlengine.projects().models().versions().setDefault( name=full_version_name, body={}) try: response = request.execute() self.log.info('Successfully set version: %s to default', response) return response except HttpError as e: self.log.error('Something went wrong: %s', e) raise
Lists all available versions of a model. Blocks until finished.
def list_versions(self, project_id, model_name): """ Lists all available versions of a model. Blocks until finished. """ result = [] full_parent_name = 'projects/{}/models/{}'.format( project_id, model_name) request = self._mlengine.projects().models().versions().list( parent=full_parent_name, pageSize=100) response = request.execute() next_page_token = response.get('nextPageToken', None) result.extend(response.get('versions', [])) while next_page_token is not None: next_request = self._mlengine.projects().models().versions().list( parent=full_parent_name, pageToken=next_page_token, pageSize=100) response = next_request.execute() next_page_token = response.get('nextPageToken', None) result.extend(response.get('versions', [])) time.sleep(5) return result
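Continuing the same hypothetical hook, a short sketch that pages through all versions of a model and promotes one to the default (project and model names are placeholders):

from airflow.contrib.hooks.gcp_mlengine_hook import MLEngineHook

hook = MLEngineHook(gcp_conn_id='google_cloud_default')
versions = hook.list_versions('example-project', 'example_model')
for version in versions:
    print(version['name'])
hook.set_default_version('example-project', 'example_model', 'v1')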
Deletes the given version of a model. Blocks until finished.
def delete_version(self, project_id, model_name, version_name): """ Deletes the given version of a model. Blocks until finished. """ full_name = 'projects/{}/models/{}/versions/{}'.format( project_id, model_name, version_name) delete_request = self._mlengine.projects().models().versions().delete( name=full_name) response = delete_request.execute() get_request = self._mlengine.projects().operations().get( name=response['name']) return _poll_with_exponential_delay( request=get_request, max_n=9, is_done_func=lambda resp: resp.get('done', False), is_error_func=lambda resp: resp.get('error', None) is not None)
Create a Model. Blocks until finished.
def create_model(self, project_id, model):
    """
    Create a Model. Blocks until finished.
    """
    if not model['name']:
        raise ValueError("Model name must be provided and "
                         "cannot be an empty string")
    project = 'projects/{}'.format(project_id)

    request = self._mlengine.projects().models().create(
        parent=project, body=model)
    return request.execute()
Gets a Model. Blocks until finished.
def get_model(self, project_id, model_name):
    """
    Gets a Model. Blocks until finished.
    """
    if not model_name:
        raise ValueError("Model name must be provided and "
                         "cannot be an empty string")
    full_model_name = 'projects/{}/models/{}'.format(
        project_id, model_name)
    request = self._mlengine.projects().models().get(name=full_model_name)
    try:
        return request.execute()
    except HttpError as e:
        if e.resp.status == 404:
            self.log.error('Model was not found: %s', e)
            return None
        raise
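A minimal sketch combining create_model and get_model, still assuming MLEngineHook and placeholder names:

from airflow.contrib.hooks.gcp_mlengine_hook import MLEngineHook

hook = MLEngineHook(gcp_conn_id='google_cloud_default')
hook.create_model('example-project', {'name': 'example_model'})
model = hook.get_model('example-project', 'example_model')
if model is None:
    print('Model not found')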
Executes command received and stores result state in queue.
def execute_work(self, key, command): """ Executes command received and stores result state in queue. :param key: the key to identify the TI :type key: tuple(dag_id, task_id, execution_date) :param command: the command to execute :type command: str """ if key is None: return self.log.info("%s running %s", self.__class__.__name__, command) try: subprocess.check_call(command, close_fds=True) state = State.SUCCESS except subprocess.CalledProcessError as e: state = State.FAILED self.log.error("Failed to execute task %s.", str(e)) # TODO: Why is this commented out? # raise e self.result_queue.put((key, state))
Write batch items to dynamodb table with provisioned throughput capacity.
def write_batch_data(self, items):
    """
    Write batch items to dynamodb table with provisioned throughput capacity.
    """
    dynamodb_conn = self.get_conn()

    try:
        table = dynamodb_conn.Table(self.table_name)

        with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch:
            for item in items:
                batch.put_item(Item=item)
        return True
    except Exception as general_error:
        raise AirflowException(
            'Failed to insert items in dynamodb, error: {error}'.format(
                error=str(general_error)
            )
        )
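A minimal usage sketch, assuming this method lives on Airflow's AwsDynamoDBHook (airflow.contrib.hooks.aws_dynamodb_hook); the table name, key, and region are placeholders:

from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook

hook = AwsDynamoDBHook(
    aws_conn_id='aws_default',
    table_name='example_table',
    table_keys=['item_id'],
    region_name='us-east-1',
)
# Items must carry the table's key attributes; duplicates on the key are overwritten.
hook.write_batch_data([
    {'item_id': '1', 'payload': 'first'},
    {'item_id': '2', 'payload': 'second'},
])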
Integrate plugins to the context.
def _integrate_plugins(): """Integrate plugins to the context.""" from airflow.plugins_manager import executors_modules for executors_module in executors_modules: sys.modules[executors_module.__name__] = executors_module globals()[executors_module._name] = executors_module
Creates a new instance of the configured executor if none exists and returns it
def get_default_executor(): """Creates a new instance of the configured executor if none exists and returns it""" global DEFAULT_EXECUTOR if DEFAULT_EXECUTOR is not None: return DEFAULT_EXECUTOR executor_name = configuration.conf.get('core', 'EXECUTOR') DEFAULT_EXECUTOR = _get_executor(executor_name) log = LoggingMixin().log log.info("Using executor %s", executor_name) return DEFAULT_EXECUTOR
Creates a new instance of the named executor. In case the executor name is not known in airflow, look for it in the plugins
def _get_executor(executor_name):
    """
    Creates a new instance of the named executor.
    In case the executor name is not known in airflow,
    look for it in the plugins
    """
    if executor_name == Executors.LocalExecutor:
        return LocalExecutor()
    elif executor_name == Executors.SequentialExecutor:
        return SequentialExecutor()
    elif executor_name == Executors.CeleryExecutor:
        from airflow.executors.celery_executor import CeleryExecutor
        return CeleryExecutor()
    elif executor_name == Executors.DaskExecutor:
        from airflow.executors.dask_executor import DaskExecutor
        return DaskExecutor()
    elif executor_name == Executors.KubernetesExecutor:
        from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
        return KubernetesExecutor()
    else:
        # Loading plugins
        _integrate_plugins()
        executor_path = executor_name.split('.')
        if len(executor_path) != 2:
            raise AirflowException(
                "Executor {0} not supported: "
                "please specify in format plugin_module.executor".format(executor_name))

        if executor_path[0] in globals():
            return globals()[executor_path[0]].__dict__[executor_path[1]]()
        else:
            raise AirflowException("Executor {0} not supported.".format(executor_name))
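A hedged sketch of the two name forms the function accepts; the executor name normally comes from the [core] executor setting shown in get_default_executor, and the plugin module below is hypothetical (it would have to be registered as an Airflow plugin for the call to succeed):

# Built-in executors are matched by bare name.
local_executor = _get_executor('LocalExecutor')

# Anything else must use the "plugin_module.ExecutorClass" form and is
# resolved after _integrate_plugins() loads executor plugins into globals().
plugin_executor = _get_executor('my_executor_plugin.MyCustomExecutor')  # hypothetical plugin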
Handles error callbacks when using Segment with segment_debug_mode set to True
def on_error(self, error, items): """ Handles error callbacks when using Segment with segment_debug_mode set to True """ self.log.error('Encountered Segment error: {segment_error} with ' 'items: {with_items}'.format(segment_error=error, with_items=items)) raise AirflowException('Segment error: {}'.format(error))
Launches the pod synchronously and waits for completion.
def run_pod(self, pod, startup_timeout=120, get_logs=True):
    # type: (Pod, int, bool) -> Tuple[State, Optional[str]]
    """
    Launches the pod synchronously and waits for completion.

    Args:
        pod (Pod):
        startup_timeout (int): Timeout for startup of the pod (if pod is
            pending for too long, considers task a failure)
    """
    resp = self.run_pod_async(pod)
    curr_time = dt.now()
    if resp.status.start_time is None:
        while self.pod_not_started(pod):
            delta = dt.now() - curr_time
            if delta.seconds >= startup_timeout:
                raise AirflowException("Pod took too long to start")
            time.sleep(1)
        self.log.debug('Pod not yet started')

    return self._monitor_pod(pod, get_logs)
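A minimal sketch, assuming this method belongs to Airflow's PodLauncher (airflow.contrib.kubernetes.pod_launcher) and that Pod accepts image/envs/cmds arguments roughly as shown; treat the exact constructor signature and all names as assumptions:

from airflow.contrib.kubernetes.pod import Pod
from airflow.contrib.kubernetes.pod_launcher import PodLauncher

launcher = PodLauncher()
pod = Pod(
    image='python:3.6',
    envs={},
    cmds=['python', '-c', 'print("hello from the pod")'],
    name='example-pod',          # hypothetical pod name
    namespace='default',
)
# Blocks until the pod reaches a terminal state; returns the final state
# and, when get_logs is True, the captured output.
state, result = launcher.run_pod(pod, startup_timeout=300, get_logs=True)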
Returns a mssql connection object
def get_conn(self): """ Returns a mssql connection object """ conn = self.get_connection(self.mssql_conn_id) conn = pymssql.connect( server=conn.host, user=conn.login, password=conn.password, database=self.schema or conn.schema, port=conn.port) return conn
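A short usage sketch, assuming this method is part of Airflow's MsSqlHook (airflow.hooks.mssql_hook); the connection id, schema, and query are placeholders:

from airflow.hooks.mssql_hook import MsSqlHook

hook = MsSqlHook(mssql_conn_id='mssql_default', schema='example_db')
conn = hook.get_conn()
cursor = conn.cursor()
cursor.execute('SELECT 1')
print(cursor.fetchone())
conn.close()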
Call the SparkSubmitHook to run the provided spark job
def execute(self, context): """ Call the SparkSubmitHook to run the provided spark job """ self._hook = SparkSubmitHook( conf=self._conf, conn_id=self._conn_id, files=self._files, py_files=self._py_files, archives=self._archives, driver_class_path=self._driver_class_path, jars=self._jars, java_class=self._java_class, packages=self._packages, exclude_packages=self._exclude_packages, repositories=self._repositories, total_executor_cores=self._total_executor_cores, executor_cores=self._executor_cores, executor_memory=self._executor_memory, driver_memory=self._driver_memory, keytab=self._keytab, principal=self._principal, name=self._name, num_executors=self._num_executors, application_args=self._application_args, env_vars=self._env_vars, verbose=self._verbose, spark_binary=self._spark_binary ) self._hook.submit(self._application)
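A hedged sketch of wiring this operator into a DAG, assuming it is Airflow's SparkSubmitOperator (airflow.contrib.operators.spark_submit_operator); the DAG id, application path, and connection id are placeholders:

from datetime import datetime

from airflow import DAG
from airflow.contrib.operators.spark_submit_operator import SparkSubmitOperator

dag = DAG('example_spark_dag', start_date=datetime(2019, 1, 1), schedule_interval=None)

submit_job = SparkSubmitOperator(
    task_id='submit_spark_job',
    application='/path/to/example_app.py',   # hypothetical application
    conn_id='spark_default',
    executor_cores=2,
    executor_memory='2g',
    name='example_spark_job',
    dag=dag,
)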
Trigger a new dag run for a Dag with an execution date of now unless specified in the data.
def trigger_dag(dag_id): """ Trigger a new dag run for a Dag with an execution date of now unless specified in the data. """ data = request.get_json(force=True) run_id = None if 'run_id' in data: run_id = data['run_id'] conf = None if 'conf' in data: conf = data['conf'] execution_date = None if 'execution_date' in data and data['execution_date'] is not None: execution_date = data['execution_date'] # Convert string datetime into actual datetime try: execution_date = timezone.parse(execution_date) except ValueError: error_message = ( 'Given execution date, {}, could not be identified ' 'as a date. Example date format: 2015-11-16T14:34:15+00:00' .format(execution_date)) _log.info(error_message) response = jsonify({'error': error_message}) response.status_code = 400 return response try: dr = trigger.trigger_dag(dag_id, run_id, conf, execution_date) except AirflowException as err: _log.error(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response if getattr(g, 'user', None): _log.info("User %s created %s", g.user, dr) response = jsonify(message="Created {}".format(dr)) return response
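A hedged client-side sketch; it assumes this handler is mounted at the experimental REST endpoint of a webserver reachable at localhost:8080, and the URL, dag id, and conf payload are placeholders:

import requests

response = requests.post(
    'http://localhost:8080/api/experimental/dags/example_dag/dag_runs',
    json={
        'conf': {'key': 'value'},
        'execution_date': '2019-01-01T00:00:00+00:00',
    },
)
print(response.status_code, response.json())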
Delete all DB records related to the specified Dag.
def delete_dag(dag_id): """ Delete all DB records related to the specified Dag. """ try: count = delete.delete_dag(dag_id) except AirflowException as err: _log.error(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response return jsonify(message="Removed {} record(s)".format(count), count=count)
Returns a list of Dag Runs for a specific DAG ID.
def dag_runs(dag_id): """ Returns a list of Dag Runs for a specific DAG ID. :query param state: a query string parameter '?state=queued|running|success...' :param dag_id: String identifier of a DAG :return: List of DAG runs of a DAG with requested state, or all runs if the state is not specified """ try: state = request.args.get('state') dagruns = get_dag_runs(dag_id, state) except AirflowException as err: _log.info(err) response = jsonify(error="{}".format(err)) response.status_code = 400 return response return jsonify(dagruns)
Return python code of a given dag_id.
def get_dag_code(dag_id): """Return python code of a given dag_id.""" try: return get_code(dag_id) except AirflowException as err: _log.info(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response
Returns a JSON with a task's public instance variables.
def task_info(dag_id, task_id): """Returns a JSON with a task's public instance variables. """ try: info = get_task(dag_id, task_id) except AirflowException as err: _log.info(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response # JSONify and return. fields = {k: str(v) for k, v in vars(info).items() if not k.startswith('_')} return jsonify(fields)
(Un)pauses a dag
def dag_paused(dag_id, paused): """(Un)pauses a dag""" DagModel = models.DagModel with create_session() as session: orm_dag = ( session.query(DagModel) .filter(DagModel.dag_id == dag_id).first() ) if paused == 'true': orm_dag.is_paused = True else: orm_dag.is_paused = False session.merge(orm_dag) session.commit() return jsonify({'response': 'ok'})
Returns a JSON with a task instance's public instance variables. The format for the exec_date is expected to be "YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will of course need to have been encoded for URL in the request.
def task_instance_info(dag_id, execution_date, task_id): """ Returns a JSON with a task instance's public instance variables. The format for the exec_date is expected to be "YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will of course need to have been encoded for URL in the request. """ # Convert string datetime into actual datetime try: execution_date = timezone.parse(execution_date) except ValueError: error_message = ( 'Given execution date, {}, could not be identified ' 'as a date. Example date format: 2015-11-16T14:34:15+00:00' .format(execution_date)) _log.info(error_message) response = jsonify({'error': error_message}) response.status_code = 400 return response try: info = get_task_instance(dag_id, task_id, execution_date) except AirflowException as err: _log.info(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response # JSONify and return. fields = {k: str(v) for k, v in vars(info).items() if not k.startswith('_')} return jsonify(fields)
Returns a JSON with a dag_run's public instance variables. The format for the exec_date is expected to be "YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will of course need to have been encoded for URL in the request.
def dag_run_status(dag_id, execution_date): """ Returns a JSON with a dag_run's public instance variables. The format for the exec_date is expected to be "YYYY-mm-DDTHH:MM:SS", for example: "2016-11-16T11:34:15". This will of course need to have been encoded for URL in the request. """ # Convert string datetime into actual datetime try: execution_date = timezone.parse(execution_date) except ValueError: error_message = ( 'Given execution date, {}, could not be identified ' 'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format( execution_date)) _log.info(error_message) response = jsonify({'error': error_message}) response.status_code = 400 return response try: info = get_dag_run_state(dag_id, execution_date) except AirflowException as err: _log.info(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response return jsonify(info)
Get all pools.
def get_pools(): """Get all pools.""" try: pools = pool_api.get_pools() except AirflowException as err: _log.error(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response else: return jsonify([p.to_json() for p in pools])
Create a pool.
def create_pool(): """Create a pool.""" params = request.get_json(force=True) try: pool = pool_api.create_pool(**params) except AirflowException as err: _log.error(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response else: return jsonify(pool.to_json())
Delete pool.
def delete_pool(name): """Delete pool.""" try: pool = pool_api.delete_pool(name=name) except AirflowException as err: _log.error(err) response = jsonify(error="{}".format(err)) response.status_code = err.status_code return response else: return jsonify(pool.to_json())
Create a new container group
def create_or_update(self, resource_group, name, container_group): """ Create a new container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :param container_group: the properties of the container group :type container_group: azure.mgmt.containerinstance.models.ContainerGroup """ self.connection.container_groups.create_or_update(resource_group, name, container_group)
Get the state and exitcode of a container group
def get_state_exitcode_details(self, resource_group, name):
    """
    Get the state and exitcode of a container group

    :param resource_group: the name of the resource group
    :type resource_group: str
    :param name: the name of the container group
    :type name: str
    :return: A tuple with the state, exitcode, and details.
        If the exitcode is unknown, 0 is returned.
    :rtype: tuple(state, exitcode, details)
    """
    current_state = self._get_instance_view(resource_group, name).current_state
    return (current_state.state,
            current_state.exit_code,
            current_state.detail_status)
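A small sketch of polling a container group with the two methods above, assuming they belong to Airflow's AzureContainerInstanceHook (airflow.contrib.hooks.azure_container_instance_hook); the connection id and resource names are placeholders:

from airflow.contrib.hooks.azure_container_instance_hook import AzureContainerInstanceHook

hook = AzureContainerInstanceHook(conn_id='azure_default')  # assumed connection id
state, exit_code, detail = hook.get_state_exitcode_details(
    resource_group='example-rg',
    name='example-container-group',
)
print(state, exit_code, detail)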