INSTRUCTION: string (lengths 1 to 8.43k)
RESPONSE: string (lengths 75 to 104k)
Get the messages of a container group
def get_messages(self, resource_group, name): """ Get the messages of a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :return: A list of the event messages :rtype: list[str] """ instance_view = self._get_instance_view(resource_group, name) return [event.message for event in instance_view.events]
Get the tail from logs of a container group
def get_logs(self, resource_group, name, tail=1000): """ Get the tail from logs of a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :param tail: the size of the tail :type tail: int :return: A list of log messages :rtype: list[str] """ logs = self.connection.container.list_logs(resource_group, name, name, tail=tail) return logs.content.splitlines(True)
Delete a container group
def delete(self, resource_group, name): """ Delete a container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str """ self.connection.container_groups.delete(resource_group, name)
Test if a container group exists
def exists(self, resource_group, name): """ Test if a container group exists :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str """ for container in self.connection.container_groups.list_by_resource_group(resource_group): if container.name == name: return True return False
Function decorator that looks for an argument named default_args and fills in the unspecified arguments from it.
def apply_defaults(func): """ Function decorator that Looks for an argument named "default_args", and fills the unspecified arguments from it. Since python2.* isn't clear about which arguments are missing when calling a function, and that this can be quite confusing with multi-level inheritance and argument defaults, this decorator also alerts with specific information about the missing arguments. """ # Cache inspect.signature for the wrapper closure to avoid calling it # at every decorated invocation. This is separate sig_cache created # per decoration, i.e. each function decorated using apply_defaults will # have a different sig_cache. sig_cache = signature(func) non_optional_args = { name for (name, param) in sig_cache.parameters.items() if param.default == param.empty and param.name != 'self' and param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)} @wraps(func) def wrapper(*args, **kwargs): if len(args) > 1: raise AirflowException( "Use keyword arguments when initializing operators") dag_args = {} dag_params = {} dag = kwargs.get('dag', None) or settings.CONTEXT_MANAGER_DAG if dag: dag_args = copy(dag.default_args) or {} dag_params = copy(dag.params) or {} params = {} if 'params' in kwargs: params = kwargs['params'] dag_params.update(params) default_args = {} if 'default_args' in kwargs: default_args = kwargs['default_args'] if 'params' in default_args: dag_params.update(default_args['params']) del default_args['params'] dag_args.update(default_args) default_args = dag_args for arg in sig_cache.parameters: if arg not in kwargs and arg in default_args: kwargs[arg] = default_args[arg] missing_args = list(non_optional_args - set(kwargs)) if missing_args: msg = "Argument {0} is required".format(missing_args) raise AirflowException(msg) kwargs['params'] = dag_params result = func(*args, **kwargs) return result return wrapper
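A minimal usage sketch (the operator class and its arguments below are hypothetical, not part of the source): applying the decorator to an operator's __init__ lets required arguments be supplied either at the call site or via the enclosing DAG's default_args, with a descriptive AirflowException raised when one is missing.

# Hypothetical operator for illustration only.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults


class MyCopyOperator(BaseOperator):

    @apply_defaults
    def __init__(self, source, destination, *args, **kwargs):
        super(MyCopyOperator, self).__init__(*args, **kwargs)
        # 'source' and 'destination' are filled from the call site or from
        # the DAG's default_args; otherwise apply_defaults raises.
        self.source = source
        self.destination = destination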
Builds an ingest query for an HDFS TSV load.
def construct_ingest_query(self, static_path, columns): """ Builds an ingest query for an HDFS TSV load. :param static_path: The path on hdfs where the data is :type static_path: str :param columns: List of all the columns that are available :type columns: list """ # backward compatibility for num_shards, # but target_partition_size is the default setting # and overwrites the num_shards num_shards = self.num_shards target_partition_size = self.target_partition_size if self.target_partition_size == -1: if self.num_shards == -1: target_partition_size = DEFAULT_TARGET_PARTITION_SIZE else: num_shards = -1 metric_names = [m['fieldName'] for m in self.metric_spec if m['type'] != 'count'] # Take all the columns, which are not the time dimension # or a metric, as the dimension columns dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim] ingest_query_dict = { "type": "index_hadoop", "spec": { "dataSchema": { "metricsSpec": self.metric_spec, "granularitySpec": { "queryGranularity": self.query_granularity, "intervals": self.intervals, "type": "uniform", "segmentGranularity": self.segment_granularity, }, "parser": { "type": "string", "parseSpec": { "columns": columns, "dimensionsSpec": { "dimensionExclusions": [], "dimensions": dimensions, # list of names "spatialDimensions": [] }, "timestampSpec": { "column": self.ts_dim, "format": "auto" }, "format": "tsv" } }, "dataSource": self.druid_datasource }, "tuningConfig": { "type": "hadoop", "jobProperties": { "mapreduce.job.user.classpath.first": "false", "mapreduce.map.output.compress": "false", "mapreduce.output.fileoutputformat.compress": "false", }, "partitionsSpec": { "type": "hashed", "targetPartitionSize": target_partition_size, "numShards": num_shards, }, }, "ioConfig": { "inputSpec": { "paths": static_path, "type": "static" }, "type": "hadoop" } } } if self.job_properties: ingest_query_dict['spec']['tuningConfig']['jobProperties'] \ .update(self.job_properties) if self.hadoop_dependency_coordinates: ingest_query_dict['hadoopDependencyCoordinates'] \ = self.hadoop_dependency_coordinates return ingest_query_dict
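A hedged usage sketch, assuming `operator` is an already-configured Hive-to-Druid transfer operator instance; the HDFS path and column names are made up for the example.

# Illustrative call; path and column names are assumptions.
ingest_query = operator.construct_ingest_query(
    static_path='/user/hive/warehouse/druid_staging/some_table',  # HDFS TSV location
    columns=['ts', 'dim1', 'dim2', 'metric1'],                    # all available columns
)
# ingest_query['spec']['dataSchema']['parser']['parseSpec']['dimensionsSpec']['dimensions']
# now contains every column except the time dimension and the metric fields.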
This function executes the transfer from the email server (via imap) into s3.
def execute(self, context): """ This function executes the transfer from the email server (via imap) into s3. :param context: The context while executing. :type context: dict """ self.log.info( 'Transferring mail attachment %s from mail server via imap to s3 key %s...', self.imap_attachment_name, self.s3_key ) with ImapHook(imap_conn_id=self.imap_conn_id) as imap_hook: imap_mail_attachments = imap_hook.retrieve_mail_attachments( name=self.imap_attachment_name, mail_folder=self.imap_mail_folder, check_regex=self.imap_check_regex, latest_only=True ) s3_hook = S3Hook(aws_conn_id=self.s3_conn_id) s3_hook.load_bytes(bytes_data=imap_mail_attachments[0][1], key=self.s3_key)
Check for a message on the subscribed channels and write the message to XCom with the key 'message'.
def poke(self, context): """ Check for message on subscribed channels and write to xcom the message with key ``message`` An example of message ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}`` :param context: the context object :type context: dict :return: ``True`` if message (with type 'message') is available or ``False`` if not """ self.log.info('RedisPubSubSensor checking for message on channels: %s', self.channels) message = self.pubsub.get_message() self.log.info('Message %s from channel %s', message, self.channels) # Process only message types if message and message['type'] == 'message': context['ti'].xcom_push(key='message', value=message) self.pubsub.unsubscribe(self.channels) return True return False
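A hedged sketch of reading the pushed message downstream; 'redis_sensor_task' is an assumed task_id for the sensor above, not taken from the source.

# Hypothetical downstream PythonOperator callable.
def print_redis_message(**context):
    message = context['ti'].xcom_pull(task_ids='redis_sensor_task', key='message')
    # e.g. {'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}
    print(message['data'])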
Reloads the current dagrun from the database.
def refresh_from_db(self, session=None): """ Reloads the current dagrun from the database :param session: database session """ DR = DagRun exec_date = func.cast(self.execution_date, DateTime) dr = session.query(DR).filter( DR.dag_id == self.dag_id, func.cast(DR.execution_date, DateTime) == exec_date, DR.run_id == self.run_id ).one() self.id = dr.id self.state = dr.state
Returns a set of dag runs for the given search criteria.
def find(dag_id=None, run_id=None, execution_date=None, state=None, external_trigger=None, no_backfills=False, session=None): """ Returns a set of dag runs for the given search criteria. :param dag_id: the dag_id to find dag runs for :type dag_id: int, list :param run_id: defines the the run id for this dag run :type run_id: str :param execution_date: the execution date :type execution_date: datetime.datetime :param state: the state of the dag run :type state: airflow.utils.state.State :param external_trigger: whether this dag run is externally triggered :type external_trigger: bool :param no_backfills: return no backfills (True), return all (False). Defaults to False :type no_backfills: bool :param session: database session :type session: sqlalchemy.orm.session.Session """ DR = DagRun qry = session.query(DR) if dag_id: qry = qry.filter(DR.dag_id == dag_id) if run_id: qry = qry.filter(DR.run_id == run_id) if execution_date: if isinstance(execution_date, list): qry = qry.filter(DR.execution_date.in_(execution_date)) else: qry = qry.filter(DR.execution_date == execution_date) if state: qry = qry.filter(DR.state == state) if external_trigger is not None: qry = qry.filter(DR.external_trigger == external_trigger) if no_backfills: # in order to prevent a circular dependency from airflow.jobs import BackfillJob qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%')) dr = qry.order_by(DR.execution_date).all() return dr
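A minimal usage sketch; the dag id and date are examples, and it assumes the usual provide_session wiring so no explicit session is needed from the caller.

from airflow.models import DagRun
from airflow.utils import timezone

# Example: all externally triggered runs of a (hypothetical) DAG for one date.
runs = DagRun.find(
    dag_id='example_dag',
    execution_date=timezone.datetime(2019, 1, 1),
    external_trigger=True,
)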
Returns the task instances for this dag run
def get_task_instances(self, state=None, session=None): """ Returns the task instances for this dag run """ from airflow.models.taskinstance import TaskInstance # Avoid circular import tis = session.query(TaskInstance).filter( TaskInstance.dag_id == self.dag_id, TaskInstance.execution_date == self.execution_date, ) if state: if isinstance(state, six.string_types): tis = tis.filter(TaskInstance.state == state) else: # this is required to deal with NULL values if None in state: tis = tis.filter( or_(TaskInstance.state.in_(state), TaskInstance.state.is_(None)) ) else: tis = tis.filter(TaskInstance.state.in_(state)) if self.dag and self.dag.partial: tis = tis.filter(TaskInstance.task_id.in_(self.dag.task_ids)) return tis.all()
Returns the task instance specified by task_id for this dag run
def get_task_instance(self, task_id, session=None): """ Returns the task instance specified by task_id for this dag run :param task_id: the task id """ from airflow.models.taskinstance import TaskInstance # Avoid circular import TI = TaskInstance ti = session.query(TI).filter( TI.dag_id == self.dag_id, TI.execution_date == self.execution_date, TI.task_id == task_id ).first() return ti
The previous DagRun if there is one
def get_previous_dagrun(self, session=None): """The previous DagRun, if there is one""" return session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date < self.execution_date ).order_by( DagRun.execution_date.desc() ).first()
The previous SCHEDULED DagRun if there is one
def get_previous_scheduled_dagrun(self, session=None): """The previous, SCHEDULED DagRun, if there is one""" dag = self.get_dag() return session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date == dag.previous_schedule(self.execution_date) ).first()
Determines the overall state of the DagRun based on the state of its TaskInstances.
def update_state(self, session=None): """ Determines the overall state of the DagRun based on the state of its TaskInstances. :return: State """ dag = self.get_dag() tis = self.get_task_instances(session=session) self.log.debug("Updating state for %s considering %s task(s)", self, len(tis)) for ti in list(tis): # skip in db? if ti.state == State.REMOVED: tis.remove(ti) else: ti.task = dag.get_task(ti.task_id) # pre-calculate # db is faster start_dttm = timezone.utcnow() unfinished_tasks = self.get_task_instances( state=State.unfinished(), session=session ) none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks) none_task_concurrency = all(t.task.task_concurrency is None for t in unfinished_tasks) # small speed up if unfinished_tasks and none_depends_on_past and none_task_concurrency: # todo: this can actually get pretty slow: one task costs between 0.01-015s no_dependencies_met = True for ut in unfinished_tasks: # We need to flag upstream and check for changes because upstream # failures/re-schedules can result in deadlock false positives old_state = ut.state deps_met = ut.are_dependencies_met( dep_context=DepContext( flag_upstream_failed=True, ignore_in_retry_period=True, ignore_in_reschedule_period=True), session=session) if deps_met or old_state != ut.current_state(session=session): no_dependencies_met = False break duration = (timezone.utcnow() - start_dttm).total_seconds() * 1000 Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration) root_ids = [t.task_id for t in dag.roots] roots = [t for t in tis if t.task_id in root_ids] # if all roots finished and at least one failed, the run failed if (not unfinished_tasks and any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)): self.log.info('Marking run %s failed', self) self.set_state(State.FAILED) dag.handle_callback(self, success=False, reason='task_failure', session=session) # if all roots succeeded and no unfinished tasks, the run succeeded elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED) for r in roots): self.log.info('Marking run %s successful', self) self.set_state(State.SUCCESS) dag.handle_callback(self, success=True, reason='success', session=session) # if *all tasks* are deadlocked, the run failed elif (unfinished_tasks and none_depends_on_past and none_task_concurrency and no_dependencies_met): self.log.info('Deadlock; marking run %s failed', self) self.set_state(State.FAILED) dag.handle_callback(self, success=False, reason='all_tasks_deadlocked', session=session) # finally, if the roots aren't done, the dag is still running else: self.set_state(State.RUNNING) self._emit_duration_stats_for_finished_state() # todo: determine we want to use with_for_update to make sure to lock the run session.merge(self) session.commit() return self.state
Verifies the DagRun by checking for removed tasks or tasks that are not in the database yet. It will set state to removed or add the task if required.
def verify_integrity(self, session=None): """ Verifies the DagRun by checking for removed tasks or tasks that are not in the database yet. It will set state to removed or add the task if required. """ from airflow.models.taskinstance import TaskInstance # Avoid circular import dag = self.get_dag() tis = self.get_task_instances(session=session) # check for removed or restored tasks task_ids = [] for ti in tis: task_ids.append(ti.task_id) task = None try: task = dag.get_task(ti.task_id) except AirflowException: if ti.state == State.REMOVED: pass # ti has already been removed, just ignore it elif self.state is not State.RUNNING and not dag.partial: self.log.warning("Failed to get task '{}' for dag '{}'. " "Marking it as removed.".format(ti, dag)) Stats.incr( "task_removed_from_dag.{}".format(dag.dag_id), 1, 1) ti.state = State.REMOVED is_task_in_dag = task is not None should_restore_task = is_task_in_dag and ti.state == State.REMOVED if should_restore_task: self.log.info("Restoring task '{}' which was previously " "removed from DAG '{}'".format(ti, dag)) Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1) ti.state = State.NONE # check for missing tasks for task in six.itervalues(dag.task_dict): if task.start_date > self.execution_date and not self.is_backfill: continue if task.task_id not in task_ids: Stats.incr( "task_instance_created-{}".format(task.__class__.__name__), 1, 1) ti = TaskInstance(task, self.execution_date) session.add(ti) session.commit()
Returns the DagRun corresponding to the given dag_id and execution date, if one exists; otherwise None.
def get_run(session, dag_id, execution_date): """ :param dag_id: DAG ID :type dag_id: unicode :param execution_date: execution date :type execution_date: datetime :return: DagRun corresponding to the given dag_id and execution date if one exists. None otherwise. :rtype: airflow.models.DagRun """ qry = session.query(DagRun).filter( DagRun.dag_id == dag_id, DagRun.external_trigger == False, # noqa DagRun.execution_date == execution_date, ) return qry.first()
We need to get the headers in addition to the response body in order to get the location from them. This function uses the jenkins_request method from the python-jenkins library with just the return call changed.
def jenkins_request_with_headers(jenkins_server, req): """ We need to get the headers in addition to the body answer to get the location from them This function uses jenkins_request method from python-jenkins library with just the return call changed :param jenkins_server: The server to query :param req: The request to execute :return: Dict containing the response body (key body) and the headers coming along (headers) """ try: response = jenkins_server.jenkins_request(req) response_body = response.content response_headers = response.headers if response_body is None: raise jenkins.EmptyResponseException( "Error communicating with server[%s]: " "empty response" % jenkins_server.server) return {'body': response_body.decode('utf-8'), 'headers': response_headers} except HTTPError as e: # Jenkins's funky authentication means its nigh impossible to # distinguish errors. if e.code in [401, 403, 500]: # six.moves.urllib.error.HTTPError provides a 'reason' # attribute for all python version except for ver 2.6 # Falling back to HTTPError.msg since it contains the # same info as reason raise JenkinsException( 'Error in request. ' + 'Possibly authentication failed [%s]: %s' % ( e.code, e.msg) ) elif e.code == 404: raise jenkins.NotFoundException('Requested item could not be found') else: raise except socket.timeout as e: raise jenkins.TimeoutException('Error in request: %s' % e) except URLError as e: # python 2.6 compatibility to ensure same exception raised # since URLError wraps a socket timeout on python 2.6. if str(e.reason) == "timed out": raise jenkins.TimeoutException('Error in request: %s' % e.reason) raise JenkinsException('Error in request: %s' % e.reason)
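A hedged usage sketch; the job name is an example, and the 'Location' header is what a caller typically needs in order to poll the queue item.

from six.moves.urllib.request import Request

# Example: trigger a (hypothetical) job and grab the queue location header.
request = Request(jenkins_server.build_job_url('my_job', None, None))
response = jenkins_request_with_headers(jenkins_server, request)
queue_location = response['headers']['Location']
body = response['body']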
This function makes an API call to Jenkins to trigger a build for job_name. It returns a dict with 2 keys: body and headers. headers also contains a dict-like object which can be queried to get the location to poll in the queue.
def build_job(self, jenkins_server):
    """
    This function makes an API call to Jenkins to trigger a build for 'job_name'.
    It returns a dict with 2 keys: body and headers. headers also contains a
    dict-like object which can be queried to get the location to poll in the queue.

    :param jenkins_server: The jenkins server where the job should be triggered
    :return: Dict containing the response body (key body)
        and the headers coming along (headers)
    """
    # Warning: if the parameter is too long, the URL can be longer than
    # the maximum allowed size
    if self.parameters and isinstance(self.parameters, six.string_types):
        import ast
        self.parameters = ast.literal_eval(self.parameters)

    if not self.parameters:
        # We need a None to call the non-parametrized jenkins api end point
        self.parameters = None

    request = Request(jenkins_server.build_job_url(self.job_name,
                                                   self.parameters, None))
    return jenkins_request_with_headers(jenkins_server, request)
This method polls the Jenkins queue until the job is executed. When we trigger a job through an API call, the job is first put in the queue without having a build number assigned. Thus we have to wait until the job exits the queue to know its build number. To do so, we have to add /api/json (or /api/xml) to the location returned by the build_job call and poll this URL. When an 'executable' block appears in the JSON, it means the job execution has started and the field 'number' then contains the build number.
def poll_job_in_queue(self, location, jenkins_server):
    """
    This method polls the jenkins queue until the job is executed.
    When we trigger a job through an API call, the job is first put in the
    queue without having a build number assigned. Thus we have to wait until
    the job exits the queue to know its build number.
    To do so, we have to add /api/json (or /api/xml) to the location returned
    by the build_job call and poll this file. When an 'executable' block
    appears in the json, it means the job execution started and the field
    'number' then contains the build number.

    :param location: Location to poll, returned in the header of the build_job call
    :param jenkins_server: The jenkins server to poll
    :return: The build_number corresponding to the triggered job
    """
    try_count = 0
    location = location + '/api/json'
    # TODO Use get_queue_info instead
    # once it will be available in python-jenkins (v > 0.4.15)
    self.log.info('Polling jenkins queue at the url %s', location)
    while try_count < self.max_try_before_job_appears:
        location_answer = jenkins_request_with_headers(jenkins_server,
                                                       Request(location))
        if location_answer is not None:
            json_response = json.loads(location_answer['body'])
            if 'executable' in json_response:
                build_number = json_response['executable']['number']
                self.log.info('Job executed on Jenkins side with the build number %s',
                              build_number)
                return build_number
        try_count += 1
        time.sleep(self.sleep_time)
    raise AirflowException("The job hasn't been executed after polling "
                           "the queue %d times" % self.max_try_before_job_appears)
Given a context, this function provides a dictionary of values that can be used to externally reconstruct relations between dags, dag_runs, tasks and task_instances. Defaults to the abc.def.ghi format and can be made to the ABC_DEF_GHI format if in_env_var_format is set to True.
def context_to_airflow_vars(context, in_env_var_format=False): """ Given a context, this function provides a dictionary of values that can be used to externally reconstruct relations between dags, dag_runs, tasks and task_instances. Default to abc.def.ghi format and can be made to ABC_DEF_GHI format if in_env_var_format is set to True. :param context: The context for the task_instance of interest. :type context: dict :param in_env_var_format: If returned vars should be in ABC_DEF_GHI format. :type in_env_var_format: bool :return: task_instance context as dict. """ params = dict() if in_env_var_format: name_format = 'env_var_format' else: name_format = 'default' task_instance = context.get('task_instance') if task_instance and task_instance.dag_id: params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID'][ name_format]] = task_instance.dag_id if task_instance and task_instance.task_id: params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID'][ name_format]] = task_instance.task_id if task_instance and task_instance.execution_date: params[ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][ name_format]] = task_instance.execution_date.isoformat() dag_run = context.get('dag_run') if dag_run and dag_run.run_id: params[AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][ name_format]] = dag_run.run_id return params
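An illustrative input/output pair; the ids and date are made up, and the exact key names come from AIRFLOW_VAR_NAME_FORMAT_MAPPING (the ones shown are the usual defaults).

# Assuming a task instance of dag 'example_dag', task 'example_task':
context_to_airflow_vars(context)
# -> {'airflow.ctx.dag_id': 'example_dag',
#     'airflow.ctx.task_id': 'example_task',
#     'airflow.ctx.execution_date': '2019-01-01T00:00:00+00:00'}

context_to_airflow_vars(context, in_env_var_format=True)
# -> {'AIRFLOW_CTX_DAG_ID': 'example_dag',
#     'AIRFLOW_CTX_TASK_ID': 'example_task',
#     'AIRFLOW_CTX_EXECUTION_DATE': '2019-01-01T00:00:00+00:00'}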
Calls callbacks before execution. Note that any exception from a callback will be logged but won't be propagated.
def on_pre_execution(**kwargs): """ Calls callbacks before execution. Note that any exception from callback will be logged but won't be propagated. :param kwargs: :return: None """ logging.debug("Calling callbacks: %s", __pre_exec_callbacks) for cb in __pre_exec_callbacks: try: cb(**kwargs) except Exception: logging.exception('Failed on pre-execution callback using %s', cb)
Calls callbacks after execution. As it's called after execution, it can capture the status of execution, duration, etc. Note that any exception from a callback will be logged but won't be propagated.
def on_post_execution(**kwargs): """ Calls callbacks after execution. As it's being called after execution, it can capture status of execution, duration, etc. Note that any exception from callback will be logged but won't be propagated. :param kwargs: :return: None """ logging.debug("Calling callbacks: %s", __post_exec_callbacks) for cb in __post_exec_callbacks: try: cb(**kwargs) except Exception: logging.exception('Failed on post-execution callback using %s', cb)
This function decides whether or not to trigger the remote DAG
def conditionally_trigger(context, dag_run_obj): """This function decides whether or not to Trigger the remote DAG""" c_p = context['params']['condition_param'] print("Controller DAG : conditionally_trigger = {}".format(c_p)) if context['params']['condition_param']: dag_run_obj.payload = {'message': context['params']['message']} pp.pprint(dag_run_obj.payload) return dag_run_obj
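A hedged sketch of how this callable is typically wired into a controller DAG via TriggerDagRunOperator; the task id, target dag id and params are illustrative.

from airflow.operators.dagrun_operator import TriggerDagRunOperator

trigger = TriggerDagRunOperator(
    task_id='trigger_remote_dag',
    trigger_dag_id='example_trigger_target_dag',  # assumed target DAG id
    python_callable=conditionally_trigger,
    params={'condition_param': True, 'message': 'Hello World'},
    dag=dag,
)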
Sends a single datapoint metric to DataDog
def send_metric(self, metric_name, datapoint, tags=None, type_=None, interval=None): """ Sends a single datapoint metric to DataDog :param metric_name: The name of the metric :type metric_name: str :param datapoint: A single integer or float related to the metric :type datapoint: int or float :param tags: A list of tags associated with the metric :type tags: list :param type_: Type of your metric: gauge, rate, or count :type type_: str :param interval: If the type of the metric is rate or count, define the corresponding interval :type interval: int """ response = api.Metric.send( metric=metric_name, points=datapoint, host=self.host, tags=tags, type=type_, interval=interval) self.validate_response(response) return response
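A usage sketch assuming a DatadogHook bound to a 'datadog_default' connection; the metric name and tags are examples.

hook = DatadogHook(datadog_conn_id='datadog_default')
hook.send_metric(
    metric_name='airflow.example.rows_loaded',  # hypothetical metric
    datapoint=1234,
    tags=['env:dev', 'team:data'],
    type_='gauge',
)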
Queries datadog for a specific metric, potentially with some function applied to it, and returns the results.
def query_metric(self, query, from_seconds_ago, to_seconds_ago): """ Queries datadog for a specific metric, potentially with some function applied to it and returns the results. :param query: The datadog query to execute (see datadog docs) :type query: str :param from_seconds_ago: How many seconds ago to start querying for. :type from_seconds_ago: int :param to_seconds_ago: Up to how many seconds ago to query for. :type to_seconds_ago: int """ now = int(time.time()) response = api.Metric.query( start=now - from_seconds_ago, end=now - to_seconds_ago, query=query) self.validate_response(response) return response
Posts an event to datadog (processing finished, potentially alerts, other issues). Think of this as a means to maintain persistence of alerts, rather than alerting itself.
def post_event(self, title, text, aggregation_key=None, alert_type=None, date_happened=None, handle=None, priority=None, related_event_id=None, tags=None, device_name=None): """ Posts an event to datadog (processing finished, potentially alerts, other issues) Think about this as a means to maintain persistence of alerts, rather than alerting itself. :param title: The title of the event :type title: str :param text: The body of the event (more information) :type text: str :param aggregation_key: Key that can be used to aggregate this event in a stream :type aggregation_key: str :param alert_type: The alert type for the event, one of ["error", "warning", "info", "success"] :type alert_type: str :param date_happened: POSIX timestamp of the event; defaults to now :type date_happened: int :handle: User to post the event as; defaults to owner of the application key used to submit. :param handle: str :param priority: Priority to post the event as. ("normal" or "low", defaults to "normal") :type priority: str :param related_event_id: Post event as a child of the given event :type related_event_id: id :param tags: List of tags to apply to the event :type tags: list[str] :param device_name: device_name to post the event with :type device_name: list """ response = api.Event.create( title=title, text=text, aggregation_key=aggregation_key, alert_type=alert_type, date_happened=date_happened, handle=handle, priority=priority, related_event_id=related_event_id, tags=tags, host=self.host, device_name=device_name, source_type_name=self.source_type_name) self.validate_response(response) return response
Given either a manually set token or a conn_id, return the webhook_token to use.
def _get_token(self, token, http_conn_id): """ Given either a manually set token or a conn_id, return the webhook_token to use :param token: The manually provided token :type token: str :param http_conn_id: The conn_id provided :type http_conn_id: str :return: webhook_token (str) to use """ if token: return token elif http_conn_id: conn = self.get_connection(http_conn_id) extra = conn.extra_dejson return extra.get('webhook_token', '') else: raise AirflowException('Cannot get token: No valid Slack ' 'webhook token nor conn_id supplied')
Construct the Slack message. All relevant parameters are combined here into a valid Slack JSON message.
def _build_slack_message(self): """ Construct the Slack message. All relevant parameters are combined here to a valid Slack json message :return: Slack message (str) to send """ cmd = {} if self.channel: cmd['channel'] = self.channel if self.username: cmd['username'] = self.username if self.icon_emoji: cmd['icon_emoji'] = self.icon_emoji if self.link_names: cmd['link_names'] = 1 if self.attachments: cmd['attachments'] = self.attachments cmd['text'] = self.message return json.dumps(cmd)
Remote Popen (actually execute the slack webhook call)
def execute(self): """ Remote Popen (actually execute the slack webhook call) """ proxies = {} if self.proxy: # we only need https proxy for Slack, as the endpoint is https proxies = {'https': self.proxy} slack_message = self._build_slack_message() self.run(endpoint=self.webhook_token, data=slack_message, headers={'Content-type': 'application/json'}, extra_options={'proxies': proxies})
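A hedged usage sketch of the webhook hook; the connection id, channel and message are examples.

from airflow.contrib.hooks.slack_webhook_hook import SlackWebhookHook

hook = SlackWebhookHook(
    http_conn_id='slack_default',  # assumed connection with webhook_token in its extras
    message='DAG example_dag finished successfully',
    channel='#alerts',
    username='airflow',
)
hook.execute()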
Gets the DAG out of the dictionary and refreshes it if expired
def get_dag(self, dag_id): """ Gets the DAG out of the dictionary, and refreshes it if expired """ from airflow.models.dag import DagModel # Avoid circular import # If asking for a known subdag, we want to refresh the parent root_dag_id = dag_id if dag_id in self.dags: dag = self.dags[dag_id] if dag.is_subdag: root_dag_id = dag.parent_dag.dag_id # If the dag corresponding to root_dag_id is absent or expired orm_dag = DagModel.get_current(root_dag_id) if orm_dag and ( root_dag_id not in self.dags or ( orm_dag.last_expired and dag.last_loaded < orm_dag.last_expired ) ): # Reprocess source file found_dags = self.process_file( filepath=orm_dag.fileloc, only_if_updated=False) # If the source file no longer exports `dag_id`, delete it from self.dags if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]: return self.dags[dag_id] elif dag_id in self.dags: del self.dags[dag_id] return self.dags.get(dag_id)
Given a path to a Python module or zip file, this method imports the module and looks for DAG objects within it.
def process_file(self, filepath, only_if_updated=True, safe_mode=True): """ Given a path to a python module or zip file, this method imports the module and look for dag objects within it. """ from airflow.models.dag import DAG # Avoid circular import found_dags = [] # if the source file no longer exists in the DB or in the filesystem, # return an empty list # todo: raise exception? if filepath is None or not os.path.isfile(filepath): return found_dags try: # This failed before in what may have been a git sync # race condition file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(filepath)) if only_if_updated \ and filepath in self.file_last_changed \ and file_last_changed_on_disk == self.file_last_changed[filepath]: return found_dags except Exception as e: self.log.exception(e) return found_dags mods = [] is_zipfile = zipfile.is_zipfile(filepath) if not is_zipfile: if safe_mode: with open(filepath, 'rb') as f: content = f.read() if not all([s in content for s in (b'DAG', b'airflow')]): self.file_last_changed[filepath] = file_last_changed_on_disk # Don't want to spam user with skip messages if not self.has_logged: self.has_logged = True self.log.info( "File %s assumed to contain no DAGs. Skipping.", filepath) return found_dags self.log.debug("Importing %s", filepath) org_mod_name, _ = os.path.splitext(os.path.split(filepath)[-1]) mod_name = ('unusual_prefix_' + hashlib.sha1(filepath.encode('utf-8')).hexdigest() + '_' + org_mod_name) if mod_name in sys.modules: del sys.modules[mod_name] with timeout(configuration.conf.getint('core', "DAGBAG_IMPORT_TIMEOUT")): try: m = imp.load_source(mod_name, filepath) mods.append(m) except Exception as e: self.log.exception("Failed to import: %s", filepath) self.import_errors[filepath] = str(e) self.file_last_changed[filepath] = file_last_changed_on_disk else: zip_file = zipfile.ZipFile(filepath) for mod in zip_file.infolist(): head, _ = os.path.split(mod.filename) mod_name, ext = os.path.splitext(mod.filename) if not head and (ext == '.py' or ext == '.pyc'): if mod_name == '__init__': self.log.warning("Found __init__.%s at root of %s", ext, filepath) if safe_mode: with zip_file.open(mod.filename) as zf: self.log.debug("Reading %s from %s", mod.filename, filepath) content = zf.read() if not all([s in content for s in (b'DAG', b'airflow')]): self.file_last_changed[filepath] = ( file_last_changed_on_disk) # todo: create ignore list # Don't want to spam user with skip messages if not self.has_logged: self.has_logged = True self.log.info( "File %s assumed to contain no DAGs. 
Skipping.", filepath) if mod_name in sys.modules: del sys.modules[mod_name] try: sys.path.insert(0, filepath) m = importlib.import_module(mod_name) mods.append(m) except Exception as e: self.log.exception("Failed to import: %s", filepath) self.import_errors[filepath] = str(e) self.file_last_changed[filepath] = file_last_changed_on_disk for m in mods: for dag in list(m.__dict__.values()): if isinstance(dag, DAG): if not dag.full_filepath: dag.full_filepath = filepath if dag.fileloc != filepath and not is_zipfile: dag.fileloc = filepath try: dag.is_subdag = False self.bag_dag(dag, parent_dag=dag, root_dag=dag) if isinstance(dag._schedule_interval, six.string_types): croniter(dag._schedule_interval) found_dags.append(dag) found_dags += dag.subdags except (CroniterBadCronError, CroniterBadDateError, CroniterNotAlphaError) as cron_e: self.log.exception("Failed to bag_dag: %s", dag.full_filepath) self.import_errors[dag.full_filepath] = \ "Invalid Cron expression: " + str(cron_e) self.file_last_changed[dag.full_filepath] = \ file_last_changed_on_disk except AirflowDagCycleException as cycle_exception: self.log.exception("Failed to bag_dag: %s", dag.full_filepath) self.import_errors[dag.full_filepath] = str(cycle_exception) self.file_last_changed[dag.full_filepath] = \ file_last_changed_on_disk self.file_last_changed[filepath] = file_last_changed_on_disk return found_dags
Fail the given zombie tasks, which are tasks that haven't had a heartbeat for too long, in the current DagBag.
def kill_zombies(self, zombies, session=None): """ Fail given zombie tasks, which are tasks that haven't had a heartbeat for too long, in the current DagBag. :param zombies: zombie task instances to kill. :type zombies: airflow.utils.dag_processing.SimpleTaskInstance :param session: DB session. :type session: sqlalchemy.orm.session.Session """ from airflow.models.taskinstance import TaskInstance # Avoid circular import for zombie in zombies: if zombie.dag_id in self.dags: dag = self.dags[zombie.dag_id] if zombie.task_id in dag.task_ids: task = dag.get_task(zombie.task_id) ti = TaskInstance(task, zombie.execution_date) # Get properties needed for failure handling from SimpleTaskInstance. ti.start_date = zombie.start_date ti.end_date = zombie.end_date ti.try_number = zombie.try_number ti.state = zombie.state ti.test_mode = configuration.getboolean('core', 'unit_test_mode') ti.handle_failure("{} detected as zombie".format(ti), ti.test_mode, ti.get_template_context()) self.log.info( 'Marked zombie job %s as %s', ti, ti.state) Stats.incr('zombies_killed') session.commit()
Adds the DAG into the bag and recurses into sub dags. Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags.
def bag_dag(self, dag, parent_dag, root_dag): """ Adds the DAG into the bag, recurses into sub dags. Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags """ dag.test_cycle() # throws if a task cycle is found dag.resolve_template_files() dag.last_loaded = timezone.utcnow() for task in dag.tasks: settings.policy(task) subdags = dag.subdags try: for subdag in subdags: subdag.full_filepath = dag.full_filepath subdag.parent_dag = dag subdag.is_subdag = True self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag) self.dags[dag.dag_id] = dag self.log.debug('Loaded DAG %s', dag) except AirflowDagCycleException as cycle_exception: # There was an error in bagging the dag. Remove it from the list of dags self.log.exception('Exception bagging dag: %s', dag.dag_id) # Only necessary at the root level since DAG.subdags automatically # performs DFS to search through all subdags if dag == root_dag: for subdag in subdags: if subdag.dag_id in self.dags: del self.dags[subdag.dag_id] raise cycle_exception
Given a file path or a folder, this method looks for Python modules, imports them and adds them to the dagbag collection.
def collect_dags( self, dag_folder=None, only_if_updated=True, include_examples=configuration.conf.getboolean('core', 'LOAD_EXAMPLES'), safe_mode=configuration.conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE')): """ Given a file path or a folder, this method looks for python modules, imports them and adds them to the dagbag collection. Note that if a ``.airflowignore`` file is found while processing the directory, it will behave much like a ``.gitignore``, ignoring files that match any of the regex patterns specified in the file. **Note**: The patterns in .airflowignore are treated as un-anchored regexes, not shell-like glob patterns. """ start_dttm = timezone.utcnow() dag_folder = dag_folder or self.dag_folder # Used to store stats around DagBag processing stats = [] FileLoadStat = namedtuple( 'FileLoadStat', "file duration dag_num task_num dags") dag_folder = correct_maybe_zipped(dag_folder) for filepath in list_py_file_paths(dag_folder, safe_mode=safe_mode, include_examples=include_examples): try: ts = timezone.utcnow() found_dags = self.process_file( filepath, only_if_updated=only_if_updated, safe_mode=safe_mode) td = timezone.utcnow() - ts td = td.total_seconds() + ( float(td.microseconds) / 1000000) stats.append(FileLoadStat( filepath.replace(dag_folder, ''), td, len(found_dags), sum([len(dag.tasks) for dag in found_dags]), str([dag.dag_id for dag in found_dags]), )) except Exception as e: self.log.exception(e) Stats.gauge( 'collect_dags', (timezone.utcnow() - start_dttm).total_seconds(), 1) Stats.gauge( 'dagbag_size', len(self.dags), 1) Stats.gauge( 'dagbag_import_errors', len(self.import_errors), 1) self.dagbag_stats = sorted( stats, key=lambda x: x.duration, reverse=True)
Prints a report around DagBag loading stats
def dagbag_report(self): """Prints a report around DagBag loading stats""" report = textwrap.dedent("""\n ------------------------------------------------------------------- DagBag loading stats for {dag_folder} ------------------------------------------------------------------- Number of DAGs: {dag_num} Total task number: {task_num} DagBag parsing time: {duration} {table} """) stats = self.dagbag_stats return report.format( dag_folder=self.dag_folder, duration=sum([o.duration for o in stats]), dag_num=sum([o.dag_num for o in stats]), task_num=sum([o.task_num for o in stats]), table=pprinttable(stats), )
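A short usage sketch; the folder path is an example.

from airflow.models import DagBag

bag = DagBag(dag_folder='/usr/local/airflow/dags', include_examples=False)
print(bag.dagbag_report())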
Call the SparkSubmitHook to run the provided spark job
def execute(self, context): """ Call the SparkSubmitHook to run the provided spark job """ self._hook = SparkJDBCHook( spark_app_name=self._spark_app_name, spark_conn_id=self._spark_conn_id, spark_conf=self._spark_conf, spark_py_files=self._spark_py_files, spark_files=self._spark_files, spark_jars=self._spark_jars, num_executors=self._num_executors, executor_cores=self._executor_cores, executor_memory=self._executor_memory, driver_memory=self._driver_memory, verbose=self._verbose, keytab=self._keytab, principal=self._principal, cmd_type=self._cmd_type, jdbc_table=self._jdbc_table, jdbc_conn_id=self._jdbc_conn_id, jdbc_driver=self._jdbc_driver, metastore_table=self._metastore_table, jdbc_truncate=self._jdbc_truncate, save_mode=self._save_mode, save_format=self._save_format, batch_size=self._batch_size, fetch_size=self._fetch_size, num_partitions=self._num_partitions, partition_column=self._partition_column, lower_bound=self._lower_bound, upper_bound=self._upper_bound, create_table_column_types=self._create_table_column_types ) self._hook.submit_jdbc_job()
Add or subtract days from a YYYY-MM-DD date string.
def ds_add(ds, days): """ Add or subtract days from a YYYY-MM-DD :param ds: anchor date in ``YYYY-MM-DD`` format to add to :type ds: str :param days: number of days to add to the ds, you can use negative values :type days: int >>> ds_add('2015-01-01', 5) '2015-01-06' >>> ds_add('2015-01-06', -5) '2015-01-01' """ ds = datetime.strptime(ds, '%Y-%m-%d') if days: ds = ds + timedelta(days) return ds.isoformat()[:10]
Takes an input string and outputs another string as specified in the output format
def ds_format(ds, input_format, output_format): """ Takes an input string and outputs another string as specified in the output format :param ds: input string which contains a date :type ds: str :param input_format: input string format. E.g. %Y-%m-%d :type input_format: str :param output_format: output string format E.g. %Y-%m-%d :type output_format: str >>> ds_format('2015-01-01', "%Y-%m-%d", "%m-%d-%y") '01-01-15' >>> ds_format('1/5/2015', "%m/%d/%Y", "%Y-%m-%d") '2015-01-05' """ return datetime.strptime(ds, input_format).strftime(output_format)
Integrate plugins to the context
def _integrate_plugins(): """Integrate plugins to the context""" import sys from airflow.plugins_manager import macros_modules for macros_module in macros_modules: sys.modules[macros_module.__name__] = macros_module globals()[macros_module._name] = macros_module
Poke for files in a directory matching self.regex.
def poke(self, context): """ poke matching files in a directory with self.regex :return: Bool depending on the search criteria """ sb = self.hook(self.hdfs_conn_id).get_conn() self.log.info( 'Poking for %s to be a directory with files matching %s', self.filepath, self.regex.pattern ) result = [f for f in sb.ls([self.filepath], include_toplevel=False) if f['file_type'] == 'f' and self.regex.match(f['path'].replace('%s/' % self.filepath, ''))] result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying) result = self.filter_for_filesize(result, self.file_size) return bool(result)
Poke for a non-empty directory.
def poke(self, context): """ poke for a non empty directory :return: Bool depending on the search criteria """ sb = self.hook(self.hdfs_conn_id).get_conn() result = [f for f in sb.ls([self.filepath], include_toplevel=True)] result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying) result = self.filter_for_filesize(result, self.file_size) if self.be_empty: self.log.info('Poking for filepath %s to a empty directory', self.filepath) return len(result) == 1 and result[0]['path'] == self.filepath else: self.log.info('Poking for filepath %s to a non empty directory', self.filepath) result.pop(0) return bool(result) and result[0]['file_type'] == 'f'
Clears a set of task instances but makes sure the running ones get killed.
def clear_task_instances(tis, session, activate_dag_runs=True, dag=None, ): """ Clears a set of task instances, but makes sure the running ones get killed. :param tis: a list of task instances :param session: current session :param activate_dag_runs: flag to check for active dag run :param dag: DAG object """ job_ids = [] for ti in tis: if ti.state == State.RUNNING: if ti.job_id: ti.state = State.SHUTDOWN job_ids.append(ti.job_id) else: task_id = ti.task_id if dag and dag.has_task(task_id): task = dag.get_task(task_id) task_retries = task.retries ti.max_tries = ti.try_number + task_retries - 1 else: # Ignore errors when updating max_tries if dag is None or # task not found in dag since database records could be # outdated. We make max_tries the maximum value of its # original max_tries or the current task try number. ti.max_tries = max(ti.max_tries, ti.try_number - 1) ti.state = State.NONE session.merge(ti) if job_ids: from airflow.jobs import BaseJob as BJ for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all(): job.state = State.SHUTDOWN if activate_dag_runs and tis: from airflow.models.dagrun import DagRun # Avoid circular import drs = session.query(DagRun).filter( DagRun.dag_id.in_({ti.dag_id for ti in tis}), DagRun.execution_date.in_({ti.execution_date for ti in tis}), ).all() for dr in drs: dr.state = State.RUNNING dr.start_date = timezone.utcnow()
Return the try number that this task instance will have when it is actually run.
def try_number(self):
    """
    Return the try number that this task instance will have when it is
    actually run.

    If the TI is currently running, this will match the column in the
    database; in all other cases this will be incremented.
    """
    # This is designed so that task logs end up in the right file.
    if self.state == State.RUNNING:
        return self._try_number
    return self._try_number + 1
Returns a command that can be executed anywhere where airflow is installed. This command is part of the message sent to executors by the orchestrator.
def command( self, mark_success=False, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, local=False, pickle_id=None, raw=False, job_id=None, pool=None, cfg_path=None): """ Returns a command that can be executed anywhere where airflow is installed. This command is part of the message sent to executors by the orchestrator. """ return " ".join(self.command_as_list( mark_success=mark_success, ignore_all_deps=ignore_all_deps, ignore_depends_on_past=ignore_depends_on_past, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state, local=local, pickle_id=pickle_id, raw=raw, job_id=job_id, pool=pool, cfg_path=cfg_path))
Returns a command that can be executed anywhere where airflow is installed. This command is part of the message sent to executors by the orchestrator.
def command_as_list( self, mark_success=False, ignore_all_deps=False, ignore_task_deps=False, ignore_depends_on_past=False, ignore_ti_state=False, local=False, pickle_id=None, raw=False, job_id=None, pool=None, cfg_path=None): """ Returns a command that can be executed anywhere where airflow is installed. This command is part of the message sent to executors by the orchestrator. """ dag = self.task.dag should_pass_filepath = not pickle_id and dag if should_pass_filepath and dag.full_filepath != dag.filepath: path = "DAGS_FOLDER/{}".format(dag.filepath) elif should_pass_filepath and dag.full_filepath: path = dag.full_filepath else: path = None return TaskInstance.generate_command( self.dag_id, self.task_id, self.execution_date, mark_success=mark_success, ignore_all_deps=ignore_all_deps, ignore_task_deps=ignore_task_deps, ignore_depends_on_past=ignore_depends_on_past, ignore_ti_state=ignore_ti_state, local=local, pickle_id=pickle_id, file_path=path, raw=raw, job_id=job_id, pool=pool, cfg_path=cfg_path)
Generates the shell command required to execute this task instance.
def generate_command(dag_id, task_id, execution_date, mark_success=False, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, local=False, pickle_id=None, file_path=None, raw=False, job_id=None, pool=None, cfg_path=None ): """ Generates the shell command required to execute this task instance. :param dag_id: DAG ID :type dag_id: unicode :param task_id: Task ID :type task_id: unicode :param execution_date: Execution date for the task :type execution_date: datetime :param mark_success: Whether to mark the task as successful :type mark_success: bool :param ignore_all_deps: Ignore all ignorable dependencies. Overrides the other ignore_* parameters. :type ignore_all_deps: bool :param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs (e.g. for Backfills) :type ignore_depends_on_past: bool :param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past and trigger rule :type ignore_task_deps: bool :param ignore_ti_state: Ignore the task instance's previous failure/success :type ignore_ti_state: bool :param local: Whether to run the task locally :type local: bool :param pickle_id: If the DAG was serialized to the DB, the ID associated with the pickled DAG :type pickle_id: unicode :param file_path: path to the file containing the DAG definition :param raw: raw mode (needs more details) :param job_id: job ID (needs more details) :param pool: the Airflow pool that the task should run in :type pool: unicode :param cfg_path: the Path to the configuration file :type cfg_path: basestring :return: shell command that can be used to run the task instance """ iso = execution_date.isoformat() cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)] cmd.extend(["--mark_success"]) if mark_success else None cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None cmd.extend(["--job_id", str(job_id)]) if job_id else None cmd.extend(["-A"]) if ignore_all_deps else None cmd.extend(["-i"]) if ignore_task_deps else None cmd.extend(["-I"]) if ignore_depends_on_past else None cmd.extend(["--force"]) if ignore_ti_state else None cmd.extend(["--local"]) if local else None cmd.extend(["--pool", pool]) if pool else None cmd.extend(["--raw"]) if raw else None cmd.extend(["-sd", file_path]) if file_path else None cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None return cmd
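An illustrative call and its result; the dag id, task id, date and path are made up.

from datetime import datetime

cmd = TaskInstance.generate_command(
    'example_dag', 'example_task', datetime(2019, 1, 1),
    local=True, pool='default_pool', file_path='DAGS_FOLDER/example_dag.py')
# cmd == ['airflow', 'run', 'example_dag', 'example_task', '2019-01-01T00:00:00',
#         '--local', '--pool', 'default_pool', '-sd', 'DAGS_FOLDER/example_dag.py']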
Get the very latest state from the database. If a session is passed, we use it and looking up the state becomes part of the session; otherwise a new session is used.
def current_state(self, session=None):
    """
    Get the very latest state from the database. If a session is passed,
    we use it and looking up the state becomes part of the session;
    otherwise a new session is used.
    """
    TI = TaskInstance
    ti = session.query(TI).filter(
        TI.dag_id == self.dag_id,
        TI.task_id == self.task_id,
        TI.execution_date == self.execution_date,
    ).all()
    if ti:
        state = ti[0].state
    else:
        state = None
    return state
Forces the task instance's state to FAILED in the database.
def error(self, session=None): """ Forces the task instance's state to FAILED in the database. """ self.log.error("Recording the task instance as FAILED") self.state = State.FAILED session.merge(self) session.commit()
Refreshes the task instance from the database based on the primary key
def refresh_from_db(self, session=None, lock_for_update=False):
    """
    Refreshes the task instance from the database based on the primary key

    :param lock_for_update: if True, indicates that the database should
        lock the TaskInstance (issuing a FOR UPDATE clause) until the
        session is committed.
    """
    TI = TaskInstance
    qry = session.query(TI).filter(
        TI.dag_id == self.dag_id,
        TI.task_id == self.task_id,
        TI.execution_date == self.execution_date)

    if lock_for_update:
        ti = qry.with_for_update().first()
    else:
        ti = qry.first()
    if ti:
        self.state = ti.state
        self.start_date = ti.start_date
        self.end_date = ti.end_date
        # Get the raw value of the try_number column; don't read through the
        # accessor here, otherwise it will already be incremented by one.
        self.try_number = ti._try_number
        self.max_tries = ti.max_tries
        self.hostname = ti.hostname
        self.pid = ti.pid
        self.executor_config = ti.executor_config
    else:
        self.state = None
Clears all XCom data from the database for the task instance
def clear_xcom_data(self, session=None): """ Clears all XCom data from the database for the task instance """ session.query(XCom).filter( XCom.dag_id == self.dag_id, XCom.task_id == self.task_id, XCom.execution_date == self.execution_date ).delete() session.commit()
Returns a tuple that identifies the task instance uniquely
def key(self): """ Returns a tuple that identifies the task instance uniquely """ return self.dag_id, self.task_id, self.execution_date, self.try_number
Checks whether the dependents of this task instance have all succeeded. This is meant to be used by wait_for_downstream.
def are_dependents_done(self, session=None): """ Checks whether the dependents of this task instance have all succeeded. This is meant to be used by wait_for_downstream. This is useful when you do not want to start processing the next schedule of a task until the dependents are done. For instance, if the task DROPs and recreates a table. """ task = self.task if not task.downstream_task_ids: return True ti = session.query(func.count(TaskInstance.task_id)).filter( TaskInstance.dag_id == self.dag_id, TaskInstance.task_id.in_(task.downstream_task_ids), TaskInstance.execution_date == self.execution_date, TaskInstance.state == State.SUCCESS, ) count = ti[0][0] return count == len(task.downstream_task_ids)
Returns whether or not all the conditions are met for this task instance to be run given the context for the dependencies (e.g. a task instance being force run from the UI will ignore some dependencies).
def are_dependencies_met( self, dep_context=None, session=None, verbose=False): """ Returns whether or not all the conditions are met for this task instance to be run given the context for the dependencies (e.g. a task instance being force run from the UI will ignore some dependencies). :param dep_context: The execution context that determines the dependencies that should be evaluated. :type dep_context: DepContext :param session: database session :type session: sqlalchemy.orm.session.Session :param verbose: whether log details on failed dependencies on info or debug log level :type verbose: bool """ dep_context = dep_context or DepContext() failed = False verbose_aware_logger = self.log.info if verbose else self.log.debug for dep_status in self.get_failed_dep_statuses( dep_context=dep_context, session=session): failed = True verbose_aware_logger( "Dependencies not met for %s, dependency '%s' FAILED: %s", self, dep_status.dep_name, dep_status.reason ) if failed: return False verbose_aware_logger("Dependencies all met for %s", self) return True
Get the datetime of the next retry if the task instance fails. For exponential backoff, retry_delay is used as the base and will be converted to seconds.
def next_retry_datetime(self): """ Get datetime of the next retry if the task instance fails. For exponential backoff, retry_delay is used as base and will be converted to seconds. """ delay = self.task.retry_delay if self.task.retry_exponential_backoff: min_backoff = int(delay.total_seconds() * (2 ** (self.try_number - 2))) # deterministic per task instance hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id, self.task_id, self.execution_date, self.try_number) .encode('utf-8')).hexdigest(), 16) # between 0.5 * delay * (2^retry_number) and 1.0 * delay * (2^retry_number) modded_hash = min_backoff + hash % min_backoff # timedelta has a maximum representable value. The exponentiation # here means this value can be exceeded after a certain number # of tries (around 50 if the initial delay is 1s, even fewer if # the delay is larger). Cap the value here before creating a # timedelta object so the operation doesn't fail. delay_backoff_in_seconds = min( modded_hash, timedelta.max.total_seconds() - 1 ) delay = timedelta(seconds=delay_backoff_in_seconds) if self.task.max_retry_delay: delay = min(self.task.max_retry_delay, delay) return self.end_date + delay
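A worked example of the backoff window, using made-up numbers: if retry_delay is 300 seconds and try_number evaluates to 3 (the second retry), then min_backoff = 300 * 2 ** (3 - 2) = 600 seconds, and the deterministic hash places the actual delay in the range of 600 to 1200 seconds, further capped by max_retry_delay when one is set.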
Checks on whether the task instance is in the right state and timeframe to be retried.
def ready_for_retry(self): """ Checks on whether the task instance is in the right state and timeframe to be retried. """ return (self.state == State.UP_FOR_RETRY and self.next_retry_datetime() < timezone.utcnow())
Returns a boolean as to whether the slot pool has room for this task to run
def pool_full(self, session): """ Returns a boolean as to whether the slot pool has room for this task to run """ if not self.task.pool: return False pool = ( session .query(Pool) .filter(Pool.pool == self.task.pool) .first() ) if not pool: return False open_slots = pool.open_slots(session=session) return open_slots <= 0
Returns the DagRun for this TaskInstance
def get_dagrun(self, session): """ Returns the DagRun for this TaskInstance :param session: :return: DagRun """ from airflow.models.dagrun import DagRun # Avoid circular import dr = session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date == self.execution_date ).first() return dr
Checks dependencies and then sets state to RUNNING if they are met. Returns True if and only if state is set to RUNNING, which implies that the task should be executed in preparation for _run_raw_task.
def _check_and_change_state_before_execution( self, verbose=True, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, mark_success=False, test_mode=False, job_id=None, pool=None, session=None): """ Checks dependencies and then sets state to RUNNING if they are met. Returns True if and only if state is set to RUNNING, which implies that task should be executed, in preparation for _run_raw_task :param verbose: whether to turn on more verbose logging :type verbose: bool :param ignore_all_deps: Ignore all of the non-critical dependencies, just runs :type ignore_all_deps: bool :param ignore_depends_on_past: Ignore depends_on_past DAG attribute :type ignore_depends_on_past: bool :param ignore_task_deps: Don't check the dependencies of this TI's task :type ignore_task_deps: bool :param ignore_ti_state: Disregards previous task instance state :type ignore_ti_state: bool :param mark_success: Don't run the task, mark its state as success :type mark_success: bool :param test_mode: Doesn't record success or failure in the DB :type test_mode: bool :param pool: specifies the pool to use to run the task instance :type pool: str :return: whether the state was changed to running or not :rtype: bool """ task = self.task self.pool = pool or task.pool self.test_mode = test_mode self.refresh_from_db(session=session, lock_for_update=True) self.job_id = job_id self.hostname = get_hostname() self.operator = task.__class__.__name__ if not ignore_all_deps and not ignore_ti_state and self.state == State.SUCCESS: Stats.incr('previously_succeeded', 1, 1) queue_dep_context = DepContext( deps=QUEUE_DEPS, ignore_all_deps=ignore_all_deps, ignore_ti_state=ignore_ti_state, ignore_depends_on_past=ignore_depends_on_past, ignore_task_deps=ignore_task_deps) if not self.are_dependencies_met( dep_context=queue_dep_context, session=session, verbose=True): session.commit() return False # TODO: Logging needs cleanup, not clear what is being printed hr = "\n" + ("-" * 80) # Line break # For reporting purposes, we report based on 1-indexed, # not 0-indexed lists (i.e. Attempt 1 instead of # Attempt 0 for the first attempt). # Set the task start date. In case it was re-scheduled use the initial # start date that is recorded in task_reschedule table self.start_date = timezone.utcnow() task_reschedules = TaskReschedule.find_for_task_instance(self, session) if task_reschedules: self.start_date = task_reschedules[0].start_date dep_context = DepContext( deps=RUN_DEPS - QUEUE_DEPS, ignore_all_deps=ignore_all_deps, ignore_depends_on_past=ignore_depends_on_past, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state) runnable = self.are_dependencies_met( dep_context=dep_context, session=session, verbose=True) if not runnable and not mark_success: # FIXME: we might have hit concurrency limits, which means we probably # have been running prematurely. This should be handled in the # scheduling mechanism. self.state = State.NONE self.log.warning(hr) self.log.warning( "FIXME: Rescheduling due to concurrency limits reached at task runtime. Attempt %s of " "%s. 
State set to NONE.", self.try_number, self.max_tries + 1 ) self.log.warning(hr) self.queued_dttm = timezone.utcnow() self.log.info("Queuing into pool %s", self.pool) session.merge(self) session.commit() return False # Another worker might have started running this task instance while # the current worker process was blocked on refresh_from_db if self.state == State.RUNNING: self.log.warning("Task Instance already running %s", self) session.commit() return False # print status message self.log.info(hr) self.log.info("Starting attempt %s of %s", self.try_number, self.max_tries + 1) self.log.info(hr) self._try_number += 1 if not test_mode: session.add(Log(State.RUNNING, self)) self.state = State.RUNNING self.pid = os.getpid() self.end_date = None if not test_mode: session.merge(self) session.commit() # Closing all pooled connections to prevent # "max number of connections reached" settings.engine.dispose() if verbose: if mark_success: self.log.info("Marking success for %s on %s", self.task, self.execution_date) else: self.log.info("Executing %s on %s", self.task, self.execution_date) return True
Immediately runs the task (without checking or changing db state before execution) and then sets the appropriate final state after completion and runs any post-execute callbacks. Meant to be called only after another function changes the state to running.
def _run_raw_task( self, mark_success=False, test_mode=False, job_id=None, pool=None, session=None): """ Immediately runs the task (without checking or changing db state before execution) and then sets the appropriate final state after completion and runs any post-execute callbacks. Meant to be called only after another function changes the state to running. :param mark_success: Don't run the task, mark its state as success :type mark_success: bool :param test_mode: Doesn't record success or failure in the DB :type test_mode: bool :param pool: specifies the pool to use to run the task instance :type pool: str """ task = self.task self.pool = pool or task.pool self.test_mode = test_mode self.refresh_from_db(session=session) self.job_id = job_id self.hostname = get_hostname() self.operator = task.__class__.__name__ context = {} actual_start_date = timezone.utcnow() try: if not mark_success: context = self.get_template_context() task_copy = copy.copy(task) self.task = task_copy def signal_handler(signum, frame): self.log.error("Received SIGTERM. Terminating subprocesses.") task_copy.on_kill() raise AirflowException("Task received SIGTERM signal") signal.signal(signal.SIGTERM, signal_handler) # Don't clear Xcom until the task is certain to execute self.clear_xcom_data() start_time = time.time() self.render_templates() task_copy.pre_execute(context=context) # If a timeout is specified for the task, make it fail # if it goes beyond result = None if task_copy.execution_timeout: try: with timeout(int( task_copy.execution_timeout.total_seconds())): result = task_copy.execute(context=context) except AirflowTaskTimeout: task_copy.on_kill() raise else: result = task_copy.execute(context=context) # If the task returns a result, push an XCom containing it if task_copy.do_xcom_push and result is not None: self.xcom_push(key=XCOM_RETURN_KEY, value=result) task_copy.post_execute(context=context, result=result) end_time = time.time() duration = end_time - start_time Stats.timing( 'dag.{dag_id}.{task_id}.duration'.format( dag_id=task_copy.dag_id, task_id=task_copy.task_id), duration) Stats.incr('operator_successes_{}'.format( self.task.__class__.__name__), 1, 1) Stats.incr('ti_successes') self.refresh_from_db(lock_for_update=True) self.state = State.SUCCESS except AirflowSkipException: self.refresh_from_db(lock_for_update=True) self.state = State.SKIPPED except AirflowRescheduleException as reschedule_exception: self.refresh_from_db() self._handle_reschedule(actual_start_date, reschedule_exception, test_mode, context) return except AirflowException as e: self.refresh_from_db() # for case when task is marked as success/failed externally # current behavior doesn't hit the success callback if self.state in {State.SUCCESS, State.FAILED}: return else: self.handle_failure(e, test_mode, context) raise except (Exception, KeyboardInterrupt) as e: self.handle_failure(e, test_mode, context) raise # Success callback try: if task.on_success_callback: task.on_success_callback(context) except Exception as e3: self.log.error("Failed when executing success callback") self.log.exception(e3) # Recording SUCCESS self.end_date = timezone.utcnow() self.set_duration() if not test_mode: session.add(Log(self.state, self)) session.merge(self) session.commit()
Make an XCom available for tasks to pull.
def xcom_push( self, key, value, execution_date=None): """ Make an XCom available for tasks to pull. :param key: A key for the XCom :type key: str :param value: A value for the XCom. The value is pickled and stored in the database. :type value: any pickleable object :param execution_date: if provided, the XCom will not be visible until this date. This can be used, for example, to send a message to a task on a future date without it being immediately visible. :type execution_date: datetime """ if execution_date and execution_date < self.execution_date: raise ValueError( 'execution_date can not be in the past (current ' 'execution_date is {}; received {})'.format( self.execution_date, execution_date)) XCom.set( key=key, value=value, task_id=self.task_id, dag_id=self.dag_id, execution_date=execution_date or self.execution_date)
Pull XComs that optionally meet certain criteria.
def xcom_pull( self, task_ids=None, dag_id=None, key=XCOM_RETURN_KEY, include_prior_dates=False): """ Pull XComs that optionally meet certain criteria. The default value for `key` limits the search to XComs that were returned by other tasks (as opposed to those that were pushed manually). To remove this filter, pass key=None (or any desired value). If a single task_id string is provided, the result is the value of the most recent matching XCom from that task_id. If multiple task_ids are provided, a tuple of matching values is returned. None is returned whenever no matches are found. :param key: A key for the XCom. If provided, only XComs with matching keys will be returned. The default key is 'return_value', also available as a constant XCOM_RETURN_KEY. This key is automatically given to XComs returned by tasks (as opposed to being pushed manually). To remove the filter, pass key=None. :type key: str :param task_ids: Only XComs from tasks with matching ids will be pulled. Can pass None to remove the filter. :type task_ids: str or iterable of strings (representing task_ids) :param dag_id: If provided, only pulls XComs from this DAG. If None (default), the DAG of the calling task is used. :type dag_id: str :param include_prior_dates: If False, only XComs from the current execution_date are returned. If True, XComs from previous dates are returned as well. :type include_prior_dates: bool """ if dag_id is None: dag_id = self.dag_id pull_fn = functools.partial( XCom.get_one, execution_date=self.execution_date, key=key, dag_id=dag_id, include_prior_dates=include_prior_dates) if is_container(task_ids): return tuple(pull_fn(task_id=t) for t in task_ids) else: return pull_fn(task_id=task_ids)
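As a hedged usage sketch of the push/pull pair above (task ids and the key name are hypothetical), XComs are typically exercised from inside task callables via the task instance available in the template context in Airflow 1.x:

# Inside PythonOperator callables (provide_context=True in Airflow 1.x);
# 'producer' and the 'row_count' key are hypothetical names.
def producer(**context):
    context['ti'].xcom_push(key='row_count', value=42)

def consumer(**context):
    # Pull the explicitly pushed key from the 'producer' task.
    row_count = context['ti'].xcom_pull(task_ids='producer', key='row_count')
    # Pulling with the default key returns whatever 'producer' returned, if anything.
    returned = context['ti'].xcom_pull(task_ids='producer')
    print(row_count, returned)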
Sets the log context.
def init_run_context(self, raw=False): """ Sets the log context. """ self.raw = raw self._set_context(self)
Close and upload the local log file to Wasb remote storage.
def close(self): """ Close and upload local log file to remote storage Wasb. """ # When application exit, system shuts down all handlers by # calling close method. Here we check if logger is already # closed to prevent uploading the log to remote storage multiple # times when `logging.shutdown` is called. if self.closed: return super().close() if not self.upload_on_close: return local_loc = os.path.join(self.local_base, self.log_relative_path) remote_loc = os.path.join(self.remote_base, self.log_relative_path) if os.path.exists(local_loc): # read log and remove old logs to get just the latest additions with open(local_loc, 'r') as logfile: log = logfile.read() self.wasb_write(log, remote_loc, append=True) if self.delete_local_copy: shutil.rmtree(os.path.dirname(local_loc)) # Mark closed so we don't double write if close is called twice self.closed = True
Read logs of given task instance and try_number from Wasb remote storage. If failed, read the log from the task instance host machine. :param ti: task instance object. :param try_number: task instance try_number to read logs from. :param metadata: log metadata, can be used for streaming log reading and auto-tailing.
def _read(self, ti, try_number, metadata=None): """ Read logs of given task instance and try_number from Wasb remote storage. If failed, read the log from task instance host machine. :param ti: task instance object :param try_number: task instance try_number to read logs from :param metadata: log metadata, can be used for steaming log reading and auto-tailing. """ # Explicitly getting log relative path is necessary as the given # task instance might be different than task instance passed in # in set_context method. log_relative_path = self._render_filename(ti, try_number) remote_loc = os.path.join(self.remote_base, log_relative_path) if self.wasb_log_exists(remote_loc): # If Wasb remote file exists, we do not fetch logs from task instance # local machine even if there are errors reading remote logs, as # returned remote_log will contain error messages. remote_log = self.wasb_read(remote_loc, return_error=True) log = '*** Reading remote log from {}.\n{}\n'.format( remote_loc, remote_log) return log, {'end_of_log': True} else: return super()._read(ti, try_number)
Check if remote_log_location exists in remote storage. :param remote_log_location: log's location in remote storage. :return: True if location exists else False.
def wasb_log_exists(self, remote_log_location): """ Check if remote_log_location exists in remote storage :param remote_log_location: log's location in remote storage :return: True if location exists else False """ try: return self.hook.check_for_blob(self.wasb_container, remote_log_location) except Exception: pass return False
Returns the log found at the remote_log_location. Returns '' if no logs are found or there is an error. :param remote_log_location: the log's location in remote storage. :type remote_log_location: str (path). :param return_error: if True, returns a string error message if an error occurs. Otherwise returns '' when an error occurs. :type return_error: bool.
def wasb_read(self, remote_log_location, return_error=False): """ Returns the log found at the remote_log_location. Returns '' if no logs are found or there is an error. :param remote_log_location: the log's location in remote storage :type remote_log_location: str (path) :param return_error: if True, returns a string error message if an error occurs. Otherwise returns '' when an error occurs. :type return_error: bool """ try: return self.hook.read_file(self.wasb_container, remote_log_location) except AzureHttpError: msg = 'Could not read logs from {}'.format(remote_log_location) self.log.exception(msg) # return error if needed if return_error: return msg
Writes the log to the remote_log_location. Fails silently if no hook was created. :param log: the log to write to the remote_log_location. :type log: str. :param remote_log_location: the log's location in remote storage. :type remote_log_location: str (path). :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :type append: bool.
def wasb_write(self, log, remote_log_location, append=True): """ Writes the log to the remote_log_location. Fails silently if no hook was created. :param log: the log to write to the remote_log_location :type log: str :param remote_log_location: the log's location in remote storage :type remote_log_location: str (path) :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :type append: bool """ if append and self.wasb_log_exists(remote_log_location): old_log = self.wasb_read(remote_log_location) log = '\n'.join([old_log, log]) if old_log else log try: self.hook.load_string( log, self.wasb_container, remote_log_location, ) except AzureHttpError: self.log.exception('Could not write logs to %s', remote_log_location)
Retrieves connection to Google Compute Engine.
def get_conn(self): """ Retrieves connection to Google Compute Engine. :return: Google Compute Engine services object :rtype: dict """ if not self._conn: http_authorized = self._authorize() self._conn = build('compute', self.api_version, http=http_authorized, cache_discovery=False) return self._conn
Starts an existing instance defined by project_id, zone and resource_id. Must be called with keyword arguments rather than positional.
def start_instance(self, zone, resource_id, project_id=None): """ Starts an existing instance defined by project_id, zone and resource_id. Must be called with keyword arguments rather than positional. :param zone: Google Cloud Platform zone where the instance exists :type zone: str :param resource_id: Name of the Compute Engine instance resource :type resource_id: str :param project_id: Optional, Google Cloud Platform project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self.get_conn().instances().start( project=project_id, zone=zone, instance=resource_id ).execute(num_retries=self.num_retries) try: operation_name = response["name"] except KeyError: raise AirflowException( "Wrong response '{}' returned - it should contain " "'name' field".format(response)) self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
Sets machine type of an instance defined by project_id, zone and resource_id. Must be called with keyword arguments rather than positional.
def set_machine_type(self, zone, resource_id, body, project_id=None): """ Sets machine type of an instance defined by project_id, zone and resource_id. Must be called with keyword arguments rather than positional. :param zone: Google Cloud Platform zone where the instance exists. :type zone: str :param resource_id: Name of the Compute Engine instance resource :type resource_id: str :param body: Body required by the Compute Engine setMachineType API, as described in https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType :type body: dict :param project_id: Optional, Google Cloud Platform project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self._execute_set_machine_type(zone, resource_id, body, project_id) try: operation_name = response["name"] except KeyError: raise AirflowException( "Wrong response '{}' returned - it should contain " "'name' field".format(response)) self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
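For reference, the setMachineType request body is small; a hedged sketch of a call through the hook above (zone, instance name, machine type and project are hypothetical, and gce_hook is assumed to be an already-constructed instance of this Compute Engine hook):

# 'gce_hook' is assumed to be an instance of the Compute Engine hook shown above.
body = {
    # The machine type is addressed relative to the zone, per the setMachineType API.
    "machineType": "zones/europe-west1-b/machineTypes/n1-standard-4"
}
gce_hook.set_machine_type(zone='europe-west1-b',
                          resource_id='example-instance',
                          body=body,
                          project_id='example-project')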
Retrieves instance template by project_id and resource_id. Must be called with keyword arguments rather than positional.
def get_instance_template(self, resource_id, project_id=None): """ Retrieves instance template by project_id and resource_id. Must be called with keyword arguments rather than positional. :param resource_id: Name of the instance template :type resource_id: str :param project_id: Optional, Google Cloud Platform project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: Instance template representation as object according to https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates :rtype: dict """ response = self.get_conn().instanceTemplates().get( project=project_id, instanceTemplate=resource_id ).execute(num_retries=self.num_retries) return response
Inserts an instance template using the body specified. Must be called with keyword arguments rather than positional.
def insert_instance_template(self, body, request_id=None, project_id=None): """ Inserts instance template using body specified Must be called with keyword arguments rather than positional. :param body: Instance template representation as object according to https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates :type body: dict :param request_id: Optional, unique request_id that you might add to achieve full idempotence (for example when client call times out repeating the request with the same request id will not create a new instance template again) It should be in UUID format as defined in RFC 4122 :type request_id: str :param project_id: Optional, Google Cloud Platform project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self.get_conn().instanceTemplates().insert( project=project_id, body=body, requestId=request_id ).execute(num_retries=self.num_retries) try: operation_name = response["name"] except KeyError: raise AirflowException( "Wrong response '{}' returned - it should contain " "'name' field".format(response)) self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
Retrieves Instance Group Manager by project_id, zone and resource_id. Must be called with keyword arguments rather than positional.
def get_instance_group_manager(self, zone, resource_id, project_id=None): """ Retrieves Instance Group Manager by project_id, zone and resource_id. Must be called with keyword arguments rather than positional. :param zone: Google Cloud Platform zone where the Instance Group Manager exists :type zone: str :param resource_id: Name of the Instance Group Manager :type resource_id: str :param project_id: Optional, Google Cloud Platform project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: Instance group manager representation as object according to https://cloud.google.com/compute/docs/reference/rest/beta/instanceGroupManagers :rtype: dict """ response = self.get_conn().instanceGroupManagers().get( project=project_id, zone=zone, instanceGroupManager=resource_id ).execute(num_retries=self.num_retries) return response
Patches Instance Group Manager with the specified body. Must be called with keyword arguments rather than positional.
def patch_instance_group_manager(self, zone, resource_id, body, request_id=None, project_id=None): """ Patches Instance Group Manager with the specified body. Must be called with keyword arguments rather than positional. :param zone: Google Cloud Platform zone where the Instance Group Manager exists :type zone: str :param resource_id: Name of the Instance Group Manager :type resource_id: str :param body: Instance Group Manager representation as json-merge-patch object according to https://cloud.google.com/compute/docs/reference/rest/beta/instanceTemplates/patch :type body: dict :param request_id: Optional, unique request_id that you might add to achieve full idempotence (for example when client call times out repeating the request with the same request id will not create a new instance template again). It should be in UUID format as defined in RFC 4122 :type request_id: str :param project_id: Optional, Google Cloud Platform project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ response = self.get_conn().instanceGroupManagers().patch( project=project_id, zone=zone, instanceGroupManager=resource_id, body=body, requestId=request_id ).execute(num_retries=self.num_retries) try: operation_name = response["name"] except KeyError: raise AirflowException( "Wrong response '{}' returned - it should contain " "'name' field".format(response)) self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
Waits for the named operation to complete - checks status of the async call.
def _wait_for_operation_to_complete(self, project_id, operation_name, zone=None): """ Waits for the named operation to complete - checks status of the async call. :param operation_name: name of the operation :type operation_name: str :param zone: optional region of the request (might be None for global operations) :type zone: str :return: None """ service = self.get_conn() while True: if zone is None: # noinspection PyTypeChecker operation_response = self._check_global_operation_status( service, operation_name, project_id) else: # noinspection PyTypeChecker operation_response = self._check_zone_operation_status( service, operation_name, project_id, zone, self.num_retries) if operation_response.get("status") == GceOperationStatus.DONE: error = operation_response.get("error") if error: code = operation_response.get("httpErrorStatusCode") msg = operation_response.get("httpErrorMessage") # Extracting the errors list as string and trimming square braces error_msg = str(error.get("errors"))[1:-1] raise AirflowException("{} {}: ".format(code, msg) + error_msg) # No meaningful info to return from the response in case of success return time.sleep(TIME_TO_SLEEP_IN_SECONDS)
Check if bucket_name exists.
def check_for_bucket(self, bucket_name): """ Check if bucket_name exists. :param bucket_name: the name of the bucket :type bucket_name: str """ try: self.get_conn().head_bucket(Bucket=bucket_name) return True except ClientError as e: self.log.info(e.response["Error"]["Message"]) return False
Creates an Amazon S3 bucket.
def create_bucket(self, bucket_name, region_name=None): """ Creates an Amazon S3 bucket. :param bucket_name: The name of the bucket :type bucket_name: str :param region_name: The name of the aws region in which to create the bucket. :type region_name: str """ s3_conn = self.get_conn() if not region_name: region_name = s3_conn.meta.region_name if region_name == 'us-east-1': self.get_conn().create_bucket(Bucket=bucket_name) else: self.get_conn().create_bucket(Bucket=bucket_name, CreateBucketConfiguration={ 'LocationConstraint': region_name })
Checks that a prefix exists in a bucket
def check_for_prefix(self, bucket_name, prefix, delimiter): """ Checks that a prefix exists in a bucket :param bucket_name: the name of the bucket :type bucket_name: str :param prefix: a key prefix :type prefix: str :param delimiter: the delimiter marks key hierarchy. :type delimiter: str """ prefix = prefix + delimiter if prefix[-1] != delimiter else prefix prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1) previous_level = prefix_split[0] plist = self.list_prefixes(bucket_name, previous_level, delimiter) return False if plist is None else prefix in plist
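A small pure-Python sketch of how the method above derives the parent level that it lists before checking membership (prefix and delimiter values are hypothetical; no S3 call is made here):

import re

prefix, delimiter = 'data/2019', '/'
# Normalize so the prefix ends with the delimiter, as the hook does.
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
# Split off the last path component to find the level that gets listed.
previous_level = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)[0]
print(previous_level)  # 'data/' -- check_for_prefix then looks for 'data/2019/' among its prefixes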
Lists prefixes in a bucket under prefix
def list_prefixes(self, bucket_name, prefix='', delimiter='', page_size=None, max_items=None): """ Lists prefixes in a bucket under prefix :param bucket_name: the name of the bucket :type bucket_name: str :param prefix: a key prefix :type prefix: str :param delimiter: the delimiter marks key hierarchy. :type delimiter: str :param page_size: pagination size :type page_size: int :param max_items: maximum items to return :type max_items: int """ config = { 'PageSize': page_size, 'MaxItems': max_items, } paginator = self.get_conn().get_paginator('list_objects_v2') response = paginator.paginate(Bucket=bucket_name, Prefix=prefix, Delimiter=delimiter, PaginationConfig=config) has_results = False prefixes = [] for page in response: if 'CommonPrefixes' in page: has_results = True for p in page['CommonPrefixes']: prefixes.append(p['Prefix']) if has_results: return prefixes
Lists keys in a bucket under prefix and not containing delimiter
def list_keys(self, bucket_name, prefix='', delimiter='', page_size=None, max_items=None): """ Lists keys in a bucket under prefix and not containing delimiter :param bucket_name: the name of the bucket :type bucket_name: str :param prefix: a key prefix :type prefix: str :param delimiter: the delimiter marks key hierarchy. :type delimiter: str :param page_size: pagination size :type page_size: int :param max_items: maximum items to return :type max_items: int """ config = { 'PageSize': page_size, 'MaxItems': max_items, } paginator = self.get_conn().get_paginator('list_objects_v2') response = paginator.paginate(Bucket=bucket_name, Prefix=prefix, Delimiter=delimiter, PaginationConfig=config) has_results = False keys = [] for page in response: if 'Contents' in page: has_results = True for k in page['Contents']: keys.append(k['Key']) if has_results: return keys
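A hedged usage sketch of the two listing helpers above (bucket and prefix names are hypothetical, and a configured 'aws_default' connection is assumed):

from airflow.hooks.S3_hook import S3Hook

s3 = S3Hook(aws_conn_id='aws_default')
# Keys under 'logs/2019-01-01/' -- returns None when nothing matches.
keys = s3.list_keys(bucket_name='example-bucket', prefix='logs/2019-01-01/', delimiter='/')
# "Subdirectories" under 'logs/' reported as CommonPrefixes.
prefixes = s3.list_prefixes(bucket_name='example-bucket', prefix='logs/', delimiter='/')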
Checks if a key exists in a bucket
def check_for_key(self, key, bucket_name=None): """ Checks if a key exists in a bucket :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which the file is stored :type bucket_name: str """ if not bucket_name: (bucket_name, key) = self.parse_s3_url(key) try: self.get_conn().head_object(Bucket=bucket_name, Key=key) return True except ClientError as e: self.log.info(e.response["Error"]["Message"]) return False
Returns a boto3.s3.Object
def get_key(self, key, bucket_name=None): """ Returns a boto3.s3.Object :param key: the path to the key :type key: str :param bucket_name: the name of the bucket :type bucket_name: str """ if not bucket_name: (bucket_name, key) = self.parse_s3_url(key) obj = self.get_resource_type('s3').Object(bucket_name, key) obj.load() return obj
Reads a key from S3
def read_key(self, key, bucket_name=None): """ Reads a key from S3 :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which the file is stored :type bucket_name: str """ obj = self.get_key(key, bucket_name) return obj.get()['Body'].read().decode('utf-8')
Reads a key with S3 Select.
def select_key(self, key, bucket_name=None, expression='SELECT * FROM S3Object', expression_type='SQL', input_serialization=None, output_serialization=None): """ Reads a key with S3 Select. :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which the file is stored :type bucket_name: str :param expression: S3 Select expression :type expression: str :param expression_type: S3 Select expression type :type expression_type: str :param input_serialization: S3 Select input data serialization format :type input_serialization: dict :param output_serialization: S3 Select output data serialization format :type output_serialization: dict :return: retrieved subset of original data by S3 Select :rtype: str .. seealso:: For more details about S3 Select parameters: http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content """ if input_serialization is None: input_serialization = {'CSV': {}} if output_serialization is None: output_serialization = {'CSV': {}} if not bucket_name: (bucket_name, key) = self.parse_s3_url(key) response = self.get_conn().select_object_content( Bucket=bucket_name, Key=key, Expression=expression, ExpressionType=expression_type, InputSerialization=input_serialization, OutputSerialization=output_serialization) return ''.join(event['Records']['Payload'].decode('utf-8') for event in response['Payload'] if 'Records' in event)
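A hedged usage sketch of select_key (bucket and key are hypothetical; 'FileHeaderInfo' is a standard CSV input-serialization option of S3 Select, shown here only to illustrate where such options go):

from airflow.hooks.S3_hook import S3Hook

s3 = S3Hook(aws_conn_id='aws_default')
csv_head = s3.select_key(
    key='s3://example-bucket/data/input.csv',
    expression='SELECT * FROM S3Object s LIMIT 10',
    input_serialization={'CSV': {'FileHeaderInfo': 'USE'}},
    output_serialization={'CSV': {}})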
Checks that a key matching a wildcard expression exists in a bucket
def check_for_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''): """ Checks that a key matching a wildcard expression exists in a bucket :param wildcard_key: the path to the key :type wildcard_key: str :param bucket_name: the name of the bucket :type bucket_name: str :param delimiter: the delimiter marks key hierarchy :type delimiter: str """ return self.get_wildcard_key(wildcard_key=wildcard_key, bucket_name=bucket_name, delimiter=delimiter) is not None
Returns a boto3.s3.Object object matching the wildcard expression
def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''): """ Returns a boto3.s3.Object object matching the wildcard expression :param wildcard_key: the path to the key :type wildcard_key: str :param bucket_name: the name of the bucket :type bucket_name: str :param delimiter: the delimiter marks key hierarchy :type delimiter: str """ if not bucket_name: (bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key) prefix = re.split(r'[*]', wildcard_key, 1)[0] klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter) if klist: key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)] if key_matches: return self.get_key(key_matches[0], bucket_name)
Loads a local file to S3
def load_file(self, filename, key, bucket_name=None, replace=False, encrypt=False): """ Loads a local file to S3 :param filename: name of the file to load. :type filename: str :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which to store the file :type bucket_name: str :param replace: A flag to decide whether or not to overwrite the key if it already exists. If replace is False and the key exists, an error will be raised. :type replace: bool :param encrypt: If True, the file will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :type encrypt: bool """ if not bucket_name: (bucket_name, key) = self.parse_s3_url(key) if not replace and self.check_for_key(key, bucket_name): raise ValueError("The key {key} already exists.".format(key=key)) extra_args = {} if encrypt: extra_args['ServerSideEncryption'] = "AES256" client = self.get_conn() client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args)
Loads a string to S3
def load_string(self, string_data, key, bucket_name=None, replace=False, encrypt=False, encoding='utf-8'): """ Loads a string to S3 This is provided as a convenience to drop a string in S3. It uses the boto infrastructure to ship a file to s3. :param string_data: str to set as content for the key. :type string_data: str :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which to store the file :type bucket_name: str :param replace: A flag to decide whether or not to overwrite the key if it already exists :type replace: bool :param encrypt: If True, the file will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :type encrypt: bool """ self.load_bytes(string_data.encode(encoding), key=key, bucket_name=bucket_name, replace=replace, encrypt=encrypt)
Loads bytes to S3
def load_bytes(self, bytes_data, key, bucket_name=None, replace=False, encrypt=False): """ Loads bytes to S3 This is provided as a convenience to drop a string in S3. It uses the boto infrastructure to ship a file to s3. :param bytes_data: bytes to set as content for the key. :type bytes_data: bytes :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which to store the file :type bucket_name: str :param replace: A flag to decide whether or not to overwrite the key if it already exists :type replace: bool :param encrypt: If True, the file will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :type encrypt: bool """ if not bucket_name: (bucket_name, key) = self.parse_s3_url(key) if not replace and self.check_for_key(key, bucket_name): raise ValueError("The key {key} already exists.".format(key=key)) extra_args = {} if encrypt: extra_args['ServerSideEncryption'] = "AES256" filelike_buffer = BytesIO(bytes_data) client = self.get_conn() client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args)
Loads a file object to S3
def load_file_obj(self, file_obj, key, bucket_name=None, replace=False, encrypt=False): """ Loads a file object to S3 :param file_obj: The file-like object to set as the content for the S3 key. :type file_obj: file-like object :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which to store the file :type bucket_name: str :param replace: A flag that indicates whether to overwrite the key if it already exists. :type replace: bool :param encrypt: If True, S3 encrypts the file on the server, and the file is stored in encrypted form at rest in S3. :type encrypt: bool """ if not bucket_name: (bucket_name, key) = self.parse_s3_url(key) if not replace and self.check_for_key(key, bucket_name): raise ValueError("The key {key} already exists.".format(key=key)) extra_args = {} if encrypt: extra_args['ServerSideEncryption'] = "AES256" client = self.get_conn() client.upload_fileobj(file_obj, bucket_name, key, ExtraArgs=extra_args)
Creates a copy of an object that is already stored in S3.
def copy_object(self, source_bucket_key, dest_bucket_key, source_bucket_name=None, dest_bucket_name=None, source_version_id=None): """ Creates a copy of an object that is already stored in S3. Note: the S3 connection used here needs to have access to both source and destination bucket/key. :param source_bucket_key: The key of the source object. It can be either full s3:// style url or relative path from root level. When it's specified as a full s3:// url, please omit source_bucket_name. :type source_bucket_key: str :param dest_bucket_key: The key of the object to copy to. The convention to specify `dest_bucket_key` is the same as `source_bucket_key`. :type dest_bucket_key: str :param source_bucket_name: Name of the S3 bucket where the source object is in. It should be omitted when `source_bucket_key` is provided as a full s3:// url. :type source_bucket_name: str :param dest_bucket_name: Name of the S3 bucket to where the object is copied. It should be omitted when `dest_bucket_key` is provided as a full s3:// url. :type dest_bucket_name: str :param source_version_id: Version ID of the source object (OPTIONAL) :type source_version_id: str """ if dest_bucket_name is None: dest_bucket_name, dest_bucket_key = self.parse_s3_url(dest_bucket_key) else: parsed_url = urlparse(dest_bucket_key) if parsed_url.scheme != '' or parsed_url.netloc != '': raise AirflowException('If dest_bucket_name is provided, ' + 'dest_bucket_key should be relative path ' + 'from root level, rather than a full s3:// url') if source_bucket_name is None: source_bucket_name, source_bucket_key = self.parse_s3_url(source_bucket_key) else: parsed_url = urlparse(source_bucket_key) if parsed_url.scheme != '' or parsed_url.netloc != '': raise AirflowException('If source_bucket_name is provided, ' + 'source_bucket_key should be relative path ' + 'from root level, rather than a full s3:// url') CopySource = {'Bucket': source_bucket_name, 'Key': source_bucket_key, 'VersionId': source_version_id} response = self.get_conn().copy_object(Bucket=dest_bucket_name, Key=dest_bucket_key, CopySource=CopySource) return response
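A hedged usage sketch showing the two equivalent addressing styles accepted above (bucket and key names are hypothetical; an S3Hook instance configured as in the earlier sketches is assumed):

from airflow.hooks.S3_hook import S3Hook

s3 = S3Hook(aws_conn_id='aws_default')
# Full s3:// URLs -- the bucket-name arguments must then be omitted.
s3.copy_object(source_bucket_key='s3://source-bucket/data/file.csv',
               dest_bucket_key='s3://dest-bucket/archive/file.csv')
# Relative keys plus explicit bucket names.
s3.copy_object(source_bucket_key='data/file.csv',
               dest_bucket_key='archive/file.csv',
               source_bucket_name='source-bucket',
               dest_bucket_name='dest-bucket')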
Deletes object(s) from an S3 bucket. :param bucket: name of the bucket in which you are going to delete object(s). :type bucket: str. :param keys: the key(s) to delete from the S3 bucket.
def delete_objects(self, bucket, keys): """ :param bucket: Name of the bucket in which you are going to delete object(s) :type bucket: str :param keys: The key(s) to delete from S3 bucket. When ``keys`` is a string, it's supposed to be the key name of the single object to delete. When ``keys`` is a list, it's supposed to be the list of the keys to delete. :type keys: str or list """ if isinstance(keys, list): keys = keys else: keys = [keys] delete_dict = {"Objects": [{"Key": k} for k in keys]} response = self.get_conn().delete_objects(Bucket=bucket, Delete=delete_dict) return response
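A hedged usage sketch (names hypothetical); either a single key string or a list of keys is accepted:

from airflow.hooks.S3_hook import S3Hook

s3 = S3Hook(aws_conn_id='aws_default')
s3.delete_objects(bucket='example-bucket', keys='archive/file.csv')
s3.delete_objects(bucket='example-bucket', keys=['a.csv', 'b.csv'])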
Queries cassandra and returns a cursor to the results.
def _query_cassandra(self): """ Queries cassandra and returns a cursor to the results. """ self.hook = CassandraHook(cassandra_conn_id=self.cassandra_conn_id) session = self.hook.get_conn() cursor = session.execute(self.cql) return cursor
Takes a cursor and writes results to a local file.
def _write_local_data_files(self, cursor): """ Takes a cursor, and writes results to a local file. :return: A dictionary where keys are filenames to be used as object names in GCS, and values are file handles to local files that contain the data for the GCS objects. """ file_no = 0 tmp_file_handle = NamedTemporaryFile(delete=True) tmp_file_handles = {self.filename.format(file_no): tmp_file_handle} for row in cursor: row_dict = self.generate_data_dict(row._fields, row) s = json.dumps(row_dict).encode('utf-8') tmp_file_handle.write(s) # Append newline to make dumps BigQuery compatible. tmp_file_handle.write(b'\n') if tmp_file_handle.tell() >= self.approx_max_file_size_bytes: file_no += 1 tmp_file_handle = NamedTemporaryFile(delete=True) tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle return tmp_file_handles
Takes a cursor and writes the BigQuery schema for the results to a local file system.
def _write_local_schema_file(self, cursor): """ Takes a cursor, and writes the BigQuery schema for the results to a local file system. :return: A dictionary where key is a filename to be used as an object name in GCS, and values are file handles to local files that contains the BigQuery schema fields in .json format. """ schema = [] tmp_schema_file_handle = NamedTemporaryFile(delete=True) for name, type in zip(cursor.column_names, cursor.column_types): schema.append(self.generate_schema_dict(name, type)) json_serialized_schema = json.dumps(schema).encode('utf-8') tmp_schema_file_handle.write(json_serialized_schema) return {self.schema_filename: tmp_schema_file_handle}
Converts a user type to RECORD that contains n fields, where n is the number of attributes. Each element in the user type class will be converted to its corresponding data type in BQ.
def convert_user_type(cls, name, value): """ Converts a user type to RECORD that contains n fields, where n is the number of attributes. Each element in the user type class will be converted to its corresponding data type in BQ. """ names = value._fields values = [cls.convert_value(name, getattr(value, name)) for name in names] return cls.generate_data_dict(names, values)
Converts a tuple to RECORD that contains n fields; each will be converted to its corresponding data type in BQ and will be named 'field_<index>', where index is determined by the order of the tuple elements defined in Cassandra.
def convert_tuple_type(cls, name, value): """ Converts a tuple to RECORD that contains n fields, each will be converted to its corresponding data type in bq and will be named 'field_<index>', where index is determined by the order of the tuple elements defined in cassandra. """ names = ['field_' + str(i) for i in range(len(value))] values = [cls.convert_value(name, value) for name, value in zip(names, value)] return cls.generate_data_dict(names, values)
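To make the naming convention concrete, a tiny sketch of the record shape produced for a Cassandra tuple (values hypothetical; the real method also recursively converts each element via convert_value, and generate_data_dict is assumed to simply zip names and values into a dict):

value = ('alice', 42)
record = {'field_' + str(i): v for i, v in enumerate(value)}
print(record)  # {'field_0': 'alice', 'field_1': 42}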
Converts a map to a repeated RECORD that contains two fields: 'key' and 'value'; each will be converted to its corresponding data type in BQ.
def convert_map_type(cls, name, value): """ Converts a map to a repeated RECORD that contains two fields: 'key' and 'value', each will be converted to its corresponding data type in BQ. """ converted_map = [] for k, v in zip(value.keys(), value.values()): converted_map.append({ 'key': cls.convert_value('key', k), 'value': cls.convert_value('value', v) }) return converted_map
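Similarly, a sketch of the repeated-RECORD shape the map conversion above produces (values hypothetical; conversion of the individual keys and values via convert_value is omitted):

value = {'en': 'hello', 'fr': 'bonjour'}
converted_map = [{'key': k, 'value': v} for k, v in value.items()]
print(converted_map)  # [{'key': 'en', 'value': 'hello'}, {'key': 'fr', 'value': 'bonjour'}]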
Send an email with html content using sendgrid.
def send_email(to, subject, html_content, files=None, dryrun=False, cc=None, bcc=None, mime_subtype='mixed', sandbox_mode=False, **kwargs): """ Send an email with html content using sendgrid. To use this plugin: 0. include sendgrid subpackage as part of your Airflow installation, e.g., pip install 'apache-airflow[sendgrid]' 1. update [email] backend in airflow.cfg, i.e., [email] email_backend = airflow.contrib.utils.sendgrid.send_email 2. configure Sendgrid specific environment variables at all Airflow instances: SENDGRID_MAIL_FROM={your-mail-from} SENDGRID_API_KEY={your-sendgrid-api-key}. """ if files is None: files = [] mail = Mail() from_email = kwargs.get('from_email') or os.environ.get('SENDGRID_MAIL_FROM') from_name = kwargs.get('from_name') or os.environ.get('SENDGRID_MAIL_SENDER') mail.from_email = Email(from_email, from_name) mail.subject = subject mail.mail_settings = MailSettings() if sandbox_mode: mail.mail_settings.sandbox_mode = SandBoxMode(enable=True) # Add the recipient list of to emails. personalization = Personalization() to = get_email_address_list(to) for to_address in to: personalization.add_to(Email(to_address)) if cc: cc = get_email_address_list(cc) for cc_address in cc: personalization.add_cc(Email(cc_address)) if bcc: bcc = get_email_address_list(bcc) for bcc_address in bcc: personalization.add_bcc(Email(bcc_address)) # Add custom_args to personalization if present pers_custom_args = kwargs.get('personalization_custom_args', None) if isinstance(pers_custom_args, dict): for key in pers_custom_args.keys(): personalization.add_custom_arg(CustomArg(key, pers_custom_args[key])) mail.add_personalization(personalization) mail.add_content(Content('text/html', html_content)) categories = kwargs.get('categories', []) for cat in categories: mail.add_category(Category(cat)) # Add email attachment. for fname in files: basename = os.path.basename(fname) attachment = Attachment() attachment.type = mimetypes.guess_type(basename)[0] attachment.filename = basename attachment.disposition = "attachment" attachment.content_id = '<{0}>'.format(basename) with open(fname, "rb") as f: attachment.content = base64.b64encode(f.read()).decode('utf-8') mail.add_attachment(attachment) _post_sendgrid_mail(mail.get())
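Once the [email] backend is configured as the docstring describes, callers normally go through Airflow's generic email helper rather than this module directly; a hedged usage sketch (addresses are hypothetical):

from airflow.utils.email import send_email

# Dispatches to the sendgrid backend when [email] email_backend points at it.
send_email(to='alice@example.com',
           subject='Job finished',
           html_content='<b>All tasks succeeded.</b>',
           cc='bob@example.com')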