Retrieves connection to Cloud Speech.
def get_conn(self): """ Retrieves connection to Cloud Speech. :return: Google Cloud Speech client object. :rtype: google.cloud.speech_v1.SpeechClient """ if not self._client: self._client = SpeechClient(credentials=self._get_credentials()) return self._client
Recognizes audio input
def recognize_speech(self, config, audio, retry=None, timeout=None): """ Recognizes audio input :param config: information to the recognizer that specifies how to process the request. https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig :type config: dict or google.cloud.speech_v1.types.RecognitionConfig :param audio: audio data to be recognized https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio :type audio: dict or google.cloud.speech_v1.types.RecognitionAudio :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float """ client = self.get_conn() response = client.recognize(config=config, audio=audio, retry=retry, timeout=timeout) self.log.info("Recognised speech: %s" % response) return response
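A hedged usage sketch of the two hook methods above; the contrib import path, connection id, GCS URI and audio settings are assumptions, not taken from the source.
# Minimal usage sketch (assumed import path and connection id; the GCS URI is hypothetical).
from airflow.contrib.hooks.gcp_speech_to_text_hook import GCPSpeechToTextHook

hook = GCPSpeechToTextHook(gcp_conn_id="google_cloud_default")
config = {"encoding": "LINEAR16", "sample_rate_hertz": 16000, "language_code": "en_US"}
audio = {"uri": "gs://my-bucket/sample.raw"}
response = hook.recognize_speech(config=config, audio=audio)
for result in response.results:
    # each result carries one or more alternative transcripts
    print(result.alternatives[0].transcript)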
Call the SparkSqlHook to run the provided sql query
def execute(self, context): """ Call the SparkSqlHook to run the provided sql query """ self._hook = SparkSqlHook(sql=self._sql, conf=self._conf, conn_id=self._conn_id, total_executor_cores=self._total_executor_cores, executor_cores=self._executor_cores, executor_memory=self._executor_memory, keytab=self._keytab, principal=self._principal, name=self._name, num_executors=self._num_executors, master=self._master, yarn_queue=self._yarn_queue ) self._hook.run_query()
Provide task_instance context to airflow task handler. :param ti: task instance object
def set_context(self, ti): """ Provide task_instance context to airflow task handler. :param ti: task instance object """ local_loc = self._init_file(ti) self.handler = logging.FileHandler(local_loc) self.handler.setFormatter(self.formatter) self.handler.setLevel(self.level)
Template method that contains custom logic of reading logs given the try_number. :param ti: task instance record :param try_number: current try_number to read log from :param metadata: log metadata, can be used for streaming log reading and auto-tailing. :return: log message as a string and metadata.
def _read(self, ti, try_number, metadata=None): """ Template method that contains custom logic of reading logs given the try_number. :param ti: task instance record :param try_number: current try_number to read log from :param metadata: log metadata, can be used for steaming log reading and auto-tailing. :return: log message as a string and metadata. """ # Task instance here might be different from task instance when # initializing the handler. Thus explicitly getting log location # is needed to get correct log path. log_relative_path = self._render_filename(ti, try_number) location = os.path.join(self.local_base, log_relative_path) log = "" if os.path.exists(location): try: with open(location) as f: log += "*** Reading local file: {}\n".format(location) log += "".join(f.readlines()) except Exception as e: log = "*** Failed to load local log file: {}\n".format(location) log += "*** {}\n".format(str(e)) else: url = os.path.join( "http://{ti.hostname}:{worker_log_server_port}/log", log_relative_path ).format( ti=ti, worker_log_server_port=conf.get('celery', 'WORKER_LOG_SERVER_PORT') ) log += "*** Log file does not exist: {}\n".format(location) log += "*** Fetching from: {}\n".format(url) try: timeout = None # No timeout try: timeout = conf.getint('webserver', 'log_fetch_timeout_sec') except (AirflowConfigException, ValueError): pass response = requests.get(url, timeout=timeout) # Check if the resource was properly fetched response.raise_for_status() log += '\n' + response.text except Exception as e: log += "*** Failed to fetch log file from worker. {}\n".format(str(e)) return log, {'end_of_log': True}
Read logs of given task instance from local machine. :param task_instance: task instance object :param try_number: task instance try_number to read logs from. If None, it returns all logs separated by try_number :param metadata: log metadata, can be used for streaming log reading and auto-tailing. :return: a list of logs
def read(self, task_instance, try_number=None, metadata=None): """ Read logs of given task instance from local machine. :param task_instance: task instance object :param try_number: task instance try_number to read logs from. If None it returns all logs separated by try_number :param metadata: log metadata, can be used for steaming log reading and auto-tailing. :return: a list of logs """ # Task instance increments its try number when it starts to run. # So the log for a particular task try will only show up when # try number gets incremented in DB, i.e logs produced the time # after cli run and before try_number + 1 in DB will not be displayed. if try_number is None: next_try = task_instance.next_try_number try_numbers = list(range(1, next_try)) elif try_number < 1: logs = [ 'Error fetching the logs. Try number {} is invalid.'.format(try_number), ] return logs else: try_numbers = [try_number] logs = [''] * len(try_numbers) metadatas = [{}] * len(try_numbers) for i, try_number in enumerate(try_numbers): log, metadata = self._read(task_instance, try_number, metadata) logs[i] += log metadatas[i] = metadata return logs, metadatas
Create log directory and give it correct permissions. :param ti: task instance object :return: relative log path of the given task instance
def _init_file(self, ti): """ Create log directory and give it correct permissions. :param ti: task instance object :return: relative log path of the given task instance """ # To handle log writing when tasks are impersonated, the log files need to # be writable by the user that runs the Airflow command and the user # that is impersonated. This is mainly to handle corner cases with the # SubDagOperator. When the SubDagOperator is run, all of the operators # run under the impersonated user and create appropriate log files # as the impersonated user. However, if the user manually runs tasks # of the SubDagOperator through the UI, then the log files are created # by the user that runs the Airflow command. For example, the Airflow # run command may be run by the `airflow_sudoable` user, but the Airflow # tasks may be run by the `airflow` user. If the log files are not # writable by both users, then it's possible that re-running a task # via the UI (or vice versa) results in a permission error as the task # tries to write to a log file created by the other user. relative_path = self._render_filename(ti, ti.try_number) full_path = os.path.join(self.local_base, relative_path) directory = os.path.dirname(full_path) # Create the log file and give it group writable permissions # TODO(aoen): Make log dirs and logs globally readable for now since the SubDag # operator is not compatible with impersonation (e.g. if a Celery executor is used # for a SubDag operator and the SubDag operator has a different owner than the # parent DAG) if not os.path.exists(directory): # Create the directory as globally writable using custom mkdirs # as os.makedirs doesn't set mode properly. mkdirs(directory, 0o777) if not os.path.exists(full_path): open(full_path, "a").close() # TODO: Investigate using 444 instead of 666. os.chmod(full_path, 0o666) return full_path
Load AirflowPlugin subclasses from the entrypoints provided. The entry_point group should be 'airflow.plugins'.
def load_entrypoint_plugins(entry_points, airflow_plugins): """ Load AirflowPlugin subclasses from the entrypoints provided. The entry_point group should be 'airflow.plugins'. :param entry_points: A collection of entrypoints to search for plugins :type entry_points: Generator[setuptools.EntryPoint, None, None] :param airflow_plugins: A collection of existing airflow plugins to ensure we don't load duplicates :type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]] :rtype: list[airflow.plugins_manager.AirflowPlugin] """ for entry_point in entry_points: log.debug('Importing entry_point plugin %s', entry_point.name) plugin_obj = entry_point.load() if is_valid_plugin(plugin_obj, airflow_plugins): if callable(getattr(plugin_obj, 'on_load', None)): plugin_obj.on_load() airflow_plugins.append(plugin_obj) return airflow_plugins
Check whether a potential object is a subclass of the AirflowPlugin class.
def is_valid_plugin(plugin_obj, existing_plugins): """ Check whether a potential object is a subclass of the AirflowPlugin class. :param plugin_obj: potential subclass of AirflowPlugin :param existing_plugins: Existing list of AirflowPlugin subclasses :return: Whether or not the obj is a valid subclass of AirflowPlugin """ if ( inspect.isclass(plugin_obj) and issubclass(plugin_obj, AirflowPlugin) and (plugin_obj is not AirflowPlugin) ): plugin_obj.validate() return plugin_obj not in existing_plugins return False
Sets task instances to skipped from the same dag run.
def skip(self, dag_run, execution_date, tasks, session=None): """ Sets tasks instances to skipped from the same dag run. :param dag_run: the DagRun for which to set the tasks to skipped :param execution_date: execution_date :param tasks: tasks to skip (not task_ids) :param session: db session to use """ if not tasks: return task_ids = [d.task_id for d in tasks] now = timezone.utcnow() if dag_run: session.query(TaskInstance).filter( TaskInstance.dag_id == dag_run.dag_id, TaskInstance.execution_date == dag_run.execution_date, TaskInstance.task_id.in_(task_ids) ).update({TaskInstance.state: State.SKIPPED, TaskInstance.start_date: now, TaskInstance.end_date: now}, synchronize_session=False) session.commit() else: assert execution_date is not None, "Execution date is None and no dag run" self.log.warning("No DAG RUN present this should not happen") # this is defensive against dag runs that are not complete for task in tasks: ti = TaskInstance(task, execution_date=execution_date) ti.state = State.SKIPPED ti.start_date = now ti.end_date = now session.merge(ti) session.commit()
Return an AzureDLFileSystem object.
def get_conn(self): """Return a AzureDLFileSystem object.""" conn = self.get_connection(self.conn_id) service_options = conn.extra_dejson self.account_name = service_options.get('account_name') adlCreds = lib.auth(tenant_id=service_options.get('tenant'), client_secret=conn.password, client_id=conn.login) adlsFileSystemClient = core.AzureDLFileSystem(adlCreds, store_name=self.account_name) adlsFileSystemClient.connect() return adlsFileSystemClient
Check if a file exists on Azure Data Lake.
def check_for_file(self, file_path): """ Check if a file exists on Azure Data Lake. :param file_path: Path and name of the file. :type file_path: str :return: True if the file exists, False otherwise. :rtype: bool """ try: files = self.connection.glob(file_path, details=False, invalidate_cache=True) return len(files) == 1 except FileNotFoundError: return False
Upload a file to Azure Data Lake.
def upload_file(self, local_path, remote_path, nthreads=64, overwrite=True, buffersize=4194304, blocksize=4194304): """ Upload a file to Azure Data Lake. :param local_path: local path. Can be single file, directory (in which case, upload recursively) or glob pattern. Recursive glob patterns using `**` are not supported. :type local_path: str :param remote_path: Remote path to upload to; if multiple files, this is the directory root to write within. :type remote_path: str :param nthreads: Number of threads to use. If None, uses the number of cores. :type nthreads: int :param overwrite: Whether to forcibly overwrite existing files/directories. If False and remote path is a directory, will quit regardless if any files would be overwritten or not. If True, only matching filenames are actually overwritten. :type overwrite: bool :param buffersize: int [2**22] Number of bytes for internal buffer. This block cannot be bigger than a chunk and cannot be smaller than a block. :type buffersize: int :param blocksize: int [2**22] Number of bytes for a block. Within each chunk, we write a smaller block for each API call. This block cannot be bigger than a chunk. :type blocksize: int """ multithread.ADLUploader(self.connection, lpath=local_path, rpath=remote_path, nthreads=nthreads, overwrite=overwrite, buffersize=buffersize, blocksize=blocksize)
Download a file from Azure Data Lake.
def download_file(self, local_path, remote_path, nthreads=64, overwrite=True, buffersize=4194304, blocksize=4194304): """ Download a file from Azure Blob Storage. :param local_path: local path. If downloading a single file, will write to this specific file, unless it is an existing directory, in which case a file is created within it. If downloading multiple files, this is the root directory to write within. Will create directories as required. :type local_path: str :param remote_path: remote path/globstring to use to find remote files. Recursive glob patterns using `**` are not supported. :type remote_path: str :param nthreads: Number of threads to use. If None, uses the number of cores. :type nthreads: int :param overwrite: Whether to forcibly overwrite existing files/directories. If False and remote path is a directory, will quit regardless if any files would be overwritten or not. If True, only matching filenames are actually overwritten. :type overwrite: bool :param buffersize: int [2**22] Number of bytes for internal buffer. This block cannot be bigger than a chunk and cannot be smaller than a block. :type buffersize: int :param blocksize: int [2**22] Number of bytes for a block. Within each chunk, we write a smaller block for each API call. This block cannot be bigger than a chunk. :type blocksize: int """ multithread.ADLDownloader(self.connection, lpath=local_path, rpath=remote_path, nthreads=nthreads, overwrite=overwrite, buffersize=buffersize, blocksize=blocksize)
List files in Azure Data Lake Storage
def list(self, path): """ List files in Azure Data Lake Storage :param path: full path/globstring to use to list files in ADLS :type path: str """ if "*" in path: return self.connection.glob(path) else: return self.connection.walk(path)
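A hedged sketch tying the Azure Data Lake hook methods above together; the contrib import path, connection id and paths are assumptions.
# Usage sketch (assumed contrib import path and connection id; paths are hypothetical).
from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook

hook = AzureDataLakeHook(azure_data_lake_conn_id="azure_data_lake_default")
remote = "raw/2019-01-01/events.json"
if not hook.check_for_file(remote):
    # upload a single local file to the remote path
    hook.upload_file(local_path="/tmp/events.json", remote_path=remote)
# glob patterns go through connection.glob, plain paths through connection.walk
print(hook.list("raw/2019-01-01/*.json"))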
Run Presto Query on Athena
def execute(self, context): """ Run Presto Query on Athena """ self.hook = self.get_hook() self.hook.get_conn() self.query_execution_context['Database'] = self.database self.result_configuration['OutputLocation'] = self.output_location self.query_execution_id = self.hook.run_query(self.query, self.query_execution_context, self.result_configuration, self.client_request_token) query_status = self.hook.poll_query_status(self.query_execution_id, self.max_tries) if query_status in AWSAthenaHook.FAILURE_STATES: raise Exception( 'Final state of Athena job is {}, query_execution_id is {}.' .format(query_status, self.query_execution_id)) elif not query_status or query_status in AWSAthenaHook.INTERMEDIATE_STATES: raise Exception( 'Final state of Athena job is {}. ' 'Max tries of poll status exceeded, query_execution_id is {}.' .format(query_status, self.query_execution_id))
Cancel the submitted athena query
def on_kill(self): """ Cancel the submitted athena query """ if self.query_execution_id: self.log.info('⚰️⚰️⚰️ Received a kill Signal. Time to Die') self.log.info( 'Stopping Query with executionId - %s', self.query_execution_id ) response = self.hook.stop_query(self.query_execution_id) http_status_code = None try: http_status_code = response['ResponseMetadata']['HTTPStatusCode'] except Exception as ex: self.log.error('Exception while cancelling query: %s', ex) finally: if http_status_code is None or http_status_code != 200: self.log.error('Unable to request query cancel on athena. Exiting') else: self.log.info( 'Polling Athena for query with id %s to reach final state', self.query_execution_id ) self.hook.poll_query_status(self.query_execution_id)
Uncompress gz and bz2 files
def uncompress_file(input_file_name, file_extension, dest_dir): """ Uncompress gz and bz2 files """ if file_extension.lower() not in ('.gz', '.bz2'): raise NotImplementedError("Received {} format. Only gz and bz2 " "files can currently be uncompressed." .format(file_extension)) if file_extension.lower() == '.gz': fmodule = gzip.GzipFile elif file_extension.lower() == '.bz2': fmodule = bz2.BZ2File with fmodule(input_file_name, mode='rb') as f_compressed,\ NamedTemporaryFile(dir=dest_dir, mode='wb', delete=False) as f_uncompressed: shutil.copyfileobj(f_compressed, f_uncompressed) return f_uncompressed.name
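A small self-contained sketch of calling the helper above; the import path is an assumption and the fixture file is created on the fly.
# Sketch: build a tiny .gz fixture, then uncompress it into the same scratch directory.
import gzip
import os
import tempfile

from airflow.utils.compression import uncompress_file  # assumed module path

dest_dir = tempfile.mkdtemp()
compressed = os.path.join(dest_dir, "data.csv.gz")
with gzip.open(compressed, "wb") as f:
    f.write(b"id,value\n1,42\n")

uncompressed_path = uncompress_file(compressed, ".gz", dest_dir)
with open(uncompressed_path, "rb") as f:
    print(f.read())  # b'id,value\n1,42\n'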
Queries MSSQL and returns a cursor of results.
def _query_mssql(self): """ Queries MSSQL and returns a cursor of results. :return: mssql cursor """ mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id) conn = mssql.get_conn() cursor = conn.cursor() cursor.execute(self.sql) return cursor
Takes a cursor and writes results to a local file.
def _write_local_data_files(self, cursor): """ Takes a cursor, and writes results to a local file. :return: A dictionary where keys are filenames to be used as object names in GCS, and values are file handles to local files that contain the data for the GCS objects. """ schema = list(map(lambda schema_tuple: schema_tuple[0].replace(' ', '_'), cursor.description)) file_no = 0 tmp_file_handle = NamedTemporaryFile(delete=True) tmp_file_handles = {self.filename.format(file_no): tmp_file_handle} for row in cursor: # Convert if needed row = map(self.convert_types, row) row_dict = dict(zip(schema, row)) s = json.dumps(row_dict, sort_keys=True) s = s.encode('utf-8') tmp_file_handle.write(s) # Append newline to make dumps BQ compatible tmp_file_handle.write(b'\n') # Stop if the file exceeds the file size limit if tmp_file_handle.tell() >= self.approx_max_file_size_bytes: file_no += 1 tmp_file_handle = NamedTemporaryFile(delete=True) tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle return tmp_file_handles
Upload all of the file splits (and optionally the schema .json file) to Google Cloud Storage.
def _upload_to_gcs(self, files_to_upload): """ Upload all of the file splits (and optionally the schema .json file) to Google cloud storage. """ hook = GoogleCloudStorageHook( google_cloud_storage_conn_id=self.google_cloud_storage_conn_id, delegate_to=self.delegate_to) for object_name, tmp_file_handle in files_to_upload.items(): hook.upload(self.bucket, object_name, tmp_file_handle.name, 'application/json', (self.gzip if object_name != self.schema_filename else False))
Takes a value from MSSQL and converts it to a value that's safe for JSON/Google Cloud Storage/BigQuery.
def convert_types(cls, value): """ Takes a value from MSSQL, and converts it to a value that's safe for JSON/Google Cloud Storage/BigQuery. """ if isinstance(value, decimal.Decimal): return float(value) else: return value
Decorates function to execute function at the same time submitting action_logging but in CLI context. It will call action logger callbacks twice, one for pre-execution and the other one for post-execution.
def action_logging(f): """ Decorates function to execute function at the same time submitting action_logging but in CLI context. It will call action logger callbacks twice, one for pre-execution and the other one for post-execution. Action logger will be called with below keyword parameters: sub_command : name of sub-command start_datetime : start datetime instance by utc end_datetime : end datetime instance by utc full_command : full command line arguments user : current user log : airflow.models.log.Log ORM instance dag_id : dag id (optional) task_id : task_id (optional) execution_date : execution date (optional) error : exception instance if there's an exception :param f: function instance :return: wrapped function """ @functools.wraps(f) def wrapper(*args, **kwargs): """ An wrapper for cli functions. It assumes to have Namespace instance at 1st positional argument :param args: Positional argument. It assumes to have Namespace instance at 1st positional argument :param kwargs: A passthrough keyword argument """ assert args assert isinstance(args[0], Namespace), \ "1st positional argument should be argparse.Namespace instance, " \ "but {}".format(args[0]) metrics = _build_metrics(f.__name__, args[0]) cli_action_loggers.on_pre_execution(**metrics) try: return f(*args, **kwargs) except Exception as e: metrics['error'] = e raise finally: metrics['end_datetime'] = datetime.utcnow() cli_action_loggers.on_post_execution(**metrics) return wrapper
Builds metrics dict from function args. It assumes that the function arguments come from an airflow.bin.cli module function and include a Namespace instance that optionally contains dag_id, task_id and execution_date.
def _build_metrics(func_name, namespace): """ Builds metrics dict from function args It assumes that function arguments is from airflow.bin.cli module's function and has Namespace instance where it optionally contains "dag_id", "task_id", and "execution_date". :param func_name: name of function :param namespace: Namespace instance from argparse :return: dict with metrics """ metrics = {'sub_command': func_name, 'start_datetime': datetime.utcnow(), 'full_command': '{}'.format(list(sys.argv)), 'user': getpass.getuser()} assert isinstance(namespace, Namespace) tmp_dic = vars(namespace) metrics['dag_id'] = tmp_dic.get('dag_id') metrics['task_id'] = tmp_dic.get('task_id') metrics['execution_date'] = tmp_dic.get('execution_date') metrics['host_name'] = socket.gethostname() extra = json.dumps(dict((k, metrics[k]) for k in ('host_name', 'full_command'))) log = Log( event='cli_{}'.format(func_name), task_instance=None, owner=metrics['user'], extra=extra, task_id=metrics.get('task_id'), dag_id=metrics.get('dag_id'), execution_date=metrics.get('execution_date')) metrics['log'] = log return metrics
Yields a dependency status that indicates whether the given task instance's trigger rule was met.
def _evaluate_trigger_rule( self, ti, successes, skipped, failed, upstream_failed, done, flag_upstream_failed, session): """ Yields a dependency status that indicates whether the given task instance's trigger rule was met. :param ti: the task instance to evaluate the trigger rule of :type ti: airflow.models.TaskInstance :param successes: Number of successful upstream tasks :type successes: int :param skipped: Number of skipped upstream tasks :type skipped: int :param failed: Number of failed upstream tasks :type failed: int :param upstream_failed: Number of upstream_failed upstream tasks :type upstream_failed: int :param done: Number of completed upstream tasks :type done: int :param flag_upstream_failed: This is a hack to generate the upstream_failed state creation while checking to see whether the task instance is runnable. It was the shortest path to add the feature :type flag_upstream_failed: bool :param session: database session :type session: sqlalchemy.orm.session.Session """ TR = airflow.utils.trigger_rule.TriggerRule task = ti.task upstream = len(task.upstream_task_ids) tr = task.trigger_rule upstream_done = done >= upstream upstream_tasks_state = { "total": upstream, "successes": successes, "skipped": skipped, "failed": failed, "upstream_failed": upstream_failed, "done": done } # TODO(aoen): Ideally each individual trigger rules would be its own class, but # this isn't very feasible at the moment since the database queries need to be # bundled together for efficiency. # handling instant state assignment based on trigger rules if flag_upstream_failed: if tr == TR.ALL_SUCCESS: if upstream_failed or failed: ti.set_state(State.UPSTREAM_FAILED, session) elif skipped: ti.set_state(State.SKIPPED, session) elif tr == TR.ALL_FAILED: if successes or skipped: ti.set_state(State.SKIPPED, session) elif tr == TR.ONE_SUCCESS: if upstream_done and not successes: ti.set_state(State.SKIPPED, session) elif tr == TR.ONE_FAILED: if upstream_done and not (failed or upstream_failed): ti.set_state(State.SKIPPED, session) elif tr == TR.NONE_FAILED: if upstream_failed or failed: ti.set_state(State.UPSTREAM_FAILED, session) elif skipped == upstream: ti.set_state(State.SKIPPED, session) elif tr == TR.NONE_SKIPPED: if skipped: ti.set_state(State.SKIPPED, session) if tr == TR.ONE_SUCCESS: if successes <= 0: yield self._failing_status( reason="Task's trigger rule '{0}' requires one upstream " "task success, but none were found. " "upstream_tasks_state={1}, upstream_task_ids={2}" .format(tr, upstream_tasks_state, task.upstream_task_ids)) elif tr == TR.ONE_FAILED: if not failed and not upstream_failed: yield self._failing_status( reason="Task's trigger rule '{0}' requires one upstream " "task failure, but none were found. " "upstream_tasks_state={1}, upstream_task_ids={2}" .format(tr, upstream_tasks_state, task.upstream_task_ids)) elif tr == TR.ALL_SUCCESS: num_failures = upstream - successes if num_failures > 0: yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to have succeeded, but found {1} non-success(es). " "upstream_tasks_state={2}, upstream_task_ids={3}" .format(tr, num_failures, upstream_tasks_state, task.upstream_task_ids)) elif tr == TR.ALL_FAILED: num_successes = upstream - failed - upstream_failed if num_successes > 0: yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to have failed, but found {1} non-failure(s). " "upstream_tasks_state={2}, upstream_task_ids={3}" .format(tr, num_successes, upstream_tasks_state, task.upstream_task_ids)) elif tr == TR.ALL_DONE: if not upstream_done: yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to have completed, but found {1} task(s) that " "weren't done. upstream_tasks_state={2}, " "upstream_task_ids={3}" .format(tr, upstream_done, upstream_tasks_state, task.upstream_task_ids)) elif tr == TR.NONE_FAILED: num_failures = upstream - successes - skipped if num_failures > 0: yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to have succeeded or been skipped, but found {1} non-success(es). " "upstream_tasks_state={2}, upstream_task_ids={3}" .format(tr, num_failures, upstream_tasks_state, task.upstream_task_ids)) elif tr == TR.NONE_SKIPPED: if skipped > 0: yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to not have been skipped, but found {1} task(s) skipped. " "upstream_tasks_state={2}, upstream_task_ids={3}" .format(tr, skipped, upstream_tasks_state, task.upstream_task_ids)) else: yield self._failing_status( reason="No strategy to evaluate trigger rule '{0}'.".format(tr))
Create the specified cgroup.
def _create_cgroup(self, path): """ Create the specified cgroup. :param path: The path of the cgroup to create. E.g. cpu/mygroup/mysubgroup :return: the Node associated with the created cgroup. :rtype: cgroupspy.nodes.Node """ node = trees.Tree().root path_split = path.split(os.sep) for path_element in path_split: name_to_node = {x.name: x for x in node.children} if path_element not in name_to_node: self.log.debug("Creating cgroup %s in %s", path_element, node.path) node = node.create_cgroup(path_element) else: self.log.debug( "Not creating cgroup %s in %s since it already exists", path_element, node.path ) node = name_to_node[path_element] return node
Delete the specified cgroup.
def _delete_cgroup(self, path): """ Delete the specified cgroup. :param path: The path of the cgroup to delete. E.g. cpu/mygroup/mysubgroup """ node = trees.Tree().root path_split = path.split("/") for path_element in path_split: name_to_node = {x.name: x for x in node.children} if path_element not in name_to_node: self.log.warning("Cgroup does not exist: %s", path) return else: node = name_to_node[path_element] # node is now the leaf node parent = node.parent self.log.debug("Deleting cgroup %s/%s", parent, node.name) parent.delete_cgroup(node.name)
:return: a mapping from the subsystem name to the cgroup name :rtype: dict[str, str]
def _get_cgroup_names(): """ :return: a mapping between the subsystem name to the cgroup name :rtype: dict[str, str] """ with open("/proc/self/cgroup") as f: lines = f.readlines() d = {} for line in lines: line_split = line.rstrip().split(":") subsystem = line_split[1] group_name = line_split[2] d[subsystem] = group_name return d
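A quick standalone illustration of the line parsing above; the sample /proc/self/cgroup line is illustrative only.
# Each line of /proc/self/cgroup is "hierarchy-id:subsystems:group-name".
line = "4:cpu,cpuacct:/user.slice"
_hierarchy_id, subsystem, group_name = line.rstrip().split(":")
print(subsystem, "->", group_name)  # cpu,cpuacct -> /user.slice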
The purpose of this function is to be robust to improper connection settings provided by users, specifically in the host field.
def _parse_host(host): """ The purpose of this function is to be robust to improper connections settings provided by users, specifically in the host field. For example -- when users supply ``https://xx.cloud.databricks.com`` as the host, we must strip out the protocol to get the host.:: h = DatabricksHook() assert h._parse_host('https://xx.cloud.databricks.com') == \ 'xx.cloud.databricks.com' In the case where users supply the correct ``xx.cloud.databricks.com`` as the host, this function is a no-op.:: assert h._parse_host('xx.cloud.databricks.com') == 'xx.cloud.databricks.com' """ urlparse_host = urlparse.urlparse(host).hostname if urlparse_host: # In this case, host = https://xx.cloud.databricks.com return urlparse_host else: # In this case, host = xx.cloud.databricks.com return host
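The same host-normalisation logic as a standalone sketch (standard library only), useful for checking the behaviour outside the hook.
from urllib.parse import urlparse

def parse_host(host):
    # urlparse only yields a hostname when a scheme such as https:// is present;
    # otherwise fall back to the raw value.
    return urlparse(host).hostname or host

assert parse_host("https://xx.cloud.databricks.com") == "xx.cloud.databricks.com"
assert parse_host("xx.cloud.databricks.com") == "xx.cloud.databricks.com"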
Utility function to perform an API call with retries
def _do_api_call(self, endpoint_info, json): """ Utility function to perform an API call with retries :param endpoint_info: Tuple of method and endpoint :type endpoint_info: tuple[string, string] :param json: Parameters for this API call. :type json: dict :return: If the api call returns a OK status code, this function returns the response in JSON. Otherwise, we throw an AirflowException. :rtype: dict """ method, endpoint = endpoint_info url = 'https://{host}/{endpoint}'.format( host=self._parse_host(self.databricks_conn.host), endpoint=endpoint) if 'token' in self.databricks_conn.extra_dejson: self.log.info('Using token auth.') auth = _TokenAuth(self.databricks_conn.extra_dejson['token']) else: self.log.info('Using basic auth.') auth = (self.databricks_conn.login, self.databricks_conn.password) if method == 'GET': request_func = requests.get elif method == 'POST': request_func = requests.post else: raise AirflowException('Unexpected HTTP Method: ' + method) attempt_num = 1 while True: try: response = request_func( url, json=json, auth=auth, headers=USER_AGENT_HEADER, timeout=self.timeout_seconds) response.raise_for_status() return response.json() except requests_exceptions.RequestException as e: if not _retryable_error(e): # In this case, the user probably made a mistake. # Don't retry. raise AirflowException('Response: {0}, Status Code: {1}'.format( e.response.content, e.response.status_code)) self._log_request_error(attempt_num, e) if attempt_num == self.retry_limit: raise AirflowException(('API requests to Databricks failed {} times. ' + 'Giving up.').format(self.retry_limit)) attempt_num += 1 sleep(self.retry_delay)
Sign into Salesforce only if we are not already signed in.
def get_conn(self): """ Sign into Salesforce, only if we are not already signed in. """ if not self.conn: connection = self.get_connection(self.conn_id) extras = connection.extra_dejson self.conn = Salesforce( username=connection.login, password=connection.password, security_token=extras['security_token'], instance_url=connection.host, sandbox=extras.get('sandbox', False) ) return self.conn
Make a query to Salesforce.
def make_query(self, query): """ Make a query to Salesforce. :param query: The query to make to Salesforce. :type query: str :return: The query result. :rtype: dict """ conn = self.get_conn() self.log.info("Querying for all objects") query_results = conn.query_all(query) self.log.info("Received results: Total size: %s; Done: %s", query_results['totalSize'], query_results['done']) return query_results
Get the description of an object from Salesforce. This description is the object's schema and some extra metadata that Salesforce stores for each object.
def describe_object(self, obj): """ Get the description of an object from Salesforce. This description is the object's schema and some extra metadata that Salesforce stores for each object. :param obj: The name of the Salesforce object that we are getting a description of. :type obj: str :return: the description of the Salesforce object. :rtype: dict """ conn = self.get_conn() return conn.__getattr__(obj).describe()
Get a list of all available fields for an object.
def get_available_fields(self, obj): """ Get a list of all available fields for an object. :param obj: The name of the Salesforce object that we are getting a description of. :type obj: str :return: the names of the fields. :rtype: list of str """ self.get_conn() obj_description = self.describe_object(obj) return [field['name'] for field in obj_description['fields']]
Get all instances of the object from Salesforce. For each model, only get the fields specified in fields.
def get_object_from_salesforce(self, obj, fields): """ Get all instances of the `object` from Salesforce. For each model, only get the fields specified in fields. All we really do underneath the hood is run: SELECT <fields> FROM <obj>; :param obj: The object name to get from Salesforce. :type obj: str :param fields: The fields to get from the object. :type fields: iterable :return: all instances of the object from Salesforce. :rtype: dict """ query = "SELECT {} FROM {}".format(",".join(fields), obj) self.log.info("Making query to Salesforce: %s", query if len(query) < 30 else " ... ".join([query[:15], query[-15:]])) return self.make_query(query)
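A hedged usage sketch combining the Salesforce hook methods above; the contrib import path, connection id and object name are assumptions.
# Usage sketch (assumed contrib import path and connection id; "Lead" is a sample object).
from airflow.contrib.hooks.salesforce_hook import SalesforceHook

hook = SalesforceHook(conn_id="salesforce_default")
fields = hook.get_available_fields("Lead")
results = hook.get_object_from_salesforce("Lead", fields)
print(results["totalSize"], "records returned")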
Convert a column of a dataframe to UNIX timestamps if applicable
def _to_timestamp(cls, column): """ Convert a column of a dataframe to UNIX timestamps if applicable :param column: A Series object representing a column of a dataframe. :type column: pd.Series :return: a new series that maintains the same index as the original :rtype: pd.Series """ # try and convert the column to datetimes # the column MUST have a four digit year somewhere in the string # there should be a better way to do this, # but just letting pandas try and convert every column without a format # caused it to convert floats as well # For example, a column of integers # between 0 and 10 are turned into timestamps # if the column cannot be converted, # just return the original column untouched try: column = pd.to_datetime(column) except ValueError: log = LoggingMixin().log log.warning("Could not convert field to timestamps: %s", column.name) return column # now convert the newly created datetimes into timestamps # we have to be careful here # because NaT cannot be converted to a timestamp # so we have to return NaN converted = [] for value in column: try: converted.append(value.timestamp()) except (ValueError, AttributeError): converted.append(pd.np.NaN) return pd.Series(converted, index=column.index)
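A standalone sketch of the conversion above on a tiny Series (pandas and numpy only); it mirrors the NaT-to-NaN handling without the hook.
import numpy as np
import pandas as pd

col = pd.Series(["2019-01-01 12:00:00", None, "2019-02-01 00:00:00"], name="created_date")
as_datetimes = pd.to_datetime(col)          # None becomes NaT
as_timestamps = pd.Series(
    [v.timestamp() if not pd.isna(v) else np.nan for v in as_datetimes],
    index=col.index,
)
print(as_timestamps)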
Write query results to file.
def write_object_to_file(self, query_results, filename, fmt="csv", coerce_to_timestamp=False, record_time_added=False): """ Write query results to file. Acceptable formats are: - csv: comma-separated-values file. This is the default format. - json: JSON array. Each element in the array is a different row. - ndjson: JSON array but each element is new-line delimited instead of comma delimited like in `json` This requires a significant amount of cleanup. Pandas doesn't handle output to CSV and json in a uniform way. This is especially painful for datetime types. Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps. By default, this function will try and leave all values as they are represented in Salesforce. You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC). This can be greatly beneficial as it will make all of your datetime fields look the same, and makes it easier to work with in other database environments :param query_results: the results from a SQL query :type query_results: list of dict :param filename: the name of the file where the data should be dumped to :type filename: str :param fmt: the format you want the output in. Default: 'csv' :type fmt: str :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps. False if you want them to be left in the same format as they were in Salesforce. Leaving the value as False will result in datetimes being strings. Default: False :type coerce_to_timestamp: bool :param record_time_added: True if you want to add a Unix timestamp field to the resulting data that marks when the data was fetched from Salesforce. Default: False :type record_time_added: bool :return: the dataframe that gets written to the file. :rtype: pd.DataFrame """ fmt = fmt.lower() if fmt not in ['csv', 'json', 'ndjson']: raise ValueError("Format value is not recognized: {}".format(fmt)) # this line right here will convert all integers to floats # if there are any None/np.nan values in the column # that's because None/np.nan cannot exist in an integer column # we should write all of our timestamps as FLOATS in our final schema df = pd.DataFrame.from_records(query_results, exclude=["attributes"]) df.columns = [column.lower() for column in df.columns] # convert columns with datetime strings to datetimes # not all strings will be datetimes, so we ignore any errors that occur # we get the object's definition at this point and only consider # features that are DATE or DATETIME if coerce_to_timestamp and df.shape[0] > 0: # get the object name out of the query results # it's stored in the "attributes" dictionary # for each returned record object_name = query_results[0]['attributes']['type'] self.log.info("Coercing timestamps for: %s", object_name) schema = self.describe_object(object_name) # possible columns that can be converted to timestamps # are the ones that are either date or datetime types # strings are too general and we risk unintentional conversion possible_timestamp_cols = [ field['name'].lower() for field in schema['fields'] if field['type'] in ["date", "datetime"] and field['name'].lower() in df.columns ] df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp) if record_time_added: fetched_time = time.time() df["time_fetched_from_salesforce"] = fetched_time # write the CSV or JSON file depending on the option # NOTE: # datetimes here are an issue. # There is no good way to manage the difference # for to_json, the options are an epoch or an ISO string # but for to_csv, it will be a string output by datetime # For JSON we decided to output the epoch timestamp in seconds # (as is fairly standard for JavaScript) # And for csv, we do a string if fmt == "csv": # there are also a ton of newline objects that mess up our ability to write to csv # we remove these newlines so that the output is a valid CSV format self.log.info("Cleaning data and writing to CSV") possible_strings = df.columns[df.dtypes == "object"] df[possible_strings] = df[possible_strings].apply( lambda x: x.str.replace("\r\n", "").str.replace("\n", "") ) # write the dataframe df.to_csv(filename, index=False) elif fmt == "json": df.to_json(filename, "records", date_unit="s") elif fmt == "ndjson": df.to_json(filename, "records", lines=True, date_unit="s") return df
Read logs of given task instance and try_number from GCS. If failed, read the log from task instance host machine. :param ti: task instance object :param try_number: task instance try_number to read logs from :param metadata: log metadata, can be used for streaming log reading and auto-tailing.
def _read(self, ti, try_number, metadata=None): """ Read logs of given task instance and try_number from GCS. If failed, read the log from task instance host machine. :param ti: task instance object :param try_number: task instance try_number to read logs from :param metadata: log metadata, can be used for steaming log reading and auto-tailing. """ # Explicitly getting log relative path is necessary as the given # task instance might be different than task instance passed in # in set_context method. log_relative_path = self._render_filename(ti, try_number) remote_loc = os.path.join(self.remote_base, log_relative_path) try: remote_log = self.gcs_read(remote_loc) log = '*** Reading remote log from {}.\n{}\n'.format( remote_loc, remote_log) return log, {'end_of_log': True} except Exception as e: log = '*** Unable to read remote log from {}\n*** {}\n\n'.format( remote_loc, str(e)) self.log.error(log) local_log, metadata = super()._read(ti, try_number) log += local_log return log, metadata
Returns the log found at the remote_log_location. :param remote_log_location: the log's location in remote storage :type remote_log_location: str (path)
def gcs_read(self, remote_log_location): """ Returns the log found at the remote_log_location. :param remote_log_location: the log's location in remote storage :type remote_log_location: str (path) """ bkt, blob = self.parse_gcs_url(remote_log_location) return self.hook.download(bkt, blob).decode('utf-8')
Writes the log to the remote_log_location. Fails silently if no hook was created. :param log: the log to write to the remote_log_location :type log: str :param remote_log_location: the log's location in remote storage :type remote_log_location: str (path) :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :type append: bool
def gcs_write(self, log, remote_log_location, append=True): """ Writes the log to the remote_log_location. Fails silently if no hook was created. :param log: the log to write to the remote_log_location :type log: str :param remote_log_location: the log's location in remote storage :type remote_log_location: str (path) :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :type append: bool """ if append: try: old_log = self.gcs_read(remote_log_location) log = '\n'.join([old_log, log]) if old_log else log except Exception as e: if not hasattr(e, 'resp') or e.resp.get('status') != '404': log = '*** Previous log discarded: {}\n\n'.format(str(e)) + log try: bkt, blob = self.parse_gcs_url(remote_log_location) from tempfile import NamedTemporaryFile with NamedTemporaryFile(mode='w+') as tmpfile: tmpfile.write(log) # Force the file to be flushed, since we're doing the # upload from within the file context (it hasn't been # closed). tmpfile.flush() self.hook.upload(bkt, blob, tmpfile.name) except Exception as e: self.log.error('Could not write logs to %s: %s', remote_log_location, e)
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a tuple containing the corresponding bucket and blob.
def parse_gcs_url(gsurl): """ Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a tuple containing the corresponding bucket and blob. """ parsed_url = urlparse(gsurl) if not parsed_url.netloc: raise AirflowException('Please provide a bucket name') else: bucket = parsed_url.netloc blob = parsed_url.path.strip('/') return bucket, blob
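A quick standalone check of the URL-splitting behaviour described above (standard library only; the bucket and object names are illustrative).
from urllib.parse import urlparse

def split_gcs_url(gsurl):
    parsed = urlparse(gsurl)
    if not parsed.netloc:
        raise ValueError("Please provide a bucket name")
    # netloc is the bucket; the path (minus surrounding slashes) is the blob
    return parsed.netloc, parsed.path.strip("/")

assert split_gcs_url("gs://my-bucket/path/to/blob.txt") == ("my-bucket", "path/to/blob.txt")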
Fetches PyMongo Client
def get_conn(self): """ Fetches PyMongo Client """ if self.client is not None: return self.client # Mongo Connection Options dict that is unpacked when passed to MongoClient options = self.extras # If we are using SSL disable requiring certs from specific hostname if options.get('ssl', False): options.update({'ssl_cert_reqs': CERT_NONE}) self.client = MongoClient(self.uri, **options) return self.client
Fetches a mongo collection object for querying.
def get_collection(self, mongo_collection, mongo_db=None): """ Fetches a mongo collection object for querying. Uses connection schema as DB unless specified. """ mongo_db = mongo_db if mongo_db is not None else self.connection.schema mongo_conn = self.get_conn() return mongo_conn.get_database(mongo_db).get_collection(mongo_collection)
Runs an aggregation pipeline and returns the results https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.aggregate https://api.mongodb.com/python/current/examples/aggregation.html
def aggregate(self, mongo_collection, aggregate_query, mongo_db=None, **kwargs): """ Runs an aggregation pipeline and returns the results https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.aggregate https://api.mongodb.com/python/current/examples/aggregation.html """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) return collection.aggregate(aggregate_query, **kwargs)
Runs a mongo find query and returns the results https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find
def find(self, mongo_collection, query, find_one=False, mongo_db=None, **kwargs): """ Runs a mongo find query and returns the results https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) if find_one: return collection.find_one(query, **kwargs) else: return collection.find(query, **kwargs)
Inserts a single document into a mongo collection https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_one
def insert_one(self, mongo_collection, doc, mongo_db=None, **kwargs): """ Inserts a single document into a mongo collection https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_one """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) return collection.insert_one(doc, **kwargs)
Inserts many docs into a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_many
def insert_many(self, mongo_collection, docs, mongo_db=None, **kwargs): """ Inserts many docs into a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_many """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) return collection.insert_many(docs, **kwargs)
Updates a single document in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one
def update_one(self, mongo_collection, filter_doc, update_doc, mongo_db=None, **kwargs): """ Updates a single document in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one :param mongo_collection: The name of the collection to update. :type mongo_collection: str :param filter_doc: A query that matches the documents to update. :type filter_doc: dict :param update_doc: The modifications to apply. :type update_doc: dict :param mongo_db: The name of the database to use. Can be omitted; then the database from the connection string is used. :type mongo_db: str """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) return collection.update_one(filter_doc, update_doc, **kwargs)
Replaces a single document in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.replace_one
def replace_one(self, mongo_collection, doc, filter_doc=None, mongo_db=None, **kwargs): """ Replaces a single document in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.replace_one .. note:: If no ``filter_doc`` is given, it is assumed that the replacement document contain the ``_id`` field which is then used as filters. :param mongo_collection: The name of the collection to update. :type mongo_collection: str :param doc: The new document. :type doc: dict :param filter_doc: A query that matches the documents to replace. Can be omitted; then the _id field from doc will be used. :type filter_doc: dict :param mongo_db: The name of the database to use. Can be omitted; then the database from the connection string is used. :type mongo_db: str """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) if not filter_doc: filter_doc = {'_id': doc['_id']} return collection.replace_one(filter_doc, doc, **kwargs)
Replaces many documents in a mongo collection.
def replace_many(self, mongo_collection, docs, filter_docs=None, mongo_db=None, upsert=False, collation=None, **kwargs): """ Replaces many documents in a mongo collection. Uses bulk_write with multiple ReplaceOne operations https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write .. note:: If no ``filter_docs``are given, it is assumed that all replacement documents contain the ``_id`` field which are then used as filters. :param mongo_collection: The name of the collection to update. :type mongo_collection: str :param docs: The new documents. :type docs: list[dict] :param filter_docs: A list of queries that match the documents to replace. Can be omitted; then the _id fields from docs will be used. :type filter_docs: list[dict] :param mongo_db: The name of the database to use. Can be omitted; then the database from the connection string is used. :type mongo_db: str :param upsert: If ``True``, perform an insert if no documents match the filters for the replace operation. :type upsert: bool :param collation: An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. :type collation: pymongo.collation.Collation """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) if not filter_docs: filter_docs = [{'_id': doc['_id']} for doc in docs] requests = [ ReplaceOne( filter_docs[i], docs[i], upsert=upsert, collation=collation) for i in range(len(docs)) ] return collection.bulk_write(requests, **kwargs)
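A hedged usage sketch of the bulk replace above; the contrib import path, connection id, database and collection names are assumptions.
# Usage sketch (assumed contrib import path and connection id; data is illustrative).
from airflow.contrib.hooks.mongo_hook import MongoHook

hook = MongoHook(conn_id="mongo_default")
docs = [
    {"_id": 1, "name": "alice", "score": 10},
    {"_id": 2, "name": "bob", "score": 7},
]
# With no filter_docs, each document's _id is used as its replace filter.
result = hook.replace_many("scores", docs, mongo_db="analytics", upsert=True)
print(result.bulk_api_result)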
Deletes a single document in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_one
def delete_one(self, mongo_collection, filter_doc, mongo_db=None, **kwargs): """ Deletes a single document in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_one :param mongo_collection: The name of the collection to delete from. :type mongo_collection: str :param filter_doc: A query that matches the document to delete. :type filter_doc: dict :param mongo_db: The name of the database to use. Can be omitted; then the database from the connection string is used. :type mongo_db: str """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) return collection.delete_one(filter_doc, **kwargs)
Deletes one or more documents in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
def delete_many(self, mongo_collection, filter_doc, mongo_db=None, **kwargs): """ Deletes one or more documents in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many :param mongo_collection: The name of the collection to delete from. :type mongo_collection: str :param filter_doc: A query that matches the documents to delete. :type filter_doc: dict :param mongo_db: The name of the database to use. Can be omitted; then the database from the connection string is used. :type mongo_db: str """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) return collection.delete_many(filter_doc, **kwargs)
Checks the mail folder for mails containing attachments with the given name.
def has_mail_attachment(self, name, mail_folder='INBOX', check_regex=False): """ Checks the mail folder for mails containing attachments with the given name. :param name: The name of the attachment that will be searched for. :type name: str :param mail_folder: The mail folder where to look at. :type mail_folder: str :param check_regex: Checks the name for a regular expression. :type check_regex: bool :returns: True if there is an attachment with the given name and False if not. :rtype: bool """ mail_attachments = self._retrieve_mails_attachments_by_name(name, mail_folder, check_regex, latest_only=True) return len(mail_attachments) > 0
Retrieves mail's attachments in the mail folder by its name.
def retrieve_mail_attachments(self, name, mail_folder='INBOX', check_regex=False, latest_only=False, not_found_mode='raise'): """ Retrieves mail's attachments in the mail folder by its name. :param name: The name of the attachment that will be downloaded. :type name: str :param mail_folder: The mail folder where to look at. :type mail_folder: str :param check_regex: Checks the name for a regular expression. :type check_regex: bool :param latest_only: If set to True it will only retrieve the first matched attachment. :type latest_only: bool :param not_found_mode: Specify what should happen if no attachment has been found. Supported values are 'raise', 'warn' and 'ignore'. If it is set to 'raise' it will raise an exception, if set to 'warn' it will only print a warning and if set to 'ignore' it won't notify you at all. :type not_found_mode: str :returns: a list of tuple each containing the attachment filename and its payload. :rtype: a list of tuple """ mail_attachments = self._retrieve_mails_attachments_by_name(name, mail_folder, check_regex, latest_only) if not mail_attachments: self._handle_not_found_mode(not_found_mode) return mail_attachments
Downloads mail's attachments in the mail folder by its name to the local directory.
def download_mail_attachments(self, name, local_output_directory, mail_folder='INBOX', check_regex=False, latest_only=False, not_found_mode='raise'): """ Downloads mail's attachments in the mail folder by its name to the local directory. :param name: The name of the attachment that will be downloaded. :type name: str :param local_output_directory: The output directory on the local machine where the files will be downloaded to. :type local_output_directory: str :param mail_folder: The mail folder where to look at. :type mail_folder: str :param check_regex: Checks the name for a regular expression. :type check_regex: bool :param latest_only: If set to True it will only download the first matched attachment. :type latest_only: bool :param not_found_mode: Specify what should happen if no attachment has been found. Supported values are 'raise', 'warn' and 'ignore'. If it is set to 'raise' it will raise an exception, if set to 'warn' it will only print a warning and if set to 'ignore' it won't notify you at all. :type not_found_mode: str """ mail_attachments = self._retrieve_mails_attachments_by_name(name, mail_folder, check_regex, latest_only) if not mail_attachments: self._handle_not_found_mode(not_found_mode) self._create_files(mail_attachments, local_output_directory)
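A hedged usage sketch of the attachment helpers above; the contrib import path, connection id, attachment pattern and output directory are assumptions, and the context-manager usage assumes the hook opens its IMAP connection on enter.
from airflow.contrib.hooks.imap_hook import ImapHook  # assumed import path

with ImapHook(imap_conn_id="imap_default") as hook:   # assumes the hook connects in __enter__
    pattern = r"report_.*\.csv"
    if hook.has_mail_attachment(name=pattern, check_regex=True):
        hook.download_mail_attachments(
            name=pattern,
            local_output_directory="/tmp/reports",
            check_regex=True,
            latest_only=True,
        )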
Gets all attachments by name for the mail.
def get_attachments_by_name(self, name, check_regex, find_first=False): """ Gets all attachments by name for the mail. :param name: The name of the attachment to look for. :type name: str :param check_regex: Checks the name for a regular expression. :type check_regex: bool :param find_first: If set to True it will only find the first match and then quit. :type find_first: bool :returns: a list of tuples each containing name and payload where the attachments name matches the given name. :rtype: list of tuple """ attachments = [] for part in self.mail.walk(): mail_part = MailPart(part) if mail_part.is_attachment(): found_attachment = mail_part.has_matching_name(name) if check_regex \ else mail_part.has_equal_name(name) if found_attachment: file_name, file_payload = mail_part.get_file() self.log.info('Found attachment: {}'.format(file_name)) attachments.append((file_name, file_payload)) if find_first: break return attachments
Gets the file including name and payload.
def get_file(self): """ Gets the file including name and payload. :returns: the part's name and payload. :rtype: tuple """ return self.part.get_filename(), self.part.get_payload(decode=True)
Write batch records to Kinesis Firehose
def put_records(self, records): """ Write batch records to Kinesis Firehose """ firehose_conn = self.get_conn() response = firehose_conn.put_record_batch( DeliveryStreamName=self.delivery_stream, Records=records ) return response
Determines whether a task is ready to be rescheduled. Only tasks in NONE state with at least one row in task_reschedule table are handled by this dependency class, otherwise this dependency is considered as passed. This dependency fails if the latest reschedule request's reschedule date is still in the future.
def _get_dep_statuses(self, ti, session, dep_context):
        """
        Determines whether a task is ready to be rescheduled. Only tasks in
        NONE state with at least one row in task_reschedule table are
        handled by this dependency class, otherwise this dependency is
        considered as passed. This dependency fails if the latest reschedule
        request's reschedule date is still in the future.
        """
        if dep_context.ignore_in_reschedule_period:
            yield self._passing_status(
                reason="The context specified that being in a reschedule period was "
                       "permitted.")
            return

        if ti.state not in self.RESCHEDULEABLE_STATES:
            yield self._passing_status(
                reason="The task instance is not in UP_FOR_RESCHEDULE or NONE state.")
            return

        task_reschedules = TaskReschedule.find_for_task_instance(task_instance=ti)
        if not task_reschedules:
            yield self._passing_status(
                reason="There is no reschedule request for this task instance.")
            return

        now = timezone.utcnow()
        next_reschedule_date = task_reschedules[-1].reschedule_date
        if now >= next_reschedule_date:
            yield self._passing_status(
                reason="Task instance is ready for reschedule.")
            return

        yield self._failing_status(
            reason="Task is not ready for reschedule yet but will be rescheduled "
                   "automatically. Current date is {0} and task will be rescheduled "
                   "at {1}.".format(now.isoformat(), next_reschedule_date.isoformat()))
Send email using backend specified in EMAIL_BACKEND.
def send_email(to, subject, html_content, files=None, dryrun=False, cc=None, bcc=None, mime_subtype='mixed', mime_charset='utf-8', **kwargs): """ Send email using backend specified in EMAIL_BACKEND. """ path, attr = configuration.conf.get('email', 'EMAIL_BACKEND').rsplit('.', 1) module = importlib.import_module(path) backend = getattr(module, attr) to = get_email_address_list(to) to = ", ".join(to) return backend(to, subject, html_content, files=files, dryrun=dryrun, cc=cc, bcc=bcc, mime_subtype=mime_subtype, mime_charset=mime_charset, **kwargs)
Send an email with html content
def send_email_smtp(to, subject, html_content, files=None, dryrun=False, cc=None, bcc=None, mime_subtype='mixed', mime_charset='utf-8', **kwargs): """ Send an email with html content >>> send_email('test@example.com', 'foo', '<b>Foo</b> bar', ['/dev/null'], dryrun=True) """ smtp_mail_from = configuration.conf.get('smtp', 'SMTP_MAIL_FROM') to = get_email_address_list(to) msg = MIMEMultipart(mime_subtype) msg['Subject'] = subject msg['From'] = smtp_mail_from msg['To'] = ", ".join(to) recipients = to if cc: cc = get_email_address_list(cc) msg['CC'] = ", ".join(cc) recipients = recipients + cc if bcc: # don't add bcc in header bcc = get_email_address_list(bcc) recipients = recipients + bcc msg['Date'] = formatdate(localtime=True) mime_text = MIMEText(html_content, 'html', mime_charset) msg.attach(mime_text) for fname in files or []: basename = os.path.basename(fname) with open(fname, "rb") as f: part = MIMEApplication( f.read(), Name=basename ) part['Content-Disposition'] = 'attachment; filename="%s"' % basename part['Content-ID'] = '<%s>' % basename msg.attach(part) send_MIME_email(smtp_mail_from, recipients, msg, dryrun)
Processes DateTimes from the DB making sure it is always returning UTC. Not using timezone.convert_to_utc as that converts to configured TIMEZONE while the DB might be running with some other setting. We assume UTC datetimes in the database.
def process_result_value(self, value, dialect): """ Processes DateTimes from the DB making sure it is always returning UTC. Not using timezone.convert_to_utc as that converts to configured TIMEZONE while the DB might be running with some other setting. We assume UTC datetimes in the database. """ if value is not None: if value.tzinfo is None: value = value.replace(tzinfo=utc) else: value = value.astimezone(utc) return value
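The two branches can be illustrated with plain datetimes; in this standalone sketch datetime.timezone.utc stands in for the module's utc tzinfo object, which is an assumption about its imports.

# Standalone illustration of the two branches above; datetime.timezone.utc
# stands in for the module's 'utc' tzinfo object (an assumption).
from datetime import datetime, timedelta, timezone

utc = timezone.utc

naive = datetime(2019, 1, 1, 12, 0, 0)                    # no tzinfo: assumed to be UTC
aware = datetime(2019, 1, 1, 13, 0, 0,
                 tzinfo=timezone(timedelta(hours=1)))      # 13:00 at UTC+1

# Both normalize to the same instant, 12:00 UTC.
assert naive.replace(tzinfo=utc) == aware.astimezone(utc)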
Check if a blob exists on Azure Blob Storage.
def check_for_blob(self, container_name, blob_name, **kwargs): """ Check if a blob exists on Azure Blob Storage. :param container_name: Name of the container. :type container_name: str :param blob_name: Name of the blob. :type blob_name: str :param kwargs: Optional keyword arguments that `BlockBlobService.exists()` takes. :type kwargs: object :return: True if the blob exists, False otherwise. :rtype: bool """ return self.connection.exists(container_name, blob_name, **kwargs)
Check if a prefix exists on Azure Blob storage.
def check_for_prefix(self, container_name, prefix, **kwargs): """ Check if a prefix exists on Azure Blob storage. :param container_name: Name of the container. :type container_name: str :param prefix: Prefix of the blob. :type prefix: str :param kwargs: Optional keyword arguments that `BlockBlobService.list_blobs()` takes. :type kwargs: object :return: True if blobs matching the prefix exist, False otherwise. :rtype: bool """ matches = self.connection.list_blobs(container_name, prefix, num_results=1, **kwargs) return len(list(matches)) > 0
Upload a file to Azure Blob Storage.
def load_file(self, file_path, container_name, blob_name, **kwargs): """ Upload a file to Azure Blob Storage. :param file_path: Path to the file to load. :type file_path: str :param container_name: Name of the container. :type container_name: str :param blob_name: Name of the blob. :type blob_name: str :param kwargs: Optional keyword arguments that `BlockBlobService.create_blob_from_path()` takes. :type kwargs: object """ # Reorder the argument order from airflow.hooks.S3_hook.load_file. self.connection.create_blob_from_path(container_name, blob_name, file_path, **kwargs)
Upload a string to Azure Blob Storage.
def load_string(self, string_data, container_name, blob_name, **kwargs): """ Upload a string to Azure Blob Storage. :param string_data: String to load. :type string_data: str :param container_name: Name of the container. :type container_name: str :param blob_name: Name of the blob. :type blob_name: str :param kwargs: Optional keyword arguments that `BlockBlobService.create_blob_from_text()` takes. :type kwargs: object """ # Reorder the argument order from airflow.hooks.S3_hook.load_string. self.connection.create_blob_from_text(container_name, blob_name, string_data, **kwargs)
Download a file from Azure Blob Storage.
def get_file(self, file_path, container_name, blob_name, **kwargs):
        """
        Download a file from Azure Blob Storage.
        :param file_path: Path to the file to download.
        :type file_path: str
        :param container_name: Name of the container.
        :type container_name: str
        :param blob_name: Name of the blob.
        :type blob_name: str
        :param kwargs: Optional keyword arguments that
            `BlockBlobService.get_blob_to_path()` takes.
        :type kwargs: object
        """
        return self.connection.get_blob_to_path(container_name, blob_name,
                                                file_path, **kwargs)
Read a file from Azure Blob Storage and return as a string.
def read_file(self, container_name, blob_name, **kwargs):
        """
        Read a file from Azure Blob Storage and return as a string.
        :param container_name: Name of the container.
        :type container_name: str
        :param blob_name: Name of the blob.
        :type blob_name: str
        :param kwargs: Optional keyword arguments that
            `BlockBlobService.get_blob_to_text()` takes.
        :type kwargs: object
        """
        return self.connection.get_blob_to_text(container_name, blob_name,
                                                **kwargs).content
Delete a file from Azure Blob Storage.
def delete_file(self, container_name, blob_name, is_prefix=False, ignore_if_missing=False, **kwargs):
        """
        Delete a file from Azure Blob Storage.
        :param container_name: Name of the container.
        :type container_name: str
        :param blob_name: Name of the blob.
        :type blob_name: str
        :param is_prefix: If blob_name is a prefix, delete all matching files
        :type is_prefix: bool
        :param ignore_if_missing: if True, then return success even if the
            blob does not exist.
        :type ignore_if_missing: bool
        :param kwargs: Optional keyword arguments that
            `BlockBlobService.delete_blob()` takes.
        :type kwargs: object
        """
        if is_prefix:
            blobs_to_delete = [
                blob.name for blob in self.connection.list_blobs(
                    container_name, prefix=blob_name, **kwargs
                )
            ]
        elif self.check_for_blob(container_name, blob_name):
            blobs_to_delete = [blob_name]
        else:
            blobs_to_delete = []

        if not ignore_if_missing and len(blobs_to_delete) == 0:
            raise AirflowException('Blob(s) not found: {}'.format(blob_name))

        for blob_uri in blobs_to_delete:
            self.log.info("Deleting blob: " + blob_uri)
            self.connection.delete_blob(container_name, blob_uri,
                                        delete_snapshots='include',
                                        **kwargs)
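A short round trip over the blob methods above (upload, existence checks, read back, prefix delete); the WasbHook import path and the 'wasb_default' connection id are assumptions about how the hook is wired into Airflow.

# Round-trip sketch; WasbHook and the 'wasb_default' connection id are assumptions.
from airflow.contrib.hooks.wasb_hook import WasbHook

hook = WasbHook(wasb_conn_id='wasb_default')

hook.load_string('hello', container_name='mycontainer', blob_name='reports/2019/demo.txt')

assert hook.check_for_blob('mycontainer', 'reports/2019/demo.txt')
assert hook.check_for_prefix('mycontainer', 'reports/')

print(hook.read_file('mycontainer', 'reports/2019/demo.txt'))

# Delete everything under the prefix; ignore_if_missing avoids an exception
# when the prefix matches nothing.
hook.delete_file('mycontainer', 'reports/', is_prefix=True, ignore_if_missing=True)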
BACKPORT FROM PYTHON3 FTPLIB.
def mlsd(conn, path="", facts=None): """ BACKPORT FROM PYTHON3 FTPLIB. List a directory in a standardized format by using MLSD command (RFC-3659). If path is omitted the current directory is assumed. "facts" is a list of strings representing the type of information desired (e.g. ["type", "size", "perm"]). Return a generator object yielding a tuple of two elements for every file found in path. First element is the file name, the second one is a dictionary including a variable number of "facts" depending on the server and whether "facts" argument has been provided. """ facts = facts or [] if facts: conn.sendcmd("OPTS MLST " + ";".join(facts) + ";") if path: cmd = "MLSD %s" % path else: cmd = "MLSD" lines = [] conn.retrlines(cmd, lines.append) for line in lines: facts_found, _, name = line.rstrip(ftplib.CRLF).partition(' ') entry = {} for fact in facts_found[:-1].split(";"): key, _, value = fact.partition("=") entry[key.lower()] = value yield (name, entry)
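The generator yields (name, facts) tuples just like Python 3's FTP.mlsd(), so it can be driven against a plain ftplib connection; the host, credentials and remote path below are placeholders.

# Usage sketch for the mlsd backport above, assuming it is importable from
# this module; host, credentials and path are placeholders.
import ftplib

conn = ftplib.FTP('ftp.example.com', 'user', 'secret')
for name, facts in mlsd(conn, path='/incoming', facts=['type', 'size', 'modify']):
    if facts.get('type') == 'file':
        print(name, facts.get('size'), facts.get('modify'))
conn.quit()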
Returns a FTP connection object
def get_conn(self): """ Returns a FTP connection object """ if self.conn is None: params = self.get_connection(self.ftp_conn_id) pasv = params.extra_dejson.get("passive", True) self.conn = ftplib.FTP(params.host, params.login, params.password) self.conn.set_pasv(pasv) return self.conn
Returns a dictionary of {filename: {attributes}} for all files on the remote system (where the MLSD command is supported).
def describe_directory(self, path): """ Returns a dictionary of {filename: {attributes}} for all files on the remote system (where the MLSD command is supported). :param path: full path to the remote directory :type path: str """ conn = self.get_conn() conn.cwd(path) try: # only works in Python 3 files = dict(conn.mlsd()) except AttributeError: files = dict(mlsd(conn)) return files
Returns a list of files on the remote system.
def list_directory(self, path, nlst=False): """ Returns a list of files on the remote system. :param path: full path to the remote directory to list :type path: str """ conn = self.get_conn() conn.cwd(path) files = conn.nlst() return files
Transfers the remote file to a local location.
def retrieve_file( self, remote_full_path, local_full_path_or_buffer, callback=None): """ Transfers the remote file to a local location. If local_full_path_or_buffer is a string path, the file will be put at that location; if it is a file-like buffer, the file will be written to the buffer but not closed. :param remote_full_path: full path to the remote file :type remote_full_path: str :param local_full_path_or_buffer: full path to the local file or a file-like buffer :type local_full_path_or_buffer: str or file-like buffer :param callback: callback which is called each time a block of data is read. if you do not use a callback, these blocks will be written to the file or buffer passed in. if you do pass in a callback, note that writing to a file or buffer will need to be handled inside the callback. [default: output_handle.write()] :type callback: callable :Example:: hook = FTPHook(ftp_conn_id='my_conn') remote_path = '/path/to/remote/file' local_path = '/path/to/local/file' # with a custom callback (in this case displaying progress on each read) def print_progress(percent_progress): self.log.info('Percent Downloaded: %s%%' % percent_progress) total_downloaded = 0 total_file_size = hook.get_size(remote_path) output_handle = open(local_path, 'wb') def write_to_file_with_progress(data): total_downloaded += len(data) output_handle.write(data) percent_progress = (total_downloaded / total_file_size) * 100 print_progress(percent_progress) hook.retrieve_file(remote_path, None, callback=write_to_file_with_progress) # without a custom callback data is written to the local_path hook.retrieve_file(remote_path, local_path) """ conn = self.get_conn() is_path = isinstance(local_full_path_or_buffer, basestring) # without a callback, default to writing to a user-provided file or # file-like buffer if not callback: if is_path: output_handle = open(local_full_path_or_buffer, 'wb') else: output_handle = local_full_path_or_buffer callback = output_handle.write else: output_handle = None remote_path, remote_file_name = os.path.split(remote_full_path) conn.cwd(remote_path) self.log.info('Retrieving file from FTP: %s', remote_full_path) conn.retrbinary('RETR %s' % remote_file_name, callback) self.log.info('Finished retrieving file from FTP: %s', remote_full_path) if is_path and output_handle: output_handle.close()
Transfers a local file to the remote location.
def store_file(self, remote_full_path, local_full_path_or_buffer): """ Transfers a local file to the remote location. If local_full_path_or_buffer is a string path, the file will be read from that location; if it is a file-like buffer, the file will be read from the buffer but not closed. :param remote_full_path: full path to the remote file :type remote_full_path: str :param local_full_path_or_buffer: full path to the local file or a file-like buffer :type local_full_path_or_buffer: str or file-like buffer """ conn = self.get_conn() is_path = isinstance(local_full_path_or_buffer, basestring) if is_path: input_handle = open(local_full_path_or_buffer, 'rb') else: input_handle = local_full_path_or_buffer remote_path, remote_file_name = os.path.split(remote_full_path) conn.cwd(remote_path) conn.storbinary('STOR %s' % remote_file_name, input_handle) if is_path: input_handle.close()
Rename a file.
def rename(self, from_name, to_name): """ Rename a file. :param from_name: rename file from name :param to_name: rename file to name """ conn = self.get_conn() return conn.rename(from_name, to_name)
Returns a datetime object representing the last time the file was modified
def get_mod_time(self, path): """ Returns a datetime object representing the last time the file was modified :param path: remote file path :type path: string """ conn = self.get_conn() ftp_mdtm = conn.sendcmd('MDTM ' + path) time_val = ftp_mdtm[4:] # time_val optionally has microseconds try: return datetime.datetime.strptime(time_val, "%Y%m%d%H%M%S.%f") except ValueError: return datetime.datetime.strptime(time_val, '%Y%m%d%H%M%S')
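An MDTM reply looks like '213 20190512094500', optionally with fractional seconds, so the [4:] slice drops the status code before the two strptime attempts. A standalone illustration of the same parsing logic:

# Standalone illustration mirroring the MDTM parsing and fallback above.
import datetime


def parse_mdtm(ftp_mdtm):
    time_val = ftp_mdtm[4:]  # strip the '213 ' status prefix
    try:
        return datetime.datetime.strptime(time_val, '%Y%m%d%H%M%S.%f')
    except ValueError:
        return datetime.datetime.strptime(time_val, '%Y%m%d%H%M%S')


assert parse_mdtm('213 20190512094500') == datetime.datetime(2019, 5, 12, 9, 45, 0)
assert parse_mdtm('213 20190512094500.123') == datetime.datetime(2019, 5, 12, 9, 45, 0, 123000)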
Infers from the dates which dag runs need to be created and does so.: param dag: the dag to create dag runs for: param execution_dates: list of execution dates to evaluate: param state: the state to set the dag run to: param run_id_template: the template for the run id to be formatted with the execution date: return: newly created and existing dag runs for the execution dates supplied
def _create_dagruns(dag, execution_dates, state, run_id_template):
    """
    Infers from the dates which dag runs need to be created and does so.
    :param dag: the dag to create dag runs for
    :param execution_dates: list of execution dates to evaluate
    :param state: the state to set the dag run to
    :param run_id_template: the template for the run id to be formatted with the
        execution date
    :return: newly created and existing dag runs for the execution dates supplied
    """
    # find out if we need to create any dag runs
    drs = DagRun.find(dag_id=dag.dag_id, execution_date=execution_dates)
    dates_to_create = list(set(execution_dates) - set([dr.execution_date for dr in drs]))

    for date in dates_to_create:
        dr = dag.create_dagrun(
            run_id=run_id_template.format(date.isoformat()),
            execution_date=date,
            start_date=timezone.utcnow(),
            external_trigger=False,
            state=state,
        )
        drs.append(dr)

    return drs
Set the state of a task instance and if needed its relatives. Can set state for future tasks (calculated from execution_date) and retroactively for past tasks. Will verify integrity of past dag runs in order to create tasks that did not exist. It will not create dag runs that are missing on the schedule (but it will for subdag dag runs if needed).: param task: the task from which to work. task.dag needs to be set: param execution_date: the execution date from which to start looking: param upstream: Mark all parents (upstream tasks): param downstream: Mark all siblings (downstream tasks) of task_id, including SubDags: param future: Mark all future tasks on the interval of the dag up until last execution date.: param past: Retroactively mark all tasks starting from start_date of the DAG: param state: State to which the tasks need to be set: param commit: Commit tasks to be altered to the database: param session: database session: return: list of tasks that have been created and updated
def set_state(task, execution_date, upstream=False, downstream=False, future=False, past=False, state=State.SUCCESS, commit=False, session=None): """ Set the state of a task instance and if needed its relatives. Can set state for future tasks (calculated from execution_date) and retroactively for past tasks. Will verify integrity of past dag runs in order to create tasks that did not exist. It will not create dag runs that are missing on the schedule (but it will as for subdag dag runs if needed). :param task: the task from which to work. task.task.dag needs to be set :param execution_date: the execution date from which to start looking :param upstream: Mark all parents (upstream tasks) :param downstream: Mark all siblings (downstream tasks) of task_id, including SubDags :param future: Mark all future tasks on the interval of the dag up until last execution date. :param past: Retroactively mark all tasks starting from start_date of the DAG :param state: State to which the tasks need to be set :param commit: Commit tasks to be altered to the database :param session: database session :return: list of tasks that have been created and updated """ assert timezone.is_localized(execution_date) assert task.dag is not None dag = task.dag latest_execution_date = dag.latest_execution_date assert latest_execution_date is not None # determine date range of dag runs and tasks to consider end_date = latest_execution_date if future else execution_date if 'start_date' in dag.default_args: start_date = dag.default_args['start_date'] elif dag.start_date: start_date = dag.start_date else: start_date = execution_date start_date = execution_date if not past else start_date if dag.schedule_interval == '@once': dates = [start_date] else: dates = dag.date_range(start_date=start_date, end_date=end_date) # find relatives (siblings = downstream, parents = upstream) if needed task_ids = [task.task_id] if downstream: relatives = task.get_flat_relatives(upstream=False) task_ids += [t.task_id for t in relatives] if upstream: relatives = task.get_flat_relatives(upstream=True) task_ids += [t.task_id for t in relatives] # verify the integrity of the dag runs in case a task was added or removed # set the confirmed execution dates as they might be different # from what was provided confirmed_dates = [] drs = DagRun.find(dag_id=dag.dag_id, execution_date=dates) for dr in drs: dr.dag = dag dr.verify_integrity() confirmed_dates.append(dr.execution_date) # go through subdagoperators and create dag runs. We will only work # within the scope of the subdag. We wont propagate to the parent dag, # but we will propagate from parent to subdag. 
dags = [dag] sub_dag_ids = [] while len(dags) > 0: current_dag = dags.pop() for task_id in task_ids: if not current_dag.has_task(task_id): continue current_task = current_dag.get_task(task_id) if isinstance(current_task, SubDagOperator): # this works as a kind of integrity check # it creates missing dag runs for subdagoperators, # maybe this should be moved to dagrun.verify_integrity drs = _create_dagruns(current_task.subdag, execution_dates=confirmed_dates, state=State.RUNNING, run_id_template=BackfillJob.ID_FORMAT_PREFIX) for dr in drs: dr.dag = current_task.subdag dr.verify_integrity() if commit: dr.state = state session.merge(dr) dags.append(current_task.subdag) sub_dag_ids.append(current_task.subdag.dag_id) # now look for the task instances that are affected TI = TaskInstance # get all tasks of the main dag that will be affected by a state change qry_dag = session.query(TI).filter( TI.dag_id == dag.dag_id, TI.execution_date.in_(confirmed_dates), TI.task_id.in_(task_ids)).filter( or_(TI.state.is_(None), TI.state != state) ) # get *all* tasks of the sub dags if len(sub_dag_ids) > 0: qry_sub_dag = session.query(TI).filter( TI.dag_id.in_(sub_dag_ids), TI.execution_date.in_(confirmed_dates)).filter( or_(TI.state.is_(None), TI.state != state) ) if commit: tis_altered = qry_dag.with_for_update().all() if len(sub_dag_ids) > 0: tis_altered += qry_sub_dag.with_for_update().all() for ti in tis_altered: ti.state = state else: tis_altered = qry_dag.all() if len(sub_dag_ids) > 0: tis_altered += qry_sub_dag.all() return tis_altered
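A hedged usage sketch for set_state; the import locations, the 'example_dag'/'first_task' ids, and the use of an explicit session are assumptions made for illustration only.

# Usage sketch; import paths, dag/task ids and session handling are assumptions,
# and set_state is assumed to be importable from the module defined above.
from airflow import settings
from airflow.models import DagBag
from airflow.utils import timezone
from airflow.utils.state import State

dag = DagBag().get_dag('example_dag')
task = dag.get_task('first_task')

session = settings.Session()
altered = set_state(task=task,
                    execution_date=timezone.datetime(2019, 1, 1),  # timezone-aware, as asserted above
                    downstream=True,          # also mark downstream tasks
                    future=False, past=False,
                    state=State.SUCCESS,
                    commit=True,              # actually write the new state
                    session=session)
session.commit()
print('%d task instances marked success' % len(altered))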
Helper method that sets the dag run state in the DB.: param dag_id: dag_id of target dag run: param execution_date: the execution date from which to start looking: param state: target state: param session: database session
def _set_dag_run_state(dag_id, execution_date, state, session=None):
    """
    Helper method that sets the dag run state in the DB.
    :param dag_id: dag_id of target dag run
    :param execution_date: the execution date from which to start looking
    :param state: target state
    :param session: database session
    """
    DR = DagRun
    dr = session.query(DR).filter(
        DR.dag_id == dag_id,
        DR.execution_date == execution_date
    ).one()
    dr.state = state
    if state == State.RUNNING:
        dr.start_date = timezone.utcnow()
        dr.end_date = None
    else:
        dr.end_date = timezone.utcnow()
    session.merge(dr)
Set the dag run for a specific execution date and its task instances to success.: param dag: the DAG of which to alter state: param execution_date: the execution date from which to start looking: param commit: commit DAG and tasks to be altered to the database: param session: database session: return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated: raises: AssertionError if dag or execution_date is invalid
def set_dag_run_state_to_success(dag, execution_date, commit=False, session=None): """ Set the dag run for a specific execution date and its task instances to success. :param dag: the DAG of which to alter state :param execution_date: the execution date from which to start looking :param commit: commit DAG and tasks to be altered to the database :param session: database session :return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated :raises: AssertionError if dag or execution_date is invalid """ res = [] if not dag or not execution_date: return res # Mark the dag run to success. if commit: _set_dag_run_state(dag.dag_id, execution_date, State.SUCCESS, session) # Mark all task instances of the dag run to success. for task in dag.tasks: task.dag = dag new_state = set_state(task=task, execution_date=execution_date, state=State.SUCCESS, commit=commit) res.extend(new_state) return res
Set the dag run for a specific execution date and its running task instances to failed.: param dag: the DAG of which to alter state: param execution_date: the execution date from which to start looking: param commit: commit DAG and tasks to be altered to the database: param session: database session: return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated: raises: AssertionError if dag or execution_date is invalid
def set_dag_run_state_to_failed(dag, execution_date, commit=False, session=None): """ Set the dag run for a specific execution date and its running task instances to failed. :param dag: the DAG of which to alter state :param execution_date: the execution date from which to start looking :param commit: commit DAG and tasks to be altered to the database :param session: database session :return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated :raises: AssertionError if dag or execution_date is invalid """ res = [] if not dag or not execution_date: return res # Mark the dag run to failed. if commit: _set_dag_run_state(dag.dag_id, execution_date, State.FAILED, session) # Mark only RUNNING task instances. TI = TaskInstance task_ids = [task.task_id for task in dag.tasks] tis = session.query(TI).filter( TI.dag_id == dag.dag_id, TI.execution_date == execution_date, TI.task_id.in_(task_ids)).filter(TI.state == State.RUNNING) task_ids_of_running_tis = [ti.task_id for ti in tis] for task in dag.tasks: if task.task_id not in task_ids_of_running_tis: continue task.dag = dag new_state = set_state(task=task, execution_date=execution_date, state=State.FAILED, commit=commit) res.extend(new_state) return res
Set the dag run for a specific execution date to running.: param dag: the DAG of which to alter state: param execution_date: the execution date from which to start looking: param commit: commit DAG and tasks to be altered to the database: param session: database session: return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated
def set_dag_run_state_to_running(dag, execution_date, commit=False, session=None): """ Set the dag run for a specific execution date to running. :param dag: the DAG of which to alter state :param execution_date: the execution date from which to start looking :param commit: commit DAG and tasks to be altered to the database :param session: database session :return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated """ res = [] if not dag or not execution_date: return res # Mark the dag run to running. if commit: _set_dag_run_state(dag.dag_id, execution_date, State.RUNNING, session) # To keep the return type consistent with the other similar functions. return res
Return a version to identify the state of the underlying git repo. The version will indicate whether the head of the current git-backed working directory is tied to a release tag or not: it will indicate the former with a 'release:{version}' prefix and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes are present.
def git_version(version): """ Return a version to identify the state of the underlying git repo. The version will indicate whether the head of the current git-backed working directory is tied to a release tag or not : it will indicate the former with a 'release:{version}' prefix and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes are present. """ repo = None try: import git repo = git.Repo('.git') except ImportError: logger.warning('gitpython not found: Cannot compute the git version.') return '' except Exception as e: logger.warning('Cannot compute the git version. {}'.format(e)) return '' if repo: sha = repo.head.commit.hexsha if repo.is_dirty(): return '.dev0+{sha}.dirty'.format(sha=sha) # commit is clean return '.release:{version}+{sha}'.format(version=version, sha=sha) else: return 'no_git_version'
Call the DiscordWebhookHook to post message
def execute(self, context): """ Call the DiscordWebhookHook to post message """ self.hook = DiscordWebhookHook( self.http_conn_id, self.webhook_endpoint, self.message, self.username, self.avatar_url, self.tts, self.proxy ) self.hook.execute()
Validates if field is OK.
def _validate_field(self, validation_spec, dictionary_to_validate, parent=None, force_optional=False): """ Validates if field is OK. :param validation_spec: specification of the field :type validation_spec: dict :param dictionary_to_validate: dictionary where the field should be present :type dictionary_to_validate: dict :param parent: full path of parent field :type parent: str :param force_optional: forces the field to be optional (all union fields have force_optional set to True) :type force_optional: bool :return: True if the field is present """ field_name = validation_spec['name'] field_type = validation_spec.get('type') optional = validation_spec.get('optional') regexp = validation_spec.get('regexp') allow_empty = validation_spec.get('allow_empty') children_validation_specs = validation_spec.get('fields') required_api_version = validation_spec.get('api_version') custom_validation = validation_spec.get('custom_validation') full_field_path = self._get_field_name_with_parent(field_name=field_name, parent=parent) if required_api_version and required_api_version != self._api_version: self.log.debug( "Skipping validation of the field '%s' for API version '%s' " "as it is only valid for API version '%s'", field_name, self._api_version, required_api_version) return False value = dictionary_to_validate.get(field_name) if (optional or force_optional) and value is None: self.log.debug("The optional field '%s' is missing. That's perfectly OK.", full_field_path) return False # Certainly down from here the field is present (value is not None) # so we should only return True from now on self._sanity_checks(children_validation_specs=children_validation_specs, field_type=field_type, full_field_path=full_field_path, regexp=regexp, allow_empty=allow_empty, custom_validation=custom_validation, value=value) if allow_empty is False: self._validate_is_empty(full_field_path, value) if regexp: self._validate_regexp(full_field_path, regexp, value) elif field_type == 'dict': if not isinstance(value, dict): raise GcpFieldValidationException( "The field '{}' should be of dictionary type according to the " "specification '{}' but it is '{}'". format(full_field_path, validation_spec, value)) if children_validation_specs is None: self.log.debug( "The dict field '%s' has no nested fields defined in the " "specification '%s'. That's perfectly ok - it's content will " "not be validated.", full_field_path, validation_spec) else: self._validate_dict(children_validation_specs, full_field_path, value) elif field_type == 'union': if not children_validation_specs: raise GcpValidationSpecificationException( "The union field '%s' has no nested fields " "defined in specification '%s'. Unions should have at least one " "nested field defined.", full_field_path, validation_spec) self._validate_union(children_validation_specs, full_field_path, dictionary_to_validate) elif field_type == 'list': if not isinstance(value, list): raise GcpFieldValidationException( "The field '{}' should be of list type according to the " "specification '{}' but it is '{}'". format(full_field_path, validation_spec, value)) elif custom_validation: try: custom_validation(value) except Exception as e: raise GcpFieldValidationException( "Error while validating custom field '{}' specified by '{}': '{}'". format(full_field_path, validation_spec, e)) elif field_type is None: self.log.debug("The type of field '%s' is not specified in '%s'. 
" "Not validating its content.", full_field_path, validation_spec) else: raise GcpValidationSpecificationException( "The field '{}' is of type '{}' in specification '{}'." "This type is unknown to validation!".format( full_field_path, field_type, validation_spec)) return True
Validates if the body (dictionary) follows specification that the validator was instantiated with. Raises ValidationSpecificationException or ValidationFieldException in case of problems with specification or the body not conforming to the specification respectively.
def validate(self, body_to_validate):
        """
        Validates if the body (dictionary) follows specification that the
        validator was instantiated with. Raises ValidationSpecificationException or
        ValidationFieldException in case of problems with specification or the
        body not conforming to the specification respectively.
        :param body_to_validate: body that must follow the specification
        :type body_to_validate: dict
        :return: None
        """
        try:
            for validation_spec in self._validation_specs:
                self._validate_field(validation_spec=validation_spec,
                                     dictionary_to_validate=body_to_validate)
        except GcpFieldValidationException as e:
            raise GcpFieldValidationException(
                "There was an error when validating: body '{}': '{}'".
                format(body_to_validate, e))
        all_field_names = [spec['name'] for spec in self._validation_specs
                           if spec.get('type') != 'union' and
                           spec.get('api_version') != self._api_version]
        all_union_fields = [spec for spec in self._validation_specs
                            if spec.get('type') == 'union']
        for union_field in all_union_fields:
            all_field_names.extend(
                [nested_union_spec['name'] for nested_union_spec in union_field['fields']
                 if nested_union_spec.get('type') != 'union' and
                 nested_union_spec.get('api_version') != self._api_version])
        for field_name in body_to_validate.keys():
            if field_name not in all_field_names:
                self.log.warning(
                    "The field '%s' is in the body, but is not specified in the "
                    "validation specification '%s'. "
                    "This might be because you are using newer API version and "
                    "new field names defined for that version. Then the warning "
                    "can be safely ignored, or you might want to upgrade the operator "
                    "to the version that supports the new API version.",
                    field_name, self._validation_specs)
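A sketch of how a validation specification and a body might fit together, assuming the validator is constructed from a list of field specs plus an API version; the GcpBodyFieldValidator import path, class name and constructor signature are assumptions used for illustration only.

# The import path, class name and constructor signature below are assumptions.
from airflow.contrib.utils.gcp_field_validator import GcpBodyFieldValidator

SPECIFICATION = [
    dict(name='name', allow_empty=False, regexp=r'^[a-z][a-z0-9-]*$'),
    dict(name='labels', optional=True, type='dict'),
    dict(name='source', type='union', fields=[
        dict(name='source_archive_url', regexp=r'^gs://'),
        dict(name='source_repository', type='dict'),
    ]),
]

body = {
    'name': 'my-function',
    'labels': {'team': 'data'},
    'source_archive_url': 'gs://my-bucket/code.zip',   # satisfies the 'source' union
}

validator = GcpBodyFieldValidator(SPECIFICATION, api_version='v1')
validator.validate(body)   # raises GcpFieldValidationException on problems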
Return the FileService object.
def get_conn(self): """Return the FileService object.""" conn = self.get_connection(self.conn_id) service_options = conn.extra_dejson return FileService(account_name=conn.login, account_key=conn.password, **service_options)
Check if a directory exists on Azure File Share.
def check_for_directory(self, share_name, directory_name, **kwargs):
        """
        Check if a directory exists on Azure File Share.
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.exists()` takes.
        :type kwargs: object
        :return: True if the directory exists, False otherwise.
        :rtype: bool
        """
        return self.connection.exists(share_name, directory_name, **kwargs)
Check if a file exists on Azure File Share.
def check_for_file(self, share_name, directory_name, file_name, **kwargs): """ Check if a file exists on Azure File Share. :param share_name: Name of the share. :type share_name: str :param directory_name: Name of the directory. :type directory_name: str :param file_name: Name of the file. :type file_name: str :param kwargs: Optional keyword arguments that `FileService.exists()` takes. :type kwargs: object :return: True if the file exists, False otherwise. :rtype: bool """ return self.connection.exists(share_name, directory_name, file_name, **kwargs)
Return the list of directories and files stored on an Azure File Share.
def list_directories_and_files(self, share_name, directory_name=None, **kwargs):
        """
        Return the list of directories and files stored on an Azure File Share.
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.list_directories_and_files()` takes.
        :type kwargs: object
        :return: A list of files and directories
        :rtype: list
        """
        return self.connection.list_directories_and_files(share_name,
                                                          directory_name,
                                                          **kwargs)
Create a new directory on an Azure File Share.
def create_directory(self, share_name, directory_name, **kwargs):
        """
        Create a new directory on an Azure File Share.
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.create_directory()` takes.
        :type kwargs: object
        :return: True if the directory was created, False otherwise.
        :rtype: bool
        """
        return self.connection.create_directory(share_name, directory_name, **kwargs)
Download a file from Azure File Share.
def get_file(self, file_path, share_name, directory_name, file_name, **kwargs): """ Download a file from Azure File Share. :param file_path: Where to store the file. :type file_path: str :param share_name: Name of the share. :type share_name: str :param directory_name: Name of the directory. :type directory_name: str :param file_name: Name of the file. :type file_name: str :param kwargs: Optional keyword arguments that `FileService.get_file_to_path()` takes. :type kwargs: object """ self.connection.get_file_to_path(share_name, directory_name, file_name, file_path, **kwargs)
Download a file from Azure File Share.
def get_file_to_stream(self, stream, share_name, directory_name, file_name, **kwargs): """ Download a file from Azure File Share. :param stream: A filehandle to store the file to. :type stream: file-like object :param share_name: Name of the share. :type share_name: str :param directory_name: Name of the directory. :type directory_name: str :param file_name: Name of the file. :type file_name: str :param kwargs: Optional keyword arguments that `FileService.get_file_to_stream()` takes. :type kwargs: object """ self.connection.get_file_to_stream(share_name, directory_name, file_name, stream, **kwargs)
Upload a file to Azure File Share.
def load_file(self, file_path, share_name, directory_name, file_name, **kwargs): """ Upload a file to Azure File Share. :param file_path: Path to the file to load. :type file_path: str :param share_name: Name of the share. :type share_name: str :param directory_name: Name of the directory. :type directory_name: str :param file_name: Name of the file. :type file_name: str :param kwargs: Optional keyword arguments that `FileService.create_file_from_path()` takes. :type kwargs: object """ self.connection.create_file_from_path(share_name, directory_name, file_name, file_path, **kwargs)
Upload a string to Azure File Share.
def load_string(self, string_data, share_name, directory_name, file_name, **kwargs): """ Upload a string to Azure File Share. :param string_data: String to load. :type string_data: str :param share_name: Name of the share. :type share_name: str :param directory_name: Name of the directory. :type directory_name: str :param file_name: Name of the file. :type file_name: str :param kwargs: Optional keyword arguments that `FileService.create_file_from_text()` takes. :type kwargs: object """ self.connection.create_file_from_text(share_name, directory_name, file_name, string_data, **kwargs)
Upload a stream to Azure File Share.
def load_stream(self, stream, share_name, directory_name, file_name, count, **kwargs): """ Upload a stream to Azure File Share. :param stream: Opened file/stream to upload as the file content. :type stream: file-like :param share_name: Name of the share. :type share_name: str :param directory_name: Name of the directory. :type directory_name: str :param file_name: Name of the file. :type file_name: str :param count: Size of the stream in bytes :type count: int :param kwargs: Optional keyword arguments that `FileService.create_file_from_stream()` takes. :type kwargs: object """ self.connection.create_file_from_stream(share_name, directory_name, file_name, stream, count, **kwargs)
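A short usage sketch tying the file share methods together; the AzureFileShareHook import path, its constructor keyword and the connection id are assumptions, as are the share and directory names.

# Usage sketch; the hook import path, constructor argument and connection id
# are assumptions, and 'myshare'/'exports' are placeholders.
from airflow.contrib.hooks.azure_fileshare_hook import AzureFileShareHook

hook = AzureFileShareHook(wasb_conn_id='azure_fileshare_default')

if not hook.check_for_directory('myshare', 'exports'):
    hook.create_directory('myshare', 'exports')

hook.load_string('col_a,col_b\n1,2\n', 'myshare', 'exports', 'demo.csv')

for entry in hook.list_directories_and_files('myshare', 'exports'):
    print(entry.name)

hook.get_file('/tmp/demo.csv', 'myshare', 'exports', 'demo.csv')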
Provide filename context to airflow task handler.: param filename: filename in which the dag is located
def set_context(self, filename): """ Provide filename context to airflow task handler. :param filename: filename in which the dag is located """ local_loc = self._init_file(filename) self.handler = logging.FileHandler(local_loc) self.handler.setFormatter(self.formatter) self.handler.setLevel(self.level) if self._cur_date < datetime.today(): self._symlink_latest_log_directory() self._cur_date = datetime.today()
Create log file and directory if required.: param filename: filename of the DAG file being processed: return: absolute path of the log file
def _init_file(self, filename):
        """
        Create log file and directory if required.
        :param filename: filename of the DAG file being processed
        :return: absolute path of the log file for the given filename
        """
        relative_path = self._render_filename(filename)
        full_path = os.path.join(self._get_log_directory(), relative_path)
        directory = os.path.dirname(full_path)

        if not os.path.exists(directory):
            try:
                os.makedirs(directory)
            except OSError:
                if not os.path.isdir(directory):
                    raise

        if not os.path.exists(full_path):
            open(full_path, "a").close()

        return full_path
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a tuple containing the corresponding bucket and blob.
def _parse_gcs_url(gsurl): """ Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a tuple containing the corresponding bucket and blob. """ parsed_url = urlparse(gsurl) if not parsed_url.netloc: raise AirflowException('Please provide a bucket name') else: bucket = parsed_url.netloc # Remove leading '/' but NOT trailing one blob = parsed_url.path.lstrip('/') return bucket, blob
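A few concrete inputs and the tuples they produce, following the parsing above (assuming _parse_gcs_url is importable from this module):

# Illustrative inputs and outputs for _parse_gcs_url.
assert _parse_gcs_url('gs://my-bucket/path/to/file.csv') == ('my-bucket', 'path/to/file.csv')
assert _parse_gcs_url('gs://my-bucket/dir/') == ('my-bucket', 'dir/')   # trailing '/' is kept
assert _parse_gcs_url('gs://my-bucket') == ('my-bucket', '')
# _parse_gcs_url('/no/bucket/here') raises AirflowException: no bucket in the URL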