repo_name: stringclasses (4 values)
method_name: stringlengths (3 to 72)
method_code: stringlengths (87 to 3.59k)
method_summary: stringlengths (12 to 196)
original_method_code: stringlengths (129 to 8.98k)
method_path: stringlengths (15 to 136)
apache/airflow
BackfillJob._set_unfinished_dag_runs_to_failed
def _set_unfinished_dag_runs_to_failed(self, dag_runs, session=None): for dag_run in dag_runs: dag_run.update_state() if dag_run.state not in State.finished(): dag_run.set_state(State.FAILED) session.merge(dag_run)
Go through the dag_runs and update the state based on the task_instance state. Then set DAG runs that are not finished to failed.
def _set_unfinished_dag_runs_to_failed(self, dag_runs, session=None): """ Go through the dag_runs and update the state based on the task_instance state. Then set DAG runs that are not finished to failed. :param dag_runs: DAG runs :param session: session :return: None """ for dag_run in dag_runs: dag_run.update_state() if dag_run.state not in State.finished(): dag_run.set_state(State.FAILED) session.merge(dag_run)
airflow/jobs.py
apache/airflow
BackfillJob._execute
def _execute(self, session=None): ti_status = BackfillJob._DagRunTaskStatus() start_date = self.bf_start_date run_dates = self.dag.get_run_dates(start_date=start_date, end_date=self.bf_end_date) if self.run_backwards: tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past] if tasks_that_depend_on_past: raise AirflowException( 'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format( ",".join(tasks_that_depend_on_past))) run_dates = run_dates[::-1] if len(run_dates) == 0: self.log.info("No run dates were found for the given dates and dag interval.") return pickle_id = None if not self.donot_pickle and self.executor.__class__ not in ( executors.LocalExecutor, executors.SequentialExecutor): pickle = DagPickle(self.dag) session.add(pickle) session.commit() pickle_id = pickle.id executor = self.executor executor.start() ti_status.total_runs = len(run_dates) try: remaining_dates = ti_status.total_runs while remaining_dates > 0: dates_to_process = [run_date for run_date in run_dates if run_date not in ti_status.executed_dag_run_dates] self._execute_for_run_dates(run_dates=dates_to_process, ti_status=ti_status, executor=executor, pickle_id=pickle_id, start_date=start_date, session=session) remaining_dates = ( ti_status.total_runs - len(ti_status.executed_dag_run_dates) ) err = self._collect_errors(ti_status=ti_status, session=session) if err: raise AirflowException(err) if remaining_dates > 0: self.log.info( "max_active_runs limit for dag %s has been reached " " - waiting for other dag runs to finish", self.dag_id ) time.sleep(self.delay_on_limit_secs) except (KeyboardInterrupt, SystemExit): self.log.warning("Backfill terminated by user.") self._set_unfinished_dag_runs_to_failed(ti_status.active_runs) finally: session.commit() executor.end() self.log.info("Backfill done. Exiting.")
Initializes all components required to run a dag for a specified date range and calls helper method to execute the tasks.
def _execute(self, session=None): """ Initializes all components required to run a dag for a specified date range and calls helper method to execute the tasks. """ ti_status = BackfillJob._DagRunTaskStatus() start_date = self.bf_start_date # Get intervals between the start/end dates, which will turn into dag runs run_dates = self.dag.get_run_dates(start_date=start_date, end_date=self.bf_end_date) if self.run_backwards: tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past] if tasks_that_depend_on_past: raise AirflowException( 'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format( ",".join(tasks_that_depend_on_past))) run_dates = run_dates[::-1] if len(run_dates) == 0: self.log.info("No run dates were found for the given dates and dag interval.") return # picklin' pickle_id = None if not self.donot_pickle and self.executor.__class__ not in ( executors.LocalExecutor, executors.SequentialExecutor): pickle = DagPickle(self.dag) session.add(pickle) session.commit() pickle_id = pickle.id executor = self.executor executor.start() ti_status.total_runs = len(run_dates) # total dag runs in backfill try: remaining_dates = ti_status.total_runs while remaining_dates > 0: dates_to_process = [run_date for run_date in run_dates if run_date not in ti_status.executed_dag_run_dates] self._execute_for_run_dates(run_dates=dates_to_process, ti_status=ti_status, executor=executor, pickle_id=pickle_id, start_date=start_date, session=session) remaining_dates = ( ti_status.total_runs - len(ti_status.executed_dag_run_dates) ) err = self._collect_errors(ti_status=ti_status, session=session) if err: raise AirflowException(err) if remaining_dates > 0: self.log.info( "max_active_runs limit for dag %s has been reached " " - waiting for other dag runs to finish", self.dag_id ) time.sleep(self.delay_on_limit_secs) except (KeyboardInterrupt, SystemExit): self.log.warning("Backfill terminated by user.") # TODO: we will need to terminate running task instances and set the # state to failed. self._set_unfinished_dag_runs_to_failed(ti_status.active_runs) finally: session.commit() executor.end() self.log.info("Backfill done. Exiting.")
airflow/jobs.py
apache/airflow
LocalTaskJob.heartbeat_callback
def heartbeat_callback(self, session=None): if self.terminating: self.task_runner.terminate() return self.task_instance.refresh_from_db() ti = self.task_instance fqdn = get_hostname() same_hostname = fqdn == ti.hostname same_process = ti.pid == os.getpid() if ti.state == State.RUNNING: if not same_hostname: self.log.warning("The recorded hostname %s " "does not match this instance's hostname " "%s", ti.hostname, fqdn) raise AirflowException("Hostname of job runner does not match") elif not same_process: current_pid = os.getpid() self.log.warning("Recorded pid %s does not match " "the current pid %s", ti.pid, current_pid) raise AirflowException("PID of job runner does not match") elif ( self.task_runner.return_code() is None and hasattr(self.task_runner, 'process') ): self.log.warning( "State of this instance has been externally set to %s. " "Taking the poison pill.", ti.state ) self.task_runner.terminate() self.terminating = True
Self destruct task if state has been moved away from running externally
def heartbeat_callback(self, session=None): """Self destruct task if state has been moved away from running externally""" if self.terminating: # ensure termination if processes are created later self.task_runner.terminate() return self.task_instance.refresh_from_db() ti = self.task_instance fqdn = get_hostname() same_hostname = fqdn == ti.hostname same_process = ti.pid == os.getpid() if ti.state == State.RUNNING: if not same_hostname: self.log.warning("The recorded hostname %s " "does not match this instance's hostname " "%s", ti.hostname, fqdn) raise AirflowException("Hostname of job runner does not match") elif not same_process: current_pid = os.getpid() self.log.warning("Recorded pid %s does not match " "the current pid %s", ti.pid, current_pid) raise AirflowException("PID of job runner does not match") elif ( self.task_runner.return_code() is None and hasattr(self.task_runner, 'process') ): self.log.warning( "State of this instance has been externally set to %s. " "Taking the poison pill.", ti.state ) self.task_runner.terminate() self.terminating = True
airflow/jobs.py
apache/airflow
CloudSpannerHook._get_client
def _get_client(self, project_id): if not self._client: self._client = Client(project=project_id, credentials=self._get_credentials()) return self._client
Provides a client for interacting with the Cloud Spanner API.
def _get_client(self, project_id): """ Provides a client for interacting with the Cloud Spanner API. :param project_id: The ID of the GCP project. :type project_id: str :return: google.cloud.spanner_v1.client.Client :rtype: object """ if not self._client: self._client = Client(project=project_id, credentials=self._get_credentials()) return self._client
airflow/contrib/hooks/gcp_spanner_hook.py
apache/airflow
CloudSpannerHook.get_instance
def get_instance(self, instance_id, project_id=None): instance = self._get_client(project_id=project_id).instance(instance_id=instance_id) if not instance.exists(): return None return instance
Gets information about a particular instance.
def get_instance(self, instance_id, project_id=None): """ Gets information about a particular instance. :param project_id: Optional, The ID of the GCP project that owns the Cloud Spanner database. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :param instance_id: The ID of the Cloud Spanner instance. :type instance_id: str :return: google.cloud.spanner_v1.instance.Instance :rtype: object """ instance = self._get_client(project_id=project_id).instance(instance_id=instance_id) if not instance.exists(): return None return instance
airflow/contrib/hooks/gcp_spanner_hook.py
apache/airflow
CloudSpannerHook._apply_to_instance
def _apply_to_instance(self, project_id, instance_id, configuration_name, node_count, display_name, func): instance = self._get_client(project_id=project_id).instance( instance_id=instance_id, configuration_name=configuration_name, node_count=node_count, display_name=display_name) try: operation = func(instance) except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e if operation: result = operation.result() self.log.info(result)
Invokes a method on a given instance by applying a specified Callable.
def _apply_to_instance(self, project_id, instance_id, configuration_name, node_count, display_name, func): """ Invokes a method on a given instance by applying a specified Callable. :param project_id: The ID of the GCP project that owns the Cloud Spanner database. :type project_id: str :param instance_id: The ID of the instance. :type instance_id: str :param configuration_name: Name of the instance configuration defining how the instance will be created. Required for instances which do not yet exist. :type configuration_name: str :param node_count: (Optional) Number of nodes allocated to the instance. :type node_count: int :param display_name: (Optional) The display name for the instance in the Cloud Console UI. (Must be between 4 and 30 characters.) If this value is not set in the constructor, will fall back to the instance ID. :type display_name: str :param func: Method of the instance to be called. :type func: Callable """ # noinspection PyUnresolvedReferences instance = self._get_client(project_id=project_id).instance( instance_id=instance_id, configuration_name=configuration_name, node_count=node_count, display_name=display_name) try: operation = func(instance) # type: Operation except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e if operation: result = operation.result() self.log.info(result)
airflow/contrib/hooks/gcp_spanner_hook.py
apache/airflow
CloudSpannerHook.create_instance
def create_instance(self, instance_id, configuration_name, node_count, display_name, project_id=None): self._apply_to_instance(project_id, instance_id, configuration_name, node_count, display_name, lambda x: x.create())
Creates a new Cloud Spanner instance.
def create_instance(self, instance_id, configuration_name, node_count, display_name, project_id=None): """ Creates a new Cloud Spanner instance. :param instance_id: The ID of the Cloud Spanner instance. :type instance_id: str :param configuration_name: The name of the instance configuration defining how the instance will be created. Possible configuration values can be retrieved via https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list :type configuration_name: str :param node_count: (Optional) The number of nodes allocated to the Cloud Spanner instance. :type node_count: int :param display_name: (Optional) The display name for the instance in the GCP Console. Must be between 4 and 30 characters. If this value is not set in the constructor, the name falls back to the instance ID. :type display_name: str :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner database. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ self._apply_to_instance(project_id, instance_id, configuration_name, node_count, display_name, lambda x: x.create())
airflow/contrib/hooks/gcp_spanner_hook.py
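A minimal usage sketch for the instance-level methods above. The CloudSpannerHook constructor arguments and the configuration name below are assumptions for illustration; only the method signatures shown in this section are taken as given.
from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

# Hypothetical construction; the connection id argument is an assumption.
hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')

# Create a three-node instance (the configuration name is illustrative).
hook.create_instance(
    instance_id='testinstance',
    configuration_name='projects/my-project/instanceConfigs/regional-europe-west1',
    node_count=3,
    display_name='Test Instance',
    project_id='my-project')

# get_instance returns None when the instance does not exist.
if hook.get_instance(instance_id='testinstance', project_id='my-project'):
    print('instance exists')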
apache/airflow
CloudSpannerHook.update_instance
def update_instance(self, instance_id, configuration_name, node_count, display_name, project_id=None): return self._apply_to_instance(project_id, instance_id, configuration_name, node_count, display_name, lambda x: x.update())
Updates an existing Cloud Spanner instance.
def update_instance(self, instance_id, configuration_name, node_count, display_name, project_id=None): """ Updates an existing Cloud Spanner instance. :param instance_id: The ID of the Cloud Spanner instance. :type instance_id: str :param configuration_name: The name of the instance configuration defining how the instance will be created. Possible configuration values can be retrieved via https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list :type configuration_name: str :param node_count: (Optional) The number of nodes allocated to the Cloud Spanner instance. :type node_count: int :param display_name: (Optional) The display name for the instance in the GCP Console. Must be between 4 and 30 characters. If this value is not set in the constructor, the name falls back to the instance ID. :type display_name: str :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner database. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ return self._apply_to_instance(project_id, instance_id, configuration_name, node_count, display_name, lambda x: x.update())
airflow/contrib/hooks/gcp_spanner_hook.py
apache/airflow
CloudSpannerHook.delete_instance
def delete_instance(self, instance_id, project_id=None): instance = self._get_client(project_id=project_id).instance(instance_id) try: instance.delete() return except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e
Deletes an existing Cloud Spanner instance.
def delete_instance(self, instance_id, project_id=None): """ Deletes an existing Cloud Spanner instance. :param instance_id: The ID of the Cloud Spanner instance. :type instance_id: str :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner database. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ instance = self._get_client(project_id=project_id).instance(instance_id) try: instance.delete() return except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e
airflow/contrib/hooks/gcp_spanner_hook.py
apache/airflow
CloudSpannerHook.get_database
def get_database(self, instance_id, database_id, project_id=None): instance = self._get_client(project_id=project_id).instance( instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id) if not database.exists(): return None else: return database
Retrieves a database in Cloud Spanner. If the database does not exist in the specified instance, it returns None.
def get_database(self, instance_id, database_id, project_id=None): """ Retrieves a database in Cloud Spanner. If the database does not exist in the specified instance, it returns None. :param instance_id: The ID of the Cloud Spanner instance. :type instance_id: str :param database_id: The ID of the database in Cloud Spanner. :type database_id: str :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner database. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: Database object or None if database does not exist :rtype: google.cloud.spanner_v1.database.Database or None """ instance = self._get_client(project_id=project_id).instance( instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id) if not database.exists(): return None else: return database
airflow/contrib/hooks/gcp_spanner_hook.py
apache/airflow
CloudSpannerHook.create_database
def create_database(self, instance_id, database_id, ddl_statements, project_id=None): instance = self._get_client(project_id=project_id).instance( instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id, ddl_statements=ddl_statements) try: operation = database.create() except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e if operation: result = operation.result() self.log.info(result) return
Creates a new database in Cloud Spanner.
def create_database(self, instance_id, database_id, ddl_statements, project_id=None): """ Creates a new database in Cloud Spanner. :param instance_id: The ID of the Cloud Spanner instance. :type instance_id: str :param database_id: The ID of the database to create in Cloud Spanner. :type database_id: str :param ddl_statements: The string list containing DDL for the new database. :type ddl_statements: list[str] :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner database. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ instance = self._get_client(project_id=project_id).instance( instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id, ddl_statements=ddl_statements) try: operation = database.create() # type: Operation except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e if operation: result = operation.result() self.log.info(result) return
airflow/contrib/hooks/gcp_spanner_hook.py
apache/airflow
CloudSpannerHook.update_database
def update_database(self, instance_id, database_id, ddl_statements, project_id=None, operation_id=None): instance = self._get_client(project_id=project_id).instance( instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id) try: operation = database.update_ddl( ddl_statements=ddl_statements, operation_id=operation_id) if operation: result = operation.result() self.log.info(result) return except AlreadyExists as e: if e.code == 409 and operation_id in e.message: self.log.info("Replayed update_ddl message - the operation id %s " "was already done before.", operation_id) return except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e
Updates DDL of a database in Cloud Spanner.
def update_database(self, instance_id, database_id, ddl_statements, project_id=None, operation_id=None): """ Updates DDL of a database in Cloud Spanner. :param instance_id: The ID of the Cloud Spanner instance. :type instance_id: str :param database_id: The ID of the database in Cloud Spanner. :type database_id: str :param ddl_statements: The string list containing DDL to apply to the database. :type ddl_statements: list[str] :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner database. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :param operation_id: (Optional) The unique per database operation ID that can be specified to implement idempotency check. :type operation_id: str :return: None """ instance = self._get_client(project_id=project_id).instance( instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id) try: operation = database.update_ddl( ddl_statements=ddl_statements, operation_id=operation_id) if operation: result = operation.result() self.log.info(result) return except AlreadyExists as e: if e.code == 409 and operation_id in e.message: self.log.info("Replayed update_ddl message - the operation id %s " "was already done before.", operation_id) return except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e
airflow/contrib/hooks/gcp_spanner_hook.py
apache/airflow
CloudSpannerHook.delete_database
def delete_database(self, instance_id, database_id, project_id=None): instance = self._get_client(project_id=project_id).\ instance(instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id) if not database.exists(): self.log.info("The database {} is already deleted from instance {}. " "Exiting.".format(database_id, instance_id)) return try: operation = database.drop() except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e if operation: result = operation.result() self.log.info(result) return
Drops a database in Cloud Spanner.
def delete_database(self, instance_id, database_id, project_id=None): """ Drops a database in Cloud Spanner. :param instance_id: The ID of the Cloud Spanner instance. :type instance_id: str :param database_id: The ID of the database in Cloud Spanner. :type database_id: str :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner database. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None """ instance = self._get_client(project_id=project_id).\ instance(instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id) if not database.exists(): self.log.info("The database {} is already deleted from instance {}. " "Exiting.".format(database_id, instance_id)) return try: operation = database.drop() # type: Operation except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e if operation: result = operation.result() self.log.info(result) return
airflow/contrib/hooks/gcp_spanner_hook.py
apache/airflow
ImapAttachmentSensor.poke
def poke(self, context): self.log.info('Poking for %s', self.attachment_name) with ImapHook(imap_conn_id=self.conn_id) as imap_hook: return imap_hook.has_mail_attachment( name=self.attachment_name, mail_folder=self.mail_folder, check_regex=self.check_regex )
Pokes for a mail attachment on the mail server.
def poke(self, context): """ Pokes for a mail attachment on the mail server. :param context: The context that is being provided when poking. :type context: dict :return: True if attachment with the given name is present and False if not. :rtype: bool """ self.log.info('Poking for %s', self.attachment_name) with ImapHook(imap_conn_id=self.conn_id) as imap_hook: return imap_hook.has_mail_attachment( name=self.attachment_name, mail_folder=self.mail_folder, check_regex=self.check_regex )
airflow/contrib/sensors/imap_attachment_sensor.py
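A hedged sketch of how such a sensor is typically placed in a DAG; the constructor arguments below are inferred from the attributes used in poke (attachment_name, mail_folder, check_regex, conn_id) and are assumptions, since the constructor is not shown here.
from airflow.contrib.sensors.imap_attachment_sensor import ImapAttachmentSensor

# Hypothetical task; argument names mirror the attributes read in poke().
wait_for_report = ImapAttachmentSensor(
    task_id='wait_for_report',
    attachment_name='report.csv',
    mail_folder='INBOX',
    check_regex=False,
    conn_id='imap_default',
    poke_interval=60,
    dag=dag)  # assumes a DAG object named `dag` already exists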
apache/airflow
prepare_additional_parameters
def prepare_additional_parameters(additional_properties, language_hints, web_detection_params): if language_hints is None and web_detection_params is None: return additional_properties if additional_properties is None: return {} merged_additional_parameters = deepcopy(additional_properties) if 'image_context' not in merged_additional_parameters: merged_additional_parameters['image_context'] = {} merged_additional_parameters['image_context']['language_hints'] = merged_additional_parameters[ 'image_context' ].get('language_hints', language_hints) merged_additional_parameters['image_context']['web_detection_params'] = merged_additional_parameters[ 'image_context' ].get('web_detection_params', web_detection_params) return merged_additional_parameters
Creates additional_properties parameter based on language_hints, web_detection_params and additional_properties parameters specified by the user
def prepare_additional_parameters(additional_properties, language_hints, web_detection_params): """ Creates additional_properties parameter based on language_hints, web_detection_params and additional_properties parameters specified by the user """ if language_hints is None and web_detection_params is None: return additional_properties if additional_properties is None: return {} merged_additional_parameters = deepcopy(additional_properties) if 'image_context' not in merged_additional_parameters: merged_additional_parameters['image_context'] = {} merged_additional_parameters['image_context']['language_hints'] = merged_additional_parameters[ 'image_context' ].get('language_hints', language_hints) merged_additional_parameters['image_context']['web_detection_params'] = merged_additional_parameters[ 'image_context' ].get('web_detection_params', web_detection_params) return merged_additional_parameters
airflow/contrib/operators/gcp_vision_operator.py
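A worked example of the merge above: values already present under image_context in additional_properties take precedence, because dict.get only falls back to the passed-in argument when the key is missing.
from airflow.contrib.operators.gcp_vision_operator import prepare_additional_parameters

additional_properties = {'image_context': {'language_hints': ['en']}}

result = prepare_additional_parameters(
    additional_properties=additional_properties,
    language_hints=['pl'],
    web_detection_params={'include_geo_results': True})

# The user-supplied hint is kept, the missing key is filled in:
# {'image_context': {'language_hints': ['en'],
#                    'web_detection_params': {'include_geo_results': True}}}
print(result)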
apache/airflow
CassandraHook.table_exists
def table_exists(self, table): keyspace = self.keyspace if '.' in table: keyspace, table = table.split('.', 1) cluster_metadata = self.get_conn().cluster.metadata return (keyspace in cluster_metadata.keyspaces and table in cluster_metadata.keyspaces[keyspace].tables)
Checks if a table exists in Cassandra
def table_exists(self, table): """ Checks if a table exists in Cassandra :param table: Target Cassandra table. Use dot notation to target a specific keyspace. :type table: str """ keyspace = self.keyspace if '.' in table: keyspace, table = table.split('.', 1) cluster_metadata = self.get_conn().cluster.metadata return (keyspace in cluster_metadata.keyspaces and table in cluster_metadata.keyspaces[keyspace].tables)
airflow/contrib/hooks/cassandra_hook.py
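A short usage sketch; the hook construction below is an assumption (only table_exists itself is shown in this section), while the dot-notation behaviour follows directly from the keyspace split in the method.
from airflow.contrib.hooks.cassandra_hook import CassandraHook

# Hypothetical construction; the connection id argument is an assumption.
hook = CassandraHook(cassandra_conn_id='cassandra_default')

hook.table_exists('users')                 # uses the hook's default keyspace
hook.table_exists('analytics.page_views')  # dot notation targets an explicit keyspace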
apache/airflow
CassandraHook.record_exists
def record_exists(self, table, keys): keyspace = self.keyspace if '.' in table: keyspace, table = table.split('.', 1) ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys()) cql = "SELECT * FROM {keyspace}.{table} WHERE {keys}".format( keyspace=keyspace, table=table, keys=ks) try: rs = self.get_conn().execute(cql, keys) return rs.one() is not None except Exception: return False
Checks if a record exists in Cassandra
def record_exists(self, table, keys): """ Checks if a record exists in Cassandra :param table: Target Cassandra table. Use dot notation to target a specific keyspace. :type table: str :param keys: The keys and their values to check the existence. :type keys: dict """ keyspace = self.keyspace if '.' in table: keyspace, table = table.split('.', 1) ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys()) cql = "SELECT * FROM {keyspace}.{table} WHERE {keys}".format( keyspace=keyspace, table=table, keys=ks) try: rs = self.get_conn().execute(cql, keys) return rs.one() is not None except Exception: return False
airflow/contrib/hooks/cassandra_hook.py
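The WHERE clause in record_exists is assembled from the keys dict with named placeholders; this standalone snippet re-runs the same string construction to show what gets executed.
# Re-running the query construction from record_exists for a sample keys dict.
keys = {'id': 1, 'day': '2019-01-01'}
ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys())
cql = "SELECT * FROM {keyspace}.{table} WHERE {keys}".format(
    keyspace='analytics', table='page_views', keys=ks)

# SELECT * FROM analytics.page_views WHERE id=%(id)s AND day=%(day)s
# (placeholder order follows the dict's iteration order)
print(cql)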
apache/airflow
SparkSubmitHook._build_track_driver_status_command
def _build_track_driver_status_command(self): connection_cmd = self._get_spark_binary_path() connection_cmd += ["--master", self._connection['master']] if self._driver_id: connection_cmd += ["--status", self._driver_id] else: raise AirflowException( "Invalid status: attempted to poll driver " + "status but no driver id is known. Giving up.") self.log.debug("Poll driver status cmd: %s", connection_cmd) return connection_cmd
Construct the command to poll the driver status.
def _build_track_driver_status_command(self): """ Construct the command to poll the driver status. :return: full command to be executed """ connection_cmd = self._get_spark_binary_path() # The URL of the Spark master connection_cmd += ["--master", self._connection['master']] # The driver id so we can poll for its status if self._driver_id: connection_cmd += ["--status", self._driver_id] else: raise AirflowException( "Invalid status: attempted to poll driver " + "status but no driver id is known. Giving up.") self.log.debug("Poll driver status cmd: %s", connection_cmd) return connection_cmd
airflow/contrib/hooks/spark_submit_hook.py
apache/airflow
SparkSubmitHook.submit
def submit(self, application="", **kwargs): spark_submit_cmd = self._build_spark_submit_command(application) if hasattr(self, '_env'): env = os.environ.copy() env.update(self._env) kwargs["env"] = env self._submit_sp = subprocess.Popen(spark_submit_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1, universal_newlines=True, **kwargs) self._process_spark_submit_log(iter(self._submit_sp.stdout.readline, '')) returncode = self._submit_sp.wait() if returncode or (self._is_kubernetes and self._spark_exit_code != 0): raise AirflowException( "Cannot execute: {}. Error code is: {}.".format( spark_submit_cmd, returncode ) ) self.log.debug("Should track driver: {}".format(self._should_track_driver_status)) if self._should_track_driver_status: if self._driver_id is None: raise AirflowException( "No driver id is known: something went wrong when executing " + "the spark submit command" ) self._driver_status = "SUBMITTED" self._start_driver_status_tracking() if self._driver_status != "FINISHED": raise AirflowException( "ERROR : Driver {} badly exited with status {}" .format(self._driver_id, self._driver_status) )
Remote Popen to execute the spark-submit job
def submit(self, application="", **kwargs): """ Remote Popen to execute the spark-submit job :param application: Submitted application, jar or py file :type application: str :param kwargs: extra arguments to Popen (see subprocess.Popen) """ spark_submit_cmd = self._build_spark_submit_command(application) if hasattr(self, '_env'): env = os.environ.copy() env.update(self._env) kwargs["env"] = env self._submit_sp = subprocess.Popen(spark_submit_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1, universal_newlines=True, **kwargs) self._process_spark_submit_log(iter(self._submit_sp.stdout.readline, '')) returncode = self._submit_sp.wait() # Check spark-submit return code. In Kubernetes mode, also check the value # of exit code in the log, as it may differ. if returncode or (self._is_kubernetes and self._spark_exit_code != 0): raise AirflowException( "Cannot execute: {}. Error code is: {}.".format( spark_submit_cmd, returncode ) ) self.log.debug("Should track driver: {}".format(self._should_track_driver_status)) # We want the Airflow job to wait until the Spark driver is finished if self._should_track_driver_status: if self._driver_id is None: raise AirflowException( "No driver id is known: something went wrong when executing " + "the spark submit command" ) # We start with the SUBMITTED status as initial status self._driver_status = "SUBMITTED" # Start tracking the driver status (blocking function) self._start_driver_status_tracking() if self._driver_status != "FINISHED": raise AirflowException( "ERROR : Driver {} badly exited with status {}" .format(self._driver_id, self._driver_status) )
airflow/contrib/hooks/spark_submit_hook.py
apache/airflow
SparkSubmitHook._process_spark_submit_log
def _process_spark_submit_log(self, itr): for line in itr: line = line.strip() if self._is_yarn and self._connection['deploy_mode'] == 'cluster': match = re.search('(application[0-9_]+)', line) if match: self._yarn_application_id = match.groups()[0] self.log.info("Identified spark driver id: %s", self._yarn_application_id) elif self._is_kubernetes: match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line) if match: self._kubernetes_driver_pod = match.groups()[0] self.log.info("Identified spark driver pod: %s", self._kubernetes_driver_pod) match_exit_code = re.search(r'\s*Exit code: (\d+)', line) if match_exit_code: self._spark_exit_code = int(match_exit_code.groups()[0]) elif self._should_track_driver_status and not self._driver_id: match_driver_id = re.search(r'(driver-[0-9\-]+)', line) if match_driver_id: self._driver_id = match_driver_id.groups()[0] self.log.info("identified spark driver id: {}" .format(self._driver_id)) else: self.log.info(line) self.log.debug("spark submit log: {}".format(line))
Processes the log files and extracts useful information out of them. If the deploy-mode is 'client', log the output of the submit command as those are the output logs of the Spark worker directly.
def _process_spark_submit_log(self, itr): """ Processes the log files and extracts useful information out of it. If the deploy-mode is 'client', log the output of the submit command as those are the output logs of the Spark worker directly. Remark: If the driver needs to be tracked for its status, the log-level of the spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO) :param itr: An iterator which iterates over the input of the subprocess """ # Consume the iterator for line in itr: line = line.strip() # If we run yarn cluster mode, we want to extract the application id from # the logs so we can kill the application when we stop it unexpectedly if self._is_yarn and self._connection['deploy_mode'] == 'cluster': match = re.search('(application[0-9_]+)', line) if match: self._yarn_application_id = match.groups()[0] self.log.info("Identified spark driver id: %s", self._yarn_application_id) # If we run Kubernetes cluster mode, we want to extract the driver pod id # from the logs so we can kill the application when we stop it unexpectedly elif self._is_kubernetes: match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line) if match: self._kubernetes_driver_pod = match.groups()[0] self.log.info("Identified spark driver pod: %s", self._kubernetes_driver_pod) # Store the Spark Exit code match_exit_code = re.search(r'\s*Exit code: (\d+)', line) if match_exit_code: self._spark_exit_code = int(match_exit_code.groups()[0]) # if we run in standalone cluster mode and we want to track the driver status # we need to extract the driver id from the logs. This allows us to poll for # the status using the driver id. Also, we can kill the driver when needed. elif self._should_track_driver_status and not self._driver_id: match_driver_id = re.search(r'(driver-[0-9\-]+)', line) if match_driver_id: self._driver_id = match_driver_id.groups()[0] self.log.info("identified spark driver id: {}" .format(self._driver_id)) else: self.log.info(line) self.log.debug("spark submit log: {}".format(line))
airflow/contrib/hooks/spark_submit_hook.py
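For reference, the three regular expressions above match log lines such as the ones below; the sample lines are illustrative, only the patterns are taken from the hook.
import re

yarn_line = "INFO impl.YarnClientImpl: Submitted application application_1518563507547_0001"
k8s_line = "pod name: spark-pi-1547321705628-driver"
standalone_line = "Submitted driver successfully as driver-20190125123456-0001"

print(re.search('(application[0-9_]+)', yarn_line).groups()[0])
# application_1518563507547_0001
print(re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', k8s_line).groups()[0])
# spark-pi-1547321705628-driver
print(re.search(r'(driver-[0-9\-]+)', standalone_line).groups()[0])
# driver-20190125123456-0001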
apache/airflow
SparkSubmitHook._process_spark_status_log
def _process_spark_status_log(self, itr): for line in itr: line = line.strip() if "driverState" in line: self._driver_status = line.split(' : ')[1] \ .replace(',', '').replace('\"', '').strip() self.log.debug("spark driver status log: {}".format(line))
Parses the logs of the Spark driver status query process
def _process_spark_status_log(self, itr): """ parses the logs of the spark driver status query process :param itr: An iterator which iterates over the input of the subprocess """ # Consume the iterator for line in itr: line = line.strip() # Check if the log line is about the driver status and extract the status. if "driverState" in line: self._driver_status = line.split(' : ')[1] \ .replace(',', '').replace('\"', '').strip() self.log.debug("spark driver status log: {}".format(line))
airflow/contrib/hooks/spark_submit_hook.py
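The driverState extraction splits on ' : ' and strips the trailing comma and quotes; a tiny standalone illustration with a made-up status line.
# Illustrative status line; the parsing chain is the one used in the hook.
line = '"driverState" : "RUNNING",'
status = line.split(' : ')[1].replace(',', '').replace('"', '').strip()
print(status)  # RUNNING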
apache/airflow
get_task_runner
def get_task_runner(local_task_job): if _TASK_RUNNER == "StandardTaskRunner": return StandardTaskRunner(local_task_job) elif _TASK_RUNNER == "CgroupTaskRunner": from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner return CgroupTaskRunner(local_task_job) else: raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
Get the task runner that can be used to run the given job.
def get_task_runner(local_task_job): """ Get the task runner that can be used to run the given job. :param local_task_job: The LocalTaskJob associated with the TaskInstance that needs to be executed. :type local_task_job: airflow.jobs.LocalTaskJob :return: The task runner to use to run the task. :rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner """ if _TASK_RUNNER == "StandardTaskRunner": return StandardTaskRunner(local_task_job) elif _TASK_RUNNER == "CgroupTaskRunner": from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner return CgroupTaskRunner(local_task_job) else: raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
airflow/task/task_runner/__init__.py
apache/airflow
AWSBatchOperator._wait_for_task_ended
def _wait_for_task_ended(self): try: waiter = self.client.get_waiter('job_execution_complete') waiter.config.max_attempts = sys.maxsize waiter.wait(jobs=[self.jobId]) except ValueError: retry = True retries = 0 while retries < self.max_retries and retry: self.log.info('AWS Batch retry in the next %s seconds', retries) response = self.client.describe_jobs( jobs=[self.jobId] ) if response['jobs'][-1]['status'] in ['SUCCEEDED', 'FAILED']: retry = False sleep(1 + pow(retries * 0.1, 2)) retries += 1
Try to use a waiter from the below pull request
def _wait_for_task_ended(self): """ Try to use a waiter from the below pull request * https://github.com/boto/botocore/pull/1307 If the waiter is not available apply an exponential backoff * docs.aws.amazon.com/general/latest/gr/api-retries.html """ try: waiter = self.client.get_waiter('job_execution_complete') waiter.config.max_attempts = sys.maxsize # timeout is managed by Airflow waiter.wait(jobs=[self.jobId]) except ValueError: # If the waiter is not available, fall back to exponential backoff retry = True retries = 0 while retries < self.max_retries and retry: self.log.info('AWS Batch retry in the next %s seconds', retries) response = self.client.describe_jobs( jobs=[self.jobId] ) if response['jobs'][-1]['status'] in ['SUCCEEDED', 'FAILED']: retry = False sleep(1 + pow(retries * 0.1, 2)) retries += 1
airflow/contrib/operators/awsbatch_operator.py
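When the waiter is unavailable, the fallback loop sleeps 1 + (retries * 0.1) ** 2 seconds between describe_jobs calls, so the delay grows only slowly (note that the log message prints the retry counter, not this computed delay). The schedule for the first few attempts:
# Delay schedule (in seconds) used by the fallback polling loop above.
delays = [1 + pow(retries * 0.1, 2) for retries in range(5)]
print(delays)  # approximately [1.0, 1.01, 1.04, 1.09, 1.16]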
apache/airflow
MySqlToGoogleCloudStorageOperator._query_mysql
def _query_mysql(self): mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id) conn = mysql.get_conn() cursor = conn.cursor() cursor.execute(self.sql) return cursor
Queries mysql and returns a cursor to the results.
def _query_mysql(self): """ Queries mysql and returns a cursor to the results. """ mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id) conn = mysql.get_conn() cursor = conn.cursor() cursor.execute(self.sql) return cursor
airflow/contrib/operators/mysql_to_gcs.py
apache/airflow
MySqlToGoogleCloudStorageOperator._configure_csv_file
def _configure_csv_file(self, file_handle, schema): csv_writer = csv.writer(file_handle, encoding='utf-8', delimiter=self.field_delimiter) csv_writer.writerow(schema) return csv_writer
Configure a csv writer with the file_handle and write schema as headers for the new file.
def _configure_csv_file(self, file_handle, schema): """Configure a csv writer with the file_handle and write schema as headers for the new file. """ csv_writer = csv.writer(file_handle, encoding='utf-8', delimiter=self.field_delimiter) csv_writer.writerow(schema) return csv_writer
airflow/contrib/operators/mysql_to_gcs.py
apache/airflow
MySqlToGoogleCloudStorageOperator._write_local_schema_file
def _write_local_schema_file(self, cursor): schema_str = None schema_file_mime_type = 'application/json' tmp_schema_file_handle = NamedTemporaryFile(delete=True) if self.schema is not None and isinstance(self.schema, string_types): schema_str = self.schema.encode('utf-8') elif self.schema is not None and isinstance(self.schema, list): schema_str = json.dumps(self.schema).encode('utf-8') else: schema = [] for field in cursor.description: field_name = field[0] field_type = self.type_map(field[1]) if field[6] or field_type == 'TIMESTAMP': field_mode = 'NULLABLE' else: field_mode = 'REQUIRED' schema.append({ 'name': field_name, 'type': field_type, 'mode': field_mode, }) schema_str = json.dumps(schema, sort_keys=True).encode('utf-8') tmp_schema_file_handle.write(schema_str) self.log.info('Using schema for %s: %s', self.schema_filename, schema_str) schema_file_to_upload = { 'file_name': self.schema_filename, 'file_handle': tmp_schema_file_handle, 'file_mime_type': schema_file_mime_type } return schema_file_to_upload
Takes a cursor, and writes the BigQuery schema in .json format for the results to a local file system.
def _write_local_schema_file(self, cursor): """ Takes a cursor, and writes the BigQuery schema in .json format for the results to a local file system. :return: A dictionary where key is a filename to be used as an object name in GCS, and values are file handles to local files that contains the BigQuery schema fields in .json format. """ schema_str = None schema_file_mime_type = 'application/json' tmp_schema_file_handle = NamedTemporaryFile(delete=True) if self.schema is not None and isinstance(self.schema, string_types): schema_str = self.schema.encode('utf-8') elif self.schema is not None and isinstance(self.schema, list): schema_str = json.dumps(self.schema).encode('utf-8') else: schema = [] for field in cursor.description: # See PEP 249 for details about the description tuple. field_name = field[0] field_type = self.type_map(field[1]) # Always allow TIMESTAMP to be nullable. MySQLdb returns None types # for required fields because some MySQL timestamps can't be # represented by Python's datetime (e.g. 0000-00-00 00:00:00). if field[6] or field_type == 'TIMESTAMP': field_mode = 'NULLABLE' else: field_mode = 'REQUIRED' schema.append({ 'name': field_name, 'type': field_type, 'mode': field_mode, }) schema_str = json.dumps(schema, sort_keys=True).encode('utf-8') tmp_schema_file_handle.write(schema_str) self.log.info('Using schema for %s: %s', self.schema_filename, schema_str) schema_file_to_upload = { 'file_name': self.schema_filename, 'file_handle': tmp_schema_file_handle, 'file_mime_type': schema_file_mime_type } return schema_file_to_upload
airflow/contrib/operators/mysql_to_gcs.py
apache/airflow
MySqlToGoogleCloudStorageOperator._get_col_type_dict
def _get_col_type_dict(self): schema = [] if isinstance(self.schema, string_types): schema = json.loads(self.schema) elif isinstance(self.schema, list): schema = self.schema elif self.schema is not None: self.log.warn('Using default schema due to unexpected type.' 'Should be a string or list.') col_type_dict = {} try: col_type_dict = {col['name']: col['type'] for col in schema} except KeyError: self.log.warn('Using default schema due to missing name or type. Please ' 'refer to: https://cloud.google.com/bigquery/docs/schemas' '#specifying_a_json_schema_file') return col_type_dict
Return a dict of column name and column type based on self.schema if not None.
def _get_col_type_dict(self): """ Return a dict of column name and column type based on self.schema if not None. """ schema = [] if isinstance(self.schema, string_types): schema = json.loads(self.schema) elif isinstance(self.schema, list): schema = self.schema elif self.schema is not None: self.log.warn('Using default schema due to unexpected type.' 'Should be a string or list.') col_type_dict = {} try: col_type_dict = {col['name']: col['type'] for col in schema} except KeyError: self.log.warn('Using default schema due to missing name or type. Please ' 'refer to: https://cloud.google.com/bigquery/docs/schemas' '#specifying_a_json_schema_file') return col_type_dict
airflow/contrib/operators/mysql_to_gcs.py
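A worked example of the schema-to-dict conversion above, using a BigQuery-style schema list; an entry missing 'name' or 'type' would trigger the KeyError branch and fall back to an empty mapping.
# Example schema in the BigQuery JSON schema format expected above.
schema = [
    {"name": "id", "type": "INTEGER", "mode": "REQUIRED"},
    {"name": "created_at", "type": "TIMESTAMP", "mode": "NULLABLE"},
]

col_type_dict = {col['name']: col['type'] for col in schema}
print(col_type_dict)  # {'id': 'INTEGER', 'created_at': 'TIMESTAMP'}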
apache/airflow
MySqlToGoogleCloudStorageOperator.type_map
def type_map(cls, mysql_type): d = { FIELD_TYPE.INT24: 'INTEGER', FIELD_TYPE.TINY: 'INTEGER', FIELD_TYPE.BIT: 'INTEGER', FIELD_TYPE.DATETIME: 'TIMESTAMP', FIELD_TYPE.DATE: 'TIMESTAMP', FIELD_TYPE.DECIMAL: 'FLOAT', FIELD_TYPE.NEWDECIMAL: 'FLOAT', FIELD_TYPE.DOUBLE: 'FLOAT', FIELD_TYPE.FLOAT: 'FLOAT', FIELD_TYPE.LONG: 'INTEGER', FIELD_TYPE.LONGLONG: 'INTEGER', FIELD_TYPE.SHORT: 'INTEGER', FIELD_TYPE.TIMESTAMP: 'TIMESTAMP', FIELD_TYPE.YEAR: 'INTEGER', } return d[mysql_type] if mysql_type in d else 'STRING'
Helper function that maps from MySQL fields to BigQuery fields. Used when a schema_filename is set.
def type_map(cls, mysql_type): """ Helper function that maps from MySQL fields to BigQuery fields. Used when a schema_filename is set. """ d = { FIELD_TYPE.INT24: 'INTEGER', FIELD_TYPE.TINY: 'INTEGER', FIELD_TYPE.BIT: 'INTEGER', FIELD_TYPE.DATETIME: 'TIMESTAMP', FIELD_TYPE.DATE: 'TIMESTAMP', FIELD_TYPE.DECIMAL: 'FLOAT', FIELD_TYPE.NEWDECIMAL: 'FLOAT', FIELD_TYPE.DOUBLE: 'FLOAT', FIELD_TYPE.FLOAT: 'FLOAT', FIELD_TYPE.LONG: 'INTEGER', FIELD_TYPE.LONGLONG: 'INTEGER', FIELD_TYPE.SHORT: 'INTEGER', FIELD_TYPE.TIMESTAMP: 'TIMESTAMP', FIELD_TYPE.YEAR: 'INTEGER', } return d[mysql_type] if mysql_type in d else 'STRING'
airflow/contrib/operators/mysql_to_gcs.py
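The lookup in type_map is equivalent to dict.get with 'STRING' as the default, so any MySQL type not listed falls back to a BigQuery STRING; a minimal illustration with plain integers standing in for the FIELD_TYPE constants.
# Same fallback behaviour as type_map; the keys are illustrative stand-ins.
d = {3: 'INTEGER', 12: 'TIMESTAMP'}
mysql_type = 253  # some type code not present in the map
print(d[mysql_type] if mysql_type in d else 'STRING')  # STRING
print(d.get(mysql_type, 'STRING'))                     # STRING (equivalent)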
apache/airflow
SqoopOperator.execute
def execute(self, context): self.hook = SqoopHook( conn_id=self.conn_id, verbose=self.verbose, num_mappers=self.num_mappers, hcatalog_database=self.hcatalog_database, hcatalog_table=self.hcatalog_table, properties=self.properties ) if self.cmd_type == 'export': self.hook.export_table( table=self.table, export_dir=self.export_dir, input_null_string=self.input_null_string, input_null_non_string=self.input_null_non_string, staging_table=self.staging_table, clear_staging_table=self.clear_staging_table, enclosed_by=self.enclosed_by, escaped_by=self.escaped_by, input_fields_terminated_by=self.input_fields_terminated_by, input_lines_terminated_by=self.input_lines_terminated_by, input_optionally_enclosed_by=self.input_optionally_enclosed_by, batch=self.batch, relaxed_isolation=self.relaxed_isolation, extra_export_options=self.extra_export_options) elif self.cmd_type == 'import': if self.create_hcatalog_table: self.extra_import_options['create-hcatalog-table'] = '' if self.table and self.query: raise AirflowException( 'Cannot specify query and table together. Need to specify either or.' ) if self.table: self.hook.import_table( table=self.table, target_dir=self.target_dir, append=self.append, file_type=self.file_type, columns=self.columns, split_by=self.split_by, where=self.where, direct=self.direct, driver=self.driver, extra_import_options=self.extra_import_options) elif self.query: self.hook.import_query( query=self.query, target_dir=self.target_dir, append=self.append, file_type=self.file_type, split_by=self.split_by, direct=self.direct, driver=self.driver, extra_import_options=self.extra_import_options) else: raise AirflowException( "Provide query or table parameter to import using Sqoop" ) else: raise AirflowException("cmd_type should be 'import' or 'export'")
Execute sqoop job
def execute(self, context): """ Execute sqoop job """ self.hook = SqoopHook( conn_id=self.conn_id, verbose=self.verbose, num_mappers=self.num_mappers, hcatalog_database=self.hcatalog_database, hcatalog_table=self.hcatalog_table, properties=self.properties ) if self.cmd_type == 'export': self.hook.export_table( table=self.table, export_dir=self.export_dir, input_null_string=self.input_null_string, input_null_non_string=self.input_null_non_string, staging_table=self.staging_table, clear_staging_table=self.clear_staging_table, enclosed_by=self.enclosed_by, escaped_by=self.escaped_by, input_fields_terminated_by=self.input_fields_terminated_by, input_lines_terminated_by=self.input_lines_terminated_by, input_optionally_enclosed_by=self.input_optionally_enclosed_by, batch=self.batch, relaxed_isolation=self.relaxed_isolation, extra_export_options=self.extra_export_options) elif self.cmd_type == 'import': # add create hcatalog table to extra import options if option passed # if new params are added to constructor can pass them in here # so don't modify sqoop_hook for each param if self.create_hcatalog_table: self.extra_import_options['create-hcatalog-table'] = '' if self.table and self.query: raise AirflowException( 'Cannot specify query and table together. Need to specify either or.' ) if self.table: self.hook.import_table( table=self.table, target_dir=self.target_dir, append=self.append, file_type=self.file_type, columns=self.columns, split_by=self.split_by, where=self.where, direct=self.direct, driver=self.driver, extra_import_options=self.extra_import_options) elif self.query: self.hook.import_query( query=self.query, target_dir=self.target_dir, append=self.append, file_type=self.file_type, split_by=self.split_by, direct=self.direct, driver=self.driver, extra_import_options=self.extra_import_options) else: raise AirflowException( "Provide query or table parameter to import using Sqoop" ) else: raise AirflowException("cmd_type should be 'import' or 'export'")
airflow/contrib/operators/sqoop_operator.py
apache/airflow
apply_lineage
def apply_lineage(func): backend = _get_backend() @wraps(func) def wrapper(self, context, *args, **kwargs): self.log.debug("Backend: %s, Lineage called with inlets: %s, outlets: %s", backend, self.inlets, self.outlets) ret_val = func(self, context, *args, **kwargs) outlets = [x.as_dict() for x in self.outlets] inlets = [x.as_dict() for x in self.inlets] if len(self.outlets) > 0: self.xcom_push(context, key=PIPELINE_OUTLETS, value=outlets, execution_date=context['ti'].execution_date) if len(self.inlets) > 0: self.xcom_push(context, key=PIPELINE_INLETS, value=inlets, execution_date=context['ti'].execution_date) if backend: backend.send_lineage(operator=self, inlets=self.inlets, outlets=self.outlets, context=context) return ret_val return wrapper
Saves the lineage to XCom and if configured to do so sends it to the backend.
def apply_lineage(func): """ Saves the lineage to XCom and if configured to do so sends it to the backend. """ backend = _get_backend() @wraps(func) def wrapper(self, context, *args, **kwargs): self.log.debug("Backend: %s, Lineage called with inlets: %s, outlets: %s", backend, self.inlets, self.outlets) ret_val = func(self, context, *args, **kwargs) outlets = [x.as_dict() for x in self.outlets] inlets = [x.as_dict() for x in self.inlets] if len(self.outlets) > 0: self.xcom_push(context, key=PIPELINE_OUTLETS, value=outlets, execution_date=context['ti'].execution_date) if len(self.inlets) > 0: self.xcom_push(context, key=PIPELINE_INLETS, value=inlets, execution_date=context['ti'].execution_date) if backend: backend.send_lineage(operator=self, inlets=self.inlets, outlets=self.outlets, context=context) return ret_val return wrapper
airflow/lineage/__init__.py
apache/airflow
date_range
def date_range(start_date, end_date=None, num=None, delta=None): if not delta: return [] if end_date and start_date > end_date: raise Exception("Wait. start_date needs to be before end_date") if end_date and num: raise Exception("Wait. Either specify end_date OR num") if not end_date and not num: end_date = timezone.utcnow() delta_iscron = False tz = start_date.tzinfo if isinstance(delta, six.string_types): delta_iscron = True start_date = timezone.make_naive(start_date, tz) cron = croniter(delta, start_date) elif isinstance(delta, timedelta): delta = abs(delta) dates = [] if end_date: if timezone.is_naive(start_date): end_date = timezone.make_naive(end_date, tz) while start_date <= end_date: if timezone.is_naive(start_date): dates.append(timezone.make_aware(start_date, tz)) else: dates.append(start_date) if delta_iscron: start_date = cron.get_next(datetime) else: start_date += delta else: for _ in range(abs(num)): if timezone.is_naive(start_date): dates.append(timezone.make_aware(start_date, tz)) else: dates.append(start_date) if delta_iscron: if num > 0: start_date = cron.get_next(datetime) else: start_date = cron.get_prev(datetime) else: if num > 0: start_date += delta else: start_date -= delta return sorted(dates)
Get a set of dates as a list based on a start, end and delta; delta can be something that can be added to `datetime.datetime` or a cron expression as a `str`
def date_range(start_date, end_date=None, num=None, delta=None): """ Get a set of dates as a list based on a start, end and delta; delta can be something that can be added to `datetime.datetime` or a cron expression as a `str` :Example:: date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1)) [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0), datetime.datetime(2016, 1, 3, 0, 0)] date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *') [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0), datetime.datetime(2016, 1, 3, 0, 0)] date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *") [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 2, 1, 0, 0), datetime.datetime(2016, 3, 1, 0, 0)] :param start_date: anchor date to start the series from :type start_date: datetime.datetime :param end_date: right boundary for the date range :type end_date: datetime.datetime :param num: alternatively to end_date, you can specify the number of entries you want in the range. This number can be negative; output will always be sorted regardless :type num: int :param delta: step length; can be a datetime.timedelta or a cron expression as a str :type delta: datetime.timedelta or str """ if not delta: return [] if end_date and start_date > end_date: raise Exception("Wait. start_date needs to be before end_date") if end_date and num: raise Exception("Wait. Either specify end_date OR num") if not end_date and not num: end_date = timezone.utcnow() delta_iscron = False tz = start_date.tzinfo if isinstance(delta, six.string_types): delta_iscron = True start_date = timezone.make_naive(start_date, tz) cron = croniter(delta, start_date) elif isinstance(delta, timedelta): delta = abs(delta) dates = [] if end_date: if timezone.is_naive(start_date): end_date = timezone.make_naive(end_date, tz) while start_date <= end_date: if timezone.is_naive(start_date): dates.append(timezone.make_aware(start_date, tz)) else: dates.append(start_date) if delta_iscron: start_date = cron.get_next(datetime) else: start_date += delta else: for _ in range(abs(num)): if timezone.is_naive(start_date): dates.append(timezone.make_aware(start_date, tz)) else: dates.append(start_date) if delta_iscron: if num > 0: start_date = cron.get_next(datetime) else: start_date = cron.get_prev(datetime) else: if num > 0: start_date += delta else: start_date -= delta return sorted(dates)
airflow/utils/dates.py
apache/airflow
scale_time_units
def scale_time_units(time_seconds_arr, unit): if unit == 'minutes': return list(map(lambda x: x * 1.0 / 60, time_seconds_arr)) elif unit == 'hours': return list(map(lambda x: x * 1.0 / (60 * 60), time_seconds_arr)) elif unit == 'days': return list(map(lambda x: x * 1.0 / (24 * 60 * 60), time_seconds_arr)) return time_seconds_arr
Convert an array of time durations in seconds to the specified time unit.
def scale_time_units(time_seconds_arr, unit): """ Convert an array of time durations in seconds to the specified time unit. """ if unit == 'minutes': return list(map(lambda x: x * 1.0 / 60, time_seconds_arr)) elif unit == 'hours': return list(map(lambda x: x * 1.0 / (60 * 60), time_seconds_arr)) elif unit == 'days': return list(map(lambda x: x * 1.0 / (24 * 60 * 60), time_seconds_arr)) return time_seconds_arr
airflow/utils/dates.py
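A quick example of the conversion; any unit other than 'minutes', 'hours' or 'days' returns the input unchanged, i.e. still in seconds.
from airflow.utils.dates import scale_time_units

durations = [90, 3600, 86400]  # seconds
print(scale_time_units(durations, 'minutes'))  # [1.5, 60.0, 1440.0]
print(scale_time_units(durations, 'hours'))    # [0.025, 1.0, 24.0]
print(scale_time_units(durations, 'seconds'))  # [90, 3600, 86400] (unchanged)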
apache/airflow
days_ago
def days_ago(n, hour=0, minute=0, second=0, microsecond=0): today = timezone.utcnow().replace( hour=hour, minute=minute, second=second, microsecond=microsecond) return today - timedelta(days=n)
Get a datetime object representing `n` days ago. By default the time is set to midnight.
def days_ago(n, hour=0, minute=0, second=0, microsecond=0): """ Get a datetime object representing `n` days ago. By default the time is set to midnight. """ today = timezone.utcnow().replace( hour=hour, minute=minute, second=second, microsecond=microsecond) return today - timedelta(days=n)
airflow/utils/dates.py
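days_ago is most often used to anchor a DAG's start_date at a midnight-aligned point in the past; a brief usage sketch.
from airflow.utils.dates import days_ago

# Midnight UTC seven days ago; a common choice for a DAG start_date.
default_args = {'start_date': days_ago(7)}

# With an explicit time of day.
cutoff = days_ago(1, hour=6, minute=30)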
apache/airflow
AirflowSecurityManager.init_role
def init_role(self, role_name, role_vms, role_perms): pvms = self.get_session.query(sqla_models.PermissionView).all() pvms = [p for p in pvms if p.permission and p.view_menu] role = self.find_role(role_name) if not role: role = self.add_role(role_name) if len(role.permissions) == 0: self.log.info('Initializing permissions for role:%s in the database.', role_name) role_pvms = set() for pvm in pvms: if pvm.view_menu.name in role_vms and pvm.permission.name in role_perms: role_pvms.add(pvm) role.permissions = list(role_pvms) self.get_session.merge(role) self.get_session.commit() else: self.log.debug('Existing permissions for the role:%s ' 'within the database will persist.', role_name)
Initialize the role with the permissions and related view-menus.
def init_role(self, role_name, role_vms, role_perms): """ Initialize the role with the permissions and related view-menus. :param role_name: :param role_vms: :param role_perms: :return: """ pvms = self.get_session.query(sqla_models.PermissionView).all() pvms = [p for p in pvms if p.permission and p.view_menu] role = self.find_role(role_name) if not role: role = self.add_role(role_name) if len(role.permissions) == 0: self.log.info('Initializing permissions for role:%s in the database.', role_name) role_pvms = set() for pvm in pvms: if pvm.view_menu.name in role_vms and pvm.permission.name in role_perms: role_pvms.add(pvm) role.permissions = list(role_pvms) self.get_session.merge(role) self.get_session.commit() else: self.log.debug('Existing permissions for the role:%s ' 'within the database will persist.', role_name)
airflow/www/security.py
apache/airflow
AirflowSecurityManager.delete_role
def delete_role(self, role_name): session = self.get_session role = session.query(sqla_models.Role)\ .filter(sqla_models.Role.name == role_name)\ .first() if role: self.log.info("Deleting role '%s'", role_name) session.delete(role) session.commit() else: raise AirflowException("Role named '{}' does not exist".format( role_name))
Delete the given Role
def delete_role(self, role_name): """Delete the given Role :param role_name: the name of a role in the ab_role table """ session = self.get_session role = session.query(sqla_models.Role)\ .filter(sqla_models.Role.name == role_name)\ .first() if role: self.log.info("Deleting role '%s'", role_name) session.delete(role) session.commit() else: raise AirflowException("Role named '{}' does not exist".format( role_name))
airflow/www/security.py
apache/airflow
AirflowSecurityManager.get_user_roles
def get_user_roles(self, user=None): if user is None: user = g.user if user.is_anonymous: public_role = appbuilder.config.get('AUTH_ROLE_PUBLIC') return [appbuilder.security_manager.find_role(public_role)] \ if public_role else [] return user.roles
Get all the roles associated with the user.
def get_user_roles(self, user=None): """ Get all the roles associated with the user. :param user: the ab_user in FAB model. :return: a list of roles associated with the user. """ if user is None: user = g.user if user.is_anonymous: public_role = appbuilder.config.get('AUTH_ROLE_PUBLIC') return [appbuilder.security_manager.find_role(public_role)] \ if public_role else [] return user.roles
airflow/www/security.py
apache/airflow
AirflowSecurityManager._has_role
def _has_role(self, role_name_or_list): if not isinstance(role_name_or_list, list): role_name_or_list = [role_name_or_list] return any( [r.name in role_name_or_list for r in self.get_user_roles()])
Whether the user has this role name
def _has_role(self, role_name_or_list): """ Whether the user has this role name """ if not isinstance(role_name_or_list, list): role_name_or_list = [role_name_or_list] return any( [r.name in role_name_or_list for r in self.get_user_roles()])
airflow/www/security.py
apache/airflow
AirflowSecurityManager._has_perm
def _has_perm(self, permission_name, view_menu_name): if hasattr(self, 'perms'): if (permission_name, view_menu_name) in self.perms: return True self._get_and_cache_perms() return (permission_name, view_menu_name) in self.perms
Whether the user has this perm
def _has_perm(self, permission_name, view_menu_name): """ Whether the user has this perm """ if hasattr(self, 'perms'): if (permission_name, view_menu_name) in self.perms: return True # rebuild the permissions set self._get_and_cache_perms() return (permission_name, view_menu_name) in self.perms
airflow/www/security.py
apache/airflow
AirflowSecurityManager.clean_perms
def clean_perms(self): self.log.debug('Cleaning faulty perms') sesh = self.get_session pvms = ( sesh.query(sqla_models.PermissionView) .filter(or_( sqla_models.PermissionView.permission == None, sqla_models.PermissionView.view_menu == None, )) ) deleted_count = pvms.delete() sesh.commit() if deleted_count: self.log.info('Deleted %s faulty permissions', deleted_count)
FAB leaves faulty permissions that need to be cleaned up
def clean_perms(self): """ FAB leaves faulty permissions that need to be cleaned up """ self.log.debug('Cleaning faulty perms') sesh = self.get_session pvms = ( sesh.query(sqla_models.PermissionView) .filter(or_( sqla_models.PermissionView.permission == None, # NOQA sqla_models.PermissionView.view_menu == None, # NOQA )) ) deleted_count = pvms.delete() sesh.commit() if deleted_count: self.log.info('Deleted %s faulty permissions', deleted_count)
airflow/www/security.py
apache/airflow
AirflowSecurityManager._merge_perm
def _merge_perm(self, permission_name, view_menu_name): permission = self.find_permission(permission_name) view_menu = self.find_view_menu(view_menu_name) pv = None if permission and view_menu: pv = self.get_session.query(self.permissionview_model).filter_by( permission=permission, view_menu=view_menu).first() if not pv and permission_name and view_menu_name: self.add_permission_view_menu(permission_name, view_menu_name)
Add the new (permission, view_menu) pair to ab_permission_view_role if it does not exist. It will also add the related entries to the ab_permission and ab_view_menu meta tables.
def _merge_perm(self, permission_name, view_menu_name): """ Add the new (permission, view_menu) pair to ab_permission_view_role if it does not exist. It will also add the related entries to the ab_permission and ab_view_menu meta tables. :param permission_name: Name of the permission. :type permission_name: str :param view_menu_name: Name of the view-menu :type view_menu_name: str :return: """ permission = self.find_permission(permission_name) view_menu = self.find_view_menu(view_menu_name) pv = None if permission and view_menu: pv = self.get_session.query(self.permissionview_model).filter_by( permission=permission, view_menu=view_menu).first() if not pv and permission_name and view_menu_name: self.add_permission_view_menu(permission_name, view_menu_name)
airflow/www/security.py
apache/airflow
AirflowSecurityManager.update_admin_perm_view
def update_admin_perm_view(self): pvms = self.get_session.query(sqla_models.PermissionView).all() pvms = [p for p in pvms if p.permission and p.view_menu] admin = self.find_role('Admin') admin.permissions = list(set(admin.permissions) | set(pvms)) self.get_session.commit()
Admin should have all the permission-views. Add the missing ones to the table for admin.
def update_admin_perm_view(self): """ Admin should have all the permission-views. Add the missing ones to the table for admin. :return: None. """ pvms = self.get_session.query(sqla_models.PermissionView).all() pvms = [p for p in pvms if p.permission and p.view_menu] admin = self.find_role('Admin') admin.permissions = list(set(admin.permissions) | set(pvms)) self.get_session.commit()
airflow/www/security.py
apache/airflow
AirflowSecurityManager._sync_dag_view_permissions
def _sync_dag_view_permissions(self, dag_id, access_control): def _get_or_create_dag_permission(perm_name): dag_perm = self.find_permission_view_menu(perm_name, dag_id) if not dag_perm: self.log.info( "Creating new permission '%s' on view '%s'", perm_name, dag_id ) dag_perm = self.add_permission_view_menu(perm_name, dag_id) return dag_perm def _revoke_stale_permissions(dag_view): existing_dag_perms = self.find_permissions_view_menu(dag_view) for perm in existing_dag_perms: non_admin_roles = [role for role in perm.role if role.name != 'Admin'] for role in non_admin_roles: target_perms_for_role = access_control.get(role.name, {}) if perm.permission.name not in target_perms_for_role: self.log.info( "Revoking '%s' on DAG '%s' for role '%s'", perm.permission, dag_id, role.name ) self.del_permission_role(role, perm) dag_view = self.find_view_menu(dag_id) if dag_view: _revoke_stale_permissions(dag_view) for rolename, perms in access_control.items(): role = self.find_role(rolename) if not role: raise AirflowException( "The access_control mapping for DAG '{}' includes a role " "named '{}', but that role does not exist".format( dag_id, rolename)) perms = set(perms) invalid_perms = perms - self.DAG_PERMS if invalid_perms: raise AirflowException( "The access_control map for DAG '{}' includes the following " "invalid permissions: {}; The set of valid permissions " "is: {}".format(dag_id, (perms - self.DAG_PERMS), self.DAG_PERMS)) for perm_name in perms: dag_perm = _get_or_create_dag_permission(perm_name) self.add_permission_role(role, dag_perm)
Set the access policy on the given DAG's ViewModel.
def _sync_dag_view_permissions(self, dag_id, access_control): """Set the access policy on the given DAG's ViewModel. :param dag_id: the ID of the DAG whose permissions should be updated :type dag_id: string :param access_control: a dict where each key is a rolename and each value is a set() of permission names (e.g., {'can_dag_read'} :type access_control: dict """ def _get_or_create_dag_permission(perm_name): dag_perm = self.find_permission_view_menu(perm_name, dag_id) if not dag_perm: self.log.info( "Creating new permission '%s' on view '%s'", perm_name, dag_id ) dag_perm = self.add_permission_view_menu(perm_name, dag_id) return dag_perm def _revoke_stale_permissions(dag_view): existing_dag_perms = self.find_permissions_view_menu(dag_view) for perm in existing_dag_perms: non_admin_roles = [role for role in perm.role if role.name != 'Admin'] for role in non_admin_roles: target_perms_for_role = access_control.get(role.name, {}) if perm.permission.name not in target_perms_for_role: self.log.info( "Revoking '%s' on DAG '%s' for role '%s'", perm.permission, dag_id, role.name ) self.del_permission_role(role, perm) dag_view = self.find_view_menu(dag_id) if dag_view: _revoke_stale_permissions(dag_view) for rolename, perms in access_control.items(): role = self.find_role(rolename) if not role: raise AirflowException( "The access_control mapping for DAG '{}' includes a role " "named '{}', but that role does not exist".format( dag_id, rolename)) perms = set(perms) invalid_perms = perms - self.DAG_PERMS if invalid_perms: raise AirflowException( "The access_control map for DAG '{}' includes the following " "invalid permissions: {}; The set of valid permissions " "is: {}".format(dag_id, (perms - self.DAG_PERMS), self.DAG_PERMS)) for perm_name in perms: dag_perm = _get_or_create_dag_permission(perm_name) self.add_permission_role(role, dag_perm)
airflow/www/security.py
apache/airflow
AirflowSecurityManager.create_perm_vm_for_all_dag
def create_perm_vm_for_all_dag(self): for dag_vm in self.DAG_VMS: for perm in self.DAG_PERMS: self._merge_perm(permission_name=perm, view_menu_name=dag_vm)
Create the perm-vm pairs if they do not exist and insert them into the FAB security model for all-dags.
def create_perm_vm_for_all_dag(self): """ Create the perm-vm pairs if they do not exist and insert them into the FAB security model for all-dags. """ # create perm for global logical dag for dag_vm in self.DAG_VMS: for perm in self.DAG_PERMS: self._merge_perm(permission_name=perm, view_menu_name=dag_vm)
airflow/www/security.py
apache/airflow
get_fernet
def get_fernet(): global _fernet log = LoggingMixin().log if _fernet: return _fernet try: from cryptography.fernet import Fernet, MultiFernet, InvalidToken global InvalidFernetToken InvalidFernetToken = InvalidToken except BuiltinImportError: log.warning( "cryptography not found - values will not be stored encrypted." ) _fernet = NullFernet() return _fernet try: fernet_key = configuration.conf.get('core', 'FERNET_KEY') if not fernet_key: log.warning( "empty cryptography key - values will not be stored encrypted." ) _fernet = NullFernet() else: _fernet = MultiFernet([ Fernet(fernet_part.encode('utf-8')) for fernet_part in fernet_key.split(',') ]) _fernet.is_encrypted = True except (ValueError, TypeError) as ve: raise AirflowException("Could not create Fernet object: {}".format(ve)) return _fernet
Deferred load of Fernet key. This function could fail either because Cryptography is not installed or because the Fernet key is invalid.
def get_fernet(): """ Deferred load of Fernet key. This function could fail either because Cryptography is not installed or because the Fernet key is invalid. :return: Fernet object :raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet """ global _fernet log = LoggingMixin().log if _fernet: return _fernet try: from cryptography.fernet import Fernet, MultiFernet, InvalidToken global InvalidFernetToken InvalidFernetToken = InvalidToken except BuiltinImportError: log.warning( "cryptography not found - values will not be stored encrypted." ) _fernet = NullFernet() return _fernet try: fernet_key = configuration.conf.get('core', 'FERNET_KEY') if not fernet_key: log.warning( "empty cryptography key - values will not be stored encrypted." ) _fernet = NullFernet() else: _fernet = MultiFernet([ Fernet(fernet_part.encode('utf-8')) for fernet_part in fernet_key.split(',') ]) _fernet.is_encrypted = True except (ValueError, TypeError) as ve: raise AirflowException("Could not create Fernet object: {}".format(ve)) return _fernet
airflow/models/crypto.py
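Since get_fernet reads [core] FERNET_KEY from the configuration, a key has to be generated and set beforehand; a minimal sketch of that setup step (the config placement is an assumption about the operator's environment):

from cryptography.fernet import Fernet

# Generate a key once and store it as [core] fernet_key in airflow.cfg
# (or AIRFLOW__CORE__FERNET_KEY); a comma-separated list of keys enables
# rotation via MultiFernet, matching the split(',') in the code above.
print(Fernet.generate_key().decode())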
apache/airflow
AwsGlueCatalogPartitionSensor.poke
def poke(self, context): if '.' in self.table_name: self.database_name, self.table_name = self.table_name.split('.') self.log.info( 'Poking for table %s. %s, expression %s', self.database_name, self.table_name, self.expression ) return self.get_hook().check_for_partition( self.database_name, self.table_name, self.expression)
Checks for existence of the partition in the AWS Glue Catalog table
def poke(self, context): """ Checks for existence of the partition in the AWS Glue Catalog table """ if '.' in self.table_name: self.database_name, self.table_name = self.table_name.split('.') self.log.info( 'Poking for table %s. %s, expression %s', self.database_name, self.table_name, self.expression ) return self.get_hook().check_for_partition( self.database_name, self.table_name, self.expression)
airflow/contrib/sensors/aws_glue_catalog_partition_sensor.py
apache/airflow
AwsGlueCatalogPartitionSensor.get_hook
def get_hook(self): if not hasattr(self, 'hook'): from airflow.contrib.hooks.aws_glue_catalog_hook import AwsGlueCatalogHook self.hook = AwsGlueCatalogHook( aws_conn_id=self.aws_conn_id, region_name=self.region_name) return self.hook
Gets the AwsGlueCatalogHook
def get_hook(self): """ Gets the AwsGlueCatalogHook """ if not hasattr(self, 'hook'): from airflow.contrib.hooks.aws_glue_catalog_hook import AwsGlueCatalogHook self.hook = AwsGlueCatalogHook( aws_conn_id=self.aws_conn_id, region_name=self.region_name) return self.hook
airflow/contrib/sensors/aws_glue_catalog_partition_sensor.py
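A hypothetical task definition using this sensor; the table, expression and connection id are placeholders, not taken from the source:

from airflow.contrib.sensors.aws_glue_catalog_partition_sensor import AwsGlueCatalogPartitionSensor

wait_for_partition = AwsGlueCatalogPartitionSensor(
    task_id='wait_for_partition',
    table_name='analytics.events',     # 'database.table' form is split apart in poke()
    expression="ds='{{ ds }}'",        # partition predicate handed to check_for_partition
    aws_conn_id='aws_default',
    dag=dag,                           # assumes an existing DAG object named `dag`
)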
apache/airflow
SQSSensor.poke
def poke(self, context): sqs_hook = SQSHook(aws_conn_id=self.aws_conn_id) sqs_conn = sqs_hook.get_conn() self.log.info('SQSSensor checking for message on queue: %s', self.sqs_queue) messages = sqs_conn.receive_message(QueueUrl=self.sqs_queue, MaxNumberOfMessages=self.max_messages, WaitTimeSeconds=self.wait_time_seconds) self.log.info("received message %s", str(messages)) if 'Messages' in messages and len(messages['Messages']) > 0: entries = [{'Id': message['MessageId'], 'ReceiptHandle': message['ReceiptHandle']} for message in messages['Messages']] result = sqs_conn.delete_message_batch(QueueUrl=self.sqs_queue, Entries=entries) if 'Successful' in result: context['ti'].xcom_push(key='messages', value=messages) return True else: raise AirflowException( 'Delete SQS Messages failed ' + str(result) + ' for messages ' + str(messages)) return False
Check for a message on the subscribed queue and write the message to XCom with the key ``messages``.
def poke(self, context): """ Check for a message on the subscribed queue and write the message to XCom with the key ``messages`` :param context: the context object :type context: dict :return: ``True`` if message is available or ``False`` """ sqs_hook = SQSHook(aws_conn_id=self.aws_conn_id) sqs_conn = sqs_hook.get_conn() self.log.info('SQSSensor checking for message on queue: %s', self.sqs_queue) messages = sqs_conn.receive_message(QueueUrl=self.sqs_queue, MaxNumberOfMessages=self.max_messages, WaitTimeSeconds=self.wait_time_seconds) self.log.info("received message %s", str(messages)) if 'Messages' in messages and len(messages['Messages']) > 0: entries = [{'Id': message['MessageId'], 'ReceiptHandle': message['ReceiptHandle']} for message in messages['Messages']] result = sqs_conn.delete_message_batch(QueueUrl=self.sqs_queue, Entries=entries) if 'Successful' in result: context['ti'].xcom_push(key='messages', value=messages) return True else: raise AirflowException( 'Delete SQS Messages failed ' + str(result) + ' for messages ' + str(messages)) return False
airflow/contrib/sensors/aws_sqs_sensor.py
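A sketch of wiring this sensor into a DAG; the queue URL and ids are placeholders, and the downstream XCom read shown in the comment is an assumption:

from airflow.contrib.sensors.aws_sqs_sensor import SQSSensor

wait_for_message = SQSSensor(
    task_id='wait_for_message',
    sqs_queue='https://sqs.eu-west-1.amazonaws.com/123456789012/my-queue',
    aws_conn_id='aws_default',
    max_messages=1,
    wait_time_seconds=5,
    dag=dag,                           # assumes an existing DAG object named `dag`
)
# A downstream task could then read the pushed payload via
# context['ti'].xcom_pull(task_ids='wait_for_message', key='messages')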
apache/airflow
WebHDFSHook.get_conn
def get_conn(self): connections = self.get_connections(self.webhdfs_conn_id) for connection in connections: try: self.log.debug('Trying namenode %s', connection.host) client = self._get_client(connection) client.status('/') self.log.debug('Using namenode %s for hook', connection.host) return client except HdfsError as hdfs_error: self.log.debug('Read operation on namenode %s failed with error: %s', connection.host, hdfs_error) hosts = [connection.host for connection in connections] error_message = 'Read operations failed on the namenodes below:\n{hosts}'.format( hosts='\n'.join(hosts)) raise AirflowWebHDFSHookException(error_message)
Establishes a connection depending on the security mode set via config or environment variable.
def get_conn(self): """ Establishes a connection depending on the security mode set via config or environment variable. :return: a hdfscli InsecureClient or KerberosClient object. :rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient """ connections = self.get_connections(self.webhdfs_conn_id) for connection in connections: try: self.log.debug('Trying namenode %s', connection.host) client = self._get_client(connection) client.status('/') self.log.debug('Using namenode %s for hook', connection.host) return client except HdfsError as hdfs_error: self.log.debug('Read operation on namenode %s failed with error: %s', connection.host, hdfs_error) hosts = [connection.host for connection in connections] error_message = 'Read operations failed on the namenodes below:\n{hosts}'.format( hosts='\n'.join(hosts)) raise AirflowWebHDFSHookException(error_message)
airflow/hooks/webhdfs_hook.py
apache/airflow
WebHDFSHook.check_for_path
def check_for_path(self, hdfs_path): conn = self.get_conn() status = conn.status(hdfs_path, strict=False) return bool(status)
Check for the existence of a path in HDFS by querying FileStatus.
def check_for_path(self, hdfs_path): """ Check for the existence of a path in HDFS by querying FileStatus. :param hdfs_path: The path to check. :type hdfs_path: str :return: True if the path exists and False if not. :rtype: bool """ conn = self.get_conn() status = conn.status(hdfs_path, strict=False) return bool(status)
airflow/hooks/webhdfs_hook.py
apache/airflow
WebHDFSHook.load_file
def load_file(self, source, destination, overwrite=True, parallelism=1, **kwargs): conn = self.get_conn() conn.upload(hdfs_path=destination, local_path=source, overwrite=overwrite, n_threads=parallelism, **kwargs) self.log.debug("Uploaded file %s to %s", source, destination)
r""" Uploads a file to HDFS.
def load_file(self, source, destination, overwrite=True, parallelism=1, **kwargs): r""" Uploads a file to HDFS. :param source: Local path to file or folder. If it's a folder, all the files inside of it will be uploaded. .. note:: This implies that folders empty of files will not be created remotely. :type source: str :param destination: Target HDFS path. If it already exists and is a directory, files will be uploaded inside. :type destination: str :param overwrite: Overwrite any existing file or directory. :type overwrite: bool :param parallelism: Number of threads to use for parallelization. A value of `0` (or negative) uses as many threads as there are files. :type parallelism: int :param \**kwargs: Keyword arguments forwarded to :meth:`hdfs.client.Client.upload`. """ conn = self.get_conn() conn.upload(hdfs_path=destination, local_path=source, overwrite=overwrite, n_threads=parallelism, **kwargs) self.log.debug("Uploaded file %s to %s", source, destination)
airflow/hooks/webhdfs_hook.py
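A combined usage sketch for the WebHDFS hook methods above; the connection id and the paths are placeholders:

from airflow.hooks.webhdfs_hook import WebHDFSHook

hook = WebHDFSHook(webhdfs_conn_id='webhdfs_default')
if not hook.check_for_path('/data/raw/events'):
    # Upload a local file; overwrite and parallelism mirror the load_file defaults
    hook.load_file(source='/tmp/events.csv',
                   destination='/data/raw/events/events.csv',
                   overwrite=True,
                   parallelism=1)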
apache/airflow
PinotDbApiHook.get_conn
def get_conn(self): conn = self.get_connection(self.pinot_broker_conn_id) pinot_broker_conn = connect( host=conn.host, port=conn.port, path=conn.extra_dejson.get('endpoint', '/pql'), scheme=conn.extra_dejson.get('schema', 'http') ) self.log.info('Get the connection to pinot ' 'broker on {host}'.format(host=conn.host)) return pinot_broker_conn
Establish a connection to the pinot broker through the pinot dbapi.
def get_conn(self): """ Establish a connection to the pinot broker through the pinot dbapi. """ conn = self.get_connection(self.pinot_broker_conn_id) pinot_broker_conn = connect( host=conn.host, port=conn.port, path=conn.extra_dejson.get('endpoint', '/pql'), scheme=conn.extra_dejson.get('schema', 'http') ) self.log.info('Get the connection to pinot ' 'broker on {host}'.format(host=conn.host)) return pinot_broker_conn
airflow/contrib/hooks/pinot_hook.py
apache/airflow
PinotDbApiHook.get_uri
def get_uri(self): conn = self.get_connection(getattr(self, self.conn_name_attr)) host = conn.host if conn.port is not None: host += ':{port}'.format(port=conn.port) conn_type = 'http' if not conn.conn_type else conn.conn_type endpoint = conn.extra_dejson.get('endpoint', 'pql') return '{conn_type}://{host}/{endpoint}'.format( conn_type=conn_type, host=host, endpoint=endpoint)
Get the connection URI for the pinot broker, e.g. http://localhost:9000/pql
def get_uri(self): """ Get the connection uri for pinot broker. e.g: http://localhost:9000/pql """ conn = self.get_connection(getattr(self, self.conn_name_attr)) host = conn.host if conn.port is not None: host += ':{port}'.format(port=conn.port) conn_type = 'http' if not conn.conn_type else conn.conn_type endpoint = conn.extra_dejson.get('endpoint', 'pql') return '{conn_type}://{host}/{endpoint}'.format( conn_type=conn_type, host=host, endpoint=endpoint)
airflow/contrib/hooks/pinot_hook.py
apache/airflow
TransferJobPreprocessor._convert_date_to_dict
def _convert_date_to_dict(field_date): return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year}
Convert native python ``datetime.date`` object to a format supported by the API
def _convert_date_to_dict(field_date): """ Convert native python ``datetime.date`` object to a format supported by the API """ return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year}
airflow/contrib/operators/gcp_transfer_operator.py
apache/airflow
TransferJobPreprocessor._convert_time_to_dict
def _convert_time_to_dict(time): return {HOURS: time.hour, MINUTES: time.minute, SECONDS: time.second}
Convert native python ``datetime.time`` object to a format supported by the API
def _convert_time_to_dict(time): """ Convert native python ``datetime.time`` object to a format supported by the API """ return {HOURS: time.hour, MINUTES: time.minute, SECONDS: time.second}
airflow/contrib/operators/gcp_transfer_operator.py
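A quick illustration of the two converters above, calling the private helpers directly only to show the output shape; the lowercase field names in the comments assume DAY/MONTH/YEAR and HOURS/MINUTES/SECONDS resolve to 'day', 'month', ... which is not spelled out in this excerpt:

from datetime import date, time
from airflow.contrib.operators.gcp_transfer_operator import TransferJobPreprocessor

print(TransferJobPreprocessor._convert_date_to_dict(date(2019, 5, 1)))
# expected shape: {'day': 1, 'month': 5, 'year': 2019}
print(TransferJobPreprocessor._convert_time_to_dict(time(13, 30, 0)))
# expected shape: {'hours': 13, 'minutes': 30, 'seconds': 0}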
apache/airflow
DbApiHook.get_pandas_df
def get_pandas_df(self, sql, parameters=None): import pandas.io.sql as psql with closing(self.get_conn()) as conn: return psql.read_sql(sql, con=conn, params=parameters)
Executes the sql and returns a pandas dataframe
def get_pandas_df(self, sql, parameters=None): """ Executes the sql and returns a pandas dataframe :param sql: the sql statement to be executed (str) or a list of sql statements to execute :type sql: str or list :param parameters: The parameters to render the SQL query with. :type parameters: mapping or iterable """ import pandas.io.sql as psql with closing(self.get_conn()) as conn: return psql.read_sql(sql, con=conn, params=parameters)
airflow/hooks/dbapi_hook.py
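A hypothetical call through a DbApiHook subclass (PostgresHook is used only as an example; the connection id and query are placeholders):

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='analytics_db')   # placeholder connection id
df = hook.get_pandas_df(
    sql="SELECT id, created_at FROM events WHERE created_at >= %(since)s",
    parameters={'since': '2019-01-01'},
)
print(df.head())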
apache/airflow
DbApiHook.run
def run(self, sql, autocommit=False, parameters=None): if isinstance(sql, basestring): sql = [sql] with closing(self.get_conn()) as conn: if self.supports_autocommit: self.set_autocommit(conn, autocommit) with closing(conn.cursor()) as cur: for s in sql: if parameters is not None: self.log.info("{} with parameters {}".format(s, parameters)) cur.execute(s, parameters) else: self.log.info(s) cur.execute(s) if not self.get_autocommit(conn): conn.commit()
Runs a command or a list of commands. Pass a list of sql statements to the sql parameter to get them to execute sequentially
def run(self, sql, autocommit=False, parameters=None): """ Runs a command or a list of commands. Pass a list of sql statements to the sql parameter to get them to execute sequentially :param sql: the sql statement to be executed (str) or a list of sql statements to execute :type sql: str or list :param autocommit: What to set the connection's autocommit setting to before executing the query. :type autocommit: bool :param parameters: The parameters to render the SQL query with. :type parameters: mapping or iterable """ if isinstance(sql, basestring): sql = [sql] with closing(self.get_conn()) as conn: if self.supports_autocommit: self.set_autocommit(conn, autocommit) with closing(conn.cursor()) as cur: for s in sql: if parameters is not None: self.log.info("{} with parameters {}".format(s, parameters)) cur.execute(s, parameters) else: self.log.info(s) cur.execute(s) # If autocommit was set to False for db that supports autocommit, # or if db does not supports autocommit, we do a manual commit. if not self.get_autocommit(conn): conn.commit()
airflow/hooks/dbapi_hook.py
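A sketch of running several statements in one call, reusing the same hypothetical PostgresHook; statements and connection id are illustrative:

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='analytics_db')
hook.run(
    sql=[
        "CREATE TABLE IF NOT EXISTS staging_users (id INT, name TEXT)",
        "TRUNCATE TABLE staging_users",
    ],
    autocommit=True,
)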
apache/airflow
DbApiHook.set_autocommit
def set_autocommit(self, conn, autocommit): if not self.supports_autocommit and autocommit: self.log.warn( ("%s connection doesn't support " "autocommit but autocommit activated."), getattr(self, self.conn_name_attr)) conn.autocommit = autocommit
Sets the autocommit flag on the connection
def set_autocommit(self, conn, autocommit): """ Sets the autocommit flag on the connection """ if not self.supports_autocommit and autocommit: self.log.warn( ("%s connection doesn't support " "autocommit but autocommit activated."), getattr(self, self.conn_name_attr)) conn.autocommit = autocommit
airflow/hooks/dbapi_hook.py
apache/airflow
DbApiHook.insert_rows
def insert_rows(self, table, rows, target_fields=None, commit_every=1000, replace=False): if target_fields: target_fields = ", ".join(target_fields) target_fields = "({})".format(target_fields) else: target_fields = '' i = 0 with closing(self.get_conn()) as conn: if self.supports_autocommit: self.set_autocommit(conn, False) conn.commit() with closing(conn.cursor()) as cur: for i, row in enumerate(rows, 1): lst = [] for cell in row: lst.append(self._serialize_cell(cell, conn)) values = tuple(lst) placeholders = ["%s", ] * len(values) if not replace: sql = "INSERT INTO " else: sql = "REPLACE INTO " sql += "{0} {1} VALUES ({2})".format( table, target_fields, ",".join(placeholders)) cur.execute(sql, values) if commit_every and i % commit_every == 0: conn.commit() self.log.info( "Loaded %s into %s rows so far", i, table ) conn.commit() self.log.info("Done loading. Loaded a total of %s rows", i)
A generic way to insert a set of tuples into a table; a new transaction is created every commit_every rows.
def insert_rows(self, table, rows, target_fields=None, commit_every=1000, replace=False): """ A generic way to insert a set of tuples into a table, a new transaction is created every commit_every rows :param table: Name of the target table :type table: str :param rows: The rows to insert into the table :type rows: iterable of tuples :param target_fields: The names of the columns to fill in the table :type target_fields: iterable of strings :param commit_every: The maximum number of rows to insert in one transaction. Set to 0 to insert all rows in one transaction. :type commit_every: int :param replace: Whether to replace instead of insert :type replace: bool """ if target_fields: target_fields = ", ".join(target_fields) target_fields = "({})".format(target_fields) else: target_fields = '' i = 0 with closing(self.get_conn()) as conn: if self.supports_autocommit: self.set_autocommit(conn, False) conn.commit() with closing(conn.cursor()) as cur: for i, row in enumerate(rows, 1): lst = [] for cell in row: lst.append(self._serialize_cell(cell, conn)) values = tuple(lst) placeholders = ["%s", ] * len(values) if not replace: sql = "INSERT INTO " else: sql = "REPLACE INTO " sql += "{0} {1} VALUES ({2})".format( table, target_fields, ",".join(placeholders)) cur.execute(sql, values) if commit_every and i % commit_every == 0: conn.commit() self.log.info( "Loaded %s into %s rows so far", i, table ) conn.commit() self.log.info("Done loading. Loaded a total of %s rows", i)
airflow/hooks/dbapi_hook.py
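And a sketch for insert_rows with illustrative rows; note the method opens its own connection and commits every commit_every rows:

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='analytics_db')   # placeholder connection id
hook.insert_rows(
    table='staging_users',                             # placeholder table
    rows=[(1, 'alice'), (2, 'bob')],
    target_fields=['id', 'name'],
    commit_every=1000,
)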
apache/airflow
Airflow.health
def health(self, session=None): BJ = jobs.BaseJob payload = {} scheduler_health_check_threshold = timedelta(seconds=conf.getint('scheduler', 'scheduler_health_check_threshold' )) latest_scheduler_heartbeat = None payload['metadatabase'] = {'status': 'healthy'} try: latest_scheduler_heartbeat = session.query(func.max(BJ.latest_heartbeat)).\ filter(BJ.state == 'running', BJ.job_type == 'SchedulerJob').\ scalar() except Exception: payload['metadatabase']['status'] = 'unhealthy' if not latest_scheduler_heartbeat: scheduler_status = 'unhealthy' else: if timezone.utcnow() - latest_scheduler_heartbeat <= scheduler_health_check_threshold: scheduler_status = 'healthy' else: scheduler_status = 'unhealthy' payload['scheduler'] = {'status': scheduler_status, 'latest_scheduler_heartbeat': str(latest_scheduler_heartbeat)} return wwwutils.json_response(payload)
An endpoint helping check the health status of the Airflow instance, including metadatabase and scheduler.
def health(self, session=None): """ An endpoint helping check the health status of the Airflow instance, including metadatabase and scheduler. """ BJ = jobs.BaseJob payload = {} scheduler_health_check_threshold = timedelta(seconds=conf.getint('scheduler', 'scheduler_health_check_threshold' )) latest_scheduler_heartbeat = None payload['metadatabase'] = {'status': 'healthy'} try: latest_scheduler_heartbeat = session.query(func.max(BJ.latest_heartbeat)).\ filter(BJ.state == 'running', BJ.job_type == 'SchedulerJob').\ scalar() except Exception: payload['metadatabase']['status'] = 'unhealthy' if not latest_scheduler_heartbeat: scheduler_status = 'unhealthy' else: if timezone.utcnow() - latest_scheduler_heartbeat <= scheduler_health_check_threshold: scheduler_status = 'healthy' else: scheduler_status = 'unhealthy' payload['scheduler'] = {'status': scheduler_status, 'latest_scheduler_heartbeat': str(latest_scheduler_heartbeat)} return wwwutils.json_response(payload)
airflow/www/views.py
apache/airflow
Airflow.extra_links
def extra_links(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') execution_date = request.args.get('execution_date') link_name = request.args.get('link_name') dttm = airflow.utils.timezone.parse(execution_date) dag = dagbag.get_dag(dag_id) if not dag or task_id not in dag.task_ids: response = jsonify( {'url': None, 'error': "can't find dag {dag} or task_id {task_id}".format( dag=dag, task_id=task_id )} ) response.status_code = 404 return response task = dag.get_task(task_id) try: url = task.get_extra_links(dttm, link_name) except ValueError as err: response = jsonify({'url': None, 'error': str(err)}) response.status_code = 404 return response if url: response = jsonify({'error': None, 'url': url}) response.status_code = 200 return response else: response = jsonify( {'url': None, 'error': 'No URL found for {dest}'.format(dest=link_name)}) response.status_code = 404 return response
A RESTful endpoint that returns external links for a given operator. It queries the operator that sent the request for the links it wishes to provide for a given external link name.
def extra_links(self): """ A restful endpoint that returns external links for a given Operator It queries the operator that sent the request for the links it wishes to provide for a given external link name. API: GET Args: dag_id: The id of the dag containing the task in question task_id: The id of the task in question execution_date: The date of execution of the task link_name: The name of the link reference to find the actual URL for Returns: 200: {url: <url of link>, error: None} - returned when there was no problem finding the URL 404: {url: None, error: <error message>} - returned when the operator does not return a URL """ dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') execution_date = request.args.get('execution_date') link_name = request.args.get('link_name') dttm = airflow.utils.timezone.parse(execution_date) dag = dagbag.get_dag(dag_id) if not dag or task_id not in dag.task_ids: response = jsonify( {'url': None, 'error': "can't find dag {dag} or task_id {task_id}".format( dag=dag, task_id=task_id )} ) response.status_code = 404 return response task = dag.get_task(task_id) try: url = task.get_extra_links(dttm, link_name) except ValueError as err: response = jsonify({'url': None, 'error': str(err)}) response.status_code = 404 return response if url: response = jsonify({'error': None, 'url': url}) response.status_code = 200 return response else: response = jsonify( {'url': None, 'error': 'No URL found for {dest}'.format(dest=link_name)}) response.status_code = 404 return response
airflow/www/views.py
apache/airflow
CloudantHook.get_conn
def get_conn(self): conn = self.get_connection(self.cloudant_conn_id) self._validate_connection(conn) cloudant_session = cloudant(user=conn.login, passwd=conn.password, account=conn.host) return cloudant_session
Opens a connection to the cloudant service and closes it automatically if used as context manager.
def get_conn(self): """ Opens a connection to the cloudant service and closes it automatically if used as context manager. .. note:: In the connection form: - 'host' equals the 'Account' (optional) - 'login' equals the 'Username (or API Key)' (required) - 'password' equals the 'Password' (required) :return: an authorized cloudant session context manager object. :rtype: cloudant """ conn = self.get_connection(self.cloudant_conn_id) self._validate_connection(conn) cloudant_session = cloudant(user=conn.login, passwd=conn.password, account=conn.host) return cloudant_session
airflow/contrib/hooks/cloudant_hook.py
apache/airflow
SlackWebhookOperator.execute
def execute(self, context): self.hook = SlackWebhookHook( self.http_conn_id, self.webhook_token, self.message, self.attachments, self.channel, self.username, self.icon_emoji, self.link_names, self.proxy ) self.hook.execute()
Call the SlackWebhookHook to post the provided Slack message
def execute(self, context): """ Call the SlackWebhookHook to post the provided Slack message """ self.hook = SlackWebhookHook( self.http_conn_id, self.webhook_token, self.message, self.attachments, self.channel, self.username, self.icon_emoji, self.link_names, self.proxy ) self.hook.execute()
airflow/contrib/operators/slack_webhook_operator.py
apache/airflow
GoogleCloudBaseHook.catch_http_exception
def catch_http_exception(func): @functools.wraps(func) def wrapper_decorator(self, *args, **kwargs): try: return func(self, *args, **kwargs) except GoogleAPICallError as e: if isinstance(e, AlreadyExists): raise e else: self.log.error('The request failed:\n%s', str(e)) raise AirflowException(e) except RetryError as e: self.log.error('The request failed due to a retryable error and retry attempts failed.') raise AirflowException(e) except ValueError as e: self.log.error('The request failed, the parameters are invalid.') raise AirflowException(e) except HttpError as e: self.log.error('The request failed:\n%s', str(e)) raise AirflowException(e) return wrapper_decorator
Function decorator that intercepts HTTP errors and raises AirflowException with a more informative message.
def catch_http_exception(func): """ Function decorator that intercepts HTTP Errors and raises AirflowException with more informative message. """ @functools.wraps(func) def wrapper_decorator(self, *args, **kwargs): try: return func(self, *args, **kwargs) except GoogleAPICallError as e: if isinstance(e, AlreadyExists): raise e else: self.log.error('The request failed:\n%s', str(e)) raise AirflowException(e) except RetryError as e: self.log.error('The request failed due to a retryable error and retry attempts failed.') raise AirflowException(e) except ValueError as e: self.log.error('The request failed, the parameters are invalid.') raise AirflowException(e) except HttpError as e: self.log.error('The request failed:\n%s', str(e)) raise AirflowException(e) return wrapper_decorator
airflow/contrib/hooks/gcp_api_base_hook.py
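A sketch of how such a decorator is typically applied inside a hook subclass, assuming catch_http_exception is exposed as a static attribute of GoogleCloudBaseHook as the contrib hooks use it; the subclass and its helper method below are hypothetical:

from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook


class MyGcpHook(GoogleCloudBaseHook):                  # hypothetical subclass for illustration
    @GoogleCloudBaseHook.catch_http_exception
    def describe_resource(self, name):
        # any GoogleAPICallError / RetryError / HttpError raised in here is
        # logged and re-raised as AirflowException by the decorator
        return self._call_backend(name)                # hypothetical helper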
apache/airflow
State.unfinished
def unfinished(cls): return [ cls.NONE, cls.SCHEDULED, cls.QUEUED, cls.RUNNING, cls.SHUTDOWN, cls.UP_FOR_RETRY, cls.UP_FOR_RESCHEDULE ]
A list of states indicating that a task either has not completed a run or has not even started.
def unfinished(cls): """ A list of states indicating that a task either has not completed a run or has not even started. """ return [ cls.NONE, cls.SCHEDULED, cls.QUEUED, cls.RUNNING, cls.SHUTDOWN, cls.UP_FOR_RETRY, cls.UP_FOR_RESCHEDULE ]
airflow/utils/state.py
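A small sketch of the typical filter built on top of this helper; the query and dag id are illustrative, not from the source:

from airflow import settings
from airflow.models import TaskInstance
from airflow.utils.state import State

session = settings.Session()
unfinished = (session.query(TaskInstance)
              .filter(TaskInstance.dag_id == 'example_dag',        # placeholder dag id
                      TaskInstance.state.in_(State.unfinished()))
              .count())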
apache/airflow
SparkSqlHook._prepare_command
def _prepare_command(self, cmd): connection_cmd = ["spark-sql"] if self._conf: for conf_el in self._conf.split(","): connection_cmd += ["--conf", conf_el] if self._total_executor_cores: connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)] if self._executor_cores: connection_cmd += ["--executor-cores", str(self._executor_cores)] if self._executor_memory: connection_cmd += ["--executor-memory", self._executor_memory] if self._keytab: connection_cmd += ["--keytab", self._keytab] if self._principal: connection_cmd += ["--principal", self._principal] if self._num_executors: connection_cmd += ["--num-executors", str(self._num_executors)] if self._sql: sql = self._sql.strip() if sql.endswith(".sql") or sql.endswith(".hql"): connection_cmd += ["-f", sql] else: connection_cmd += ["-e", sql] if self._master: connection_cmd += ["--master", self._master] if self._name: connection_cmd += ["--name", self._name] if self._verbose: connection_cmd += ["--verbose"] if self._yarn_queue: connection_cmd += ["--queue", self._yarn_queue] connection_cmd += cmd self.log.debug("Spark-Sql cmd: %s", connection_cmd) return connection_cmd
Construct the spark-sql command to execute. Verbose output is enabled by default.
def _prepare_command(self, cmd): """ Construct the spark-sql command to execute. Verbose output is enabled as default. :param cmd: command to append to the spark-sql command :type cmd: str :return: full command to be executed """ connection_cmd = ["spark-sql"] if self._conf: for conf_el in self._conf.split(","): connection_cmd += ["--conf", conf_el] if self._total_executor_cores: connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)] if self._executor_cores: connection_cmd += ["--executor-cores", str(self._executor_cores)] if self._executor_memory: connection_cmd += ["--executor-memory", self._executor_memory] if self._keytab: connection_cmd += ["--keytab", self._keytab] if self._principal: connection_cmd += ["--principal", self._principal] if self._num_executors: connection_cmd += ["--num-executors", str(self._num_executors)] if self._sql: sql = self._sql.strip() if sql.endswith(".sql") or sql.endswith(".hql"): connection_cmd += ["-f", sql] else: connection_cmd += ["-e", sql] if self._master: connection_cmd += ["--master", self._master] if self._name: connection_cmd += ["--name", self._name] if self._verbose: connection_cmd += ["--verbose"] if self._yarn_queue: connection_cmd += ["--queue", self._yarn_queue] connection_cmd += cmd self.log.debug("Spark-Sql cmd: %s", connection_cmd) return connection_cmd
airflow/contrib/hooks/spark_sql_hook.py
Azure/azure-sdk-for-python
Message.schedule
def schedule(self, schedule_time): if not self.properties.message_id: self.properties.message_id = str(uuid.uuid4()) if not self.message.annotations: self.message.annotations = {} self.message.annotations[types.AMQPSymbol(self._x_OPT_SCHEDULED_ENQUEUE_TIME)] = schedule_time
Add a specific enqueue time to the message.
def schedule(self, schedule_time): """Add a specific enqueue time to the message. :param schedule_time: The scheduled time to enqueue the message. :type schedule_time: ~datetime.datetime """ if not self.properties.message_id: self.properties.message_id = str(uuid.uuid4()) if not self.message.annotations: self.message.annotations = {} self.message.annotations[types.AMQPSymbol(self._x_OPT_SCHEDULED_ENQUEUE_TIME)] = schedule_time
azure-servicebus/azure/servicebus/common/message.py
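A hedged usage sketch for scheduling a Service Bus message; the surrounding client handling (queue_client) is assumed and therefore only shown in a comment:

import datetime
from azure.servicebus.common.message import Message   # import path taken from the excerpt

message = Message("time-delayed payload")
# enqueue roughly five minutes from now (UTC)
message.schedule(datetime.datetime.utcnow() + datetime.timedelta(minutes=5))
# a previously created queue client (not shown here) would then send it, e.g.:
# queue_client.send(message)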
Azure/azure-sdk-for-python
VpnSitesConfigurationOperations.download
def download( self, resource_group_name, virtual_wan_name, vpn_sites=None, output_blob_sas_url=None, custom_headers=None, raw=False, polling=True, **operation_config): raw_result = self._download_initial( resource_group_name=resource_group_name, virtual_wan_name=virtual_wan_name, vpn_sites=vpn_sites, output_blob_sas_url=output_blob_sas_url, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
Gives the sas-url to download the configurations for vpn-sites in a resource group.
def download( self, resource_group_name, virtual_wan_name, vpn_sites=None, output_blob_sas_url=None, custom_headers=None, raw=False, polling=True, **operation_config): """Gives the sas-url to download the configurations for vpn-sites in a resource group. :param resource_group_name: The resource group name. :type resource_group_name: str :param virtual_wan_name: The name of the VirtualWAN for which configuration of all vpn-sites is needed. :type virtual_wan_name: str :param vpn_sites: List of resource-ids of the vpn-sites for which config is to be downloaded. :type vpn_sites: list[~azure.mgmt.network.v2018_04_01.models.SubResource] :param output_blob_sas_url: The sas-url to download the configurations for vpn-sites :type output_blob_sas_url: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`ErrorException<azure.mgmt.network.v2018_04_01.models.ErrorException>` """ raw_result = self._download_initial( resource_group_name=resource_group_name, virtual_wan_name=virtual_wan_name, vpn_sites=vpn_sites, output_blob_sas_url=output_blob_sas_url, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/vpn_sites_configuration_operations.py
Azure/azure-sdk-for-python
guess_service_info_from_path
def guess_service_info_from_path(spec_path): spec_path = spec_path.lower() spec_path = spec_path[spec_path.index("specification"):] split_spec_path = spec_path.split("/") rp_name = split_spec_path[1] is_arm = split_spec_path[2] == "resource-manager" return { "rp_name": rp_name, "is_arm": is_arm }
Guess Python Autorest options based on the spec path. Expected path: specification/compute/resource-manager/readme.md
def guess_service_info_from_path(spec_path): """Guess Python Autorest options based on the spec path. Expected path: specification/compute/resource-manager/readme.md """ spec_path = spec_path.lower() spec_path = spec_path[spec_path.index("specification"):] # Might raise and it's ok split_spec_path = spec_path.split("/") rp_name = split_spec_path[1] is_arm = split_spec_path[2] == "resource-manager" return { "rp_name": rp_name, "is_arm": is_arm }
scripts/build_sdk.py
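A quick check of the behavior documented above, assuming the function is in scope (it lives in scripts/build_sdk.py); the results follow directly from the splitting logic:

info = guess_service_info_from_path(
    "specification/compute/resource-manager/readme.md")
assert info == {"rp_name": "compute", "is_arm": True}

info = guess_service_info_from_path(
    "azure-rest-api-specs/specification/keyvault/data-plane/readme.md")
assert info == {"rp_name": "keyvault", "is_arm": False}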
Azure/azure-sdk-for-python
PowerShellOperations.update_command
def update_command( self, resource_group_name, node_name, session, pssession, custom_headers=None, raw=False, polling=True, **operation_config): raw_result = self._update_command_initial( resource_group_name=resource_group_name, node_name=node_name, session=session, pssession=pssession, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('PowerShellCommandResults', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
Updates a running PowerShell command with more data.
def update_command( self, resource_group_name, node_name, session, pssession, custom_headers=None, raw=False, polling=True, **operation_config): """Updates a running PowerShell command with more data. :param resource_group_name: The resource group name uniquely identifies the resource group within the user subscriptionId. :type resource_group_name: str :param node_name: The node name (256 characters maximum). :type node_name: str :param session: The sessionId from the user. :type session: str :param pssession: The PowerShell sessionId from the user. :type pssession: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns PowerShellCommandResults or ClientRawResponse<PowerShellCommandResults> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.servermanager.models.PowerShellCommandResults] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.servermanager.models.PowerShellCommandResults]] :raises: :class:`ErrorException<azure.mgmt.servermanager.models.ErrorException>` """ raw_result = self._update_command_initial( resource_group_name=resource_group_name, node_name=node_name, session=session, pssession=pssession, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('PowerShellCommandResults', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
azure-mgmt-servermanager/azure/mgmt/servermanager/operations/power_shell_operations.py
Azure/azure-sdk-for-python
ApplicationDefinitionsOperations.delete_by_id
def delete_by_id( self, application_definition_id, custom_headers=None, raw=False, polling=True, **operation_config): raw_result = self._delete_by_id_initial( application_definition_id=application_definition_id, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
Deletes the managed application definition.
def delete_by_id( self, application_definition_id, custom_headers=None, raw=False, polling=True, **operation_config): """Deletes the managed application definition. :param application_definition_id: The fully qualified ID of the managed application definition, including the managed application name and the managed application definition resource type. Use the format, /subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name} :type application_definition_id: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`ErrorResponseException<azure.mgmt.resource.managedapplications.models.ErrorResponseException>` """ raw_result = self._delete_by_id_initial( application_definition_id=application_definition_id, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
azure-mgmt-resource/azure/mgmt/resource/managedapplications/operations/application_definitions_operations.py
Azure/azure-sdk-for-python
ApplicationDefinitionsOperations.create_or_update_by_id
def create_or_update_by_id( self, application_definition_id, parameters, custom_headers=None, raw=False, polling=True, **operation_config): raw_result = self._create_or_update_by_id_initial( application_definition_id=application_definition_id, parameters=parameters, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('ApplicationDefinition', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
Creates a new managed application definition.
def create_or_update_by_id( self, application_definition_id, parameters, custom_headers=None, raw=False, polling=True, **operation_config): """Creates a new managed application definition. :param application_definition_id: The fully qualified ID of the managed application definition, including the managed application name and the managed application definition resource type. Use the format, /subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.Solutions/applicationDefinitions/{applicationDefinition-name} :type application_definition_id: str :param parameters: Parameters supplied to the create or update a managed application definition. :type parameters: ~azure.mgmt.resource.managedapplications.models.ApplicationDefinition :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns ApplicationDefinition or ClientRawResponse<ApplicationDefinition> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.resource.managedapplications.models.ApplicationDefinition] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.resource.managedapplications.models.ApplicationDefinition]] :raises: :class:`ErrorResponseException<azure.mgmt.resource.managedapplications.models.ErrorResponseException>` """ raw_result = self._create_or_update_by_id_initial( application_definition_id=application_definition_id, parameters=parameters, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('ApplicationDefinition', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
azure-mgmt-resource/azure/mgmt/resource/managedapplications/operations/application_definitions_operations.py
Azure/azure-sdk-for-python
_HTTPClient.get_uri
def get_uri(self, request): protocol = request.protocol_override \ if request.protocol_override else self.protocol protocol = protocol.lower() port = HTTP_PORT if protocol == 'http' else HTTPS_PORT return protocol + '://' + request.host + ':' + str(port) + request.path
Return the target uri for the request.
def get_uri(self, request): ''' Return the target uri for the request.''' protocol = request.protocol_override \ if request.protocol_override else self.protocol protocol = protocol.lower() port = HTTP_PORT if protocol == 'http' else HTTPS_PORT return protocol + '://' + request.host + ':' + str(port) + request.path
azure-servicebus/azure/servicebus/control_client/_http/httpclient.py
Azure/azure-sdk-for-python
_HTTPClient.get_connection
def get_connection(self, request): protocol = request.protocol_override \ if request.protocol_override else self.protocol protocol = protocol.lower() target_host = request.host connection = _RequestsConnection( target_host, protocol, self.request_session, self.timeout) proxy_host = self.proxy_host proxy_port = self.proxy_port if self.proxy_host: headers = None if self.proxy_user and self.proxy_password: auth = base64.b64encode("{0}:{1}".format(self.proxy_user, self.proxy_password).encode()) headers = {'Proxy-Authorization': 'Basic {0}'.format(auth.decode())} connection.set_tunnel(proxy_host, int(proxy_port), headers) return connection
Create connection for the request.
def get_connection(self, request): ''' Create connection for the request. ''' protocol = request.protocol_override \ if request.protocol_override else self.protocol protocol = protocol.lower() target_host = request.host # target_port = HTTP_PORT if protocol == 'http' else HTTPS_PORT connection = _RequestsConnection( target_host, protocol, self.request_session, self.timeout) proxy_host = self.proxy_host proxy_port = self.proxy_port if self.proxy_host: headers = None if self.proxy_user and self.proxy_password: auth = base64.b64encode("{0}:{1}".format(self.proxy_user, self.proxy_password).encode()) headers = {'Proxy-Authorization': 'Basic {0}'.format(auth.decode())} connection.set_tunnel(proxy_host, int(proxy_port), headers) return connection
azure-servicebus/azure/servicebus/control_client/_http/httpclient.py
Azure/azure-sdk-for-python
_HTTPClient.perform_request
def perform_request(self, request): connection = self.get_connection(request) try: connection.putrequest(request.method, request.path) self.send_request_headers(connection, request.headers) self.send_request_body(connection, request.body) if DEBUG_REQUESTS and request.body: print('request:') try: print(request.body) except: pass resp = connection.getresponse() status = int(resp.status) message = resp.reason respheaders = resp.getheaders() for i, value in enumerate(respheaders): respheaders[i] = (value[0].lower(), value[1]) respbody = None if resp.length is None: respbody = resp.read() elif resp.length > 0: respbody = resp.read(resp.length) if DEBUG_RESPONSES and respbody: print('response:') try: print(respbody) except: pass response = HTTPResponse( status, resp.reason, respheaders, respbody) if status == 307: new_url = urlparse(dict(respheaders)['location']) request.host = new_url.hostname request.path = new_url.path request.path, request.query = self._update_request_uri_query(request) return self.perform_request(request) if status >= 300: raise HTTPError(status, message, respheaders, respbody) return response finally: connection.close()
Sends the request to the cloud service server and returns the response.
def perform_request(self, request): ''' Sends request to cloud service server and return the response. ''' connection = self.get_connection(request) try: connection.putrequest(request.method, request.path) self.send_request_headers(connection, request.headers) self.send_request_body(connection, request.body) if DEBUG_REQUESTS and request.body: print('request:') try: print(request.body) except: # pylint: disable=bare-except pass resp = connection.getresponse() status = int(resp.status) message = resp.reason respheaders = resp.getheaders() # for consistency across platforms, make header names lowercase for i, value in enumerate(respheaders): respheaders[i] = (value[0].lower(), value[1]) respbody = None if resp.length is None: respbody = resp.read() elif resp.length > 0: respbody = resp.read(resp.length) if DEBUG_RESPONSES and respbody: print('response:') try: print(respbody) except: # pylint: disable=bare-except pass response = HTTPResponse( status, resp.reason, respheaders, respbody) if status == 307: new_url = urlparse(dict(respheaders)['location']) request.host = new_url.hostname request.path = new_url.path request.path, request.query = self._update_request_uri_query(request) return self.perform_request(request) if status >= 300: raise HTTPError(status, message, respheaders, respbody) return response finally: connection.close()
azure-servicebus/azure/servicebus/control_client/_http/httpclient.py
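A hedged usage sketch of the request/response contract above: `client` stands for an _HTTPClient and `request` for an HTTPRequest prepared by the service layer; the HTTPError import path and the response attribute names are assumptions based on the code shown, not a documented public API.

from azure.servicebus.control_client._http import HTTPError   # internal module, shown for illustration

try:
    response = client.perform_request(request)
    # 307 redirects are followed transparently; any other status >= 300 raises HTTPError
    print(response.status, response.headers)
except HTTPError as err:
    print("request failed:", err)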
Azure/azure-sdk-for-python
ClustersOperations.execute_script_actions
def execute_script_actions( self, resource_group_name, cluster_name, persist_on_success, script_actions=None, custom_headers=None, raw=False, polling=True, **operation_config): raw_result = self._execute_script_actions_initial( resource_group_name=resource_group_name, cluster_name=cluster_name, persist_on_success=persist_on_success, script_actions=script_actions, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
Executes script actions on the specified HDInsight cluster.
def execute_script_actions( self, resource_group_name, cluster_name, persist_on_success, script_actions=None, custom_headers=None, raw=False, polling=True, **operation_config): """Executes script actions on the specified HDInsight cluster. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param cluster_name: The name of the cluster. :type cluster_name: str :param persist_on_success: Gets or sets if the scripts needs to be persisted. :type persist_on_success: bool :param script_actions: The list of run time script actions. :type script_actions: list[~azure.mgmt.hdinsight.models.RuntimeScriptAction] :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`ErrorResponseException<azure.mgmt.hdinsight.models.ErrorResponseException>` """ raw_result = self._execute_script_actions_initial( resource_group_name=resource_group_name, cluster_name=cluster_name, persist_on_success=persist_on_success, script_actions=script_actions, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
azure-mgmt-hdinsight/azure/mgmt/hdinsight/operations/clusters_operations.py
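A hedged usage sketch; HDInsightManagementClient and RuntimeScriptAction come from azure-mgmt-hdinsight, and the resource group, cluster name, script URI and role names below are placeholders.

from azure.common.credentials import get_azure_cli_credentials
from azure.mgmt.hdinsight import HDInsightManagementClient
from azure.mgmt.hdinsight.models import RuntimeScriptAction

credentials, subscription_id = get_azure_cli_credentials()
client = HDInsightManagementClient(credentials, subscription_id)

action = RuntimeScriptAction(
    name="install-deps",
    uri="https://example.com/scripts/install.sh",   # placeholder script location
    roles=["headnode", "workernode"],
)
poller = client.clusters.execute_script_actions(
    "my-resource-group", "my-cluster",
    persist_on_success=True,
    script_actions=[action],
)
poller.wait()   # the operation returns no body, so just wait for completion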
Azure/azure-sdk-for-python
FrontDoorManagementClient.check_front_door_name_availability
def check_front_door_name_availability( self, name, type, custom_headers=None, raw=False, **operation_config): check_front_door_name_availability_input = models.CheckNameAvailabilityInput(name=name, type=type) api_version = "2018-08-01" url = self.check_front_door_name_availability.metadata['url'] query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') body_content = self._serialize.body(check_front_door_name_availability_input, 'CheckNameAvailabilityInput') request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('CheckNameAvailabilityOutput', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
Check the availability of a Front Door resource name.
def check_front_door_name_availability( self, name, type, custom_headers=None, raw=False, **operation_config): """Check the availability of a Front Door resource name. :param name: The resource name to validate. :type name: str :param type: The type of the resource whose name is to be validated. Possible values include: 'Microsoft.Network/frontDoors', 'Microsoft.Network/frontDoors/frontendEndpoints' :type type: str or ~azure.mgmt.frontdoor.models.ResourceType :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: CheckNameAvailabilityOutput or ClientRawResponse if raw=true :rtype: ~azure.mgmt.frontdoor.models.CheckNameAvailabilityOutput or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.frontdoor.models.ErrorResponseException>` """ check_front_door_name_availability_input = models.CheckNameAvailabilityInput(name=name, type=type) api_version = "2018-08-01" # Construct URL url = self.check_front_door_name_availability.metadata['url'] # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(check_front_door_name_availability_input, 'CheckNameAvailabilityInput') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('CheckNameAvailabilityOutput', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
azure-mgmt-frontdoor/azure/mgmt/frontdoor/front_door_management_client.py
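A hedged usage sketch; FrontDoorManagementClient is the azure-mgmt-frontdoor client, the name being validated is a placeholder, and the output attribute names (name_availability, reason, message) are assumed from the CheckNameAvailabilityOutput model rather than verified.

from azure.common.credentials import get_azure_cli_credentials
from azure.mgmt.frontdoor import FrontDoorManagementClient

credentials, subscription_id = get_azure_cli_credentials()
client = FrontDoorManagementClient(credentials, subscription_id)

result = client.check_front_door_name_availability(
    name="contoso-frontend",                     # placeholder name to validate
    type="Microsoft.Network/frontDoors",
)
print(result.name_availability, result.reason, result.message)   # attribute names assumed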
Azure/azure-sdk-for-python
VaultsOperations.purge_deleted
def purge_deleted( self, vault_name, location, custom_headers=None, raw=False, polling=True, **operation_config): raw_result = self._purge_deleted_initial( vault_name=vault_name, location=location, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
Permanently deletes (purges) the specified soft-deleted Azure key vault.
def purge_deleted( self, vault_name, location, custom_headers=None, raw=False, polling=True, **operation_config): """Permanently deletes the specified vault. aka Purges the deleted Azure key vault. :param vault_name: The name of the soft-deleted vault. :type vault_name: str :param location: The location of the soft-deleted vault. :type location: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._purge_deleted_initial( vault_name=vault_name, location=location, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
azure-mgmt-keyvault/azure/mgmt/keyvault/v2016_10_01/operations/vaults_operations.py
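A hedged usage sketch; KeyVaultManagementClient comes from azure-mgmt-keyvault, and the vault name and location below are placeholders.

from azure.common.credentials import get_azure_cli_credentials
from azure.mgmt.keyvault import KeyVaultManagementClient

credentials, subscription_id = get_azure_cli_credentials()
client = KeyVaultManagementClient(credentials, subscription_id)

poller = client.vaults.purge_deleted("my-deleted-vault", "westus")   # placeholder values
poller.wait()   # no body is returned; completion simply means the purge finished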
Azure/azure-sdk-for-python
HttpChallenge._validate_request_uri
def _validate_request_uri(self, uri): if not uri: raise ValueError('request_uri cannot be empty') uri = parse.urlparse(uri) if not uri.netloc: raise ValueError('request_uri must be an absolute URI') if uri.scheme.lower() not in ['http', 'https']: raise ValueError('request_uri must be HTTP or HTTPS') return uri.netloc
Extracts the host authority from the given URI.
def _validate_request_uri(self, uri): """ Extracts the host authority from the given URI. """ if not uri: raise ValueError('request_uri cannot be empty') uri = parse.urlparse(uri) if not uri.netloc: raise ValueError('request_uri must be an absolute URI') if uri.scheme.lower() not in ['http', 'https']: raise ValueError('request_uri must be HTTP or HTTPS') return uri.netloc
azure-keyvault/azure/keyvault/http_challenge.py
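A standalone sketch of the same host-authority extraction, written independently of the SDK class so it runs on its own (the sample URI is made up):

from urllib import parse

def host_authority(uri):
    # Mirrors the validation rules above: non-empty, absolute, HTTP or HTTPS.
    if not uri:
        raise ValueError('request_uri cannot be empty')
    parts = parse.urlparse(uri)
    if not parts.netloc:
        raise ValueError('request_uri must be an absolute URI')
    if parts.scheme.lower() not in ('http', 'https'):
        raise ValueError('request_uri must be HTTP or HTTPS')
    return parts.netloc

print(host_authority('https://myvault.vault.azure.net/secrets/foo'))  # myvault.vault.azure.net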
Azure/azure-sdk-for-python
get_cli_profile
def get_cli_profile(): try: from azure.cli.core._profile import Profile from azure.cli.core._session import ACCOUNT from azure.cli.core._environment import get_config_dir except ImportError: raise ImportError("You need to install 'azure-cli-core' to load CLI credentials") azure_folder = get_config_dir() ACCOUNT.load(os.path.join(azure_folder, 'azureProfile.json')) return Profile(storage=ACCOUNT)
Return an Azure CLI Profile object.
def get_cli_profile(): """Return a CLI profile class. .. versionadded:: 1.1.6 :return: A CLI Profile :rtype: azure.cli.core._profile.Profile :raises: ImportError if azure-cli-core package is not available """ try: from azure.cli.core._profile import Profile from azure.cli.core._session import ACCOUNT from azure.cli.core._environment import get_config_dir except ImportError: raise ImportError("You need to install 'azure-cli-core' to load CLI credentials") azure_folder = get_config_dir() ACCOUNT.load(os.path.join(azure_folder, 'azureProfile.json')) return Profile(storage=ACCOUNT)
azure-common/azure/common/credentials.py
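Usage sketch: requires azure-cli-core to be installed and a prior `az login`; get_login_credentials is the Profile method that the helper in the next record builds on.

from azure.common.credentials import get_cli_profile

profile = get_cli_profile()
credentials, subscription_id, tenant_id = profile.get_login_credentials()
print(subscription_id, tenant_id)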
Azure/azure-sdk-for-python
get_azure_cli_credentials
def get_azure_cli_credentials(resource=None, with_tenant=False): profile = get_cli_profile() cred, subscription_id, tenant_id = profile.get_login_credentials(resource=resource) if with_tenant: return cred, subscription_id, tenant_id else: return cred, subscription_id
Return the credentials and default subscription ID of the currently loaded CLI profile. The credentials are those obtained from the "az login" command.
def get_azure_cli_credentials(resource=None, with_tenant=False): """Return Credentials and default SubscriptionID of current loaded profile of the CLI. Credentials will be the "az login" command: https://docs.microsoft.com/cli/azure/authenticate-azure-cli Default subscription ID is either the only one you have, or you can define it: https://docs.microsoft.com/cli/azure/manage-azure-subscriptions-azure-cli .. versionadded:: 1.1.6 :param str resource: The alternative resource for credentials if not ARM (GraphRBac, etc.) :param bool with_tenant: If True, return a three-tuple with last as tenant ID :return: tuple of Credentials and SubscriptionID (and tenant ID if with_tenant) :rtype: tuple """ profile = get_cli_profile() cred, subscription_id, tenant_id = profile.get_login_credentials(resource=resource) if with_tenant: return cred, subscription_id, tenant_id else: return cred, subscription_id
azure-common/azure/common/credentials.py
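Usage sketch: the returned credentials plug directly into ARM management clients; ResourceManagementClient is just an illustrative consumer, and the Graph resource URL shows the non-ARM case with the tenant included.

from azure.common.credentials import get_azure_cli_credentials
from azure.mgmt.resource import ResourceManagementClient

credentials, subscription_id = get_azure_cli_credentials()
client = ResourceManagementClient(credentials, subscription_id)
for group in client.resource_groups.list():
    print(group.name)

# Non-ARM resource, with the tenant ID returned as well:
graph_creds, sub_id, tenant_id = get_azure_cli_credentials(
    resource="https://graph.windows.net/", with_tenant=True)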
Azure/azure-sdk-for-python
PredictionOperations.resolve
def resolve( self, app_id, query, timezone_offset=None, verbose=None, staging=None, spell_check=None, bing_spell_check_subscription_key=None, log=None, custom_headers=None, raw=False, **operation_config): url = self.resolve.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), 'appId': self._serialize.url("app_id", app_id, 'str') } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} if timezone_offset is not None: query_parameters['timezoneOffset'] = self._serialize.query("timezone_offset", timezone_offset, 'float') if verbose is not None: query_parameters['verbose'] = self._serialize.query("verbose", verbose, 'bool') if staging is not None: query_parameters['staging'] = self._serialize.query("staging", staging, 'bool') if spell_check is not None: query_parameters['spellCheck'] = self._serialize.query("spell_check", spell_check, 'bool') if bing_spell_check_subscription_key is not None: query_parameters['bing-spell-check-subscription-key'] = self._serialize.query("bing_spell_check_subscription_key", bing_spell_check_subscription_key, 'str') if log is not None: query_parameters['log'] = self._serialize.query("log", log, 'bool') header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) body_content = self._serialize.body(query, 'str') request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.APIErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('LuisResult', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
Gets predictions for a given utterance, in the form of intents and entities. The current maximum query size is 500 characters.
def resolve( self, app_id, query, timezone_offset=None, verbose=None, staging=None, spell_check=None, bing_spell_check_subscription_key=None, log=None, custom_headers=None, raw=False, **operation_config): """Gets predictions for a given utterance, in the form of intents and entities. The current maximum query size is 500 characters. :param app_id: The LUIS application ID (Guid). :type app_id: str :param query: The utterance to predict. :type query: str :param timezone_offset: The timezone offset for the location of the request. :type timezone_offset: float :param verbose: If true, return all intents instead of just the top scoring intent. :type verbose: bool :param staging: Use the staging endpoint slot. :type staging: bool :param spell_check: Enable spell checking. :type spell_check: bool :param bing_spell_check_subscription_key: The subscription key to use when enabling Bing spell check :type bing_spell_check_subscription_key: str :param log: Log query (default is true) :type log: bool :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: LuisResult or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.language.luis.runtime.models.LuisResult or ~msrest.pipeline.ClientRawResponse :raises: :class:`APIErrorException<azure.cognitiveservices.language.luis.runtime.models.APIErrorException>` """ # Construct URL url = self.resolve.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), 'appId': self._serialize.url("app_id", app_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if timezone_offset is not None: query_parameters['timezoneOffset'] = self._serialize.query("timezone_offset", timezone_offset, 'float') if verbose is not None: query_parameters['verbose'] = self._serialize.query("verbose", verbose, 'bool') if staging is not None: query_parameters['staging'] = self._serialize.query("staging", staging, 'bool') if spell_check is not None: query_parameters['spellCheck'] = self._serialize.query("spell_check", spell_check, 'bool') if bing_spell_check_subscription_key is not None: query_parameters['bing-spell-check-subscription-key'] = self._serialize.query("bing_spell_check_subscription_key", bing_spell_check_subscription_key, 'str') if log is not None: query_parameters['log'] = self._serialize.query("log", log, 'bool') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(query, 'str') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.APIErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('LuisResult', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/runtime/operations/prediction_operations.py
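A hedged usage sketch; LUISRuntimeClient comes from azure-cognitiveservices-language-luis, the endpoint, key and app ID are placeholders, and the result attribute names are assumed from the LuisResult model.

from azure.cognitiveservices.language.luis.runtime import LUISRuntimeClient
from msrest.authentication import CognitiveServicesCredentials

client = LUISRuntimeClient(
    endpoint="https://westus.api.cognitive.microsoft.com",
    credentials=CognitiveServicesCredentials("<runtime-key>"),
)
result = client.prediction.resolve(
    "<app-id>", "book a flight to Paris tomorrow", verbose=True)
print(result.top_scoring_intent.intent, result.top_scoring_intent.score)
for entity in result.entities:
    print(entity.type, entity.entity)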
Azure/azure-sdk-for-python
MixedRealityClient.check_name_availability_local
def check_name_availability_local( self, location, name, type, custom_headers=None, raw=False, **operation_config): check_name_availability = models.CheckNameAvailabilityRequest(name=name, type=type) url = self.check_name_availability_local.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'location': self._serialize.url("location", location, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$') } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') body_content = self._serialize.body(check_name_availability, 'CheckNameAvailabilityRequest') request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('CheckNameAvailabilityResponse', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
Check name availability for global uniqueness.
def check_name_availability_local( self, location, name, type, custom_headers=None, raw=False, **operation_config): """Check Name Availability for global uniqueness. :param location: The location in which uniqueness will be verified. :type location: str :param name: Resource Name To Verify :type name: str :param type: Fully qualified resource type which includes provider namespace :type type: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: CheckNameAvailabilityResponse or ClientRawResponse if raw=true :rtype: ~azure.mgmt.mixedreality.models.CheckNameAvailabilityResponse or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.mixedreality.models.ErrorResponseException>` """ check_name_availability = models.CheckNameAvailabilityRequest(name=name, type=type) # Construct URL url = self.check_name_availability_local.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'location': self._serialize.url("location", location, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(check_name_availability, 'CheckNameAvailabilityRequest') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('CheckNameAvailabilityResponse', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
azure-mgmt-mixedreality/azure/mgmt/mixedreality/mixed_reality_client.py
Azure/azure-sdk-for-python
_WinHttpRequest.open
def open(self, method, url): flag = VARIANT.create_bool_false() _method = BSTR(method) _url = BSTR(url) _WinHttpRequest._Open(self, _method, _url, flag)
Opens the request.
def open(self, method, url): ''' Opens the request. method: the request VERB 'GET', 'POST', etc. url: the url to connect ''' flag = VARIANT.create_bool_false() _method = BSTR(method) _url = BSTR(url) _WinHttpRequest._Open(self, _method, _url, flag)
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_WinHttpRequest.set_timeout
def set_timeout(self, timeout_in_seconds): timeout_in_ms = int(timeout_in_seconds * 1000) _WinHttpRequest._SetTimeouts( self, 0, timeout_in_ms, timeout_in_ms, timeout_in_ms)
Sets up the timeout for the request.
def set_timeout(self, timeout_in_seconds): ''' Sets up the timeout for the request. ''' timeout_in_ms = int(timeout_in_seconds * 1000) _WinHttpRequest._SetTimeouts( self, 0, timeout_in_ms, timeout_in_ms, timeout_in_ms)
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_WinHttpRequest.set_request_header
def set_request_header(self, name, value): _name = BSTR(name) _value = BSTR(value) _WinHttpRequest._SetRequestHeader(self, _name, _value)
Sets the request header.
def set_request_header(self, name, value): ''' Sets the request header. ''' _name = BSTR(name) _value = BSTR(value) _WinHttpRequest._SetRequestHeader(self, _name, _value)
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_WinHttpRequest.get_all_response_headers
def get_all_response_headers(self): bstr_headers = c_void_p() _WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers)) bstr_headers = ctypes.cast(bstr_headers, c_wchar_p) headers = bstr_headers.value _SysFreeString(bstr_headers) return headers
Gets all of the response headers.
def get_all_response_headers(self): ''' Gets back all response headers. ''' bstr_headers = c_void_p() _WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers)) bstr_headers = ctypes.cast(bstr_headers, c_wchar_p) headers = bstr_headers.value _SysFreeString(bstr_headers) return headers
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_WinHttpRequest.send
def send(self, request=None): if request is None: var_empty = VARIANT.create_empty() _WinHttpRequest._Send(self, var_empty) else: _request = VARIANT.create_safearray_from_str(request) _WinHttpRequest._Send(self, _request)
Sends the request body.
def send(self, request=None): ''' Sends the request body. ''' # Sends VT_EMPTY if it is GET, HEAD request. if request is None: var_empty = VARIANT.create_empty() _WinHttpRequest._Send(self, var_empty) else: # Sends request body as SAFEArray. _request = VARIANT.create_safearray_from_str(request) _WinHttpRequest._Send(self, _request)
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_WinHttpRequest.status
def status(self): status = c_long() _WinHttpRequest._Status(self, byref(status)) return int(status.value)
Gets the status of the response.
def status(self): ''' Gets status of response. ''' status = c_long() _WinHttpRequest._Status(self, byref(status)) return int(status.value)
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_WinHttpRequest.status_text
def status_text(self): bstr_status_text = c_void_p() _WinHttpRequest._StatusText(self, byref(bstr_status_text)) bstr_status_text = ctypes.cast(bstr_status_text, c_wchar_p) status_text = bstr_status_text.value _SysFreeString(bstr_status_text) return status_text
Gets the status text of the response.
def status_text(self): ''' Gets status text of response. ''' bstr_status_text = c_void_p() _WinHttpRequest._StatusText(self, byref(bstr_status_text)) bstr_status_text = ctypes.cast(bstr_status_text, c_wchar_p) status_text = bstr_status_text.value _SysFreeString(bstr_status_text) return status_text
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_WinHttpRequest.response_body
def response_body(self): var_respbody = VARIANT() _WinHttpRequest._ResponseBody(self, byref(var_respbody)) if var_respbody.is_safearray_of_bytes(): respbody = var_respbody.str_from_safearray() return respbody else: return ''
Gets response body as a SAFEARRAY and converts the SAFEARRAY to str.
def response_body(self): ''' Gets response body as a SAFEARRAY and converts the SAFEARRAY to str. ''' var_respbody = VARIANT() _WinHttpRequest._ResponseBody(self, byref(var_respbody)) if var_respbody.is_safearray_of_bytes(): respbody = var_respbody.str_from_safearray() return respbody else: return ''
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_WinHttpRequest.set_client_certificate
def set_client_certificate(self, certificate): _certificate = BSTR(certificate) _WinHttpRequest._SetClientCertificate(self, _certificate)
Sets the client certificate for the request.
def set_client_certificate(self, certificate): '''Sets client certificate for the request. ''' _certificate = BSTR(certificate) _WinHttpRequest._SetClientCertificate(self, _certificate)
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_HTTPConnection.putrequest
def putrequest(self, method, uri): protocol = unicode(self.protocol + '://') url = protocol + self.host + unicode(uri) self._httprequest.set_timeout(self.timeout) self._httprequest.open(unicode(method), url) if self.cert_file is not None: self._httprequest.set_client_certificate(unicode(self.cert_file))
Connects to the host and sends the request.
def putrequest(self, method, uri): ''' Connects to host and sends the request. ''' protocol = unicode(self.protocol + '://') url = protocol + self.host + unicode(uri) self._httprequest.set_timeout(self.timeout) self._httprequest.open(unicode(method), url) # sets certificate for the connection if cert_file is set. if self.cert_file is not None: self._httprequest.set_client_certificate(unicode(self.cert_file))
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_HTTPConnection.putheader
def putheader(self, name, value): if sys.version_info < (3,): name = str(name).decode('utf-8') value = str(value).decode('utf-8') self._httprequest.set_request_header(name, value)
Sends the request headers.
def putheader(self, name, value): ''' Sends the headers of request. ''' if sys.version_info < (3,): name = str(name).decode('utf-8') value = str(value).decode('utf-8') self._httprequest.set_request_header(name, value)
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_HTTPConnection.send
def send(self, request_body): if not request_body: self._httprequest.send() else: self._httprequest.send(request_body)
Sends the request body.
def send(self, request_body): ''' Sends request body. ''' if not request_body: self._httprequest.send() else: self._httprequest.send(request_body)
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
Azure/azure-sdk-for-python
_HTTPConnection.getresponse
def getresponse(self): status = self._httprequest.status() status_text = self._httprequest.status_text() resp_headers = self._httprequest.get_all_response_headers() fixed_headers = [] for resp_header in resp_headers.split('\n'): if (resp_header.startswith('\t') or\ resp_header.startswith(' ')) and fixed_headers: fixed_headers[-1] += resp_header else: fixed_headers.append(resp_header) headers = [] for resp_header in fixed_headers: if ':' in resp_header: pos = resp_header.find(':') headers.append( (resp_header[:pos].lower(), resp_header[pos + 1:].strip())) body = self._httprequest.response_body() length = len(body) return _Response(status, status_text, length, headers, body)
Gets the response and builds the corresponding _Response object.
def getresponse(self): ''' Gets the response and generates the _Response object''' status = self._httprequest.status() status_text = self._httprequest.status_text() resp_headers = self._httprequest.get_all_response_headers() fixed_headers = [] for resp_header in resp_headers.split('\n'): if (resp_header.startswith('\t') or\ resp_header.startswith(' ')) and fixed_headers: # append to previous header fixed_headers[-1] += resp_header else: fixed_headers.append(resp_header) headers = [] for resp_header in fixed_headers: if ':' in resp_header: pos = resp_header.find(':') headers.append( (resp_header[:pos].lower(), resp_header[pos + 1:].strip())) body = self._httprequest.response_body() length = len(body) return _Response(status, status_text, length, headers, body)
azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py
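Putting the WinHTTP-backed pieces above together, an illustrative request flow (Windows-only, internal to the legacy SDK): `conn` stands for an already-constructed _HTTPConnection, the path and header values are placeholders, and the _Response attribute names are assumptions based on the getresponse code above.

# `conn` is assumed to be an existing _HTTPConnection instance.
conn.putrequest('GET', '/<subscription-id>/services/hostedservices')
conn.putheader('x-ms-version', '2014-10-01')   # placeholder header
conn.send(None)                                # no body for GET
resp = conn.getresponse()
print(resp.status, resp.reason)                # attribute names assumed
for name, value in resp.headers:               # headers are (lowercased-name, value) tuples
    print(name, value)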
Azure/azure-sdk-for-python
_get_readable_id
def _get_readable_id(id_name, id_prefix_to_skip): pos = id_name.find('//') if pos != -1: pos += 2 if id_prefix_to_skip: pos = id_name.find(id_prefix_to_skip, pos) if pos != -1: pos += len(id_prefix_to_skip) pos = id_name.find('/', pos) if pos != -1: return id_name[pos + 1:] return id_name
Simplifies an ID to make it more human-friendly.
def _get_readable_id(id_name, id_prefix_to_skip): """simplified an id to be more friendly for us people""" # id_name is in the form 'https://namespace.host.suffix/name' # where name may contain a forward slash! pos = id_name.find('//') if pos != -1: pos += 2 if id_prefix_to_skip: pos = id_name.find(id_prefix_to_skip, pos) if pos != -1: pos += len(id_prefix_to_skip) pos = id_name.find('/', pos) if pos != -1: return id_name[pos + 1:] return id_name
azure-servicemanagement-legacy/azure/servicemanagement/_common_serialization.py
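Illustrative inputs and outputs for the ID-shortening rule above, assuming the helper is in scope (the namespace and entity names are made up):

print(_get_readable_id('https://myns.servicebus.windows.net/myqueue', None))
# -> 'myqueue'
print(_get_readable_id('https://myns.servicebus.windows.net/mytopic/mysubtopic', None))
# -> 'mytopic/mysubtopic'  (the name itself may contain a slash)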
Azure/azure-sdk-for-python
_get_serialization_name
def _get_serialization_name(element_name): known = _KNOWN_SERIALIZATION_XFORMS.get(element_name) if known is not None: return known if element_name.startswith('x_ms_'): return element_name.replace('_', '-') if element_name.endswith('_id'): element_name = element_name.replace('_id', 'ID') for name in ['content_', 'last_modified', 'if_', 'cache_control']: if element_name.startswith(name): element_name = element_name.replace('_', '-_') return ''.join(name.capitalize() for name in element_name.split('_'))
Converts a Python name into a serializable name.
def _get_serialization_name(element_name): """converts a Python name into a serializable name""" known = _KNOWN_SERIALIZATION_XFORMS.get(element_name) if known is not None: return known if element_name.startswith('x_ms_'): return element_name.replace('_', '-') if element_name.endswith('_id'): element_name = element_name.replace('_id', 'ID') for name in ['content_', 'last_modified', 'if_', 'cache_control']: if element_name.startswith(name): element_name = element_name.replace('_', '-_') return ''.join(name.capitalize() for name in element_name.split('_'))
azure-servicemanagement-legacy/azure/servicemanagement/_common_serialization.py
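Illustrative transformations, assuming the helper is in scope and none of these names are overridden by the _KNOWN_SERIALIZATION_XFORMS table:

for name in ('service_name', 'x_ms_version', 'content_type', 'cache_control'):
    print(name, '->', _get_serialization_name(name))
# service_name  -> ServiceName
# x_ms_version  -> x-ms-version
# content_type  -> Content-Type
# cache_control -> Cache-Control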
Azure/azure-sdk-for-python
FaceOperations.verify_face_to_person
def verify_face_to_person( self, face_id, person_id, person_group_id=None, large_person_group_id=None, custom_headers=None, raw=False, **operation_config): body = models.VerifyFaceToPersonRequest(face_id=face_id, person_group_id=person_group_id, large_person_group_id=large_person_group_id, person_id=person_id) url = self.verify_face_to_person.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) body_content = self._serialize.body(body, 'VerifyFaceToPersonRequest') request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.APIErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('VerifyResult', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
Verify whether two faces belong to the same person. Compares a face ID with a person ID.
def verify_face_to_person( self, face_id, person_id, person_group_id=None, large_person_group_id=None, custom_headers=None, raw=False, **operation_config): """Verify whether two faces belong to a same person. Compares a face Id with a Person Id. :param face_id: FaceId of the face, comes from Face - Detect :type face_id: str :param person_id: Specify a certain person in a person group or a large person group. personId is created in PersonGroup Person - Create or LargePersonGroup Person - Create. :type person_id: str :param person_group_id: Using existing personGroupId and personId for fast loading a specified person. personGroupId is created in PersonGroup - Create. Parameter personGroupId and largePersonGroupId should not be provided at the same time. :type person_group_id: str :param large_person_group_id: Using existing largePersonGroupId and personId for fast loading a specified person. largePersonGroupId is created in LargePersonGroup - Create. Parameter personGroupId and largePersonGroupId should not be provided at the same time. :type large_person_group_id: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: VerifyResult or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.face.models.VerifyResult or ~msrest.pipeline.ClientRawResponse :raises: :class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>` """ body = models.VerifyFaceToPersonRequest(face_id=face_id, person_group_id=person_group_id, large_person_group_id=large_person_group_id, person_id=person_id) # Construct URL url = self.verify_face_to_person.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(body, 'VerifyFaceToPersonRequest') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.APIErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('VerifyResult', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_operations.py
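A hedged usage sketch; FaceClient comes from azure-cognitiveservices-vision-face, the endpoint, key and IDs are placeholders, and VerifyResult is assumed to expose is_identical and confidence.

from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

client = FaceClient("https://westus.api.cognitive.microsoft.com",
                    CognitiveServicesCredentials("<face-key>"))
result = client.face.verify_face_to_person(
    face_id="<face-id-from-detect>",
    person_id="<person-id>",
    person_group_id="<person-group-id>",   # or large_person_group_id, not both
)
print(result.is_identical, result.confidence)   # attribute names assumed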
Azure/azure-sdk-for-python
_MinidomXmlToObject.get_entry_properties_from_node
def get_entry_properties_from_node(entry, include_id, id_prefix_to_skip=None, use_title_as_id=False): properties = {} etag = entry.getAttributeNS(METADATA_NS, 'etag') if etag: properties['etag'] = etag for updated in _MinidomXmlToObject.get_child_nodes(entry, 'updated'): properties['updated'] = updated.firstChild.nodeValue for name in _MinidomXmlToObject.get_children_from_path(entry, 'author', 'name'): if name.firstChild is not None: properties['author'] = name.firstChild.nodeValue if include_id: if use_title_as_id: for title in _MinidomXmlToObject.get_child_nodes(entry, 'title'): properties['name'] = title.firstChild.nodeValue else: for id in _MinidomXmlToObject.get_child_nodes(entry, 'id'): properties['name'] = _get_readable_id( id.firstChild.nodeValue, id_prefix_to_skip) return properties
Gets properties from an entry XML node.
def get_entry_properties_from_node(entry, include_id, id_prefix_to_skip=None, use_title_as_id=False): ''' get properties from entry xml ''' properties = {} etag = entry.getAttributeNS(METADATA_NS, 'etag') if etag: properties['etag'] = etag for updated in _MinidomXmlToObject.get_child_nodes(entry, 'updated'): properties['updated'] = updated.firstChild.nodeValue for name in _MinidomXmlToObject.get_children_from_path(entry, 'author', 'name'): if name.firstChild is not None: properties['author'] = name.firstChild.nodeValue if include_id: if use_title_as_id: for title in _MinidomXmlToObject.get_child_nodes(entry, 'title'): properties['name'] = title.firstChild.nodeValue else: # TODO: check if this is used for id in _MinidomXmlToObject.get_child_nodes(entry, 'id'): properties['name'] = _get_readable_id( id.firstChild.nodeValue, id_prefix_to_skip) return properties
azure-servicemanagement-legacy/azure/servicemanagement/_serialization.py
Azure/azure-sdk-for-python
_MinidomXmlToObject.get_children_from_path
def get_children_from_path(node, *path): cur = node for index, child in enumerate(path): if isinstance(child, _strtype): next = _MinidomXmlToObject.get_child_nodes(cur, child) else: next = _MinidomXmlToObject._get_child_nodesNS(cur, *child) if index == len(path) - 1: return next elif not next: break cur = next[0] return []
Descends through a hierarchy of nodes and returns the list of children at the innermost level. Only returns children that share a common parent, not cousins.
def get_children_from_path(node, *path): '''descends through a hierarchy of nodes returning the list of children at the inner most level. Only returns children who share a common parent, not cousins.''' cur = node for index, child in enumerate(path): if isinstance(child, _strtype): next = _MinidomXmlToObject.get_child_nodes(cur, child) else: next = _MinidomXmlToObject._get_child_nodesNS(cur, *child) if index == len(path) - 1: return next elif not next: break cur = next[0] return []
azure-servicemanagement-legacy/azure/servicemanagement/_serialization.py
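A standalone sketch of the same path-descent idea using plain minidom (the element names are invented); like the helper above, it returns only matching children that share a single parent at the innermost level.

from xml.dom import minidom

doc = minidom.parseString(
    '<feed><entry><author><name>alice</name></author></entry></feed>')

def children_from_path(node, *path):
    cur = node
    for index, tag in enumerate(path):
        # keep only element children of the current node with the wanted tag
        matches = [c for c in cur.childNodes
                   if c.nodeType == c.ELEMENT_NODE and c.nodeName == tag]
        if index == len(path) - 1:
            return matches
        if not matches:
            return []
        cur = matches[0]
    return []

names = children_from_path(doc.documentElement, 'entry', 'author', 'name')
print([n.firstChild.nodeValue for n in names])   # ['alice']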