Run a command and return its stdout.
def run_command(command: str) -> str:
    """Run a command and return its stdout."""
    process = subprocess.Popen(
        shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
    )
    output, stderr = (
        stream.decode(sys.getdefaultencoding(), "ignore") for stream in process.communicate()
    )
    if process.returncode != 0:
        raise AirflowConfigException(
            f"Cannot execute {command}. Error code is: {process.returncode}. "
            f"Output: {output}, Stderr: {stderr}"
        )
    return output
Get Config option values from Secret Backend.
def _get_config_value_from_secret_backend(config_key: str) -> str | None:
    """Get Config option values from Secret Backend."""
    try:
        secrets_client = get_custom_secret_backend()
        if not secrets_client:
            return None
        return secrets_client.get_config(config_key)
    except Exception as e:
        raise AirflowConfigException(
            "Cannot retrieve config from alternative secrets backend. "
            "Make sure it is configured properly and that the Backend "
            "is accessible.\n"
            f"{e}"
        )
Check if the config is a template. :param configuration_description: description of configuration :param section: section :param key: key :return: True if the config is a template
def _is_template(configuration_description: dict[str, dict[str, Any]], section: str, key: str) -> bool:
    """
    Check if the config is a template.

    :param configuration_description: description of configuration
    :param section: section
    :param key: key
    :return: True if the config is a template
    """
    return configuration_description.get(section, {}).get(key, {}).get("is_template", False)
Read Airflow configuration description from YAML file. :param include_airflow: Include Airflow configs :param include_providers: Include provider configs :param selected_provider: If specified, include selected provider only :return: Python dictionary containing configs & their info
def retrieve_configuration_description(
    include_airflow: bool = True,
    include_providers: bool = True,
    selected_provider: str | None = None,
) -> dict[str, dict[str, Any]]:
    """
    Read Airflow configuration description from YAML file.

    :param include_airflow: Include Airflow configs
    :param include_providers: Include provider configs
    :param selected_provider: If specified, include selected provider only
    :return: Python dictionary containing configs & their info
    """
    base_configuration_description: dict[str, dict[str, Any]] = {}
    if include_airflow:
        with open(_default_config_file_path("config.yml")) as config_file:
            base_configuration_description.update(yaml.safe_load(config_file))
    if include_providers:
        from airflow.providers_manager import ProvidersManager

        for provider, config in ProvidersManager().provider_configs:
            if not selected_provider or provider == selected_provider:
                base_configuration_description.update(config)
    return base_configuration_description
Get path to Airflow Home.
def get_airflow_home() -> str:
    """Get path to Airflow Home."""
    return expand_env_var(os.environ.get("AIRFLOW_HOME", "~/airflow"))
Get path to airflow.cfg.
def get_airflow_config(airflow_home: str) -> str:
    """Get path to airflow.cfg."""
    airflow_config_var = os.environ.get("AIRFLOW_CONFIG")
    if airflow_config_var is None:
        return os.path.join(airflow_home, "airflow.cfg")
    return expand_env_var(airflow_config_var)
Create default config parser based on configuration description. It creates ConfigParser with all default values retrieved from the configuration description and expands all the variables from the global and local variables defined in this module. :param configuration_description: configuration description - retrieved from config.yaml files following the schema defined in "config.yml.schema.json" in the config_templates folder. :return: Default Config Parser that can be used to read configuration values from.
def create_default_config_parser(configuration_description: dict[str, dict[str, Any]]) -> ConfigParser:
    """
    Create default config parser based on configuration description.

    It creates ConfigParser with all default values retrieved from the configuration description and
    expands all the variables from the global and local variables defined in this module.

    :param configuration_description: configuration description - retrieved from config.yaml files
        following the schema defined in "config.yml.schema.json" in the config_templates folder.
    :return: Default Config Parser that can be used to read configuration values from.
    """
    parser = ConfigParser()
    all_vars = get_all_expansion_variables()
    for section, section_desc in configuration_description.items():
        parser.add_section(section)
        options = section_desc["options"]
        for key in options:
            default_value = options[key]["default"]
            is_template = options[key].get("is_template", False)
            if default_value is not None:
                if is_template or not isinstance(default_value, str):
                    parser.set(section, key, default_value)
                else:
                    parser.set(section, key, default_value.format(**all_vars))
    return parser
Create parser using the old defaults from Airflow < 2.7.0. This is used in order to be able to fall back to those defaults when an old provider version that does not support "config contribution" is installed with Airflow 2.7.0+. This "default" configuration does not support variable expansion; these are pretty much hard-coded defaults that we want to fall back to in such a case.
def create_pre_2_7_defaults() -> ConfigParser:
    """
    Create parser using the old defaults from Airflow < 2.7.0.

    This is used in order to be able to fall back to those defaults when an old provider version that
    does not support "config contribution" is installed with Airflow 2.7.0+. This "default"
    configuration does not support variable expansion; these are pretty much hard-coded defaults that
    we want to fall back to in such a case.
    """
    config_parser = ConfigParser()
    config_parser.read(_default_config_file_path("pre_2_7_defaults.cfg"))
    return config_parser
Load standard airflow configuration. In case it finds that the configuration file is missing, it will create it and write the default configuration values there, based on defaults passed, and will add the comments and examples from the default configuration. :param airflow_config_parser: parser to which the configuration will be loaded
def load_standard_airflow_configuration(airflow_config_parser: AirflowConfigParser): """ Load standard airflow configuration. In case it finds that the configuration file is missing, it will create it and write the default configuration values there, based on defaults passed, and will add the comments and examples from the default configuration. :param airflow_config_parser: parser to which the configuration will be loaded """ global AIRFLOW_HOME log.info("Reading the config from %s", AIRFLOW_CONFIG) airflow_config_parser.read(AIRFLOW_CONFIG) if airflow_config_parser.has_option("core", "AIRFLOW_HOME"): msg = ( "Specifying both AIRFLOW_HOME environment variable and airflow_home " "in the config file is deprecated. Please use only the AIRFLOW_HOME " "environment variable and remove the config file entry." ) if "AIRFLOW_HOME" in os.environ: warnings.warn(msg, category=DeprecationWarning, stacklevel=1) elif airflow_config_parser.get("core", "airflow_home") == AIRFLOW_HOME: warnings.warn( "Specifying airflow_home in the config file is deprecated. As you " "have left it at the default value you should remove the setting " "from your airflow.cfg and suffer no change in behaviour.", category=DeprecationWarning, stacklevel=1, ) else: # there AIRFLOW_HOME = airflow_config_parser.get("core", "airflow_home") # type: ignore[assignment] warnings.warn(msg, category=DeprecationWarning, stacklevel=1)
Load the Airflow config files. Called for you automatically as part of the Airflow boot process.
def initialize_config() -> AirflowConfigParser:
    """
    Load the Airflow config files.

    Called for you automatically as part of the Airflow boot process.
    """
    airflow_config_parser = AirflowConfigParser()

    if airflow_config_parser.getboolean("core", "unit_test_mode"):
        airflow_config_parser.load_test_config()
    else:
        load_standard_airflow_configuration(airflow_config_parser)
        # If the user set unit_test_mode in the airflow.cfg, we still
        # want to respect that and then load the default unit test configuration
        # file on top of it.
        if airflow_config_parser.getboolean("core", "unit_test_mode"):
            airflow_config_parser.load_test_config()

    # Set the WEBSERVER_CONFIG variable
    global WEBSERVER_CONFIG
    WEBSERVER_CONFIG = airflow_config_parser.get("webserver", "config_file")
    return airflow_config_parser
Historical get.
def get(*args, **kwargs) -> ConfigType | None:
    """Historical get."""
    warnings.warn(
        "Accessing configuration method 'get' directly from the configuration module is "
        "deprecated. Please access the configuration from the 'configuration.conf' object via "
        "'conf.get'",
        DeprecationWarning,
        stacklevel=2,
    )
    return conf.get(*args, **kwargs)
Historical getboolean.
def getboolean(*args, **kwargs) -> bool:
    """Historical getboolean."""
    warnings.warn(
        "Accessing configuration method 'getboolean' directly from the configuration module is "
        "deprecated. Please access the configuration from the 'configuration.conf' object via "
        "'conf.getboolean'",
        DeprecationWarning,
        stacklevel=2,
    )
    return conf.getboolean(*args, **kwargs)
Historical getfloat.
def getfloat(*args, **kwargs) -> float:
    """Historical getfloat."""
    warnings.warn(
        "Accessing configuration method 'getfloat' directly from the configuration module is "
        "deprecated. Please access the configuration from the 'configuration.conf' object via "
        "'conf.getfloat'",
        DeprecationWarning,
        stacklevel=2,
    )
    return conf.getfloat(*args, **kwargs)
Historical getint.
def getint(*args, **kwargs) -> int:
    """Historical getint."""
    warnings.warn(
        "Accessing configuration method 'getint' directly from the configuration module is "
        "deprecated. Please access the configuration from the 'configuration.conf' object via "
        "'conf.getint'",
        DeprecationWarning,
        stacklevel=2,
    )
    return conf.getint(*args, **kwargs)
Historical getsection.
def getsection(*args, **kwargs) -> ConfigOptionsDictType | None:
    """Historical getsection."""
    warnings.warn(
        "Accessing configuration method 'getsection' directly from the configuration module is "
        "deprecated. Please access the configuration from the 'configuration.conf' object via "
        "'conf.getsection'",
        DeprecationWarning,
        stacklevel=2,
    )
    return conf.getsection(*args, **kwargs)
Historical has_option.
def has_option(*args, **kwargs) -> bool:
    """Historical has_option."""
    warnings.warn(
        "Accessing configuration method 'has_option' directly from the configuration module is "
        "deprecated. Please access the configuration from the 'configuration.conf' object via "
        "'conf.has_option'",
        DeprecationWarning,
        stacklevel=2,
    )
    return conf.has_option(*args, **kwargs)
Historical remove_option.
def remove_option(*args, **kwargs) -> bool:
    """Historical remove_option."""
    warnings.warn(
        "Accessing configuration method 'remove_option' directly from the configuration module is "
        "deprecated. Please access the configuration from the 'configuration.conf' object via "
        "'conf.remove_option'",
        DeprecationWarning,
        stacklevel=2,
    )
    return conf.remove_option(*args, **kwargs)
Historical as_dict.
def as_dict(*args, **kwargs) -> ConfigSourcesType:
    """Historical as_dict."""
    warnings.warn(
        "Accessing configuration method 'as_dict' directly from the configuration module is "
        "deprecated. Please access the configuration from the 'configuration.conf' object via "
        "'conf.as_dict'",
        DeprecationWarning,
        stacklevel=2,
    )
    return conf.as_dict(*args, **kwargs)
Historical set.
def set(*args, **kwargs) -> None:
    """Historical set."""
    warnings.warn(
        "Accessing configuration method 'set' directly from the configuration module is "
        "deprecated. Please access the configuration from the 'configuration.conf' object via "
        "'conf.set'",
        DeprecationWarning,
        stacklevel=2,
    )
    conf.set(*args, **kwargs)
Ensure that all secrets backends are loaded. If the secrets_backend_list contains only 2 default backends, reload it.
def ensure_secrets_loaded() -> list[BaseSecretsBackend]:
    """
    Ensure that all secrets backends are loaded.

    If the secrets_backend_list contains only 2 default backends, reload it.
    """
    # Check if the secrets_backend_list contains only 2 default backends
    if len(secrets_backend_list) == 2:
        return initialize_secrets_backends()
    return secrets_backend_list
Get Secret Backend if defined in airflow.cfg.
def get_custom_secret_backend() -> BaseSecretsBackend | None:
    """Get Secret Backend if defined in airflow.cfg."""
    secrets_backend_cls = conf.getimport(section="secrets", key="backend")

    if not secrets_backend_cls:
        return None

    try:
        backend_kwargs = conf.getjson(section="secrets", key="backend_kwargs")
        if not backend_kwargs:
            backend_kwargs = {}
        elif not isinstance(backend_kwargs, dict):
            raise ValueError("not a dict")
    except AirflowConfigException:
        log.warning("Failed to parse [secrets] backend_kwargs as JSON, defaulting to no kwargs.")
        backend_kwargs = {}
    except ValueError:
        log.warning("Failed to parse [secrets] backend_kwargs into a dict, defaulting to no kwargs.")
        backend_kwargs = {}

    return secrets_backend_cls(**backend_kwargs)
Initialize secrets backend. * import secrets backend classes * instantiate them and return them in a list
def initialize_secrets_backends() -> list[BaseSecretsBackend]:
    """
    Initialize secrets backend.

    * import secrets backend classes
    * instantiate them and return them in a list
    """
    backend_list = []

    custom_secret_backend = get_custom_secret_backend()
    if custom_secret_backend is not None:
        backend_list.append(custom_secret_backend)

    for class_name in DEFAULT_SECRETS_SEARCH_PATH:
        secrets_backend_cls = import_string(class_name)
        backend_list.append(secrets_backend_cls())

    return backend_list
Initialize auth manager. * import auth manager class * instantiate it and return it
def initialize_auth_manager() -> BaseAuthManager:
    """
    Initialize auth manager.

    * import auth manager class
    * instantiate it and return it
    """
    auth_manager_cls = conf.getimport(section="core", key="auth_manager")

    if not auth_manager_cls:
        raise AirflowConfigException(
            "No auth manager defined in the config. "
            "Please specify one using section/key [core/auth_manager]."
        )

    return auth_manager_cls()
Configure & Validate Airflow Logging.
def configure_logging(): """Configure & Validate Airflow Logging.""" logging_class_path = "" try: logging_class_path = conf.get("logging", "logging_config_class") except AirflowConfigException: log.debug("Could not find key logging_config_class in config") if logging_class_path: try: logging_config = import_string(logging_class_path) # Make sure that the variable is in scope if not isinstance(logging_config, dict): raise ValueError("Logging Config should be of dict type") log.info("Successfully imported user-defined logging config from %s", logging_class_path) except Exception as err: # Import default logging configurations. raise ImportError(f"Unable to load custom logging from {logging_class_path} due to {err}") else: logging_class_path = "airflow.config_templates.airflow_local_settings.DEFAULT_LOGGING_CONFIG" logging_config = import_string(logging_class_path) log.debug("Unable to load custom logging, using default config instead") try: # Ensure that the password masking filter is applied to the 'task' handler # no matter what the user did. if "filters" in logging_config and "mask_secrets" in logging_config["filters"]: # But if they replace the logging config _entirely_, don't try to set this, it won't work task_handler_config = logging_config["handlers"]["task"] task_handler_config.setdefault("filters", []) if "mask_secrets" not in task_handler_config["filters"]: task_handler_config["filters"].append("mask_secrets") # Try to init logging dictConfig(logging_config) except (ValueError, KeyError) as e: log.error("Unable to load the config, contains a configuration error.") # When there is an error in the config, escalate the exception # otherwise Airflow would silently fall back on the default config raise e validate_logging_config(logging_config) return logging_class_path
Validate the provided Logging Config.
def validate_logging_config(logging_config): """Validate the provided Logging Config.""" # Now lets validate the other logging-related settings task_log_reader = conf.get("logging", "task_log_reader") logger = logging.getLogger("airflow.task") def _get_handler(name): return next((h for h in logger.handlers if h.name == name), None) if _get_handler(task_log_reader) is None: # Check for pre 1.10 setting that might be in deployed airflow.cfg files if task_log_reader == "file.task" and _get_handler("task"): warnings.warn( f"task_log_reader setting in [logging] has a deprecated value of {task_log_reader!r}, " "but no handler with this name was found. Please update your config to use task. " "Running config has been adjusted to match", DeprecationWarning, stacklevel=2, ) conf.set("logging", "task_log_reader", "task") else: raise AirflowConfigException( f"Configured task_log_reader {task_log_reader!r} was not a handler of " f"the 'airflow.task' logger." )
Check whether a potential object is a subclass of the AirflowPlugin class. :param plugin_obj: potential subclass of AirflowPlugin :return: Whether or not the obj is a valid subclass of AirflowPlugin
def is_valid_plugin(plugin_obj):
    """
    Check whether a potential object is a subclass of the AirflowPlugin class.

    :param plugin_obj: potential subclass of AirflowPlugin
    :return: Whether or not the obj is a valid subclass of AirflowPlugin
    """
    global plugins

    if (
        inspect.isclass(plugin_obj)
        and issubclass(plugin_obj, AirflowPlugin)
        and (plugin_obj is not AirflowPlugin)
    ):
        plugin_obj.validate()
        return plugin_obj not in plugins
    return False
Start plugin load and register it after successful initialization. If the plugin is already registered, do nothing. :param plugin_instance: subclass of AirflowPlugin
def register_plugin(plugin_instance):
    """
    Start plugin load and register it after successful initialization.

    If the plugin is already registered, do nothing.

    :param plugin_instance: subclass of AirflowPlugin
    """
    global plugins

    if plugin_instance.name in loaded_plugins:
        return

    loaded_plugins.add(plugin_instance.name)
    plugin_instance.on_load()
    plugins.append(plugin_instance)
Load and register AirflowPlugin subclasses from entry points. The entry_point group should be 'airflow.plugins'.
def load_entrypoint_plugins():
    """
    Load and register AirflowPlugin subclasses from entry points.

    The entry_point group should be 'airflow.plugins'.
    """
    global import_errors

    log.debug("Loading plugins from entrypoints")

    for entry_point, dist in entry_points_with_dist("airflow.plugins"):
        log.debug("Importing entry_point plugin %s", entry_point.name)
        try:
            plugin_class = entry_point.load()
            if not is_valid_plugin(plugin_class):
                continue

            plugin_instance = plugin_class()
            plugin_instance.source = EntryPointSource(entry_point, dist)
            register_plugin(plugin_instance)
        except Exception as e:
            log.exception("Failed to import plugin %s", entry_point.name)
            import_errors[entry_point.module] = str(e)
Load and register Airflow Plugins from plugins directory.
def load_plugins_from_plugin_directory(): """Load and register Airflow Plugins from plugins directory.""" global import_errors log.debug("Loading plugins from directory: %s", settings.PLUGINS_FOLDER) for file_path in find_path_from_directory(settings.PLUGINS_FOLDER, ".airflowignore"): path = Path(file_path) if not path.is_file() or path.suffix != ".py": continue mod_name = path.stem try: loader = importlib.machinery.SourceFileLoader(mod_name, file_path) spec = importlib.util.spec_from_loader(mod_name, loader) mod = importlib.util.module_from_spec(spec) sys.modules[spec.name] = mod loader.exec_module(mod) log.debug("Importing plugin module %s", file_path) for mod_attr_value in (m for m in mod.__dict__.values() if is_valid_plugin(m)): plugin_instance = mod_attr_value() plugin_instance.source = PluginsDirectorySource(file_path) register_plugin(plugin_instance) except Exception as e: log.exception("Failed to import plugin %s", file_path) import_errors[file_path] = str(e)
Create new module.
def make_module(name: str, objects: list[Any]):
    """Create new module."""
    if not objects:
        return None
    log.debug("Creating module %s", name)
    name = name.lower()
    module = types.ModuleType(name)
    module._name = name.split(".")[-1]  # type: ignore
    module._objects = objects  # type: ignore
    module.__dict__.update((o.__name__, o) for o in objects)
    return module
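A minimal usage sketch of the helper above, assuming make_module from this module is in scope; the plugin name and the two functions are illustrative stand-ins, not part of the source.

import sys

def hello():  # stand-in for a plugin-provided macro
    return "hello"

def world():  # stand-in for a plugin-provided macro
    return "world"

mod = make_module("airflow.macros.my_plugin", [hello, world])
if mod is not None:
    # Registering the synthetic module makes `import airflow.macros.my_plugin` resolve.
    sys.modules[mod.__name__] = mod
    print(mod.hello(), mod.world())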
Load plugins from plugins directory and entrypoints. Plugins are only loaded if they have not been previously loaded.
def ensure_plugins_loaded(): """ Load plugins from plugins directory and entrypoints. Plugins are only loaded if they have not been previously loaded. """ from airflow.stats import Stats global plugins, registered_hooks if plugins is not None: log.debug("Plugins are already loaded. Skipping.") return if not settings.PLUGINS_FOLDER: raise ValueError("Plugins folder is not set") log.debug("Loading plugins") with Stats.timer() as timer: plugins = [] registered_hooks = [] load_plugins_from_plugin_directory() load_entrypoint_plugins() if not settings.LAZY_LOAD_PROVIDERS: load_providers_plugins() # We don't do anything with these for now, but we want to keep track of # them so we can integrate them in to the UI's Connection screens for plugin in plugins: registered_hooks.extend(plugin.hooks) if plugins: log.debug("Loading %d plugin(s) took %.2f seconds", len(plugins), timer.duration)
Collect extension points for WEB UI.
def initialize_web_ui_plugins(): """Collect extension points for WEB UI.""" global plugins global flask_blueprints global flask_appbuilder_views global flask_appbuilder_menu_links if ( flask_blueprints is not None and flask_appbuilder_views is not None and flask_appbuilder_menu_links is not None ): return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Initialize Web UI plugin") flask_blueprints = [] flask_appbuilder_views = [] flask_appbuilder_menu_links = [] for plugin in plugins: flask_appbuilder_views.extend(plugin.appbuilder_views) flask_appbuilder_menu_links.extend(plugin.appbuilder_menu_items) flask_blueprints.extend([{"name": plugin.name, "blueprint": bp} for bp in plugin.flask_blueprints]) if (plugin.admin_views and not plugin.appbuilder_views) or ( plugin.menu_links and not plugin.appbuilder_menu_items ): log.warning( "Plugin '%s' may not be compatible with the current Airflow version. " "Please contact the author of the plugin.", plugin.name, )
Create modules for loaded extension from custom task instance dependency rule plugins.
def initialize_ti_deps_plugins():
    """Create modules for loaded extension from custom task instance dependency rule plugins."""
    global registered_ti_dep_classes
    if registered_ti_dep_classes is not None:
        return

    ensure_plugins_loaded()

    if plugins is None:
        raise AirflowPluginException("Can't load plugins.")

    log.debug("Initialize custom taskinstance deps plugins")

    registered_ti_dep_classes = {}

    for plugin in plugins:
        registered_ti_dep_classes.update(
            {qualname(ti_dep.__class__): ti_dep.__class__ for ti_dep in plugin.ti_deps}
        )
Create modules for loaded extension from extra operators links plugins.
def initialize_extra_operators_links_plugins(): """Create modules for loaded extension from extra operators links plugins.""" global global_operator_extra_links global operator_extra_links global registered_operator_link_classes if ( global_operator_extra_links is not None and operator_extra_links is not None and registered_operator_link_classes is not None ): return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Initialize extra operators links plugins") global_operator_extra_links = [] operator_extra_links = [] registered_operator_link_classes = {} for plugin in plugins: global_operator_extra_links.extend(plugin.global_operator_extra_links) operator_extra_links.extend(list(plugin.operator_extra_links)) registered_operator_link_classes.update( {qualname(link.__class__): link.__class__ for link in plugin.operator_extra_links} )
Collect timetable classes registered by plugins.
def initialize_timetables_plugins():
    """Collect timetable classes registered by plugins."""
    global timetable_classes

    if timetable_classes is not None:
        return

    ensure_plugins_loaded()

    if plugins is None:
        raise AirflowPluginException("Can't load plugins.")

    log.debug("Initialize extra timetables plugins")

    timetable_classes = {
        qualname(timetable_class): timetable_class
        for plugin in plugins
        for timetable_class in plugin.timetables
    }
Integrate executor plugins to the context.
def integrate_executor_plugins() -> None: """Integrate executor plugins to the context.""" global plugins global executors_modules if executors_modules is not None: return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Integrate executor plugins") executors_modules = [] for plugin in plugins: if plugin.name is None: raise AirflowPluginException("Invalid plugin name") plugin_name: str = plugin.name executors_module = make_module("airflow.executors." + plugin_name, plugin.executors) if executors_module: executors_modules.append(executors_module) sys.modules[executors_module.__name__] = executors_module
Integrates macro plugins.
def integrate_macros_plugins() -> None: """Integrates macro plugins.""" global plugins global macros_modules from airflow import macros if macros_modules is not None: return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Integrate DAG plugins") macros_modules = [] for plugin in plugins: if plugin.name is None: raise AirflowPluginException("Invalid plugin name") macros_module = make_module(f"airflow.macros.{plugin.name}", plugin.macros) if macros_module: macros_modules.append(macros_module) sys.modules[macros_module.__name__] = macros_module # Register the newly created module on airflow.macros such that it # can be accessed when rendering templates. setattr(macros, plugin.name, macros_module)
Add listeners from plugins.
def integrate_listener_plugins(listener_manager: ListenerManager) -> None:
    """Add listeners from plugins."""
    global plugins

    ensure_plugins_loaded()

    if plugins:
        for plugin in plugins:
            if plugin.name is None:
                raise AirflowPluginException("Invalid plugin name")

            for listener in plugin.listeners:
                listener_manager.add_listener(listener)
Dump plugins attributes. :param attrs_to_dump: A list of plugin attributes to dump
def get_plugin_info(attrs_to_dump: Iterable[str] | None = None) -> list[dict[str, Any]]: """ Dump plugins attributes. :param attrs_to_dump: A list of plugin attributes to dump """ ensure_plugins_loaded() integrate_executor_plugins() integrate_macros_plugins() initialize_web_ui_plugins() initialize_extra_operators_links_plugins() if not attrs_to_dump: attrs_to_dump = PLUGINS_ATTRIBUTES_TO_DUMP plugins_info = [] if plugins: for plugin in plugins: info: dict[str, Any] = {"name": plugin.name} for attr in attrs_to_dump: if attr in ("global_operator_extra_links", "operator_extra_links"): info[attr] = [f"<{qualname(d.__class__)} object>" for d in getattr(plugin, attr)] elif attr in ("macros", "timetables", "hooks", "executors", "priority_weight_strategies"): info[attr] = [qualname(d) for d in getattr(plugin, attr)] elif attr == "listeners": # listeners may be modules or class instances info[attr] = [ d.__name__ if inspect.ismodule(d) else qualname(d) for d in getattr(plugin, attr) ] elif attr == "appbuilder_views": info[attr] = [ {**d, "view": qualname(d["view"].__class__) if "view" in d else None} for d in getattr(plugin, attr) ] elif attr == "flask_blueprints": info[attr] = [ f"<{qualname(d.__class__)}: name={d.name!r} import_name={d.import_name!r}>" for d in getattr(plugin, attr) ] else: info[attr] = getattr(plugin, attr) plugins_info.append(info) return plugins_info
Collect priority weight strategy classes registered by plugins.
def initialize_priority_weight_strategy_plugins():
    """Collect priority weight strategy classes registered by plugins."""
    global priority_weight_strategy_classes

    if priority_weight_strategy_classes is not None:
        return

    ensure_plugins_loaded()

    if plugins is None:
        raise AirflowPluginException("Can't load plugins.")

    log.debug("Initialize extra priority weight strategy plugins")

    plugins_priority_weight_strategy_classes = {
        qualname(priority_weight_strategy_class): priority_weight_strategy_class
        for plugin in plugins
        for priority_weight_strategy_class in plugin.priority_weight_strategies
    }
    priority_weight_strategy_classes = {
        **airflow_priority_weight_strategies,
        **plugins_priority_weight_strategy_classes,
    }
Allow altering tasks after they are loaded in the DagBag. It allows administrator to rewire some task's parameters. Alternatively you can raise ``AirflowClusterPolicyViolation`` exception to stop DAG from being executed. Here are a few examples of how this can be useful: * You could enforce a specific queue (say the ``spark`` queue) for tasks using the ``SparkOperator`` to make sure that these tasks get wired to the right workers * You could enforce a task timeout policy, making sure that no tasks run for more than 48 hours :param task: task to be mutated
def task_policy(task: BaseOperator) -> None:
    """
    Allow altering tasks after they are loaded in the DagBag.

    It allows administrator to rewire some task's parameters. Alternatively you can raise
    ``AirflowClusterPolicyViolation`` exception to stop DAG from being executed.

    Here are a few examples of how this can be useful:

    * You could enforce a specific queue (say the ``spark`` queue) for tasks using the ``SparkOperator``
      to make sure that these tasks get wired to the right workers
    * You could enforce a task timeout policy, making sure that no tasks run for more than 48 hours

    :param task: task to be mutated
    """
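A hedged sketch of what such a policy might look like in a deployment's airflow_local_settings.py, following the two examples from the docstring; the operator-name check, queue name, and 48-hour cap are illustrative assumptions, not part of the source.

# Illustrative cluster policy placed in airflow_local_settings.py (assumed deployment file).
import datetime

def task_policy(task):
    # Route any Spark-related operator to a dedicated queue (name check is an assumption).
    if "Spark" in type(task).__name__:
        task.queue = "spark"
    # Cap task runtime at 48 hours.
    limit = datetime.timedelta(hours=48)
    if task.execution_timeout is None or task.execution_timeout > limit:
        task.execution_timeout = limit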
Allow altering DAGs after they are loaded in the DagBag. It allows administrator to rewire some DAG's parameters. Alternatively you can raise ``AirflowClusterPolicyViolation`` exception to stop DAG from being executed. Here are a few examples of how this can be useful: * You could enforce default user for DAGs * Check if every DAG has configured tags :param dag: dag to be mutated
def dag_policy(dag: DAG) -> None:
    """
    Allow altering DAGs after they are loaded in the DagBag.

    It allows administrator to rewire some DAG's parameters. Alternatively you can raise
    ``AirflowClusterPolicyViolation`` exception to stop DAG from being executed.

    Here are a few examples of how this can be useful:

    * You could enforce default user for DAGs
    * Check if every DAG has configured tags

    :param dag: dag to be mutated
    """
Allow altering task instances before being queued by the Airflow scheduler. This could be used, for instance, to modify the task instance during retries. :param task_instance: task instance to be mutated
def task_instance_mutation_hook(task_instance: TaskInstance) -> None:
    """
    Allow altering task instances before being queued by the Airflow scheduler.

    This could be used, for instance, to modify the task instance during retries.

    :param task_instance: task instance to be mutated
    """
Mutate pod before scheduling. This setting allows altering ``kubernetes.client.models.V1Pod`` object before they are passed to the Kubernetes client for scheduling. This could be used, for instance, to add sidecar or init containers to every worker pod launched by KubernetesExecutor or KubernetesPodOperator.
def pod_mutation_hook(pod) -> None:
    """
    Mutate pod before scheduling.

    This setting allows altering ``kubernetes.client.models.V1Pod`` object before they are passed to the
    Kubernetes client for scheduling.

    This could be used, for instance, to add sidecar or init containers to every worker pod launched by
    KubernetesExecutor or KubernetesPodOperator.
    """
Inject airflow context vars into default airflow context vars. This setting allows getting the airflow context vars, which are key-value pairs. They are then injected into the default airflow context vars, which in the end are available as environment variables when running tasks. dag_id, task_id, execution_date, dag_run_id and try_number are reserved keys. :param context: The context for the task_instance of interest.
def get_airflow_context_vars(context) -> dict[str, str]:  # type: ignore[empty-body]
    """
    Inject airflow context vars into default airflow context vars.

    This setting allows getting the airflow context vars, which are key-value pairs. They are then
    injected into the default airflow context vars, which in the end are available as environment
    variables when running tasks. dag_id, task_id, execution_date, dag_run_id and try_number are
    reserved keys.

    :param context: The context for the task_instance of interest.
    """
Allow for dynamic control of the DAG file parsing timeout based on the DAG file path. It is useful when there are a few DAG files requiring longer parsing times, while others do not. You can control them separately instead of having one value for all DAG files. If the return value is less than or equal to 0, it means no timeout during the DAG parsing.
def get_dagbag_import_timeout(dag_file_path: str) -> int | float:  # type: ignore[empty-body]
    """
    Allow for dynamic control of the DAG file parsing timeout based on the DAG file path.

    It is useful when there are a few DAG files requiring longer parsing times, while others do not.
    You can control them separately instead of having one value for all DAG files.

    If the return value is less than or equal to 0, it means no timeout during the DAG parsing.
    """
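A minimal sketch of how a deployment might implement this hook in airflow_local_settings.py; the file-name pattern and the timeout values are illustrative assumptions.

# Illustrative implementation for airflow_local_settings.py (assumed deployment file).
def get_dagbag_import_timeout(dag_file_path: str) -> float:
    # Give known-slow, generated DAG files more time to parse; keep a tight budget otherwise.
    if dag_file_path.endswith("_generated.py"):
        return 120.0
    return 30.0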
Turn the functions from airflow_local_settings module into a custom/local plugin. Allows plugin-registered functions to co-operate with pluggy/setuptool entrypoint plugins of the same methods. Airflow local settings will be "win" (i.e. they have the final say) as they are the last plugin registered. :meta private:
def make_plugin_from_local_settings(pm: pluggy.PluginManager, module, names: set[str]): """ Turn the functions from airflow_local_settings module into a custom/local plugin. Allows plugin-registered functions to co-operate with pluggy/setuptool entrypoint plugins of the same methods. Airflow local settings will be "win" (i.e. they have the final say) as they are the last plugin registered. :meta private: """ import inspect import textwrap import attr hook_methods = set() def _make_shim_fn(name, desired_sig, target): # Functions defined in airflow_local_settings are called by positional parameters, so the names don't # have to match what we define in the "template" policy. # # However Pluggy validates the names match (and will raise an error if they don't!) # # To maintain compat, if we detect the names don't match, we will wrap it with a dynamically created # shim function that looks somewhat like this: # # def dag_policy_name_mismatch_shim(dag): # airflow_local_settings.dag_policy(dag) # codestr = textwrap.dedent( f""" def {name}_name_mismatch_shim{desired_sig}: return __target({' ,'.join(desired_sig.parameters)}) """ ) code = compile(codestr, "<policy-shim>", "single") scope = {"__target": target} exec(code, scope, scope) return scope[f"{name}_name_mismatch_shim"] @attr.define(frozen=True) class AirflowLocalSettingsPolicy: hook_methods: tuple[str, ...] __name__ = "AirflowLocalSettingsPolicy" def __dir__(self): return self.hook_methods for name in names: if not hasattr(pm.hook, name): continue policy = getattr(module, name) if not policy: continue local_sig = inspect.signature(policy) policy_sig = inspect.signature(globals()[name]) # We only care if names/order/number of parameters match, not type hints if local_sig.parameters.keys() != policy_sig.parameters.keys(): policy = _make_shim_fn(name, policy_sig, target=policy) setattr(AirflowLocalSettingsPolicy, name, staticmethod(hookimpl(policy, specname=name))) hook_methods.add(name) if hook_methods: pm.register(AirflowLocalSettingsPolicy(hook_methods=tuple(hook_methods))) return hook_methods
Verify the correct placeholder prefix. If the given field_behaviors dict contains a placeholder's node, and there are placeholders for extra fields (i.e. anything other than the built-in conn attrs), and if those extra fields are unprefixed, then add the prefix. The reason we need to do this is, all custom conn fields live in the same dictionary, so we need to namespace them with a prefix internally. But for user convenience, and consistency between the `get_ui_field_behaviour` method and the extra dict itself, we allow users to supply the unprefixed name.
def _ensure_prefix_for_placeholders(field_behaviors: dict[str, Any], conn_type: str):
    """
    Verify the correct placeholder prefix.

    If the given field_behaviors dict contains a placeholder's node, and there are placeholders for
    extra fields (i.e. anything other than the built-in conn attrs), and if those extra fields are
    unprefixed, then add the prefix.

    The reason we need to do this is, all custom conn fields live in the same dictionary, so we need to
    namespace them with a prefix internally.

    But for user convenience, and consistency between the `get_ui_field_behaviour` method and the extra
    dict itself, we allow users to supply the unprefixed name.
    """
    conn_attrs = {"host", "schema", "login", "password", "port", "extra"}

    def ensure_prefix(field):
        if field not in conn_attrs and not field.startswith("extra__"):
            return f"extra__{conn_type}__{field}"
        else:
            return field

    if "placeholders" in field_behaviors:
        placeholders = field_behaviors["placeholders"]
        field_behaviors["placeholders"] = {ensure_prefix(k): v for k, v in placeholders.items()}

    return field_behaviors
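A small worked example of the prefixing behaviour, assuming the function above is in scope; the "jdbc" connection type and the custom field name are hypothetical.

# Built-in attrs are left alone; custom fields get the extra__<conn_type>__ prefix.
field_behaviors = {
    "placeholders": {
        "host": "localhost",            # built-in conn attr: untouched
        "driver_class": "org.x.Driver", # custom field: gets prefixed
    }
}
result = _ensure_prefix_for_placeholders(field_behaviors, conn_type="jdbc")
# result["placeholders"] == {"host": "localhost", "extra__jdbc__driver_class": "org.x.Driver"}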
Create JSON schema validator from the provider_info.schema.json.
def _create_provider_info_schema_validator():
    """Create JSON schema validator from the provider_info.schema.json."""
    import jsonschema

    schema = _read_schema_from_resources_or_local_file("provider_info.schema.json")
    cls = jsonschema.validators.validator_for(schema)
    validator = cls(schema)
    return validator
Create JSON schema validator from the customized_form_field_behaviours.schema.json.
def _create_customized_form_field_behaviours_schema_validator():
    """Create JSON schema validator from the customized_form_field_behaviours.schema.json."""
    import jsonschema

    schema = _read_schema_from_resources_or_local_file("customized_form_field_behaviours.schema.json")
    cls = jsonschema.validators.validator_for(schema)
    validator = cls(schema)
    return validator
Log debug imports from sources.
def log_debug_import_from_sources(class_name, e, provider_package):
    """Log debug imports from sources."""
    log.debug(
        "Optional feature disabled on exception when importing '%s' from '%s' package",
        class_name,
        provider_package,
        exc_info=e,
    )
Log optional feature disabled.
def log_optional_feature_disabled(class_name, e, provider_package):
    """Log optional feature disabled."""
    log.debug(
        "Optional feature disabled on exception when importing '%s' from '%s' package",
        class_name,
        provider_package,
        exc_info=e,
    )
    log.info(
        "Optional provider feature disabled when importing '%s' from '%s' package",
        class_name,
        provider_package,
    )
Log import warning.
def log_import_warning(class_name, e, provider_package):
    """Log import warning."""
    log.warning(
        "Exception when importing '%s' from '%s' package",
        class_name,
        provider_package,
        exc_info=e,
    )
Perform coherence check on provider classes. For apache-airflow providers - it checks if it starts with appropriate package. For all providers it tries to import the provider - checking that there are no exceptions during importing. It logs appropriate warning in case it detects any problems. :param provider_package: name of the provider package :param class_name: name of the class to import :return the class if the class is OK, None otherwise.
def _correctness_check(provider_package: str, class_name: str, provider_info: ProviderInfo) -> Any: """ Perform coherence check on provider classes. For apache-airflow providers - it checks if it starts with appropriate package. For all providers it tries to import the provider - checking that there are no exceptions during importing. It logs appropriate warning in case it detects any problems. :param provider_package: name of the provider package :param class_name: name of the class to import :return the class if the class is OK, None otherwise. """ if not _check_builtin_provider_prefix(provider_package, class_name): return None try: imported_class = import_string(class_name) except AirflowOptionalProviderFeatureException as e: # When the provider class raises AirflowOptionalProviderFeatureException # this is an expected case when only some classes in provider are # available. We just log debug level here and print info message in logs so that # the user is aware of it log_optional_feature_disabled(class_name, e, provider_package) return None except ImportError as e: if provider_info.is_source: # When we have providers from sources, then we just turn all import logs to debug logs # As this is pretty expected that you have a number of dependencies not installed # (we always have all providers from sources until we split providers to separate repo) log_debug_import_from_sources(class_name, e, provider_package) return None if "No module named 'airflow.providers." in e.msg: # handle cases where another provider is missing. This can only happen if # there is an optional feature, so we log debug and print information about it log_optional_feature_disabled(class_name, e, provider_package) return None for known_error in KNOWN_UNHANDLED_OPTIONAL_FEATURE_ERRORS: # Until we convert all providers to use AirflowOptionalProviderFeatureException # we assume any problem with importing another "provider" is because this is an # optional feature, so we log debug and print information about it if known_error[0] == provider_package and known_error[1] in e.msg: log_optional_feature_disabled(class_name, e, provider_package) return None # But when we have no idea - we print warning to logs log_import_warning(class_name, e, provider_package) return None except Exception as e: log_import_warning(class_name, e, provider_package) return None return imported_class
Decorate and cache provider info. Decorator factory that creates a decorator that caches initialization of provider's parameters. :param cache_name: Name of the cache
def provider_info_cache(cache_name: str) -> Callable[[T], T]:
    """
    Decorate and cache provider info.

    Decorator factory that creates a decorator that caches initialization of provider's parameters.

    :param cache_name: Name of the cache
    """

    def provider_info_cache_decorator(func: T):
        @wraps(func)
        def wrapped_function(*args, **kwargs):
            providers_manager_instance = args[0]
            if cache_name in providers_manager_instance._initialized_cache:
                return
            start_time = perf_counter()
            logger.debug("Initializing Providers Manager[%s]", cache_name)
            func(*args, **kwargs)
            providers_manager_instance._initialized_cache[cache_name] = True
            logger.debug(
                "Initialization of Providers Manager[%s] took %.2f seconds",
                cache_name,
                perf_counter() - start_time,
            )

        return cast(T, wrapped_function)

    return provider_info_cache_decorator
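A minimal usage sketch, assuming the decorator above: the decorated method runs once per cache name and later calls return early. The class body and method name here are illustrative, not taken from the source.

class SomeProvidersManager:  # hypothetical stand-in for the real manager class
    def __init__(self):
        self._initialized_cache: dict[str, bool] = {}

    @provider_info_cache("hooks")
    def initialize_hooks(self):
        # Expensive discovery work goes here; executed only on the first call.
        ...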
Print rich and visible warnings.
def custom_show_warning(message, category, filename, lineno, file=None, line=None):
    """Print rich and visible warnings."""
    # Delay imports until we need it
    from rich.markup import escape

    msg = f"[bold]{line}" if line else f"[bold][yellow]{filename}:{lineno}"
    msg += f" {category.__name__}[/bold]: {escape(str(message))}[/yellow]"
    write_console = _get_rich_console(file or sys.stderr)
    write_console.print(msg, soft_wrap=True)
Replace ``warnings.showwarning``, returning the original. This is useful since we want to "reset" the ``showwarning`` hook on exit to avoid lazy-loading issues. If a warning is emitted after Python cleaned up the import system, we would no longer be able to import ``rich``.
def replace_showwarning(replacement):
    """Replace ``warnings.showwarning``, returning the original.

    This is useful since we want to "reset" the ``showwarning`` hook on exit to avoid lazy-loading
    issues. If a warning is emitted after Python cleaned up the import system, we would no longer be
    able to import ``rich``.
    """
    original = warnings.showwarning
    warnings.showwarning = replacement
    return original
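A sketch of the intended pattern, assuming the two functions above: install the rich-based printer now and restore the original hook at interpreter exit. The atexit wiring shown here is an assumption about how the hook is meant to be used, not quoted from the source.

import atexit
import functools

# Swap in the rich printer and remember the previous hook.
original = replace_showwarning(custom_show_warning)
# Put the original hook back when the process exits, so late warnings don't need `rich`.
atexit.register(functools.partial(replace_showwarning, original))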
Configure Global Variables from airflow.cfg.
def configure_vars():
    """Configure Global Variables from airflow.cfg."""
    global SQL_ALCHEMY_CONN
    global DAGS_FOLDER
    global PLUGINS_FOLDER
    global DONOT_MODIFY_HANDLERS
    SQL_ALCHEMY_CONN = conf.get("database", "SQL_ALCHEMY_CONN")

    DAGS_FOLDER = os.path.expanduser(conf.get("core", "DAGS_FOLDER"))

    PLUGINS_FOLDER = conf.get("core", "plugins_folder", fallback=os.path.join(AIRFLOW_HOME, "plugins"))

    # If donot_modify_handlers=True, we do not modify logging handlers in task_run command
    # If the flag is set to False, we remove all handlers from the root logger
    # and add all handlers from 'airflow.task' logger to the root Logger. This is done
    # to get all the logs from the print & log statements in the DAG files before a task is run
    # The handlers are restored after the task completes execution.
    DONOT_MODIFY_HANDLERS = conf.getboolean("logging", "donot_modify_handlers", fallback=False)
Configure ORM using SQLAlchemy.
def configure_orm(disable_connection_pool=False, pool_class=None): """Configure ORM using SQLAlchemy.""" from airflow.utils.log.secrets_masker import mask_secret if ( SQL_ALCHEMY_CONN and SQL_ALCHEMY_CONN.startswith("sqlite") and not SQL_ALCHEMY_CONN.startswith("sqlite:////") # In memory is not useful for production, but useful for writing tests against Airflow for extensions and SQL_ALCHEMY_CONN != "sqlite://" ): from airflow.exceptions import AirflowConfigException raise AirflowConfigException( f"Cannot use relative path: `{SQL_ALCHEMY_CONN}` to connect to sqlite. " "Please use absolute path such as `sqlite:////tmp/airflow.db`." ) global Session global engine from airflow.api_internal.internal_api_call import InternalApiConfig if InternalApiConfig.get_use_internal_api(): Session = TracebackSession engine = None return elif os.environ.get("_AIRFLOW_SKIP_DB_TESTS") == "true": # Skip DB initialization in unit tests, if DB tests are skipped Session = SkipDBTestsSession engine = None return log.debug("Setting up DB connection pool (PID %s)", os.getpid()) engine_args = prepare_engine_args(disable_connection_pool, pool_class) if conf.has_option("database", "sql_alchemy_connect_args"): connect_args = conf.getimport("database", "sql_alchemy_connect_args") else: connect_args = {} engine = create_engine(SQL_ALCHEMY_CONN, connect_args=connect_args, **engine_args, future=True) mask_secret(engine.url.password) setup_event_handlers(engine) Session = scoped_session( sessionmaker( autocommit=False, autoflush=False, bind=engine, expire_on_commit=False, ) )
Prepare SQLAlchemy engine args.
def prepare_engine_args(disable_connection_pool=False, pool_class=None):
    """Prepare SQLAlchemy engine args."""
    default_args = {}
    for dialect, default in DEFAULT_ENGINE_ARGS.items():
        if SQL_ALCHEMY_CONN.startswith(dialect):
            default_args = default.copy()
            break

    engine_args: dict = conf.getjson("database", "sql_alchemy_engine_args", fallback=default_args)  # type: ignore

    if pool_class:
        # Don't use separate settings for size etc, only those from sql_alchemy_engine_args
        engine_args["poolclass"] = pool_class
    elif disable_connection_pool or not conf.getboolean("database", "SQL_ALCHEMY_POOL_ENABLED"):
        engine_args["poolclass"] = NullPool
        log.debug("settings.prepare_engine_args(): Using NullPool")
    elif not SQL_ALCHEMY_CONN.startswith("sqlite"):
        # Pool size engine args not supported by sqlite.
        # If no config value is defined for the pool size, select a reasonable value.
        # 0 means no limit, which could lead to exceeding the Database connection limit.
        pool_size = conf.getint("database", "SQL_ALCHEMY_POOL_SIZE", fallback=5)

        # The maximum overflow size of the pool.
        # When the number of checked-out connections reaches the size set in pool_size,
        # additional connections will be returned up to this limit.
        # When those additional connections are returned to the pool, they are disconnected and discarded.
        # It follows then that the total number of simultaneous connections
        # the pool will allow is pool_size + max_overflow,
        # and the total number of "sleeping" connections the pool will allow is pool_size.
        # max_overflow can be set to -1 to indicate no overflow limit;
        # no limit will be placed on the total number
        # of concurrent connections. Defaults to 10.
        max_overflow = conf.getint("database", "SQL_ALCHEMY_MAX_OVERFLOW", fallback=10)

        # The DB server already has a value for wait_timeout (number of seconds after
        # which an idle sleeping connection should be killed). Since other DBs may
        # co-exist on the same server, SQLAlchemy should set its
        # pool_recycle to an equal or smaller value.
        pool_recycle = conf.getint("database", "SQL_ALCHEMY_POOL_RECYCLE", fallback=1800)

        # Check connection at the start of each connection pool checkout.
        # Typically, this is a simple statement like "SELECT 1", but may also make use
        # of some DBAPI-specific method to test the connection for liveness.
        # More information here:
        # https://docs.sqlalchemy.org/en/14/core/pooling.html#disconnect-handling-pessimistic
        pool_pre_ping = conf.getboolean("database", "SQL_ALCHEMY_POOL_PRE_PING", fallback=True)

        log.debug(
            "settings.prepare_engine_args(): Using pool settings. pool_size=%d, max_overflow=%d, "
            "pool_recycle=%d, pid=%d",
            pool_size,
            max_overflow,
            pool_recycle,
            os.getpid(),
        )
        engine_args["pool_size"] = pool_size
        engine_args["pool_recycle"] = pool_recycle
        engine_args["pool_pre_ping"] = pool_pre_ping
        engine_args["max_overflow"] = max_overflow

    # The default isolation level for MySQL (REPEATABLE READ) can introduce inconsistencies when
    # running multiple schedulers, as repeated queries on the same session may read from stale snapshots.
    # 'READ COMMITTED' is the default value for PostgreSQL.
    # More information here:
    # https://dev.mysql.com/doc/refman/8.0/en/innodb-transaction-isolation-levels.html
    if SQL_ALCHEMY_CONN.startswith("mysql"):
        engine_args["isolation_level"] = "READ COMMITTED"

    if is_sqlalchemy_v1():
        # Allow the user to specify an encoding for their DB otherwise default
        # to utf-8 so jobs & users with non-latin1 characters can still use us.
        # This parameter was removed in SQLAlchemy 2.x.
        engine_args["encoding"] = conf.get("database", "SQL_ENGINE_ENCODING", fallback="utf-8")

    return engine_args
Properly close pooled database connections.
def dispose_orm():
    """Properly close pooled database connections."""
    log.debug("Disposing DB connection pool (PID %s)", os.getpid())
    global engine
    global Session

    if Session is not None:  # type: ignore[truthy-function]
        Session.remove()
        Session = None
    if engine:
        engine.dispose()
        engine = None
Properly close database connections and re-configure ORM.
def reconfigure_orm(disable_connection_pool=False, pool_class=None):
    """Properly close database connections and re-configure ORM."""
    dispose_orm()
    configure_orm(disable_connection_pool=disable_connection_pool, pool_class=pool_class)
Register Adapters and DB Converters.
def configure_adapters():
    """Register Adapters and DB Converters."""
    from pendulum import DateTime as Pendulum

    if SQL_ALCHEMY_CONN.startswith("sqlite"):
        from sqlite3 import register_adapter

        register_adapter(Pendulum, lambda val: val.isoformat(" "))

    if SQL_ALCHEMY_CONN.startswith("mysql"):
        try:
            import MySQLdb.converters

            MySQLdb.converters.conversions[Pendulum] = MySQLdb.converters.DateTime2literal
        except ImportError:
            pass
        try:
            import pymysql.converters

            pymysql.converters.conversions[Pendulum] = pymysql.converters.escape_datetime
        except ImportError:
            pass
Validate ORM Session.
def validate_session():
    """Validate ORM Session."""
    global engine

    worker_precheck = conf.getboolean("celery", "worker_precheck")
    if not worker_precheck:
        return True
    else:
        check_session = sessionmaker(bind=engine)
        session = check_session()
        try:
            session.execute(text("select 1"))
            conn_status = True
        except exc.DBAPIError as err:
            log.error(err)
            conn_status = False
        session.close()
        return conn_status
Any additional configuration (register callback) for airflow.utils.action_loggers module.
def configure_action_logging() -> None:
    """Any additional configuration (register callback) for airflow.utils.action_loggers module."""
Ensure certain subfolders of AIRFLOW_HOME are on the classpath.
def prepare_syspath():
    """Ensure certain subfolders of AIRFLOW_HOME are on the classpath."""
    if DAGS_FOLDER not in sys.path:
        sys.path.append(DAGS_FOLDER)

    # Add ./config/ for loading custom log parsers etc, or
    # airflow_local_settings etc.
    config_path = os.path.join(AIRFLOW_HOME, "config")
    if config_path not in sys.path:
        sys.path.append(config_path)

    if PLUGINS_FOLDER not in sys.path:
        sys.path.append(PLUGINS_FOLDER)
Get session timeout configs and handle outdated configs gracefully.
def get_session_lifetime_config(): """Get session timeout configs and handle outdated configs gracefully.""" session_lifetime_minutes = conf.get("webserver", "session_lifetime_minutes", fallback=None) session_lifetime_days = conf.get("webserver", "session_lifetime_days", fallback=None) uses_deprecated_lifetime_configs = session_lifetime_days or conf.get( "webserver", "force_log_out_after", fallback=None ) minutes_per_day = 24 * 60 default_lifetime_minutes = "43200" if uses_deprecated_lifetime_configs and session_lifetime_minutes == default_lifetime_minutes: warnings.warn( "`session_lifetime_days` option from `[webserver]` section has been " "renamed to `session_lifetime_minutes`. The new option allows to configure " "session lifetime in minutes. The `force_log_out_after` option has been removed " "from `[webserver]` section. Please update your configuration.", category=RemovedInAirflow3Warning, stacklevel=2, ) if session_lifetime_days: session_lifetime_minutes = minutes_per_day * int(session_lifetime_days) if not session_lifetime_minutes: session_lifetime_days = 30 session_lifetime_minutes = minutes_per_day * session_lifetime_days log.debug("User session lifetime is set to %s minutes.", session_lifetime_minutes) return int(session_lifetime_minutes)
Import airflow_local_settings.py files to allow overriding any configs in settings.py file.
def import_local_settings(): """Import airflow_local_settings.py files to allow overriding any configs in settings.py file.""" try: import airflow_local_settings except ModuleNotFoundError as e: if e.name == "airflow_local_settings": log.debug("No airflow_local_settings to import.", exc_info=True) else: log.critical( "Failed to import airflow_local_settings due to a transitive module not found error.", exc_info=True, ) raise except ImportError: log.critical("Failed to import airflow_local_settings.", exc_info=True) raise else: if hasattr(airflow_local_settings, "__all__"): names = set(airflow_local_settings.__all__) else: names = {n for n in airflow_local_settings.__dict__ if not n.startswith("__")} if "policy" in names and "task_policy" not in names: warnings.warn( "Using `policy` in airflow_local_settings.py is deprecated. " "Please rename your `policy` to `task_policy`.", RemovedInAirflow3Warning, stacklevel=2, ) setattr(airflow_local_settings, "task_policy", airflow_local_settings.policy) names.remove("policy") plugin_functions = policies.make_plugin_from_local_settings( POLICY_PLUGIN_MANAGER, airflow_local_settings, names ) # If we have already handled a function by adding it to the plugin, # then don't clobber the global function for name in names - plugin_functions: globals()[name] = getattr(airflow_local_settings, name) if POLICY_PLUGIN_MANAGER.hook.task_instance_mutation_hook.get_hookimpls(): task_instance_mutation_hook.is_noop = False log.info("Loaded airflow_local_settings from %s .", airflow_local_settings.__file__)
Initialize Airflow with all the settings from this file.
def initialize():
    """Initialize Airflow with all the settings from this file."""
    configure_vars()
    prepare_syspath()
    configure_policy_plugin_manager()
    # Load policy plugins _before_ importing airflow_local_settings, as Pluggy uses LIFO and we want
    # anything in airflow_local_settings to take precedence
    load_policy_plugins(POLICY_PLUGIN_MANAGER)
    import_local_settings()
    global LOGGING_CLASS_PATH
    LOGGING_CLASS_PATH = configure_logging()
    State.state_color.update(STATE_COLORS)

    configure_adapters()
    # The webservers import this file from models.py with the default settings.
    configure_orm()
    configure_action_logging()

    # Ensure we close DB connections at scheduler and gunicorn worker terminations
    atexit.register(dispose_orm)
Date filter.
def ds_filter(value: datetime.date | datetime.time | None) -> str | None:
    """Date filter."""
    if value is None:
        return None
    return value.strftime("%Y-%m-%d")
Date filter without dashes.
def ds_nodash_filter(value: datetime.date | datetime.time | None) -> str | None:
    """Date filter without dashes."""
    if value is None:
        return None
    return value.strftime("%Y%m%d")
Timestamp filter.
def ts_filter(value: datetime.date | datetime.time | None) -> str | None: """Timestamp filter.""" if value is None: return None return value.isoformat()
Timestamp filter without dashes.
def ts_nodash_filter(value: datetime.date | datetime.time | None) -> str | None: """Timestamp filter without dashes.""" if value is None: return None return value.strftime("%Y%m%dT%H%M%S")
Timestamp filter with timezone.
def ts_nodash_with_tz_filter(value: datetime.date | datetime.time | None) -> str | None: """Timestamp filter with timezone.""" if value is None: return None return value.isoformat().replace("-", "").replace(":", "")
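A quick, self-contained illustration of what each filter above produces for a sample UTC timestamp (the sample value is arbitrary):

import datetime

sample = datetime.datetime(2024, 1, 2, 3, 4, 5, tzinfo=datetime.timezone.utc)

assert ds_filter(sample) == "2024-01-02"
assert ds_nodash_filter(sample) == "20240102"
assert ts_filter(sample) == "2024-01-02T03:04:05+00:00"
assert ts_nodash_filter(sample) == "20240102T030405"
assert ts_nodash_with_tz_filter(sample) == "20240102T030405+0000"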
Load authentication backends.
def load_auth(): """Load authentication backends.""" auth_backends = "airflow.api.auth.backend.default" try: auth_backends = conf.get("api", "auth_backends") except AirflowConfigException: pass backends = [] try: for backend in auth_backends.split(","): auth = import_module(backend.strip()) log.info("Loaded API auth backend: %s", backend) backends.append(auth) except ImportError as err: log.critical("Cannot import %s for API authentication due to: %s", backend, err) raise AirflowException(err) return backends
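A hedged sketch of how the loaded backends are typically consumed: each backend module is expected to expose init_app and requires_authentication, as the backends shown below do. The view function here is a placeholder, not an actual Airflow endpoint.

backends = load_auth()
# Each backend module exposes the same small contract used by the API layer:
# an init_app(app) hook and a requires_authentication(view) decorator.
requires_authentication = backends[0].requires_authentication


@requires_authentication
def ping():
    # Placeholder view; in Airflow this would be an API endpoint function.
    return "pong"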
Initialize authentication backend.
def init_app(_): """Initialize authentication backend."""
Decorate functions that require authentication.
def requires_authentication(function: T): """Decorate functions that require authentication.""" @wraps(function) def decorated(*args, **kwargs): return function(*args, **kwargs) return cast(T, decorated)
Initialize authentication.
def init_app(_): """Initialize authentication."""
Decorate functions that require authentication.
def requires_authentication(function: T): """Decorate functions that require authentication.""" @wraps(function) def decorated(*args, **kwargs): return Response("Forbidden", 403) return cast(T, decorated)
Initialize application with kerberos.
def init_app(app): """Initialize application with kerberos.""" hostname = app.config.get("SERVER_NAME") if not hostname: hostname = getfqdn() log.info("Kerberos: hostname %s", hostname) service = "airflow" _KERBEROS_SERVICE.service_name = f"{service}@{hostname}" if "KRB5_KTNAME" not in os.environ: os.environ["KRB5_KTNAME"] = conf.get("kerberos", "keytab") try: log.info("Kerberos init: %s %s", service, hostname) principal = kerberos.getServerPrincipalDetails(service, hostname) except kerberos.KrbError as err: log.warning("Kerberos: %s", err) else: log.info("Kerberos API: server is %s", principal)
Indicate that authorization is required.
def _unauthorized(): """Indicate that authorization is required.""" return Response("Unauthorized", 401, {"WWW-Authenticate": "Negotiate"})
Decorate functions that require authentication with Kerberos.
def requires_authentication(function: T, find_user: Callable[[str], BaseUser] | None = None): """Decorate functions that require authentication with Kerberos.""" if not find_user: warnings.warn( "This module is deprecated. Please use " "`airflow.providers.fab.auth_manager.api.auth.backend.kerberos_auth` instead.", RemovedInAirflow3Warning, stacklevel=2, ) find_user = get_airflow_app().appbuilder.sm.find_user @wraps(function) def decorated(*args, **kwargs): header = request.headers.get("Authorization") if header: token = "".join(header.split()[1:]) auth = _gssapi_authenticate(token) if auth.return_code == kerberos.AUTH_GSS_COMPLETE: g.user = find_user(auth.user) response = function(*args, **kwargs) response = make_response(response) if auth.token is not None: response.headers["WWW-Authenticate"] = f"negotiate {auth.token}" return response elif auth.return_code != kerberos.AUTH_GSS_CONTINUE: return _forbidden() return _unauthorized() return cast(T, decorated)
Initialize authentication backend.
def init_app(_): """Initialize authentication backend."""
Decorate functions that require authentication.
def requires_authentication(function: T): """Decorate functions that require authentication.""" @wraps(function) def decorated(*args, **kwargs): if not get_auth_manager().is_logged_in(): return Response("Unauthorized", 401, {}) return function(*args, **kwargs) return cast(T, decorated)
Return current API Client based on current Airflow configuration.
def get_current_api_client() -> Client: """Return current API Client based on current Airflow configuration.""" api_module = import_module(conf.get_mandatory_value("cli", "api_client")) auth_backends = api.load_auth() session = None for backend in auth_backends: session_factory = getattr(backend, "create_client_session", None) if session_factory: session = session_factory() api_client = api_module.Client( api_base_url=conf.get("cli", "endpoint_url"), auth=getattr(backend, "CLIENT_AUTH", None), session=session, ) return api_client
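An illustrative call site (the dag id is a placeholder). trigger_dag is one of the methods the CLI client interface exposes; which concrete Client is returned depends on the [cli] api_client setting.

client = get_current_api_client()
# Trigger a run of a DAG through whichever client (local or JSON-over-HTTP) the config selected.
client.trigger_dag(dag_id="example_dag")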
Get the health for Airflow metadatabase, scheduler and triggerer.
def get_airflow_health() -> dict[str, Any]: """Get the health for Airflow metadatabase, scheduler and triggerer.""" metadatabase_status = HEALTHY latest_scheduler_heartbeat = None latest_triggerer_heartbeat = None latest_dag_processor_heartbeat = None scheduler_status = UNHEALTHY triggerer_status: str | None = UNHEALTHY dag_processor_status: str | None = UNHEALTHY try: latest_scheduler_job = SchedulerJobRunner.most_recent_job() if latest_scheduler_job: latest_scheduler_heartbeat = latest_scheduler_job.latest_heartbeat.isoformat() if latest_scheduler_job.is_alive(): scheduler_status = HEALTHY except Exception: metadatabase_status = UNHEALTHY try: latest_triggerer_job = TriggererJobRunner.most_recent_job() if latest_triggerer_job: latest_triggerer_heartbeat = latest_triggerer_job.latest_heartbeat.isoformat() if latest_triggerer_job.is_alive(): triggerer_status = HEALTHY else: triggerer_status = None except Exception: metadatabase_status = UNHEALTHY try: latest_dag_processor_job = DagProcessorJobRunner.most_recent_job() if latest_dag_processor_job: latest_dag_processor_heartbeat = latest_dag_processor_job.latest_heartbeat.isoformat() if latest_dag_processor_job.is_alive(): dag_processor_status = HEALTHY else: dag_processor_status = None except Exception: metadatabase_status = UNHEALTHY airflow_health_status = { "metadatabase": {"status": metadatabase_status}, "scheduler": { "status": scheduler_status, "latest_scheduler_heartbeat": latest_scheduler_heartbeat, }, "triggerer": { "status": triggerer_status, "latest_triggerer_heartbeat": latest_triggerer_heartbeat, }, "dag_processor": { "status": dag_processor_status, "latest_dag_processor_heartbeat": latest_dag_processor_heartbeat, }, } return airflow_health_status
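A hedged sketch showing how the payload could back a custom liveness endpoint. The Flask app and route are illustrative, not Airflow's own /health implementation.

from flask import Flask, jsonify

app = Flask(__name__)


@app.route("/custom-health")
def custom_health():
    payload = get_airflow_health()
    # HEALTHY is the same module-level constant the helper above uses for a healthy component.
    code = 200 if payload["metadatabase"]["status"] == HEALTHY else 503
    return jsonify(payload), code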
Delete a DAG by a dag_id. :param dag_id: the dag_id of the DAG to delete :param keep_records_in_log: whether to keep records of the given dag_id in the Log table in the backend database (for reasons like auditing). The default value is True. :param session: session used :return: count of deleted dags
def delete_dag(dag_id: str, keep_records_in_log: bool = True, session: Session = NEW_SESSION) -> int: """ Delete a DAG by a dag_id. :param dag_id: the dag_id of the DAG to delete :param keep_records_in_log: whether keep records of the given dag_id in the Log table in the backend database (for reasons like auditing). The default value is True. :param session: session used :return count of deleted dags """ log.info("Deleting DAG: %s", dag_id) running_tis = session.scalar( select(models.TaskInstance.state) .where(models.TaskInstance.dag_id == dag_id) .where(models.TaskInstance.state == TaskInstanceState.RUNNING) .limit(1) ) if running_tis: raise AirflowException("TaskInstances still running") dag = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id).limit(1)) if dag is None: raise DagNotFound(f"Dag id {dag_id} not found") # deleting a DAG should also delete all of its subdags dags_to_delete_query = session.execute( select(DagModel.dag_id).where( or_( DagModel.dag_id == dag_id, and_(DagModel.dag_id.like(f"{dag_id}.%"), DagModel.is_subdag), ) ) ) dags_to_delete = [dag_id for (dag_id,) in dags_to_delete_query] # Scheduler removes DAGs without files from serialized_dag table every dag_dir_list_interval. # There may be a lag, so explicitly removes serialized DAG here. if SerializedDagModel.has_dag(dag_id=dag_id, session=session): SerializedDagModel.remove_dag(dag_id=dag_id, session=session) count = 0 for model in get_sqla_model_classes(): if hasattr(model, "dag_id") and (not keep_records_in_log or model.__name__ != "Log"): count += session.execute( delete(model) .where(model.dag_id.in_(dags_to_delete)) .execution_options(synchronize_session="fetch") ).rowcount if dag.is_subdag: parent_dag_id, task_id = dag_id.rsplit(".", 1) for model in TaskFail, models.TaskInstance: count += session.execute( delete(model).where(model.dag_id == parent_dag_id, model.task_id == task_id) ).rowcount # Delete entries in Import Errors table for a deleted DAG # This handles the case when the dag_id is changed in the file session.execute( delete(ParseImportError) .where(ParseImportError.filename == dag.fileloc) .execution_options(synchronize_session="fetch") ) return count
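A minimal usage sketch, assuming an initialized Airflow metadatabase; the dag id is a placeholder.

from airflow.utils.session import create_session

with create_session() as session:
    deleted_rows = delete_dag("obsolete_dag", keep_records_in_log=True, session=session)
print(f"Deleted {deleted_rows} rows for the DAG and its subdags.")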
Infers from data intervals which DAG runs need to be created and does so. :param dag: The DAG to create runs for. :param infos: List of logical dates and data intervals to evaluate. :param state: The state to set the dag run to :param run_type: The run type, used as the prefix when constructing the dag run id: ``{run_id_prefix}__{execution_date}``. :return: Newly created and existing dag runs for the execution dates supplied.
def _create_dagruns( dag: DAG, infos: Iterable[_DagRunInfo], state: DagRunState, run_type: DagRunType, ) -> Iterable[DagRun]: """Infers from data intervals which DAG runs need to be created and does so. :param dag: The DAG to create runs for. :param infos: List of logical dates and data intervals to evaluate. :param state: The state to set the dag run to :param run_type: The prefix will be used to construct dag run id: ``{run_id_prefix}__{execution_date}``. :return: Newly created and existing dag runs for the execution dates supplied. """ # Find out existing DAG runs that we don't need to create. dag_runs = { run.logical_date: run for run in DagRun.find(dag_id=dag.dag_id, execution_date=[info.logical_date for info in infos]) } for info in infos: if info.logical_date not in dag_runs: dag_runs[info.logical_date] = dag.create_dagrun( execution_date=info.logical_date, data_interval=info.data_interval, start_date=timezone.utcnow(), external_trigger=False, state=state, run_type=run_type, ) return dag_runs.values()
Set the state of a task instance and, if needed, its relatives. Can set state for future tasks (calculated from run_id) and retroactively for past tasks. Will verify integrity of past dag runs in order to create tasks that did not exist. It will not create dag runs that are missing on the schedule (but it will create subdag dag runs if needed). :param tasks: the iterable of tasks or (task, map_index) tuples from which to work. ``task.dag`` needs to be set :param run_id: the run_id of the dagrun to start looking from :param execution_date: the execution date from which to start looking (deprecated) :param upstream: Mark all parents (upstream tasks) :param downstream: Mark all descendants (downstream tasks) of task_id, including SubDags :param future: Mark all future tasks on the interval of the dag up until last execution date. :param past: Retroactively mark all tasks starting from start_date of the DAG :param state: State to which the tasks need to be set :param commit: Commit tasks to be altered to the database :param session: database session :return: list of tasks that have been created and updated
def set_state( *, tasks: Collection[Operator | tuple[Operator, int]], run_id: str | None = None, execution_date: datetime | None = None, upstream: bool = False, downstream: bool = False, future: bool = False, past: bool = False, state: TaskInstanceState = TaskInstanceState.SUCCESS, commit: bool = False, session: SASession = NEW_SESSION, ) -> list[TaskInstance]: """ Set the state of a task instance and if needed its relatives. Can set state for future tasks (calculated from run_id) and retroactively for past tasks. Will verify integrity of past dag runs in order to create tasks that did not exist. It will not create dag runs that are missing on the schedule (but it will, as for subdag, dag runs if needed). :param tasks: the iterable of tasks or (task, map_index) tuples from which to work. ``task.dag`` needs to be set :param run_id: the run_id of the dagrun to start looking from :param execution_date: the execution date from which to start looking (deprecated) :param upstream: Mark all parents (upstream tasks) :param downstream: Mark all siblings (downstream tasks) of task_id, including SubDags :param future: Mark all future tasks on the interval of the dag up until last execution date. :param past: Retroactively mark all tasks starting from start_date of the DAG :param state: State to which the tasks need to be set :param commit: Commit tasks to be altered to the database :param session: database session :return: list of tasks that have been created and updated """ if not tasks: return [] if not exactly_one(execution_date, run_id): raise ValueError("Exactly one of dag_run_id and execution_date must be set") if execution_date and not timezone.is_localized(execution_date): raise ValueError(f"Received non-localized date {execution_date}") task_dags = {task[0].dag if isinstance(task, tuple) else task.dag for task in tasks} if len(task_dags) > 1: raise ValueError(f"Received tasks from multiple DAGs: {task_dags}") dag = next(iter(task_dags)) if dag is None: raise ValueError("Received tasks with no DAG") if execution_date: run_id = dag.get_dagrun(execution_date=execution_date, session=session).run_id if not run_id: raise ValueError("Received tasks with no run_id") dag_run_ids = get_run_ids(dag, run_id, future, past, session=session) task_id_map_index_list = list(find_task_relatives(tasks, downstream, upstream)) task_ids = [task_id if isinstance(task_id, str) else task_id[0] for task_id in task_id_map_index_list] confirmed_infos = list(_iter_existing_dag_run_infos(dag, dag_run_ids, session=session)) confirmed_dates = [info.logical_date for info in confirmed_infos] sub_dag_run_ids = list( _iter_subdag_run_ids(dag, session, DagRunState(state), task_ids, commit, confirmed_infos), ) # now look for the task instances that are affected qry_dag = get_all_dag_task_query(dag, session, state, task_id_map_index_list, dag_run_ids) if commit: tis_altered = session.scalars(qry_dag.with_for_update()).all() if sub_dag_run_ids: qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates) tis_altered += session.scalars(qry_sub_dag.with_for_update()).all() for task_instance in tis_altered: # The try_number was decremented when setting to up_for_reschedule and deferred. 
# Increment it back when changing the state again if task_instance.state in (TaskInstanceState.DEFERRED, TaskInstanceState.UP_FOR_RESCHEDULE): task_instance._try_number += 1 task_instance.set_state(state, session=session) session.flush() else: tis_altered = session.scalars(qry_dag).all() if sub_dag_run_ids: qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates) tis_altered += session.scalars(qry_sub_dag).all() return tis_altered
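A hedged usage sketch: mark one task and everything downstream of it successful for a single run. The dag id, task id and run id are placeholders; the DAG is loaded from a DagBag and a session is supplied explicitly.

from airflow.models import DagBag
from airflow.utils.session import create_session
from airflow.utils.state import TaskInstanceState

dag = DagBag().get_dag("example_dag")  # placeholder dag id

with create_session() as session:
    altered = set_state(
        tasks=[dag.get_task("extract")],  # placeholder task id
        run_id="scheduled__2024-01-01T00:00:00+00:00",  # placeholder run id
        downstream=True,
        state=TaskInstanceState.SUCCESS,
        commit=True,
        session=session,
    )
print(f"Updated {len(altered)} task instances.")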
Get *all* tasks of the sub dags.
def all_subdag_tasks_query( sub_dag_run_ids: list[str], session: SASession, state: TaskInstanceState, confirmed_dates: Iterable[datetime], ): """Get *all* tasks of the sub dags.""" qry_sub_dag = ( select(TaskInstance) .where(TaskInstance.dag_id.in_(sub_dag_run_ids), TaskInstance.execution_date.in_(confirmed_dates)) .where(or_(TaskInstance.state.is_(None), TaskInstance.state != state)) ) return qry_sub_dag
Get all tasks of the main dag that will be affected by a state change.
def get_all_dag_task_query( dag: DAG, session: SASession, state: TaskInstanceState, task_ids: list[str | tuple[str, int]], run_ids: Iterable[str], ): """Get all tasks of the main dag that will be affected by a state change.""" qry_dag = select(TaskInstance).where( TaskInstance.dag_id == dag.dag_id, TaskInstance.run_id.in_(run_ids), TaskInstance.ti_selector_condition(task_ids), ) qry_dag = qry_dag.where(or_(TaskInstance.state.is_(None), TaskInstance.state != state)).options( lazyload(TaskInstance.dag_run) ) return qry_dag
Go through subdag operators and create dag runs. We only work within the scope of the subdag. A subdag does not propagate state changes to its parent DAG, but a parent does propagate them to its subdags.
def _iter_subdag_run_ids( dag: DAG, session: SASession, state: DagRunState, task_ids: list[str], commit: bool, confirmed_infos: Iterable[_DagRunInfo], ) -> Iterator[str]: """Go through subdag operators and create dag runs. We only work within the scope of the subdag. A subdag does not propagate to its parent DAG, but parent propagates to subdags. """ dags = [dag] while dags: current_dag = dags.pop() for task_id in task_ids: if not current_dag.has_task(task_id): continue current_task = current_dag.get_task(task_id) if isinstance(current_task, SubDagOperator) or current_task.task_type == "SubDagOperator": # this works as a kind of integrity check # it creates missing dag runs for subdag operators, # maybe this should be moved to dagrun.verify_integrity if TYPE_CHECKING: assert current_task.subdag dag_runs = _create_dagruns( current_task.subdag, infos=confirmed_infos, state=DagRunState.RUNNING, run_type=DagRunType.BACKFILL_JOB, ) verify_dagruns(dag_runs, commit, state, session, current_task) dags.append(current_task.subdag) yield current_task.subdag.dag_id
Verify integrity of dag_runs. :param dag_runs: dag runs to verify :param commit: whether dag runs state should be updated :param state: state of the dag_run to set if commit is True :param session: session to use :param current_task: current task
def verify_dagruns( dag_runs: Iterable[DagRun], commit: bool, state: DagRunState, session: SASession, current_task: Operator, ): """Verify integrity of dag_runs. :param dag_runs: dag runs to verify :param commit: whether dag runs state should be updated :param state: state of the dag_run to set if commit is True :param session: session to use :param current_task: current task """ for dag_run in dag_runs: dag_run.dag = current_task.subdag dag_run.verify_integrity() if commit: dag_run.state = state session.merge(dag_run)
Yield task ids and optionally ancestor and descendant ids.
def find_task_relatives(tasks, downstream, upstream): """Yield task ids and optionally ancestor and descendant ids.""" for item in tasks: if isinstance(item, tuple): task, map_index = item yield task.task_id, map_index else: task = item yield task.task_id if downstream: for relative in task.get_flat_relatives(upstream=False): yield relative.task_id if upstream: for relative in task.get_flat_relatives(upstream=True): yield relative.task_id
Return DAG execution dates.
def get_execution_dates(
    dag: DAG, execution_date: datetime, future: bool, past: bool, *, session: SASession = NEW_SESSION
) -> list[datetime]:
    """Return DAG execution dates."""
    latest_execution_date = dag.get_latest_execution_date(session=session)
    if latest_execution_date is None:
        raise ValueError(f"No DAG runs found for DAG {dag.dag_id}; cannot resolve execution dates")
    execution_date = timezone.coerce_datetime(execution_date)
    # determine date range of dag runs and tasks to consider
    end_date = latest_execution_date if future else execution_date
    if dag.start_date:
        start_date = dag.start_date
    else:
        start_date = execution_date
    start_date = execution_date if not past else start_date
    if not dag.timetable.can_be_scheduled:
        # If the DAG never schedules, need to look at existing DagRun if the user wants future or
        # past runs.
        dag_runs = dag.get_dagruns_between(start_date=start_date, end_date=end_date)
        dates = sorted({d.execution_date for d in dag_runs})
    elif not dag.timetable.periodic:
        dates = [start_date]
    else:
        dates = [
            info.logical_date for info in dag.iter_dagrun_infos_between(start_date, end_date, align=False)
        ]
    return dates
Return DAG executions' run_ids.
def get_run_ids(dag: DAG, run_id: str, future: bool, past: bool, session: SASession = NEW_SESSION): """Return DAG executions' run_ids.""" last_dagrun = dag.get_last_dagrun(include_externally_triggered=True, session=session) current_dagrun = dag.get_dagrun(run_id=run_id, session=session) first_dagrun = session.scalar( select(DagRun).filter(DagRun.dag_id == dag.dag_id).order_by(DagRun.execution_date.asc()).limit(1) ) if last_dagrun is None: raise ValueError(f"DagRun for {dag.dag_id} not found") # determine run_id range of dag runs and tasks to consider end_date = last_dagrun.logical_date if future else current_dagrun.logical_date start_date = current_dagrun.logical_date if not past else first_dagrun.logical_date if not dag.timetable.can_be_scheduled: # If the DAG never schedules, need to look at existing DagRun if the user wants future or # past runs. dag_runs = dag.get_dagruns_between(start_date=start_date, end_date=end_date, session=session) run_ids = sorted({d.run_id for d in dag_runs}) elif not dag.timetable.periodic: run_ids = [run_id] else: dates = [ info.logical_date for info in dag.iter_dagrun_infos_between(start_date, end_date, align=False) ] run_ids = [dr.run_id for dr in DagRun.find(dag_id=dag.dag_id, execution_date=dates, session=session)] return run_ids
Set dag run state in the DB. :param dag_id: dag_id of target dag run :param run_id: run id of target dag run :param state: target state :param session: database session
def _set_dag_run_state(dag_id: str, run_id: str, state: DagRunState, session: SASession): """ Set dag run state in the DB. :param dag_id: dag_id of target dag run :param run_id: run id of target dag run :param state: target state :param session: database session """ dag_run = session.execute( select(DagRun).where(DagRun.dag_id == dag_id, DagRun.run_id == run_id) ).scalar_one() dag_run.state = state session.merge(dag_run)
Set the dag run's state to success. Sets the dag run for a specific execution date, together with its task instances, to success. :param dag: the DAG of which to alter state :param execution_date: the execution date from which to start looking (deprecated) :param run_id: the run_id to start looking from :param commit: commit DAG and tasks to be altered to the database :param session: database session :return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated :raises: ValueError if dag or execution_date is invalid
def set_dag_run_state_to_success( *, dag: DAG, execution_date: datetime | None = None, run_id: str | None = None, commit: bool = False, session: SASession = NEW_SESSION, ) -> list[TaskInstance]: """ Set the dag run's state to success. Set for a specific execution date and its task instances to success. :param dag: the DAG of which to alter state :param execution_date: the execution date from which to start looking(deprecated) :param run_id: the run_id to start looking from :param commit: commit DAG and tasks to be altered to the database :param session: database session :return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated :raises: ValueError if dag or execution_date is invalid """ if not exactly_one(execution_date, run_id): return [] if not dag: return [] if execution_date: if not timezone.is_localized(execution_date): raise ValueError(f"Received non-localized date {execution_date}") dag_run = dag.get_dagrun(execution_date=execution_date) if not dag_run: raise ValueError(f"DagRun with execution_date: {execution_date} not found") run_id = dag_run.run_id if not run_id: raise ValueError(f"Invalid dag_run_id: {run_id}") # Mark the dag run to success. if commit: _set_dag_run_state(dag.dag_id, run_id, DagRunState.SUCCESS, session) # Mark all task instances of the dag run to success. for task in dag.tasks: task.dag = dag return set_state( tasks=dag.tasks, run_id=run_id, state=TaskInstanceState.SUCCESS, commit=commit, session=session, )
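An illustrative call (the dag id and run id are placeholders); with commit=True the run and its task instances are updated in place.

from airflow.models import DagBag
from airflow.utils.session import create_session

dag = DagBag().get_dag("example_dag")  # placeholder dag id

with create_session() as session:
    updated = set_dag_run_state_to_success(
        dag=dag,
        run_id="manual__2024-01-01T00:00:00+00:00",  # placeholder run id
        commit=True,
        session=session,
    )
print(f"Marked {len(updated)} task instances successful.")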
Set the dag run's state to failed. Sets the dag run for a specific execution date, together with its task instances, to failed. :param dag: the DAG of which to alter state :param execution_date: the execution date from which to start looking (deprecated) :param run_id: the DAG run_id to start looking from :param commit: commit DAG and tasks to be altered to the database :param session: database session :return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated :raises: ValueError if dag or execution_date is invalid
def set_dag_run_state_to_failed( *, dag: DAG, execution_date: datetime | None = None, run_id: str | None = None, commit: bool = False, session: SASession = NEW_SESSION, ) -> list[TaskInstance]: """ Set the dag run's state to failed. Set for a specific execution date and its task instances to failed. :param dag: the DAG of which to alter state :param execution_date: the execution date from which to start looking(deprecated) :param run_id: the DAG run_id to start looking from :param commit: commit DAG and tasks to be altered to the database :param session: database session :return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated :raises: AssertionError if dag or execution_date is invalid """ if not exactly_one(execution_date, run_id): return [] if not dag: return [] if execution_date: if not timezone.is_localized(execution_date): raise ValueError(f"Received non-localized date {execution_date}") dag_run = dag.get_dagrun(execution_date=execution_date) if not dag_run: raise ValueError(f"DagRun with execution_date: {execution_date} not found") run_id = dag_run.run_id if not run_id: raise ValueError(f"Invalid dag_run_id: {run_id}") # Mark the dag run to failed. if commit: _set_dag_run_state(dag.dag_id, run_id, DagRunState.FAILED, session) running_states = ( TaskInstanceState.RUNNING, TaskInstanceState.DEFERRED, TaskInstanceState.UP_FOR_RESCHEDULE, ) # Mark only RUNNING task instances. task_ids = [task.task_id for task in dag.tasks] tis = session.scalars( select(TaskInstance).where( TaskInstance.dag_id == dag.dag_id, TaskInstance.run_id == run_id, TaskInstance.task_id.in_(task_ids), TaskInstance.state.in_(running_states), ) ) task_ids_of_running_tis = [task_instance.task_id for task_instance in tis] tasks = [] for task in dag.tasks: if task.task_id in task_ids_of_running_tis: task.dag = dag tasks.append(task) # Mark non-finished tasks as SKIPPED. tis = session.scalars( select(TaskInstance).filter( TaskInstance.dag_id == dag.dag_id, TaskInstance.run_id == run_id, TaskInstance.state.not_in(State.finished), TaskInstance.state.not_in(running_states), ) ).all() if commit: for ti in tis: ti.set_state(TaskInstanceState.SKIPPED) return tis + set_state( tasks=tasks, run_id=run_id, state=TaskInstanceState.FAILED, commit=commit, session=session, )
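A mirrored sketch for the failure path (dag id and run id again placeholders); per the logic above, running tasks are marked failed while other unfinished tasks are skipped.

from airflow.models import DagBag
from airflow.utils.session import create_session

dag = DagBag().get_dag("example_dag")  # placeholder dag id

with create_session() as session:
    affected = set_dag_run_state_to_failed(
        dag=dag,
        run_id="manual__2024-01-01T00:00:00+00:00",  # placeholder run id
        commit=True,
        session=session,
    )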
Set the dag run for a specific execution date to running or queued. :param dag: the DAG of which to alter state :param execution_date: the execution date from which to start looking :param run_id: the id of the DagRun :param commit: commit DAG and tasks to be altered to the database :param session: database session :return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated
def __set_dag_run_state_to_running_or_queued( *, new_state: DagRunState, dag: DAG, execution_date: datetime | None = None, run_id: str | None = None, commit: bool = False, session: SASession, ) -> list[TaskInstance]: """ Set the dag run for a specific execution date to running. :param dag: the DAG of which to alter state :param execution_date: the execution date from which to start looking :param run_id: the id of the DagRun :param commit: commit DAG and tasks to be altered to the database :param session: database session :return: If commit is true, list of tasks that have been updated, otherwise list of tasks that will be updated """ res: list[TaskInstance] = [] if not exactly_one(execution_date, run_id): return res if not dag: return res if execution_date: if not timezone.is_localized(execution_date): raise ValueError(f"Received non-localized date {execution_date}") dag_run = dag.get_dagrun(execution_date=execution_date) if not dag_run: raise ValueError(f"DagRun with execution_date: {execution_date} not found") run_id = dag_run.run_id if not run_id: raise ValueError(f"DagRun with run_id: {run_id} not found") # Mark the dag run to running. if commit: _set_dag_run_state(dag.dag_id, run_id, new_state, session) # To keep the return type consistent with the other similar functions. return res