Columns: language (string, 1 distinct value), repo (string, 346 distinct values), path (string, 6 to 201 chars), class_span (dict), source (string, 21 to 2.38M chars), target (string, 1 to 96 chars)

| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python
|
numba__numba
|
numba/tests/test_parallel_backend.py
|
{
"start": 4658,
"end": 5593
}
|
class ____(object):
def __init__(self, method):
self._method = method
def __call__(self, *args, **kwargs):
ctx = multiprocessing.get_context(self._method)
return ctx.Process(*args, **kwargs)
def _get_mp_classes(method):
if method == 'default':
method = None
ctx = multiprocessing.get_context(method)
proc = _proc_class_impl(method)
queue = ctx.Queue
return proc, queue
thread_impl = compile_factory(_thread_class, t_queue.Queue)
spawn_proc_impl = compile_factory(*_get_mp_classes('spawn'))
if not _windows:
fork_proc_impl = compile_factory(*_get_mp_classes('fork'))
forkserver_proc_impl = compile_factory(*_get_mp_classes('forkserver'))
# This duplicates the platform defaults (as on Py27, Linux uses fork and Windows
# uses spawn); it is kept like this so that when tests fail it's less confusing!
default_proc_impl = compile_factory(*_get_mp_classes('default'))
|
_proc_class_impl
|
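The masked numba class above is a small factory: it stores a multiprocessing start method and, when called, builds `Process` objects from the matching context. Below is a minimal standalone sketch of the same pattern, assuming nothing beyond the standard library; `ProcClassImpl` and `_echo` are illustrative names, and the `compile_factory` helper from the original test module is not reproduced here.

```python
import multiprocessing


class ProcClassImpl:
    """Illustrative stand-in for the masked class: a Process factory bound
    to a specific multiprocessing start method ('spawn', 'fork', ...)."""

    def __init__(self, method):
        self._method = method

    def __call__(self, *args, **kwargs):
        ctx = multiprocessing.get_context(self._method)
        return ctx.Process(*args, **kwargs)


def _echo(q, msg):
    # Runs in the child process and reports back through the queue.
    q.put(msg)


if __name__ == "__main__":
    make_proc = ProcClassImpl("spawn")
    queue = multiprocessing.get_context("spawn").Queue()
    proc = make_proc(target=_echo, args=(queue, "started via spawn"))
    proc.start()
    print(queue.get())
    proc.join()
```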
python
|
allegroai__clearml
|
clearml/debugging/log.py
|
{
"start": 12473,
"end": 12551
}
|
class ____(logging.NullHandler, ClearmlLoggerHandler):
pass
|
ClearmlNullHandler
|
python
|
pytorch__pytorch
|
torch/_library/autograd.py
|
{
"start": 261,
"end": 395
}
|
class ____(Protocol):
_backward_fn: Optional[Callable]
_setup_context_fn: Optional[Callable]
@dataclasses.dataclass
|
InfoProtocol
|
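The masked class here is a `typing.Protocol` describing any object that carries the two callables used by the custom-op autograd machinery. As a hedged illustration of how such structural typing is consumed, here is a self-contained sketch; `InfoLike`, `FakeInfo`, and `has_backward` are invented names and not part of `torch._library`.

```python
import dataclasses
from typing import Callable, Optional, Protocol


class InfoLike(Protocol):
    # Any object exposing these two attributes satisfies the protocol;
    # no inheritance from it is required.
    _backward_fn: Optional[Callable]
    _setup_context_fn: Optional[Callable]


@dataclasses.dataclass
class FakeInfo:
    _backward_fn: Optional[Callable] = None
    _setup_context_fn: Optional[Callable] = None


def has_backward(info: InfoLike) -> bool:
    # Type checkers accept FakeInfo here purely by structure.
    return info._backward_fn is not None


print(has_backward(FakeInfo()))                      # False
print(has_backward(FakeInfo(_backward_fn=print)))    # True
```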
python
|
sympy__sympy
|
sympy/stats/rv.py
|
{
"start": 3494,
"end": 4343
}
|
class ____(RandomDomain):
"""
A RandomDomain with an attached condition.
See Also
========
sympy.stats.crv.ConditionalContinuousDomain
sympy.stats.frv.ConditionalFiniteDomain
"""
def __new__(cls, fulldomain, condition):
condition = condition.xreplace({rs: rs.symbol
for rs in random_symbols(condition)})
return Basic.__new__(cls, fulldomain, condition)
@property
def symbols(self):
return self.fulldomain.symbols
@property
def fulldomain(self):
return self.args[0]
@property
def condition(self):
return self.args[1]
@property
def set(self):
raise NotImplementedError("Set of Conditional Domain not Implemented")
def as_boolean(self):
return And(self.fulldomain.as_boolean(), self.condition)
|
ConditionalDomain
|
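A hedged usage sketch: `sympy.stats.where` is one public entry point that returns a conditional domain of the kind defined above (here the finite variant for dice), so the `fulldomain` and `condition` properties can be inspected directly; the symbols `a` and `b` are illustrative.

```python
from sympy import And
from sympy.stats import Die, where

D1, D2 = Die("a", 6), Die("b", 6)
dom = where(And(D1 <= D2, D2 < 3))   # a conditional finite domain

print(dom)              # the outcomes that satisfy the condition
print(dom.fulldomain)   # the unconstrained product domain of both dice
print(dom.condition)    # the attached condition, rewritten over the symbols a and b
```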
python
|
python-openxml__python-docx
|
src/docx/oxml/simpletypes.py
|
{
"start": 4251,
"end": 4295
}
|
class ____(BaseStringType):
pass
|
XsdString
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/elements.py
|
{
"start": 138022,
"end": 140146
}
|
class ____(GroupedElement, ColumnElement[_T]):
"""Represent a grouping within a column expression"""
_traverse_internals: _TraverseInternalsType = [
("element", InternalTraversal.dp_clauseelement),
("type", InternalTraversal.dp_type),
]
_cache_key_traversal = [
("element", InternalTraversal.dp_clauseelement),
]
element: Union[
TextClause, ClauseList, ColumnElement[_T], CompilerColumnElement
]
def __init__(
self,
element: Union[
TextClause, ClauseList, ColumnElement[_T], CompilerColumnElement
],
):
self.element = element
# nulltype assignment issue
self.type = getattr(element, "type", type_api.NULLTYPE) # type: ignore
self._propagate_attrs = element._propagate_attrs
def _with_binary_element_type(self, type_):
return self.__class__(self.element._with_binary_element_type(type_))
def _ungroup(self) -> ColumnElement[_T]:
assert isinstance(self.element, ColumnElement)
return self.element._ungroup()
@util.memoized_property
def _is_implicitly_boolean(self):
return self.element._is_implicitly_boolean
@util.non_memoized_property
def _tq_label(self) -> Optional[str]:
return (
getattr(self.element, "_tq_label", None) or self._anon_name_label
)
@util.non_memoized_property
def _proxies(self) -> List[ColumnElement[Any]]:
if isinstance(self.element, ColumnElement):
return [self.element]
else:
return []
@util.ro_non_memoized_property
def _from_objects(self) -> List[FromClause]:
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {"element": self.element, "type": self.type}
def __setstate__(self, state):
self.element = state["element"]
self.type = state["type"]
if TYPE_CHECKING:
def self_group(
self, against: Optional[OperatorType] = None
) -> Self: ...
|
Grouping
|
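A hedged illustration of where the grouping element above shows up: when a lower-precedence branch (an OR inside an AND) needs parentheses, Core wraps it in this construct and the compiler renders the parentheses. The column names are invented.

```python
from sqlalchemy import and_, column, or_

# The OR branch binds less tightly than AND, so Core wraps it in the grouping
# element above and the compiler emits parentheses around it.
expr = and_(column("a") == 1, or_(column("b") == 2, column("c") == 3))
print(expr)   # a = :a_1 AND (b = :b_1 OR c = :c_1)
```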
python
|
networkx__networkx
|
networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py
|
{
"start": 8144,
"end": 10072
}
|
class ____:
def test_K4(self):
"""Edge flow betweenness centrality: K4"""
G = nx.complete_graph(4)
b = edge_current_flow(G, normalized=True)
b_answer = dict.fromkeys(G.edges(), 0.25)
for (s, t), v1 in b_answer.items():
v2 = b.get((s, t), b.get((t, s)))
assert v1 == pytest.approx(v2, abs=1e-7)
def test_K4_normalized(self):
"""Edge flow betweenness centrality: K4"""
G = nx.complete_graph(4)
b = edge_current_flow(G, normalized=False)
b_answer = dict.fromkeys(G.edges(), 0.75)
for (s, t), v1 in b_answer.items():
v2 = b.get((s, t), b.get((t, s)))
assert v1 == pytest.approx(v2, abs=1e-7)
def test_C4(self):
"""Edge flow betweenness centrality: C4"""
G = nx.cycle_graph(4)
b = edge_current_flow(G, normalized=False)
b_answer = {(0, 1): 1.25, (0, 3): 1.25, (1, 2): 1.25, (2, 3): 1.25}
for (s, t), v1 in b_answer.items():
v2 = b.get((s, t), b.get((t, s)))
assert v1 == pytest.approx(v2, abs=1e-7)
def test_P4(self):
"""Edge betweenness centrality: P4"""
G = nx.path_graph(4)
b = edge_current_flow(G, normalized=False)
b_answer = {(0, 1): 1.5, (1, 2): 2.0, (2, 3): 1.5}
for (s, t), v1 in b_answer.items():
v2 = b.get((s, t), b.get((t, s)))
assert v1 == pytest.approx(v2, abs=1e-7)
@pytest.mark.parametrize(
"centrality_func",
(
nx.current_flow_betweenness_centrality,
nx.edge_current_flow_betweenness_centrality,
nx.approximate_current_flow_betweenness_centrality,
),
)
def test_unconnected_graphs_betweenness_centrality(centrality_func):
G = nx.Graph([(1, 2), (3, 4)])
G.add_node(5)
with pytest.raises(nx.NetworkXError, match="Graph not connected"):
centrality_func(G)
|
TestEdgeFlowBetweennessCentrality
|
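As a hedged companion to the tests above, where `edge_current_flow` appears to be a module-level alias of the public NetworkX function, the K4 check can be reproduced directly (SciPy is required for the current-flow solver):

```python
import networkx as nx

G = nx.complete_graph(4)
b = nx.edge_current_flow_betweenness_centrality(G, normalized=True)
# Every edge of K4 carries the same centrality, 0.25, matching test_K4 above.
for edge, value in sorted(b.items()):
    print(edge, round(value, 6))
```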
python
|
scipy__scipy
|
scipy/signal/tests/test_windows.py
|
{
"start": 3650,
"end": 4825
}
|
class ____:
def test_basic(self, xp):
xp_assert_close(windows.blackmanharris(6, False, xp=xp),
xp.asarray([6.0e-05, 0.055645, 0.520575,
1.0, 0.520575, 0.055645], dtype=xp.float64))
xp_assert_close(windows.blackmanharris(7, sym=False, xp=xp),
xp.asarray([6.0e-05, 0.03339172347815117, 0.332833504298565,
0.8893697722232837, 0.8893697722232838,
0.3328335042985652, 0.03339172347815122],
dtype=xp.float64))
xp_assert_close(windows.blackmanharris(6, xp=xp),
xp.asarray([6.0e-05, 0.1030114893456638, 0.7938335106543362,
0.7938335106543364, 0.1030114893456638, 6.0e-05],
dtype=xp.float64))
xp_assert_close(windows.blackmanharris(7, sym=True, xp=xp),
xp.asarray([6.0e-05, 0.055645, 0.520575, 1.0, 0.520575,
0.055645, 6.0e-05], dtype=xp.float64))
@make_xp_test_case(windows.taylor)
|
TestBlackmanHarris
|
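A hedged note on the `sym` flag exercised above: `sym=True` (the default) returns the symmetric window used for filter design, while `sym=False` returns the periodic variant used for spectral analysis. A plain NumPy-backed call needs neither the test harness nor the `xp=` keyword:

```python
from scipy.signal import windows

print(windows.blackmanharris(6))              # symmetric: starts and ends at 6e-05
print(windows.blackmanharris(6, sym=False))   # periodic: contains the 1.0 peak
```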
python
|
spyder-ide__spyder
|
spyder/api/plugins/new_api.py
|
{
"start": 1412,
"end": 32327
}
|
class ____(QObject, SpyderActionMixin, SpyderConfigurationObserver,
SpyderPluginObserver):
"""
A Spyder plugin to extend functionality without a dockable widget.
If you want to create a plugin that adds a new pane, please use
SpyderDockablePlugin.
"""
# --- API: Mandatory attributes ------------------------------------------
# ------------------------------------------------------------------------
# Name of the plugin that will be used to refer to it.
# This name must be unique and will only be loaded once.
NAME = None
# --- API: Optional attributes ------------------------------------------
# ------------------------------------------------------------------------
# List of required plugin dependencies.
# Example: [Plugins.Plots, Plugins.IPythonConsole, ...].
# These values are defined in the `Plugins` class present in this file.
# If a plugin is using a widget from another plugin, that other plugin
# must be declared as a required dependency.
REQUIRES = []
# List of optional plugin dependencies.
# Example: [Plugins.Plots, Plugins.IPythonConsole, ...].
# These values are defined in the `Plugins` class present in this file.
# A plugin might be performing actions when connecting to other plugins,
# but the main functionality of the plugin does not depend on other
# plugins. For example, the Help plugin might render information from
# the Editor or from the Console or from another source, but it does not
# depend on either of those plugins.
# Methods in the plugin that make use of optional plugins must check
# existence before using those methods or applying signal connections.
OPTIONAL = []
# This must subclass a `PluginMainContainer` for non dockable plugins that
# create a widget, like a status bar widget, a toolbar, a menu, etc.
# For non dockable plugins that do not define widgets of any kind this can
# be `None`, for example a plugin that only exposes a configuration page.
CONTAINER_CLASS = None
# Name of the configuration section that's going to be
# used to record the plugin's permanent data in Spyder
# config system (i.e. in spyder.ini)
CONF_SECTION = None
# Use a separate configuration file for the plugin.
CONF_FILE = True
# Define configuration defaults if using a separate file.
# List of tuples, with the first item in the tuple being the section
# name and the second item being the default options dictionary.
#
# CONF_DEFAULTS_EXAMPLE = [
# ('section-name', {'option-1': 'some-value',
# 'option-2': True,}),
# ('another-section-name', {'option-3': 'some-other-value',
# 'option-4': [1, 2, 3],}),
# ]
CONF_DEFAULTS = None
# Define configuration version if using a separate file
#
# IMPORTANT NOTES:
# 1. If you want to *change* the default value of a current option, you
# need to do a MINOR update in config version, e.g. from 3.0.0 to 3.1.0
# 2. If you want to *remove* options that are no longer needed or if you
# want to *rename* options, then you need to do a MAJOR update in
# version, e.g. from 3.0.0 to 4.0.0
# 3. You don't need to touch this value if you're just adding a new option
CONF_VERSION = None
# Widget to be used as entry in Spyder Preferences dialog.
CONF_WIDGET_CLASS = None
# Some plugins may add configuration options for other plugins.
# Example:
# ADDITIONAL_CONF_OPTIONS = {'section': <new value to add>}
ADDITIONAL_CONF_OPTIONS = None
# Define additional configurable options (via a tab) to
# another's plugin configuration page. All configuration tabs should
# inherit from `SpyderPreferencesTab`.
# Example:
# ADDITIONAL_CONF_TABS = {'plugin_name': [<SpyderPreferencesTab classes>]}
ADDITIONAL_CONF_TABS = None
# Define custom layout classes that the plugin wants to be registered.
# The custom classes should extend from
# `spyder.plugins.layout.api::BaseGridLayoutType`
CUSTOM_LAYOUTS = []
# Path for images relative to the plugin path
# A Python package can include one or several Spyder plugins. In this case
# the package may be using images from a global folder outside the plugin
# folder
IMG_PATH = None
# Control the font size relative to the global fonts defined in Spyder
MONOSPACE_FONT_SIZE_DELTA = 0
INTERFACE_FONT_SIZE_DELTA = 0
# Define context to store actions, toolbars, toolbuttons and menus.
CONTEXT_NAME = None
# Define if a plugin can be disabled in preferences.
# If False, the plugin is considered "core" and therefore it cannot be
# disabled. Default: True
CAN_BE_DISABLED = True
# Qt Web Widgets may be a heavy dependency for many packagers
# (e.g. conda-forge)
# We thus ask plugins to declare whether or not they need
# web widgets to enhance the distribution of Spyder to users
# https://github.com/spyder-ide/spyder/pull/22196#issuecomment-2189377043
REQUIRE_WEB_WIDGETS = False
# --- API: Signals -------------------------------------------------------
# ------------------------------------------------------------------------
# Signals here are automatically connected by the Spyder main window and
# connected to the respective global actions defined on it.
sig_free_memory_requested = Signal()
"""
This signal can be emitted to request the main application to garbage
collect deleted objects.
"""
sig_plugin_ready = Signal()
"""
This signal can be emitted to reflect that the plugin was initialized.
"""
sig_quit_requested = Signal()
"""
This signal can be emitted to request the main application to quit.
"""
sig_restart_requested = Signal()
"""
This signal can be emitted to request the main application to restart.
"""
sig_status_message_requested = Signal(str, int)
"""
This signal can be emitted to request the main application to display a
message in the status bar.
Parameters
----------
message: str
The actual message to display.
timeout: int
The timeout before the message disappears.
"""
sig_redirect_stdio_requested = Signal(bool)
"""
This signal can be emitted to request the main application to redirect
standard output/error when using Open/Save/Browse dialogs within widgets.
Parameters
----------
enable: bool
Enable/Disable standard input/output redirection.
"""
sig_exception_occurred = Signal(dict)
"""
This signal can be emitted to report an exception from any plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
.. code-block:: python
error_data = {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
This signal is automatically connected to the main container/widget.
"""
sig_mainwindow_resized = Signal("QResizeEvent")
"""
This signal is emitted when the main window is resized.
Parameters
----------
resize_event: QResizeEvent
The event triggered on main window resize.
Notes
-----
To be used by plugins tracking main window size changes.
"""
sig_mainwindow_moved = Signal("QMoveEvent")
"""
This signal is emitted when the main window is moved.
Parameters
----------
move_event: QMoveEvent
The event triggered on main window move.
Notes
-----
To be used by plugins tracking main window position changes.
"""
sig_unmaximize_plugin_requested = Signal((), (object,))
"""
This signal is emitted to inform the main window that it needs to
unmaximize the currently maximized plugin, if any.
Parameters
----------
plugin_instance: SpyderDockablePlugin
Unmaximize plugin only if it is not `plugin_instance`.
"""
sig_mainwindow_state_changed = Signal(object)
"""
This signal is emitted when the main window state has changed (for
instance, between maximized and minimized states).
Parameters
----------
window_state: Qt.WindowStates
The window state.
"""
sig_focused_plugin_changed = Signal(object)
"""
This signal is emitted when the plugin with keyboard focus changes.
Parameters
----------
plugin: Optional[SpyderDockablePlugin]
The plugin that currently has keyboard focus, or None if no dockable
plugin has focus.
"""
# ---- Private attributes
# -------------------------------------------------------------------------
# Define configuration name map for plugin to split configuration
# among several files. See spyder/config/main.py
_CONF_NAME_MAP = None
def __init__(self, parent, configuration=None):
super().__init__(parent)
# This is required since the MRO of this class does not reach
# SpyderPluginObserver and SpyderConfigurationObserver when using
# super(), see https://fuhm.net/super-harmful/
SpyderPluginObserver.__init__(self)
SpyderConfigurationObserver.__init__(self)
self._main = parent
self._widget = None
self._conf = configuration
self._plugin_path = os.path.dirname(inspect.getfile(self.__class__))
self._container = None
self._added_toolbars = OrderedDict()
self._actions = {}
self.is_compatible = None
self.is_registered = None
self.main: QMainWindow = parent
# Attribute used to access the action, toolbar, toolbutton and menu
# registries
self.PLUGIN_NAME = self.NAME
if self.CONTAINER_CLASS is not None:
self._container = container = self.CONTAINER_CLASS(
name=self.NAME,
plugin=self,
parent=parent
)
if hasattr(container, '_setup'):
container._setup()
if isinstance(container, SpyderWidgetMixin):
container.setup()
container.update_actions()
# Default signals to connect in main container or main widget.
container.sig_free_memory_requested.connect(
self.sig_free_memory_requested)
container.sig_quit_requested.connect(self.sig_quit_requested)
container.sig_restart_requested.connect(self.sig_restart_requested)
container.sig_redirect_stdio_requested.connect(
self.sig_redirect_stdio_requested)
container.sig_exception_occurred.connect(
self.sig_exception_occurred)
container.sig_unmaximize_plugin_requested.connect(
self.sig_unmaximize_plugin_requested)
self.after_container_creation()
# Load the custom images of the plugin
if self.IMG_PATH:
plugin_path = osp.join(self.get_path(), self.IMG_PATH)
IMAGE_PATH_MANAGER.add_image_path(plugin_path)
# ---- Private methods
# -------------------------------------------------------------------------
def _register(self, omit_conf=False):
"""
Setup and register plugin in Spyder's main window and connect it to
other plugins.
"""
# Checks
# --------------------------------------------------------------------
if self.NAME is None:
raise SpyderAPIError('A Spyder Plugin must define a `NAME`!')
# Setup configuration
# --------------------------------------------------------------------
if self._conf is not None and not omit_conf:
self._conf.register_plugin(self)
# Signals
# --------------------------------------------------------------------
self.is_registered = True
self.update_font()
def _unregister(self):
"""
Disconnect signals and clean up the plugin to be able to stop it while
Spyder is running.
"""
if self._conf is not None:
self._conf.unregister_plugin(self)
self._container = None
self.is_compatible = None
self.is_registered = False
# ---- API: available methods
# -------------------------------------------------------------------------
def get_path(self):
"""
Return the plugin's system path.
"""
return self._plugin_path
def get_container(self):
"""
Return the plugin main container.
"""
return self._container
def get_configuration(self):
"""
Return the Spyder configuration object.
"""
return self._conf
def get_main(self):
"""
Return the Spyder main window.
"""
return self._main
def get_plugin(self, plugin_name, error=True) -> SpyderPluginV2:
"""
Get a plugin instance by providing its name.
Parameters
----------
plugin_name: str
Name of the plugin from which its instance will be returned.
error: bool
Whether to raise errors when trying to return the plugin's
instance.
"""
# Ensure that this plugin has the plugin corresponding to
# `plugin_name` listed as required or optional.
requires = set(self.REQUIRES or [])
optional = set(self.OPTIONAL or [])
full_set = requires | optional
if plugin_name in full_set or Plugins.All in full_set:
try:
return self._main.get_plugin(plugin_name, error=error)
except SpyderAPIError as e:
if plugin_name in optional:
return None
else:
raise e
else:
raise SpyderAPIError(
'Plugin "{}" not part of REQUIRES or '
'OPTIONAL requirements!'.format(plugin_name)
)
def is_plugin_enabled(self, plugin_name):
"""Determine if a given plugin is going to be loaded."""
return self._main.is_plugin_enabled(plugin_name)
def is_plugin_available(self, plugin_name):
"""Determine if a given plugin is available."""
return self._main.is_plugin_available(plugin_name)
def get_dockable_plugins(self):
"""
Return a list of the required plugin instances.
Only required plugins that extend SpyderDockablePlugin are returned.
"""
requires = set(self.REQUIRES or [])
dockable_plugins_required = []
for name, plugin_instance in self._main.get_dockable_plugins():
if (name in requires or Plugins.All in requires) and isinstance(
plugin_instance, SpyderDockablePlugin
):
dockable_plugins_required.append(plugin_instance)
return dockable_plugins_required
def get_conf(self, option, default=NoDefault, section=None, secure=False):
"""
Get an option from Spyder configuration system.
Parameters
----------
option: str
Name of the option to get its value from.
default: bool, int, str, tuple, list, dict, NoDefault
Value to get from the configuration system, passed as a
Python object.
section: str
Section in the configuration system, e.g. `shortcuts`.
secure: bool
If True, the option will be retrieved securely using the `keyring`
Python package.
Returns
-------
bool, int, str, tuple, list, dict
Value associated with `option`.
"""
if self._conf is not None:
section = self.CONF_SECTION if section is None else section
if section is None:
raise SpyderAPIError(
'A spyder plugin must define a `CONF_SECTION` class '
'attribute!'
)
return self._conf.get(section, option, default, secure=secure)
@Slot(str, object)
@Slot(str, object, str)
def set_conf(
self,
option,
value,
section=None,
recursive_notification=True,
secure=False,
):
"""
Set an option in Spyder configuration system.
Parameters
----------
option: str
Name of the option (e.g. 'case_sensitive')
value: bool, int, str, tuple, list, dict
Value to save in the configuration system, passed as a
Python object.
section: str
Section in the configuration system, e.g. `shortcuts`.
recursive_notification: bool
If True, all objects that observe all changes on the
configuration section and objects that observe partial tuple paths
are notified. For example if the option `opt` of section `sec`
changes, then the observers for section `sec` are notified.
Likewise, if the option `(a, b, c)` changes, then observers for
`(a, b, c)`, `(a, b)` and `a` are notified as well.
secure: bool
If True, the option will be saved securely using the `keyring`
Python package.
"""
if self._conf is not None:
section = self.CONF_SECTION if section is None else section
if section is None:
raise SpyderAPIError(
'A spyder plugin must define a `CONF_SECTION` class '
'attribute!'
)
self._conf.set(
section,
option,
value,
recursive_notification=recursive_notification,
secure=secure,
)
self.apply_conf({option}, False)
def remove_conf(self, option, section=None, secure=False):
"""
Delete an option in the Spyder configuration system.
Parameters
----------
option: Union[str, Tuple[str, ...]]
Name of the option, either a string or a tuple of strings.
section: str
Section in the configuration system.
secure: bool
If True, the option will be removed securely using the `keyring`
Python package.
"""
if self._conf is not None:
section = self.CONF_SECTION if section is None else section
if section is None:
raise SpyderAPIError(
'A spyder plugin must define a `CONF_SECTION` class '
'attribute!'
)
self._conf.remove_option(section, option, secure=secure)
self.apply_conf({option}, False)
def apply_conf(self, options_set, notify=True):
"""
Apply `options_set` to this plugin's widget.
"""
if self._conf is not None and options_set:
if notify:
self.after_configuration_update(list(options_set))
def disable_conf(self, option, section=None):
"""
Disable notifications for an option in the Spyder configuration system.
Parameters
----------
option: Union[str, Tuple[str, ...]]
Name of the option, either a string or a tuple of strings.
section: str
Section in the configuration system.
"""
if self._conf is not None:
section = self.CONF_SECTION if section is None else section
if section is None:
raise SpyderAPIError(
'A spyder plugin must define a `CONF_SECTION` class '
'attribute!'
)
self._conf.disable_notifications(section, option)
def restore_conf(self, option, section=None):
"""
Restore notifications for an option in the Spyder configuration system.
Parameters
----------
option: Union[str, Tuple[str, ...]]
Name of the option, either a string or a tuple of strings.
section: str
Section in the configuration system.
"""
if self._conf is not None:
section = self.CONF_SECTION if section is None else section
if section is None:
raise SpyderAPIError(
'A spyder plugin must define a `CONF_SECTION` class '
'attribute!'
)
self._conf.restore_notifications(section, option)
@Slot(str)
@Slot(str, int)
def show_status_message(self, message, timeout=0):
"""
Show message in status bar.
Parameters
----------
message: str
Message to display in the status bar.
timeout: int
Amount of time to display the message.
"""
self.sig_status_message_requested.emit(message, timeout)
def before_long_process(self, message):
"""
Show a message in main window's status bar and change the mouse
pointer to Qt.WaitCursor when starting a long process.
Parameters
----------
message: str
Message to show in the status bar when the long process starts.
"""
if message:
self.show_status_message(message)
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
def after_long_process(self, message=""):
"""
Clear main window's status bar after a long process and restore
mouse pointer to the OS default.
Parameters
----------
message: str
Message to show in the status bar when the long process finishes.
"""
QApplication.restoreOverrideCursor()
self.show_status_message(message, timeout=2000)
QApplication.processEvents()
def get_color_scheme(self):
"""
Get the current color scheme.
Returns
-------
dict
Dictionary with properties and colors of the color scheme
used in the Editor.
Notes
-----
This is useful to set the color scheme of all instances of
CodeEditor used by the plugin.
"""
if self._conf is not None:
return get_color_scheme(self._conf.get('appearance', 'selected'))
def initialize(self):
"""
Initialize a plugin instance.
Notes
-----
This method should be called to initialize the plugin, but it should
not be overridden, since it internally calls `on_initialize` and emits
the `sig_plugin_ready` signal.
"""
self.on_initialize()
self.sig_plugin_ready.emit()
@staticmethod
def create_icon(name):
"""
Provide icons from the theme and icon manager.
"""
return ima.icon(name)
@classmethod
def get_font(cls, font_type):
"""
Return one of font types used in Spyder.
Parameters
----------
font_type: str
There are three types of font types in Spyder:
SpyderFontType.Monospace, used in the Editor, IPython console,
and History; SpyderFontType.Interface, used by the entire Spyder
app; and SpyderFontType.MonospaceInterface, used by the Variable
Explorer, Find, Debugger and others.
Returns
-------
QFont
QFont object to be passed to other Qt widgets.
Notes
-----
All plugins in Spyder use the same global fonts. In case a plugin
wants to use a delta font size based on the default one, they can set
the MONOSPACE_FONT_SIZE_DELTA or INTERFACE_FONT_SIZE_DELTA class
constants.
"""
if font_type == SpyderFontType.Monospace:
font_size_delta = cls.MONOSPACE_FONT_SIZE_DELTA
elif font_type in [SpyderFontType.Interface,
SpyderFontType.MonospaceInterface]:
font_size_delta = cls.INTERFACE_FONT_SIZE_DELTA
else:
raise SpyderAPIError("Unrecognized font type")
return get_font(option=font_type, font_size_delta=font_size_delta)
def get_command_line_options(self):
"""
Get command line options passed by the user when they started
Spyder in a system terminal.
See app/cli_options.py for the option names.
"""
if self._main is not None:
return self._main._cli_options
else:
# This is necessary when the plugin has no parent.
sys_argv = [sys.argv[0]] # Avoid options passed to pytest
return get_options(sys_argv)[0]
# ---- API: Mandatory methods to define
# -------------------------------------------------------------------------
@staticmethod
def get_name():
"""
Return the plugin localized name.
Returns
-------
str
Localized name of the plugin.
Notes
-----
This method needs to be decorated with `staticmethod`.
"""
raise NotImplementedError('A plugin name must be defined!')
@staticmethod
def get_description():
"""
Return the plugin localized description.
Returns
-------
str
Localized description of the plugin.
Notes
-----
This method needs to be decorated with `staticmethod`.
"""
raise NotImplementedError('A plugin description must be defined!')
@classmethod
def get_icon(cls):
"""
Return the plugin associated icon.
Returns
-------
QIcon
QIcon instance
Notes
-----
This method needs to be decorated with `classmethod` or `staticmethod`.
"""
raise NotImplementedError('A plugin icon must be defined!')
def on_initialize(self):
"""
Setup the plugin.
Notes
-----
All calls performed on this method should not call other plugins.
"""
if hasattr(self, 'register'):
raise SpyderAPIError(
'register was replaced by on_initialize, please check the '
'Spyder 5.1.0 migration guide to get more information')
raise NotImplementedError(
f'The plugin {type(self)} is missing an implementation of '
'on_initialize')
# ---- API: Optional methods to override
# -------------------------------------------------------------------------
@staticmethod
def check_compatibility():
"""
This method can be reimplemented to check compatibility of a plugin
with the user's current environment.
Returns
-------
(bool, str)
The first value tells Spyder if the plugin has passed the
compatibility test defined in this method. The second value
is a message that must explain users why the plugin was
found to be incompatible (e.g. 'This plugin does not work
with PyQt4'). It will be shown at startup in a QMessageBox.
"""
valid = True
message = '' # Note: Remember to use _('') to localize the string
return valid, message
def on_first_registration(self):
"""
Actions to be performed the first time the plugin is started.
It can also be used to perform actions that are needed only the
first time this is loaded after installation.
This method is called after the main window is visible.
"""
pass
def before_mainwindow_visible(self):
"""
Actions to be performed after setup but before the main window has
been shown.
"""
pass
def on_mainwindow_visible(self):
"""
Actions to be performed after the main window has been shown.
"""
pass
def on_close(self, cancelable=False):
"""
Perform actions before the plugin is closed.
This method **must** only operate on local attributes and not other
plugins.
"""
if hasattr(self, 'unregister'):
warnings.warn('The unregister method was deprecated and it '
'was replaced by `on_close`. Please see the '
'Spyder 5.2.0 migration guide to get more '
'information.')
def can_close(self) -> bool:
"""
Determine if a plugin can be closed.
Returns
-------
close: bool
True if the plugin can be closed, False otherwise.
"""
return True
def update_font(self):
"""
This must be reimplemented by plugins that need to adjust their fonts.
The following plugins illustrate the usage of this method:
* spyder/plugins/help/plugin.py
* spyder/plugins/onlinehelp/plugin.py
"""
pass
def update_style(self):
"""
This must be reimplemented by plugins that need to adjust their style.
Changing from the dark to the light interface theme might
require specific styles or stylesheets to be applied. When
the theme is changed by the user through our Preferences,
this method will be called for all plugins.
"""
pass
def after_container_creation(self):
"""
Perform necessary operations after the container is created.
This must be reimplemented by plugins whose containers emit signals in
on_option_update that need to be connected before applying those
options to our config system.
"""
pass
def after_configuration_update(self, options: List[Union[str, tuple]]):
"""
Perform additional operations after updating the plugin configuration
values.
This can be implemented by plugins that do not have a container and
need to act on configuration updates.
Parameters
----------
options: List[Union[str, tuple]]
A list that contains the options that were updated.
"""
pass
|
SpyderPluginV2
|
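A hedged sketch of the smallest plugin the API above admits: it fills the mandatory attributes and methods and nothing else. The plugin name, strings, and icon name are invented for illustration, and real plugins are normally registered through a package entry point as well.

```python
from spyder.api.plugins import Plugins, SpyderPluginV2


class HelloPlugin(SpyderPluginV2):
    """Hypothetical minimal plugin with no widgets of its own."""

    NAME = "hello_plugin"
    REQUIRES = [Plugins.StatusBar]
    CONF_SECTION = "hello_plugin"

    @staticmethod
    def get_name():
        return "Hello"

    @staticmethod
    def get_description():
        return "Says hello from the status bar."

    @classmethod
    def get_icon(cls):
        # Icon name is illustrative; any name known to the icon manager works.
        return cls.create_icon("genprefs")

    def on_initialize(self):
        # Uses the signal-backed helper documented above.
        self.show_status_message("Hello plugin loaded", timeout=2000)
```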
python
|
doocs__leetcode
|
solution/2500-2599/2547.Minimum Cost to Split an Array/Solution.py
|
{
"start": 0,
"end": 561
}
|
class ____:
def minCost(self, nums: List[int], k: int) -> int:
@cache
def dfs(i):
if i >= n:
return 0
cnt = Counter()
one = 0
ans = inf
for j in range(i, n):
cnt[nums[j]] += 1
if cnt[nums[j]] == 1:
one += 1
elif cnt[nums[j]] == 2:
one -= 1
ans = min(ans, k + j - i + 1 - one + dfs(j + 1))
return ans
n = len(nums)
return dfs(0)
|
Solution
|
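The recursion above memoizes, for each start index `i`, the cheapest way to split `nums[i:]`: each candidate subarray `nums[i:j+1]` costs `k` plus its length minus the count of values that appear exactly once in it (its "trimmed" length). A hedged, self-contained driver follows, spelling out the imports the LeetCode runtime normally pre-supplies and assuming the masked class is named `Solution`, as the target column says; the class body is copied from the record above.

```python
from collections import Counter
from functools import cache
from math import inf
from typing import List


class Solution:
    def minCost(self, nums: List[int], k: int) -> int:
        @cache
        def dfs(i):
            if i >= n:
                return 0
            cnt = Counter()
            one = 0          # how many values occur exactly once so far
            ans = inf
            for j in range(i, n):
                cnt[nums[j]] += 1
                if cnt[nums[j]] == 1:
                    one += 1
                elif cnt[nums[j]] == 2:
                    one -= 1
                # cost of nums[i:j+1] is k + trimmed length, then recurse
                ans = min(ans, k + j - i + 1 - one + dfs(j + 1))
            return ans

        n = len(nums)
        return dfs(0)


# First sample of LeetCode 2547: best split is [1,2] | [1,2,1,3,3], total cost 8.
print(Solution().minCost([1, 2, 1, 2, 1, 3, 3], 2))
```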
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-wordlift/llama_index/readers/wordlift/base.py
|
{
"start": 389,
"end": 596
}
|
class ____(WordLiftLoaderError):
"""Exception raised for errors in API calls."""
def __init__(self, message) -> None:
self.message = message
super().__init__(self.message)
|
APICallError
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/callbacks/test_early_stopping.py
|
{
"start": 19680,
"end": 20012
}
|
class ____(BoringModel):
def __init__(self):
super().__init__()
self.epoch_losses = [1.0, 2.0, 5.0, 10.0]
def on_validation_epoch_end(self):
loss = self.epoch_losses[self.current_epoch] if self.current_epoch < len(self.epoch_losses) else 15.0
self.log("val_loss", loss)
|
ModelWithIncreasingLoss
|
python
|
bokeh__bokeh
|
src/bokeh/models/filters.py
|
{
"start": 1811,
"end": 2640
}
|
class ____(Model):
''' A Filter model represents a filtering operation that returns a row-wise subset of
data when applied to a ``ColumnDataSource``.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def __invert__(self) -> Filter:
return InversionFilter(operand=self)
def __and__(self, other: Filter) -> Filter:
return IntersectionFilter(operands=[self, other])
def __or__(self, other: Filter) -> Filter:
return UnionFilter(operands=[self, other])
def __sub__(self, other: Filter) -> Filter:
return DifferenceFilter(operands=[self, other])
def __xor__(self, other: Filter) -> Filter:
return SymmetricDifferenceFilter(operands=[self, other])
|
Filter
|
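A hedged sketch of the operator composition the base class above enables, using a concrete built-in filter; the index values are arbitrary.

```python
from bokeh.models import IndexFilter

keep_even_rows = IndexFilter(indices=[0, 2, 4])
keep_first_rows = IndexFilter(indices=[0, 1, 2])

both = keep_even_rows & keep_first_rows     # IntersectionFilter
either = keep_even_rows | keep_first_rows   # UnionFilter
not_even = ~keep_even_rows                  # InversionFilter

print(type(both).__name__, type(either).__name__, type(not_even).__name__)
```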
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 263050,
"end": 263776
}
|
class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("CreatedPullRequestReviewContributionEdge"),
graphql_name="edges",
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("CreatedPullRequestReviewContribution"),
graphql_name="nodes",
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
|
CreatedPullRequestReviewContributionConnection
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/device_spec.py
|
{
"start": 13870,
"end": 15863
}
|
class ____(DeviceSpecV2):
__doc__ = DeviceSpecV2.__doc__
__slots__ = DeviceSpecV2.__slots__
@DeviceSpecV2.job.setter
def job(self, job):
self._job = _as_str_or_none(job)
self._as_string, self._hash = None, None
@DeviceSpecV2.replica.setter
def replica(self, replica):
self._replica = _as_int_or_none(replica)
self._as_string, self._hash = None, None
@DeviceSpecV2.task.setter
def task(self, task):
self._task = _as_int_or_none(task)
self._as_string, self._hash = None, None
@DeviceSpecV2.device_type.setter
def device_type(self, device_type):
self._device_type = _as_device_str_or_none(device_type)
self._as_string, self._hash = None, None
@DeviceSpecV2.device_index.setter
def device_index(self, device_index):
self._device_index = _as_int_or_none(device_index)
self._as_string, self._hash = None, None
def __hash__(self):
if self._hash is None:
self._hash = hash(self.to_string())
return self._hash
def to_string(self):
if self._as_string is None:
self._as_string = self._components_to_string(
job=self.job,
replica=self.replica,
task=self.task,
device_type=self.device_type,
device_index=self.device_index)
return self._as_string
def parse_from_string(self, spec):
(self.job, self.replica, self.task, self.device_type,
self.device_index) = self._string_to_components(spec)
return self
def merge_from(self, dev):
"""Merge the properties of "dev" into this `DeviceSpec`.
Note: Will be removed in TensorFlow 2.x since DeviceSpecs will become
immutable.
Args:
dev: a `DeviceSpec`.
"""
(self.job, self.replica, self.task, self.device_type,
self.device_index) = self._get_combined_properties(dev)
# Use parent class docstrings for public methods.
to_string.__doc__ = DeviceSpecV2.to_string.__doc__
parse_from_string.__doc__ = DeviceSpecV2.parse_from_string.__doc__
|
DeviceSpecV1
|
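A hedged usage sketch of the mutable V1 spec: in TF2 the class above is reachable as `tf.compat.v1.DeviceSpec`, while `tf.DeviceSpec` maps to the immutable `DeviceSpecV2`.

```python
import tensorflow as tf

spec = tf.compat.v1.DeviceSpec(job="worker", replica=0, task=1)
spec.device_type = "GPU"    # the V1 setters above mutate the spec in place
spec.device_index = 0
print(spec.to_string())     # /job:worker/replica:0/task:1/device:GPU:0
```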
python
|
catalyst-team__catalyst
|
catalyst/callbacks/metrics/confusion_matrix.py
|
{
"start": 429,
"end": 5284
}
|
class ____(Callback):
"""Callback to plot your confusion matrix to the loggers.
Args:
input_key: key to use from ``runner.batch``, specifies our ``y_pred``
target_key: key to use from ``runner.batch``, specifies our ``y_true``
prefix: plot name for monitoring tools
class_names: list with class names
num_classes: number of classes
normalize: boolean flag for confusion matrix normalization
plot_params: extra params for plt.figure rendering
.. note::
catalyst[ml] required for this callback
Examples:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# sample data
num_samples, num_features, num_classes = int(1e4), int(1e1), 4
X = torch.rand(num_samples, num_features)
y = (torch.rand(num_samples,) * num_classes).to(torch.int64)
# pytorch loaders
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, num_classes)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])
# model training
runner = dl.SupervisedRunner(
input_key="features",
output_key="logits",
target_key="targets",
loss_key="loss"
)
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logdir",
num_epochs=3,
valid_loader="valid",
valid_metric="accuracy03",
minimize_valid_metric=False,
verbose=True,
callbacks=[
dl.AccuracyCallback(
input_key="logits", target_key="targets", num_classes=num_classes
),
dl.PrecisionRecallF1SupportCallback(
input_key="logits", target_key="targets", num_classes=num_classes
),
dl.AUCCallback(input_key="logits", target_key="targets"),
dl.ConfusionMatrixCallback(
input_key="logits", target_key="targets", num_classes=num_classes
),
],
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
input_key: str,
target_key: str,
prefix: str = None,
class_names: List[str] = None,
num_classes: int = None,
normalize: bool = False,
plot_params: Dict = None,
):
"""Callback initialisation."""
super().__init__(CallbackOrder.metric)
assert num_classes is not None or class_names is not None
self.prefix = prefix or "confusion_matrix"
self.input_key = input_key
self.target_key = target_key
self._plot_params = plot_params or {}
self.class_names = class_names or [f"class_{i:02d}" for i in range(num_classes)]
self.num_classes = num_classes if class_names is None else len(class_names)
self.normalize = normalize
assert self.num_classes is not None
self.confusion_matrix = ConfusionMatrixMetric(
num_classes=self.num_classes, normalize=self.normalize
)
def on_loader_start(self, runner: "IRunner"):
"""Loader start hook.
Args:
runner: current runner
"""
self.confusion_matrix.reset()
def on_batch_end(self, runner: "IRunner"):
"""Batch end hook.
Args:
runner: current runner
"""
inputs, targets = (
runner.batch[self.input_key].detach(),
runner.batch[self.target_key].detach(),
)
self.confusion_matrix.update(predictions=inputs, targets=targets)
def on_loader_end(self, runner: "IRunner"):
"""Loader end hook.
Args:
runner: current runner
"""
confusion_matrix = self.confusion_matrix.compute()
fig = plot_confusion_matrix(
confusion_matrix,
class_names=self.class_names,
normalize=self.normalize,
show=False,
**self._plot_params,
)
image = render_figure_to_array(fig)
runner.log_image(tag=self.prefix, image=image, scope="loader")
__all__ = ["ConfusionMatrixCallback"]
|
ConfusionMatrixCallback
|
python
|
pydata__xarray
|
xarray/core/dataarray.py
|
{
"start": 8159,
"end": 292484
}
|
class ____(
AbstractArray,
DataWithCoords,
DataArrayArithmetic,
DataArrayAggregations,
):
"""N-dimensional array with labeled coordinates and dimensions.
DataArray provides a wrapper around numpy ndarrays that uses
labeled dimensions and coordinates to support metadata aware
operations. The API is similar to that for the pandas Series or
DataFrame, but DataArray objects can have any number of dimensions,
and their contents have fixed data types.
Additional features over raw numpy arrays:
- Apply operations over dimensions by name: ``x.sum('time')``.
- Select or assign values by integer location (like numpy):
``x[:10]`` or by label (like pandas): ``x.loc['2014-01-01']`` or
``x.sel(time='2014-01-01')``.
- Mathematical operations (e.g., ``x - y``) vectorize across
multiple dimensions (known in numpy as "broadcasting") based on
dimension names, regardless of their original order.
- Keep track of arbitrary metadata in the form of a Python
dictionary: ``x.attrs``
- Convert to a pandas Series: ``x.to_series()``.
Getting items from or doing mathematical operations with a
DataArray always returns another DataArray.
Parameters
----------
data : array_like
Values for this array. Must be an ``numpy.ndarray``, ndarray
like, or castable to an ``ndarray``. If a self-described xarray
or pandas object, attempts are made to use this array's
metadata to fill in other unspecified arguments. A view of the
array's data is used instead of a copy if possible.
coords : sequence or dict of array_like or :py:class:`~xarray.Coordinates`, optional
Coordinates (tick labels) to use for indexing along each
dimension. The following notations are accepted:
- mapping {dimension name: array-like}
- sequence of tuples that are valid arguments for
``xarray.Variable()``
- (dims, data)
- (dims, data, attrs)
- (dims, data, attrs, encoding)
Additionally, it is possible to define a coord whose name
does not match the dimension name, or a coord based on multiple
dimensions, with one of the following notations:
- mapping {coord name: DataArray}
- mapping {coord name: Variable}
- mapping {coord name: (dimension name, array-like)}
- mapping {coord name: (tuple of dimension names, array-like)}
Alternatively, a :py:class:`~xarray.Coordinates` object may be used in
order to explicitly pass indexes (e.g., a multi-index or any custom
Xarray index) or to bypass the creation of a default index for any
:term:`Dimension coordinate` included in that object.
dims : Hashable or sequence of Hashable, optional
Name(s) of the data dimension(s). Must be either a Hashable
(only for 1D data) or a sequence of Hashables with length equal
to the number of dimensions. If this argument is omitted,
dimension names are taken from ``coords`` (if possible) and
otherwise default to ``['dim_0', ... 'dim_n']``.
name : str or None, optional
Name of this array.
attrs : dict_like or None, optional
Attributes to assign to the new instance. By default, an empty
attribute dictionary is initialized.
(see FAQ, :ref:`approach to metadata`)
indexes : :py:class:`~xarray.Indexes` or dict-like, optional
For internal use only. For passing indexes objects to the
new DataArray, use the ``coords`` argument instead with a
:py:class:`~xarray.Coordinate` object (both coordinate variables
and indexes will be extracted from the latter).
Examples
--------
Create data:
>>> np.random.seed(0)
>>> temperature = 15 + 8 * np.random.randn(2, 2, 3)
>>> lon = [[-99.83, -99.32], [-99.79, -99.23]]
>>> lat = [[42.25, 42.21], [42.63, 42.59]]
>>> time = pd.date_range("2014-09-06", periods=3)
>>> reference_time = pd.Timestamp("2014-09-05")
Initialize a dataarray with multiple dimensions:
>>> da = xr.DataArray(
... data=temperature,
... dims=["x", "y", "time"],
... coords=dict(
... lon=(["x", "y"], lon),
... lat=(["x", "y"], lat),
... time=time,
... reference_time=reference_time,
... ),
... attrs=dict(
... description="Ambient temperature.",
... units="degC",
... ),
... )
>>> da
<xarray.DataArray (x: 2, y: 2, time: 3)> Size: 96B
array([[[29.11241877, 18.20125767, 22.82990387],
[32.92714559, 29.94046392, 7.18177696]],
<BLANKLINE>
[[22.60070734, 13.78914233, 14.17424919],
[18.28478802, 16.15234857, 26.63418806]]])
Coordinates:
* time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08
lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23
lat (x, y) float64 32B 42.25 42.21 42.63 42.59
reference_time datetime64[ns] 8B 2014-09-05
Dimensions without coordinates: x, y
Attributes:
description: Ambient temperature.
units: degC
Find out where the coldest temperature was:
>>> da.isel(da.argmin(...))
<xarray.DataArray ()> Size: 8B
array(7.18177696)
Coordinates:
lon float64 8B -99.32
lat float64 8B 42.21
time datetime64[ns] 8B 2014-09-08
reference_time datetime64[ns] 8B 2014-09-05
Attributes:
description: Ambient temperature.
units: degC
"""
_cache: dict[str, Any]
_coords: dict[Any, Variable]
_close: Callable[[], None] | None
_indexes: dict[Hashable, Index]
_name: Hashable | None
_variable: Variable
__slots__ = (
"__weakref__",
"_cache",
"_close",
"_coords",
"_indexes",
"_name",
"_variable",
)
dt = utils.UncachedAccessor(CombinedDatetimelikeAccessor["DataArray"])
def __init__(
self,
data: Any = dtypes.NA,
coords: (
Sequence[Sequence | pd.Index | DataArray | Variable | np.ndarray]
| Mapping
| None
) = None,
dims: str | Iterable[Hashable] | None = None,
name: Hashable | None = None,
attrs: Mapping | None = None,
# internal parameters
indexes: Mapping[Hashable, Index] | None = None,
fastpath: bool = False,
) -> None:
if fastpath:
variable = data
assert dims is None
assert attrs is None
assert indexes is not None
else:
if indexes is not None:
raise ValueError(
"Explicitly passing indexes via the `indexes` argument is not supported "
"when `fastpath=False`. Use the `coords` argument instead."
)
# try to fill in arguments from data if they weren't supplied
if coords is None:
if isinstance(data, DataArray):
coords = data.coords
elif isinstance(data, pd.Series):
coords = [data.index]
elif isinstance(data, pd.DataFrame):
coords = [data.index, data.columns]
elif isinstance(data, pd.Index | IndexVariable):
coords = [data]
if dims is None:
dims = getattr(data, "dims", getattr(coords, "dims", None))
if name is None:
name = getattr(data, "name", None)
if attrs is None and not isinstance(data, PANDAS_TYPES):
attrs = getattr(data, "attrs", None)
data = _check_data_shape(data, coords, dims)
data = as_compatible_data(data)
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
variable = Variable(dims, data, attrs, fastpath=True)
if not isinstance(coords, Coordinates):
coords = create_coords_with_default_indexes(coords)
indexes = dict(coords.xindexes)
coords = {k: v.copy() for k, v in coords.variables.items()}
# These fully describe a DataArray
self._variable = variable
assert isinstance(coords, dict)
self._coords = coords
self._name = name
self._indexes = dict(indexes)
self._close = None
@classmethod
def _construct_direct(
cls,
variable: Variable,
coords: dict[Any, Variable],
name: Hashable,
indexes: dict[Hashable, Index],
) -> Self:
"""Shortcut around __init__ for internal use when we want to skip
costly validation
"""
obj = object.__new__(cls)
obj._variable = variable
obj._coords = coords
obj._name = name
obj._indexes = indexes
obj._close = None
return obj
def _replace(
self,
variable: Variable | None = None,
coords=None,
name: Hashable | Default | None = _default,
attrs=_default,
indexes=None,
) -> Self:
if variable is None:
variable = self.variable
if coords is None:
coords = self._coords
if indexes is None:
indexes = self._indexes
if name is _default:
name = self.name
if attrs is _default:
attrs = copy.copy(self.attrs)
else:
variable = variable.copy()
variable.attrs = attrs
return type(self)(variable, coords, name=name, indexes=indexes, fastpath=True)
def _replace_maybe_drop_dims(
self,
variable: Variable,
name: Hashable | Default | None = _default,
) -> Self:
if self.sizes == variable.sizes:
coords = self._coords.copy()
indexes = self._indexes
elif set(self.dims) == set(variable.dims):
# Shape has changed (e.g. from reduce(..., keepdims=True))
new_sizes = dict(zip(self.dims, variable.shape, strict=True))
coords = {
k: v
for k, v in self._coords.items()
if v.shape == tuple(new_sizes[d] for d in v.dims)
}
indexes = filter_indexes_from_coords(self._indexes, set(coords))
else:
allowed_dims = set(variable.dims)
coords = {
k: v for k, v in self._coords.items() if set(v.dims) <= allowed_dims
}
indexes = filter_indexes_from_coords(self._indexes, set(coords))
return self._replace(variable, coords, name, indexes=indexes)
def _overwrite_indexes(
self,
indexes: Mapping[Any, Index],
variables: Mapping[Any, Variable] | None = None,
drop_coords: list[Hashable] | None = None,
rename_dims: Mapping[Any, Any] | None = None,
) -> Self:
"""Maybe replace indexes and their corresponding coordinates."""
if not indexes:
return self
if variables is None:
variables = {}
if drop_coords is None:
drop_coords = []
new_variable = self.variable.copy()
new_coords = self._coords.copy()
new_indexes = dict(self._indexes)
for name in indexes:
new_coords[name] = variables[name]
new_indexes[name] = indexes[name]
for name in drop_coords:
new_coords.pop(name)
new_indexes.pop(name)
if rename_dims:
new_variable.dims = tuple(rename_dims.get(d, d) for d in new_variable.dims)
return self._replace(
variable=new_variable, coords=new_coords, indexes=new_indexes
)
def _to_temp_dataset(self) -> Dataset:
return self._to_dataset_whole(name=_THIS_ARRAY, shallow_copy=False)
def _from_temp_dataset(
self, dataset: Dataset, name: Hashable | Default | None = _default
) -> Self:
variable = dataset._variables.pop(_THIS_ARRAY)
coords = dataset._variables
indexes = dataset._indexes
return self._replace(variable, coords, name, indexes=indexes)
def _to_dataset_split(self, dim: Hashable) -> Dataset:
"""splits dataarray along dimension 'dim'"""
def subset(dim, label):
array = self.loc[{dim: label}]
array.attrs = {}
return as_variable(array)
variables_from_split = {
label: subset(dim, label) for label in self.get_index(dim)
}
coord_names = set(self._coords) - {dim}
ambiguous_vars = set(variables_from_split) & coord_names
if ambiguous_vars:
rename_msg_fmt = ", ".join([f"{v}=..." for v in sorted(ambiguous_vars)])
raise ValueError(
f"Splitting along the dimension {dim!r} would produce the variables "
f"{tuple(sorted(ambiguous_vars))} which are also existing coordinate "
f"variables. Use DataArray.rename({rename_msg_fmt}) or "
f"DataArray.assign_coords({dim}=...) to resolve this ambiguity."
)
variables = variables_from_split | {
k: v for k, v in self._coords.items() if k != dim
}
indexes = filter_indexes_from_coords(self._indexes, coord_names)
dataset = Dataset._construct_direct(
variables, coord_names, indexes=indexes, attrs=self.attrs
)
return dataset
def _to_dataset_whole(
self, name: Hashable = None, shallow_copy: bool = True
) -> Dataset:
if name is None:
name = self.name
if name is None:
raise ValueError(
"unable to convert unnamed DataArray to a "
"Dataset without providing an explicit name"
)
if name in self.coords:
raise ValueError(
"cannot create a Dataset from a DataArray with "
"the same name as one of its coordinates"
)
# use private APIs for speed: this is called by _to_temp_dataset(),
# which is used in the guts of a lot of operations (e.g., reindex)
variables = self._coords.copy()
variables[name] = self.variable
if shallow_copy:
for k in variables:
variables[k] = variables[k].copy(deep=False)
indexes = self._indexes
coord_names = set(self._coords)
return Dataset._construct_direct(variables, coord_names, indexes=indexes)
def to_dataset(
self,
dim: Hashable = None,
*,
name: Hashable = None,
promote_attrs: bool = False,
) -> Dataset:
"""Convert a DataArray to a Dataset.
Parameters
----------
dim : Hashable, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : Hashable, optional
Name to substitute for this array's name. Only valid if ``dim`` is
not provided.
promote_attrs : bool, default: False
Set to True to shallow copy attrs of DataArray to returned Dataset.
Returns
-------
dataset : Dataset
"""
if dim is not None and dim not in self.dims:
raise TypeError(
f"{dim} is not a dim. If supplying a ``name``, pass as a kwarg."
)
if dim is not None:
if name is not None:
raise TypeError("cannot supply both dim and name arguments")
result = self._to_dataset_split(dim)
else:
result = self._to_dataset_whole(name)
if promote_attrs:
result.attrs = dict(self.attrs)
return result
@property
def name(self) -> Hashable | None:
"""The name of this array."""
return self._name
@name.setter
def name(self, value: Hashable | None) -> None:
self._name = value
@property
def variable(self) -> Variable:
"""Low level interface to the Variable object for this DataArray."""
return self._variable
@property
def dtype(self) -> np.dtype:
"""
Data-type of the array’s elements.
See Also
--------
ndarray.dtype
numpy.dtype
"""
return self.variable.dtype
@property
def shape(self) -> tuple[int, ...]:
"""
Tuple of array dimensions.
See Also
--------
numpy.ndarray.shape
"""
return self.variable.shape
@property
def size(self) -> int:
"""
Number of elements in the array.
Equal to ``np.prod(a.shape)``, i.e., the product of the array’s dimensions.
See Also
--------
numpy.ndarray.size
"""
return self.variable.size
@property
def nbytes(self) -> int:
"""
Total bytes consumed by the elements of this DataArray's data.
If the underlying data array does not include ``nbytes``, estimates
the bytes consumed based on the ``size`` and ``dtype``.
"""
return self.variable.nbytes
@property
def ndim(self) -> int:
"""
Number of array dimensions.
See Also
--------
numpy.ndarray.ndim
"""
return self.variable.ndim
def __len__(self) -> int:
return len(self.variable)
@property
def data(self) -> Any:
"""
The DataArray's data as an array. The underlying array type
(e.g. dask, sparse, pint) is preserved.
See Also
--------
DataArray.to_numpy
DataArray.as_numpy
DataArray.values
"""
return self.variable.data
@data.setter
def data(self, value: Any) -> None:
self.variable.data = value
@property
def values(self) -> np.ndarray:
"""
The array's data converted to numpy.ndarray.
This will attempt to convert the array naively using np.array(),
which will raise an error if the array type does not support
coercion like this (e.g. cupy).
Note that this array is not copied; operations on it follow
numpy's rules of what generates a view vs. a copy, and changes
to this array may be reflected in the DataArray as well.
"""
return self.variable.values
@values.setter
def values(self, value: Any) -> None:
self.variable.values = value
def to_numpy(self) -> np.ndarray:
"""
Coerces wrapped data to numpy and returns a numpy.ndarray.
See Also
--------
DataArray.as_numpy : Same but returns the surrounding DataArray instead.
Dataset.as_numpy
DataArray.values
DataArray.data
"""
return self.variable.to_numpy()
def as_numpy(self) -> Self:
"""
Coerces wrapped data and coordinates into numpy arrays, returning a DataArray.
See Also
--------
DataArray.to_numpy : Same but returns only the data as a numpy.ndarray object.
Dataset.as_numpy : Converts all variables in a Dataset.
DataArray.values
DataArray.data
"""
coords = {k: v.as_numpy() for k, v in self._coords.items()}
return self._replace(self.variable.as_numpy(), coords, indexes=self._indexes)
@property
def _in_memory(self) -> bool:
return self.variable._in_memory
def _to_index(self) -> pd.Index:
return self.variable._to_index()
def to_index(self) -> pd.Index:
"""Convert this variable to a pandas.Index. Only possible for 1D
arrays.
"""
return self.variable.to_index()
@property
def dims(self) -> tuple[Hashable, ...]:
"""Tuple of dimension names associated with this array.
Note that the type of this property is inconsistent with
`Dataset.dims`. See `Dataset.sizes` and `DataArray.sizes` for
consistently named properties.
See Also
--------
DataArray.sizes
Dataset.dims
"""
return self.variable.dims
@dims.setter
def dims(self, value: Any) -> NoReturn:
raise AttributeError(
"you cannot assign dims on a DataArray. Use "
".rename() or .swap_dims() instead."
)
def _item_key_to_dict(self, key: Any) -> Mapping[Hashable, Any]:
if utils.is_dict_like(key):
return key
key = indexing.expanded_indexer(key, self.ndim)
return dict(zip(self.dims, key, strict=True))
def _getitem_coord(self, key: Any) -> Self:
from xarray.core.dataset_utils import _get_virtual_variable
try:
var = self._coords[key]
except KeyError:
dim_sizes = dict(zip(self.dims, self.shape, strict=True))
_, key, var = _get_virtual_variable(self._coords, key, dim_sizes)
return self._replace_maybe_drop_dims(var, name=key)
def __getitem__(self, key: Any) -> Self:
if isinstance(key, str):
return self._getitem_coord(key)
else:
# xarray-style array indexing
return self.isel(indexers=self._item_key_to_dict(key))
def __setitem__(self, key: Any, value: Any) -> None:
if isinstance(key, str):
self.coords[key] = value
else:
# Coordinates in key, value and self[key] should be consistent.
# TODO Coordinate consistency in key is checked here, but it
# causes unnecessary indexing. It should be optimized.
obj = self[key]
if isinstance(value, DataArray):
assert_coordinate_consistent(value, obj.coords.variables)
value = value.variable
# DataArray key -> Variable key
key = {
k: v.variable if isinstance(v, DataArray) else v
for k, v in self._item_key_to_dict(key).items()
}
self.variable[key] = value
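# Illustrative sketch (not executed) of the two assignment paths above,
# assuming `da` is a 2-D DataArray with dims ("x", "y"):
#
#     da["x"] = [10, 20, 30]   # string key: assign/replace a coordinate
#     da[{"x": 0}] = 0.0       # dict or positional key: write into the data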
def __delitem__(self, key: Any) -> None:
del self.coords[key]
@property
def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]:
"""Places to look-up items for attribute-style access"""
yield from self._item_sources
yield self.attrs
@property
def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]:
"""Places to look-up items for key-completion"""
yield FilteredMapping(keys=self._coords, mapping=self.coords)
# virtual coordinates
yield FilteredMapping(keys=self.dims, mapping=self.coords)
def __contains__(self, key: Any) -> bool:
return key in self.data
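# Note: membership is tested against the underlying data values (mirroring
# numpy), not against coordinate or dimension names.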
@property
def loc(self) -> _LocIndexer:
"""Attribute for location based indexing like pandas."""
return _LocIndexer(self)
@property
def attrs(self) -> dict[Any, Any]:
"""Dictionary storing arbitrary metadata with this array."""
return self.variable.attrs
@attrs.setter
def attrs(self, value: Mapping[Any, Any]) -> None:
self.variable.attrs = dict(value)
@property
def encoding(self) -> dict[Any, Any]:
"""Dictionary of format-specific settings for how this array should be
serialized."""
return self.variable.encoding
@encoding.setter
def encoding(self, value: Mapping[Any, Any]) -> None:
self.variable.encoding = dict(value)
def reset_encoding(self) -> Self:
warnings.warn(
"reset_encoding is deprecated since 2023.11, use `drop_encoding` instead",
stacklevel=2,
)
return self.drop_encoding()
def drop_encoding(self) -> Self:
"""Return a new DataArray without encoding on the array or any attached
coords."""
ds = self._to_temp_dataset().drop_encoding()
return self._from_temp_dataset(ds)
@property
def indexes(self) -> Indexes:
"""Mapping of pandas.Index objects used for label based indexing.
Raises an error if this DataArray has indexes that cannot be coerced
to pandas.Index objects.
See Also
--------
DataArray.xindexes
"""
return self.xindexes.to_pandas_indexes()
@property
def xindexes(self) -> Indexes[Index]:
"""Mapping of :py:class:`~xarray.indexes.Index` objects
used for label based indexing.
"""
return Indexes(self._indexes, {k: self._coords[k] for k in self._indexes})
@property
def coords(self) -> DataArrayCoordinates:
"""Mapping of :py:class:`~xarray.DataArray` objects corresponding to
coordinate variables.
See Also
--------
Coordinates
"""
return DataArrayCoordinates(self)
@overload
def reset_coords(
self,
names: Dims = None,
*,
drop: Literal[False] = False,
) -> Dataset: ...
@overload
def reset_coords(
self,
names: Dims = None,
*,
drop: Literal[True],
) -> Self: ...
def reset_coords(
self,
names: Dims = None,
*,
drop: bool = False,
) -> Self | Dataset:
"""Given names of coordinates, reset them to become variables.
Parameters
----------
names : str, Iterable of Hashable or None, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, default: False
If True, remove coordinates instead of converting them into
variables.
Returns
-------
Dataset, or DataArray if ``drop == True``
Examples
--------
>>> temperature = np.arange(25).reshape(5, 5)
>>> pressure = np.arange(50, 75).reshape(5, 5)
>>> da = xr.DataArray(
... data=temperature,
... dims=["x", "y"],
... coords=dict(
... lon=("x", np.arange(10, 15)),
... lat=("y", np.arange(20, 25)),
... Pressure=(["x", "y"], pressure),
... ),
... name="Temperature",
... )
>>> da
<xarray.DataArray 'Temperature' (x: 5, y: 5)> Size: 200B
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Coordinates:
lon (x) int64 40B 10 11 12 13 14
lat (y) int64 40B 20 21 22 23 24
Pressure (x, y) int64 200B 50 51 52 53 54 55 56 57 ... 68 69 70 71 72 73 74
Dimensions without coordinates: x, y
Return Dataset with target coordinate as a data variable rather than a coordinate variable:
>>> da.reset_coords(names="Pressure")
<xarray.Dataset> Size: 480B
Dimensions: (x: 5, y: 5)
Coordinates:
lon (x) int64 40B 10 11 12 13 14
lat (y) int64 40B 20 21 22 23 24
Dimensions without coordinates: x, y
Data variables:
Pressure (x, y) int64 200B 50 51 52 53 54 55 56 ... 68 69 70 71 72 73 74
Temperature (x, y) int64 200B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24
Return DataArray without targeted coordinate:
>>> da.reset_coords(names="Pressure", drop=True)
<xarray.DataArray 'Temperature' (x: 5, y: 5)> Size: 200B
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Coordinates:
lon (x) int64 40B 10 11 12 13 14
lat (y) int64 40B 20 21 22 23 24
Dimensions without coordinates: x, y
"""
if names is None:
names = set(self.coords) - set(self._indexes)
dataset = self.coords.to_dataset().reset_coords(names, drop)
if drop:
return self._replace(coords=dataset._variables)
if self.name is None:
raise ValueError(
"cannot reset_coords with drop=False on an unnamed DataArray"
)
dataset[self.name] = self.variable
return dataset
def __dask_tokenize__(self) -> object:
from dask.base import normalize_token
return normalize_token((type(self), self._variable, self._coords, self._name))
def __dask_graph__(self):
return self._to_temp_dataset().__dask_graph__()
def __dask_keys__(self):
return self._to_temp_dataset().__dask_keys__()
def __dask_layers__(self):
return self._to_temp_dataset().__dask_layers__()
@property
def __dask_optimize__(self):
return self._to_temp_dataset().__dask_optimize__
@property
def __dask_scheduler__(self):
return self._to_temp_dataset().__dask_scheduler__
def __dask_postcompute__(self):
func, args = self._to_temp_dataset().__dask_postcompute__()
return self._dask_finalize, (self.name, func) + args
def __dask_postpersist__(self):
func, args = self._to_temp_dataset().__dask_postpersist__()
return self._dask_finalize, (self.name, func) + args
@classmethod
def _dask_finalize(cls, results, name, func, *args, **kwargs) -> Self:
ds = func(results, *args, **kwargs)
variable = ds._variables.pop(_THIS_ARRAY)
coords = ds._variables
indexes = ds._indexes
return cls(variable, coords, name=name, indexes=indexes, fastpath=True)
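# Illustrative sketch (not executed) of how the dask collection protocol above
# is exercised; assumes dask is installed so `da` wraps a dask array:
#
#     import dask
#     import dask.array
#     da = DataArray(dask.array.ones((4, 4), chunks=2), dims=("x", "y"))
#     dask.base.tokenize(da)   # routed through __dask_tokenize__
#     dask.compute(da + 1)     # routed through __dask_graph__ / __dask_postcompute__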
def load(self, **kwargs) -> Self:
"""Trigger loading data into memory and return this dataarray.
Data will be computed and/or loaded from disk or a remote source.
Unlike ``.compute``, the original dataarray is modified and returned.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.compute``.
Returns
-------
object : DataArray
The same object, with its formerly lazy data and coordinates loaded as in-memory arrays.
See Also
--------
dask.compute
DataArray.load_async
DataArray.compute
Dataset.load
Variable.load
"""
ds = self._to_temp_dataset().load(**kwargs)
new = self._from_temp_dataset(ds)
self._variable = new._variable
self._coords = new._coords
return self
async def load_async(self, **kwargs) -> Self:
"""Trigger and await asynchronous loading of data into memory and return this dataarray.
Data will be computed and/or loaded from disk or a remote source.
Unlike ``.compute``, the original dataarray is modified and returned.
Only works when opening data lazily from IO storage backends which support lazy asynchronous loading.
Otherwise will raise a NotImplementedError.
Note users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.compute``.
Returns
-------
object : DataArray
The same object, with its formerly lazy data and coordinates loaded as in-memory arrays.
See Also
--------
dask.compute
DataArray.compute
DataArray.load
Dataset.load_async
Variable.load_async
"""
temp_ds = self._to_temp_dataset()
ds = await temp_ds.load_async(**kwargs)
new = self._from_temp_dataset(ds)
self._variable = new._variable
self._coords = new._coords
return self
def compute(self, **kwargs) -> Self:
"""Trigger loading data into memory and return a new dataarray.
Data will be computed and/or loaded from disk or a remote source.
Unlike ``.load``, the original dataarray is left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.compute``.
Returns
-------
object : DataArray
New object with the data and all coordinates as in-memory arrays.
See Also
--------
dask.compute
DataArray.load
DataArray.load_async
Dataset.compute
Variable.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
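# Illustrative sketch (not executed) of the load/compute distinction described
# above; assumes dask is installed so the data starts out lazy:
#
#     import dask.array
#     da = DataArray(dask.array.zeros(1000, chunks=100), dims="x")
#     eager = da.compute()   # new in-memory DataArray; `da` itself stays lazy
#     da.load()              # loads in place; `da` is now backed by numpy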
def persist(self, **kwargs) -> Self:
"""Trigger computation in constituent dask arrays
This keeps them as dask arrays but encourages them to keep data in
memory. This is particularly useful when on a distributed machine.
When on a single machine consider using ``.compute()`` instead.
Like compute (but unlike load), the original dataset is left unaltered.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.persist``.
Returns
-------
object : DataArray
New object with all dask-backed data and coordinates as persisted dask arrays.
See Also
--------
dask.persist
"""
ds = self._to_temp_dataset().persist(**kwargs)
return self._from_temp_dataset(ds)
def copy(self, deep: bool = True, data: Any = None) -> Self:
"""Returns a copy of this array.
If `deep=True`, a deep copy is made of the data array.
Otherwise, a shallow copy is made, and the returned data array's
values are a new view of this data array's values.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array and its coordinates are loaded into memory
and copied onto the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored for all data variables,
and only used for coords.
Returns
-------
copy : DataArray
New object with dimensions, attributes, coordinates, name,
encoding, and optionally data copied from original.
Examples
--------
Shallow versus deep copy
>>> array = xr.DataArray([1, 2, 3], dims="x", coords={"x": ["a", "b", "c"]})
>>> array.copy()
<xarray.DataArray (x: 3)> Size: 24B
array([1, 2, 3])
Coordinates:
* x (x) <U1 12B 'a' 'b' 'c'
>>> array_0 = array.copy(deep=False)
>>> array_0[0] = 7
>>> array_0
<xarray.DataArray (x: 3)> Size: 24B
array([7, 2, 3])
Coordinates:
* x (x) <U1 12B 'a' 'b' 'c'
>>> array
<xarray.DataArray (x: 3)> Size: 24B
array([7, 2, 3])
Coordinates:
* x (x) <U1 12B 'a' 'b' 'c'
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> array.copy(data=[0.1, 0.2, 0.3])
<xarray.DataArray (x: 3)> Size: 24B
array([0.1, 0.2, 0.3])
Coordinates:
* x (x) <U1 12B 'a' 'b' 'c'
>>> array
<xarray.DataArray (x: 3)> Size: 24B
array([7, 2, 3])
Coordinates:
* x (x) <U1 12B 'a' 'b' 'c'
See Also
--------
pandas.DataFrame.copy
"""
return self._copy(deep=deep, data=data)
def _copy(
self,
deep: bool = True,
data: Any = None,
memo: dict[int, Any] | None = None,
) -> Self:
variable = self.variable._copy(deep=deep, data=data, memo=memo)
indexes, index_vars = self.xindexes.copy_indexes(deep=deep)
coords = {}
for k, v in self._coords.items():
if k in index_vars:
coords[k] = index_vars[k]
else:
coords[k] = v._copy(deep=deep, memo=memo)
return self._replace(variable, coords, indexes=indexes)
def __copy__(self) -> Self:
return self._copy(deep=False)
def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self:
return self._copy(deep=True, memo=memo)
# mutable objects should not be Hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore[assignment]
@property
def chunks(self) -> tuple[tuple[int, ...], ...] | None:
"""
Tuple of block lengths for this dataarray's data, in order of dimensions, or None if
the underlying data is not a dask array.
See Also
--------
DataArray.chunk
DataArray.chunksizes
xarray.unify_chunks
"""
return self.variable.chunks
@property
def chunksizes(self) -> Mapping[Any, tuple[int, ...]]:
"""
Mapping from dimension names to block lengths for this dataarray's data.
If this dataarray does not contain chunked arrays, the mapping will be empty.
Cannot be modified directly, but can be modified by calling .chunk().
Differs from DataArray.chunks because it returns a mapping of dimensions to chunk shapes
instead of a tuple of chunk shapes.
See Also
--------
DataArray.chunk
DataArray.chunks
xarray.unify_chunks
"""
all_variables = [self.variable] + [c.variable for c in self.coords.values()]
return get_chunksizes(all_variables)
def chunk(
self,
chunks: T_ChunksFreq = {}, # noqa: B006 # {} even though it's technically unsafe, is being used intentionally here (#4667)
*,
name_prefix: str = "xarray-",
token: str | None = None,
lock: bool = False,
inline_array: bool = False,
chunked_array_type: str | ChunkManagerEntrypoint | None = None,
from_array_kwargs=None,
**chunks_kwargs: T_ChunkDimFreq,
) -> Self:
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If chunks are not provided for one or more dimensions, chunk
sizes along those dimensions will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Along datetime-like dimensions, a pandas frequency string is also accepted.
Parameters
----------
chunks : int, "auto", tuple of int or mapping of hashable to int or a pandas frequency string, optional
Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, ``(5, 5)`` or
``{"x": 5, "y": 5}`` or ``{"x": 5, "time": "YE"}``.
name_prefix : str, optional
Prefix for the name of the new dask array.
token : str, optional
Token uniquely identifying this array.
lock : bool, default: False
Passed on to :py:func:`dask.array.from_array`, if the array is not
already a dask array.
inline_array : bool, default: False
Passed on to :py:func:`dask.array.from_array`, if the array is not
already a dask array.
chunked_array_type: str, optional
Which chunked array type to coerce the underlying data array to.
Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system.
Experimental API that should not be relied upon.
from_array_kwargs: dict, optional
Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
For example, with dask as the default chunked array type, this method would pass additional kwargs
to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
**chunks_kwargs : {dim: chunks, ...}, optional
The keyword arguments form of ``chunks``.
One of chunks or chunks_kwargs must be provided.
Returns
-------
chunked : xarray.DataArray
See Also
--------
DataArray.chunks
DataArray.chunksizes
xarray.unify_chunks
dask.array.from_array
"""
chunk_mapping: T_ChunksFreq
if chunks is None:
warnings.warn(
"None value for 'chunks' is deprecated. "
"It will raise an error in the future. Use instead '{}'",
category=FutureWarning,
stacklevel=2,
)
chunk_mapping = {}
if isinstance(chunks, float | str | int):
# ignoring type; unclear why it won't accept a Literal into the value.
chunk_mapping = dict.fromkeys(self.dims, chunks)
elif isinstance(chunks, tuple | list):
utils.emit_user_level_warning(
"Supplying chunks as dimension-order tuples is deprecated. "
"It will raise an error in the future. Instead use a dict with dimension names as keys.",
category=DeprecationWarning,
)
if len(chunks) != len(self.dims):
raise ValueError(
f"chunks must have the same number of elements as dimensions. "
f"Expected {len(self.dims)} elements, got {len(chunks)}."
)
chunk_mapping = dict(zip(self.dims, chunks, strict=True))
else:
chunk_mapping = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk")
ds = self._to_temp_dataset().chunk(
chunk_mapping,
name_prefix=name_prefix,
token=token,
lock=lock,
inline_array=inline_array,
chunked_array_type=chunked_array_type,
from_array_kwargs=from_array_kwargs,
)
return self._from_temp_dataset(ds)
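# Illustrative sketch (not executed) of the chunk specifications normalized
# above; assumes dask is installed and `da` has dims ("time", "x") with a
# datetime-like "time" coordinate:
#
#     da.chunk({"x": 10})     # mapping form
#     da.chunk(x=10)          # keyword form
#     da.chunk(5)             # a single size applied to every dimension
#     da.chunk(time="YE")     # pandas frequency string along a datetime dimension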
def isel(
self,
indexers: Mapping[Any, Any] | None = None,
drop: bool = False,
missing_dims: ErrorOptionsWithWarn = "raise",
**indexers_kwargs: Any,
) -> Self:
"""Return a new DataArray whose data is given by selecting indexes
along the specified dimension(s).
Parameters
----------
indexers : dict, optional
A dict with keys matching dimensions and values given
by integers, slice objects or arrays.
An indexer can be an integer, slice, array-like or DataArray.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
drop : bool, default: False
If ``drop=True``, drop coordinate variables indexed by integers
instead of making them scalar.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
DataArray:
- "raise": raise an exception
- "warn": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
**indexers_kwargs : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
Returns
-------
indexed : xarray.DataArray
See Also
--------
:func:`Dataset.isel <Dataset.isel>`
:func:`DataArray.sel <DataArray.sel>`
:doc:`xarray-tutorial:intermediate/indexing/indexing`
Tutorial material on indexing with Xarray objects
:doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic`
Tutorial material on basics of indexing
Examples
--------
>>> da = xr.DataArray(np.arange(25).reshape(5, 5), dims=("x", "y"))
>>> da
<xarray.DataArray (x: 5, y: 5)> Size: 200B
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Dimensions without coordinates: x, y
>>> tgt_x = xr.DataArray(np.arange(0, 5), dims="points")
>>> tgt_y = xr.DataArray(np.arange(0, 5), dims="points")
>>> da = da.isel(x=tgt_x, y=tgt_y)
>>> da
<xarray.DataArray (points: 5)> Size: 40B
array([ 0, 6, 12, 18, 24])
Dimensions without coordinates: points
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
if any(is_fancy_indexer(idx) for idx in indexers.values()):
ds = self._to_temp_dataset()._isel_fancy(
indexers, drop=drop, missing_dims=missing_dims
)
return self._from_temp_dataset(ds)
# Much faster algorithm for when all indexers are ints, slices, one-dimensional
# lists, or zero or one-dimensional np.ndarray's
variable = self._variable.isel(indexers, missing_dims=missing_dims)
indexes, index_variables = isel_indexes(self.xindexes, indexers)
coords = {}
for coord_name, coord_value in self._coords.items():
if coord_name in index_variables:
coord_value = index_variables[coord_name]
else:
coord_indexers = {
k: v for k, v in indexers.items() if k in coord_value.dims
}
if coord_indexers:
coord_value = coord_value.isel(coord_indexers)
if drop and coord_value.ndim == 0:
continue
coords[coord_name] = coord_value
return self._replace(variable=variable, coords=coords, indexes=indexes)
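# Illustrative sketch (not executed) contrasting positional isel() above with
# label-based sel() below:
#
#     da.isel(x=0)                                      # integer position
#     da.isel(x=slice(2))                               # positional slice
#     da.sel(x=10.5, method="nearest", tolerance=1.0)   # nearest label match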
def sel(
self,
indexers: Mapping[Any, Any] | None = None,
method: str | None = None,
tolerance=None,
drop: bool = False,
**indexers_kwargs: Any,
) -> Self:
"""Return a new DataArray whose data is given by selecting index
labels along the specified dimension(s).
In contrast to `DataArray.isel`, indexers for this method should use
labels instead of integers.
Under the hood, this method is powered by using pandas's powerful Index
objects. This makes label based indexing essentially just as fast as
using integer indexing.
It also means this method uses pandas's (well documented) logic for
indexing. This means you can use string shortcuts for datetime indexes
(e.g., '2000-01' to select all values in January 2000). It also means
that slices are treated as inclusive of both the start and stop values,
unlike normal Python indexing.
.. warning::
Do not try to assign values when using any of the indexing methods
``isel`` or ``sel``::
da = xr.DataArray([0, 1, 2, 3], dims=["x"])
# DO NOT do this
da.isel(x=[0, 1, 2])[1] = -1
Assigning values with the chained indexing using ``.sel`` or
``.isel`` fails silently.
Parameters
----------
indexers : dict, optional
A dict with keys matching dimensions and values given
by scalars, slices or arrays of tick labels. For dimensions with
multi-index, the indexer may also be a dict-like object with keys
matching index level names.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for inexact matches:
- None (default): only exact matches
- pad / ffill: propagate last valid index value forward
- backfill / bfill: propagate next valid index value backward
- nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
drop : bool, optional
If ``drop=True``, drop coordinate variables in `indexers` instead
of making them scalar.
**indexers_kwargs : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
obj : DataArray
A new DataArray with the same contents as this DataArray, except the
data and each dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this DataArray, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
:func:`Dataset.sel <Dataset.sel>`
:func:`DataArray.isel <DataArray.isel>`
:doc:`xarray-tutorial:intermediate/indexing/indexing`
Tutorial material on indexing with Xarray objects
:doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic`
Tutorial material on basics of indexing
Examples
--------
>>> da = xr.DataArray(
... np.arange(25).reshape(5, 5),
... coords={"x": np.arange(5), "y": np.arange(5)},
... dims=("x", "y"),
... )
>>> da
<xarray.DataArray (x: 5, y: 5)> Size: 200B
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Coordinates:
* x (x) int64 40B 0 1 2 3 4
* y (y) int64 40B 0 1 2 3 4
>>> tgt_x = xr.DataArray(np.linspace(0, 4, num=5), dims="points")
>>> tgt_y = xr.DataArray(np.linspace(0, 4, num=5), dims="points")
>>> da = da.sel(x=tgt_x, y=tgt_y, method="nearest")
>>> da
<xarray.DataArray (points: 5)> Size: 40B
array([ 0, 6, 12, 18, 24])
Coordinates:
x (points) int64 40B 0 1 2 3 4
y (points) int64 40B 0 1 2 3 4
Dimensions without coordinates: points
"""
ds = self._to_temp_dataset().sel(
indexers=indexers,
drop=drop,
method=method,
tolerance=tolerance,
**indexers_kwargs,
)
return self._from_temp_dataset(ds)
def _shuffle(
self, dim: Hashable, *, indices: GroupIndices, chunks: T_Chunks
) -> Self:
ds = self._to_temp_dataset()._shuffle(dim=dim, indices=indices, chunks=chunks)
return self._from_temp_dataset(ds)
def head(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Return a new DataArray whose data is given by the the first `n`
values along the specified dimension(s). Default `n` = 5
See Also
--------
Dataset.head
DataArray.tail
DataArray.thin
Examples
--------
>>> da = xr.DataArray(
... np.arange(25).reshape(5, 5),
... dims=("x", "y"),
... )
>>> da
<xarray.DataArray (x: 5, y: 5)> Size: 200B
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Dimensions without coordinates: x, y
>>> da.head(x=1)
<xarray.DataArray (x: 1, y: 5)> Size: 40B
array([[0, 1, 2, 3, 4]])
Dimensions without coordinates: x, y
>>> da.head({"x": 2, "y": 2})
<xarray.DataArray (x: 2, y: 2)> Size: 32B
array([[0, 1],
[5, 6]])
Dimensions without coordinates: x, y
"""
ds = self._to_temp_dataset().head(indexers, **indexers_kwargs)
return self._from_temp_dataset(ds)
def tail(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Return a new DataArray whose data is given by the the last `n`
values along the specified dimension(s). Default `n` = 5
See Also
--------
Dataset.tail
DataArray.head
DataArray.thin
Examples
--------
>>> da = xr.DataArray(
... np.arange(25).reshape(5, 5),
... dims=("x", "y"),
... )
>>> da
<xarray.DataArray (x: 5, y: 5)> Size: 200B
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Dimensions without coordinates: x, y
>>> da.tail(y=1)
<xarray.DataArray (x: 5, y: 1)> Size: 40B
array([[ 4],
[ 9],
[14],
[19],
[24]])
Dimensions without coordinates: x, y
>>> da.tail({"x": 2, "y": 2})
<xarray.DataArray (x: 2, y: 2)> Size: 32B
array([[18, 19],
[23, 24]])
Dimensions without coordinates: x, y
"""
ds = self._to_temp_dataset().tail(indexers, **indexers_kwargs)
return self._from_temp_dataset(ds)
def thin(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Return a new DataArray whose data is given by each `n` value
along the specified dimension(s).
Examples
--------
>>> x_arr = np.arange(0, 26)
>>> x_arr
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25])
>>> x = xr.DataArray(
... np.reshape(x_arr, (2, 13)),
... dims=("x", "y"),
... coords={"x": [0, 1], "y": np.arange(0, 13)},
... )
>>> x
<xarray.DataArray (x: 2, y: 13)> Size: 208B
array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]])
Coordinates:
* x (x) int64 16B 0 1
* y (y) int64 104B 0 1 2 3 4 5 6 7 8 9 10 11 12
>>>
>>> x.thin(3)
<xarray.DataArray (x: 1, y: 5)> Size: 40B
array([[ 0, 3, 6, 9, 12]])
Coordinates:
* x (x) int64 8B 0
* y (y) int64 40B 0 3 6 9 12
>>> x.thin({"x": 2, "y": 5})
<xarray.DataArray (x: 1, y: 3)> Size: 24B
array([[ 0, 5, 10]])
Coordinates:
* x (x) int64 8B 0
* y (y) int64 24B 0 5 10
See Also
--------
Dataset.thin
DataArray.head
DataArray.tail
"""
ds = self._to_temp_dataset().thin(indexers, **indexers_kwargs)
return self._from_temp_dataset(ds)
def broadcast_like(
self,
other: T_DataArrayOrSet,
*,
exclude: Iterable[Hashable] | None = None,
) -> Self:
"""Broadcast this DataArray against another Dataset or DataArray.
This is equivalent to xr.broadcast(other, self)[1]
xarray objects are broadcast against each other in arithmetic
operations, so this method should not be necessary for most uses.
If no change is needed, the input data is returned without being copied.
If new coords are added by the broadcast, their values are
NaN filled.
Parameters
----------
other : Dataset or DataArray
Object against which to broadcast this array.
exclude : iterable of Hashable, optional
Dimensions that must not be broadcasted
Returns
-------
new_da : DataArray
The caller broadcasted against ``other``.
Examples
--------
>>> arr1 = xr.DataArray(
... np.random.randn(2, 3),
... dims=("x", "y"),
... coords={"x": ["a", "b"], "y": ["a", "b", "c"]},
... )
>>> arr2 = xr.DataArray(
... np.random.randn(3, 2),
... dims=("x", "y"),
... coords={"x": ["a", "b", "c"], "y": ["a", "b"]},
... )
>>> arr1
<xarray.DataArray (x: 2, y: 3)> Size: 48B
array([[ 1.76405235, 0.40015721, 0.97873798],
[ 2.2408932 , 1.86755799, -0.97727788]])
Coordinates:
* x (x) <U1 8B 'a' 'b'
* y (y) <U1 12B 'a' 'b' 'c'
>>> arr2
<xarray.DataArray (x: 3, y: 2)> Size: 48B
array([[ 0.95008842, -0.15135721],
[-0.10321885, 0.4105985 ],
[ 0.14404357, 1.45427351]])
Coordinates:
* x (x) <U1 12B 'a' 'b' 'c'
* y (y) <U1 8B 'a' 'b'
>>> arr1.broadcast_like(arr2)
<xarray.DataArray (x: 3, y: 3)> Size: 72B
array([[ 1.76405235, 0.40015721, 0.97873798],
[ 2.2408932 , 1.86755799, -0.97727788],
[ nan, nan, nan]])
Coordinates:
* x (x) <U1 12B 'a' 'b' 'c'
* y (y) <U1 12B 'a' 'b' 'c'
"""
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
args = align(other, self, join="outer", copy=False, exclude=exclude)
dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)
return _broadcast_helper(args[1], exclude, dims_map, common_coords)
def _reindex_callback(
self,
aligner: alignment.Aligner,
dim_pos_indexers: dict[Hashable, Any],
variables: dict[Hashable, Variable],
indexes: dict[Hashable, Index],
fill_value: Any,
exclude_dims: frozenset[Hashable],
exclude_vars: frozenset[Hashable],
) -> Self:
"""Callback called from ``Aligner`` to create a new reindexed DataArray."""
if isinstance(fill_value, dict):
fill_value = fill_value.copy()
sentinel = object()
value = fill_value.pop(self.name, sentinel)
if value is not sentinel:
fill_value[_THIS_ARRAY] = value
ds = self._to_temp_dataset()
reindexed = ds._reindex_callback(
aligner,
dim_pos_indexers,
variables,
indexes,
fill_value,
exclude_dims,
exclude_vars,
)
da = self._from_temp_dataset(reindexed)
da.encoding = self.encoding
return da
def reindex_like(
self,
other: T_DataArrayOrSet,
*,
method: ReindexMethodOptions = None,
tolerance: float | Iterable[float] | str | None = None,
copy: bool = True,
fill_value=dtypes.NA,
) -> Self:
"""
Conform this object onto the indexes of another object, for indexes which the
objects share. Missing values are filled with ``fill_value``. The default fill
value is NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mismatched index values will be filled in with
NaN, and any mismatched dimension names will simply be ignored.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for filling index values from other not found on this
data array:
- None (default): don't fill gaps
- pad / ffill: propagate last valid index value forward
- backfill / bfill: propagate next valid index value backward
- nearest: use nearest valid index value
tolerance : float | Iterable[float] | str | None, default: None
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like must be the same size as the index and its dtype
must exactly match the index’s type.
copy : bool, default: True
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names (including coordinates) to fill values. Use this
data array's name to refer to the data array's values.
Returns
-------
reindexed : DataArray
Another dataset array, with this array's data but coordinates from
the other object.
Examples
--------
>>> data = np.arange(12).reshape(4, 3)
>>> da1 = xr.DataArray(
... data=data,
... dims=["x", "y"],
... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]},
... )
>>> da1
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Coordinates:
* x (x) int64 32B 10 20 30 40
* y (y) int64 24B 70 80 90
>>> da2 = xr.DataArray(
... data=data,
... dims=["x", "y"],
... coords={"x": [40, 30, 20, 10], "y": [90, 80, 70]},
... )
>>> da2
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Coordinates:
* x (x) int64 32B 40 30 20 10
* y (y) int64 24B 90 80 70
Reindexing with both DataArrays having the same coordinates set, but in different order:
>>> da1.reindex_like(da2)
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[11, 10, 9],
[ 8, 7, 6],
[ 5, 4, 3],
[ 2, 1, 0]])
Coordinates:
* x (x) int64 32B 40 30 20 10
* y (y) int64 24B 90 80 70
Reindexing with the other array having additional coordinates:
>>> da3 = xr.DataArray(
... data=data,
... dims=["x", "y"],
... coords={"x": [20, 10, 29, 39], "y": [70, 80, 90]},
... )
>>> da1.reindex_like(da3)
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 3., 4., 5.],
[ 0., 1., 2.],
[nan, nan, nan],
[nan, nan, nan]])
Coordinates:
* x (x) int64 32B 20 10 29 39
* y (y) int64 24B 70 80 90
Filling missing values with the previous valid index with respect to the coordinates' value:
>>> da1.reindex_like(da3, method="ffill")
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[3, 4, 5],
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
Coordinates:
* x (x) int64 32B 20 10 29 39
* y (y) int64 24B 70 80 90
Filling missing values while tolerating specified error for inexact matches:
>>> da1.reindex_like(da3, method="ffill", tolerance=5)
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 3., 4., 5.],
[ 0., 1., 2.],
[nan, nan, nan],
[nan, nan, nan]])
Coordinates:
* x (x) int64 32B 20 10 29 39
* y (y) int64 24B 70 80 90
Filling missing values with manually specified values:
>>> da1.reindex_like(da3, fill_value=19)
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 3, 4, 5],
[ 0, 1, 2],
[19, 19, 19],
[19, 19, 19]])
Coordinates:
* x (x) int64 32B 20 10 29 39
* y (y) int64 24B 70 80 90
Note that unlike ``broadcast_like``, ``reindex_like`` doesn't create new dimensions:
>>> da1.sel(x=20)
<xarray.DataArray (y: 3)> Size: 24B
array([3, 4, 5])
Coordinates:
* y (y) int64 24B 70 80 90
x int64 8B 20
...so the ``x`` dimension is not added back here:
>>> da1.sel(x=20).reindex_like(da1)
<xarray.DataArray (y: 3)> Size: 24B
array([3, 4, 5])
Coordinates:
* y (y) int64 24B 70 80 90
x int64 8B 20
See Also
--------
DataArray.reindex
DataArray.broadcast_like
align
"""
return alignment.reindex_like(
self,
other=other,
method=method,
tolerance=tolerance,
copy=copy,
fill_value=fill_value,
)
def reindex(
self,
indexers: Mapping[Any, Any] | None = None,
*,
method: ReindexMethodOptions = None,
tolerance: float | Iterable[float] | str | None = None,
copy: bool = True,
fill_value=dtypes.NA,
**indexers_kwargs: Any,
) -> Self:
"""Conform this object onto the indexes of another object, filling in
missing values with ``fill_value``. The default fill value is NaN.
Parameters
----------
indexers : dict, optional
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mismatched coordinate
values will be filled in with NaN, and any mismatched dimension
names will simply be ignored.
One of indexers or indexers_kwargs must be provided.
copy : bool, optional
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found on
this data array:
- None (default): don't fill gaps
- pad / ffill: propagate last valid index value forward
- backfill / bfill: propagate next valid index value backward
- nearest: use nearest valid index value
tolerance : float | Iterable[float] | str | None, default: None
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like must be the same size as the index and its dtype
must exactly match the index’s type.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names (including coordinates) to fill values. Use this
data array's name to refer to the data array's values.
**indexers_kwargs : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
reindexed : DataArray
Another dataset array, with this array's data but replaced
coordinates.
Examples
--------
Reverse latitude:
>>> da = xr.DataArray(
... np.arange(4),
... coords=[np.array([90, 89, 88, 87])],
... dims="lat",
... )
>>> da
<xarray.DataArray (lat: 4)> Size: 32B
array([0, 1, 2, 3])
Coordinates:
* lat (lat) int64 32B 90 89 88 87
>>> da.reindex(lat=da.lat[::-1])
<xarray.DataArray (lat: 4)> Size: 32B
array([3, 2, 1, 0])
Coordinates:
* lat (lat) int64 32B 87 88 89 90
See Also
--------
DataArray.reindex_like
align
"""
indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex")
return alignment.reindex(
self,
indexers=indexers,
method=method,
tolerance=tolerance,
copy=copy,
fill_value=fill_value,
)
def interp(
self,
coords: Mapping[Any, Any] | None = None,
method: InterpOptions = "linear",
assume_sorted: bool = False,
kwargs: Mapping[str, Any] | None = None,
**coords_kwargs: Any,
) -> Self:
"""
Interpolate a DataArray onto new coordinates.
Performs univariate or multivariate interpolation of a Dataset onto new coordinates,
utilizing either NumPy or SciPy interpolation routines.
Out-of-range values are filled with NaN, unless specified otherwise via `kwargs` to the numpy/scipy interpolant.
Parameters
----------
coords : dict, optional
Mapping from dimension names to the new coordinates.
New coordinate can be a scalar, array-like or DataArray.
If DataArrays are passed as new coordinates, their dimensions are
used for the broadcasting. Missing values are skipped.
method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \
"quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" }
Interpolation method to use (see descriptions above).
assume_sorted : bool, default: False
If False, values of x can be in any order and they are sorted
first. If True, x has to be an array of monotonically increasing
values.
kwargs : dict-like or None, default: None
Additional keyword arguments passed to scipy's interpolator. Valid
options and their behavior depend on whether ``interp1d`` or
``interpn`` is used.
**coords_kwargs : {dim: coordinate, ...}, optional
The keyword arguments form of ``coords``.
One of coords or coords_kwargs must be provided.
Returns
-------
interpolated : DataArray
New dataarray on the new coordinates.
Notes
-----
- SciPy is required for certain interpolation methods.
- When interpolating along multiple dimensions with methods `linear` and `nearest`,
the process attempts to decompose the interpolation into independent interpolations
along one dimension at a time.
- The specific interpolation method and dimensionality determine which
interpolant is used:
1. **Interpolation along one dimension of 1D data (`method='linear'`)**
- Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`.
2. **Interpolation along one dimension of N-dimensional data (N ≥ 1)**
- Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"}
use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp`
(as in the case of `method='linear'` for 1D data).
- If `method='polynomial'`, the `order` keyword argument must also be provided.
3. **Special interpolants for interpolation along one dimension of N-dimensional data (N ≥ 1)**
- Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used:
- `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator`
- `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator`
- `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator`
- `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator`
(`makima` is handled by passing the `makima` flag).
4. **Interpolation along multiple dimensions of multi-dimensional data**
- Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear",
"cubic", "quintic", "pchip"}.
See Also
--------
:mod:`scipy.interpolate`
:doc:`xarray-tutorial:fundamentals/02.2_manipulating_dimensions`
Tutorial material on manipulating data resolution using :py:func:`~xarray.DataArray.interp`
Examples
--------
>>> da = xr.DataArray(
... data=[[1, 4, 2, 9], [2, 7, 6, np.nan], [6, np.nan, 5, 8]],
... dims=("x", "y"),
... coords={"x": [0, 1, 2], "y": [10, 12, 14, 16]},
... )
>>> da
<xarray.DataArray (x: 3, y: 4)> Size: 96B
array([[ 1., 4., 2., 9.],
[ 2., 7., 6., nan],
[ 6., nan, 5., 8.]])
Coordinates:
* x (x) int64 24B 0 1 2
* y (y) int64 32B 10 12 14 16
1D linear interpolation (the default):
>>> da.interp(x=[0, 0.75, 1.25, 1.75])
<xarray.DataArray (x: 4, y: 4)> Size: 128B
array([[1. , 4. , 2. , nan],
[1.75, 6.25, 5. , nan],
[3. , nan, 5.75, nan],
[5. , nan, 5.25, nan]])
Coordinates:
* x (x) float64 32B 0.0 0.75 1.25 1.75
* y (y) int64 32B 10 12 14 16
1D nearest interpolation:
>>> da.interp(x=[0, 0.75, 1.25, 1.75], method="nearest")
<xarray.DataArray (x: 4, y: 4)> Size: 128B
array([[ 1., 4., 2., 9.],
[ 2., 7., 6., nan],
[ 2., 7., 6., nan],
[ 6., nan, 5., 8.]])
Coordinates:
* x (x) float64 32B 0.0 0.75 1.25 1.75
* y (y) int64 32B 10 12 14 16
1D linear extrapolation:
>>> da.interp(
... x=[1, 1.5, 2.5, 3.5],
... method="linear",
... kwargs={"fill_value": "extrapolate"},
... )
<xarray.DataArray (x: 4, y: 4)> Size: 128B
array([[ 2. , 7. , 6. , nan],
[ 4. , nan, 5.5, nan],
[ 8. , nan, 4.5, nan],
[12. , nan, 3.5, nan]])
Coordinates:
* x (x) float64 32B 1.0 1.5 2.5 3.5
* y (y) int64 32B 10 12 14 16
2D linear interpolation:
>>> da.interp(x=[0, 0.75, 1.25, 1.75], y=[11, 13, 15], method="linear")
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[2.5 , 3. , nan],
[4. , 5.625, nan],
[ nan, nan, nan],
[ nan, nan, nan]])
Coordinates:
* x (x) float64 32B 0.0 0.75 1.25 1.75
* y (y) int64 24B 11 13 15
"""
if self.dtype.kind not in "uifc":
raise TypeError(
f"interp only works for a numeric type array. Given {self.dtype}."
)
ds = self._to_temp_dataset().interp(
coords,
method=method,
kwargs=kwargs,
assume_sorted=assume_sorted,
**coords_kwargs,
)
return self._from_temp_dataset(ds)
def interp_like(
self,
other: T_Xarray,
method: InterpOptions = "linear",
assume_sorted: bool = False,
kwargs: Mapping[str, Any] | None = None,
) -> Self:
"""Interpolate this object onto the coordinates of another object,
filling out of range values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to a 1d array-like, which provides coordinates upon
which to index the variables in this dataset. Missing values are skipped.
method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \
"quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" }
Interpolation method to use (see descriptions above).
assume_sorted : bool, default: False
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs : dict, optional
Additional keyword arguments passed to the interpolant.
Returns
-------
interpolated : DataArray
Another dataarray by interpolating this dataarray's data along the
coordinates of the other object.
Notes
-----
- scipy is required.
- If the dataarray has object-type coordinates, reindex is used for these
coordinates instead of the interpolation.
- When interpolating along multiple dimensions with methods `linear` and `nearest`,
the process attempts to decompose the interpolation into independent interpolations
along one dimension at a time.
- The specific interpolation method and dimensionality determine which
interpolant is used:
1. **Interpolation along one dimension of 1D data (`method='linear'`)**
- Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`.
2. **Interpolation along one dimension of N-dimensional data (N ≥ 1)**
- Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"}
use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp`
(as in the case of `method='linear'` for 1D data).
- If `method='polynomial'`, the `order` keyword argument must also be provided.
3. **Special interpolants for interpolation along one dimension of N-dimensional data (N ≥ 1)**
- Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used:
- `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator`
- `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator`
- `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator`
- `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator`
(`makima` is handled by passing the `makima` flag).
4. **Interpolation along multiple dimensions of multi-dimensional data**
- Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear",
"cubic", "quintic", "pchip"}.
See Also
--------
:func:`DataArray.interp`
:func:`DataArray.reindex_like`
:mod:`scipy.interpolate`
Examples
--------
>>> data = np.arange(12).reshape(4, 3)
>>> da1 = xr.DataArray(
... data=data,
... dims=["x", "y"],
... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]},
... )
>>> da1
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Coordinates:
* x (x) int64 32B 10 20 30 40
* y (y) int64 24B 70 80 90
>>> da2 = xr.DataArray(
... data=data,
... dims=["x", "y"],
... coords={"x": [10, 20, 29, 39], "y": [70, 80, 90]},
... )
>>> da2
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Coordinates:
* x (x) int64 32B 10 20 29 39
* y (y) int64 24B 70 80 90
Interpolate the values in the coordinates of the other DataArray with respect to the source's values:
>>> da2.interp_like(da1)
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[0. , 1. , 2. ],
[3. , 4. , 5. ],
[6.3, 7.3, 8.3],
[nan, nan, nan]])
Coordinates:
* x (x) int64 32B 10 20 30 40
* y (y) int64 24B 70 80 90
Could also extrapolate missing values:
>>> da2.interp_like(da1, kwargs={"fill_value": "extrapolate"})
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0. , 1. , 2. ],
[ 3. , 4. , 5. ],
[ 6.3, 7.3, 8.3],
[ 9.3, 10.3, 11.3]])
Coordinates:
* x (x) int64 32B 10 20 30 40
* y (y) int64 24B 70 80 90
"""
if self.dtype.kind not in "uifc":
raise TypeError(
f"interp only works for a numeric type array. Given {self.dtype}."
)
ds = self._to_temp_dataset().interp_like(
other, method=method, kwargs=kwargs, assume_sorted=assume_sorted
)
return self._from_temp_dataset(ds)
def rename(
self,
new_name_or_name_dict: Hashable | Mapping[Any, Hashable] | None = None,
**names: Hashable,
) -> Self:
"""Returns a new DataArray with renamed coordinates, dimensions or a new name.
Parameters
----------
new_name_or_name_dict : str or dict-like, optional
If the argument is dict-like, it is used as a mapping from old
names to new names for coordinates or dimensions. Otherwise,
use the argument as the new name for this array.
**names : Hashable, optional
The keyword arguments form of a mapping from old names to
new names for coordinates or dimensions.
One of new_name_or_name_dict or names must be provided.
Returns
-------
renamed : DataArray
Renamed array or array with renamed coordinates.
See Also
--------
Dataset.rename
DataArray.swap_dims
"""
if new_name_or_name_dict is None and not names:
# change name to None?
return self._replace(name=None)
if utils.is_dict_like(new_name_or_name_dict) or new_name_or_name_dict is None:
# change dims/coords
name_dict = either_dict_or_kwargs(new_name_or_name_dict, names, "rename")
dataset = self._to_temp_dataset()._rename(name_dict)
return self._from_temp_dataset(dataset)
if utils.hashable(new_name_or_name_dict) and names:
# change name + dims/coords
dataset = self._to_temp_dataset()._rename(names)
dataarray = self._from_temp_dataset(dataset)
return dataarray._replace(name=new_name_or_name_dict)
# only change name
return self._replace(name=new_name_or_name_dict)
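# Illustrative sketch (not executed) of the three rename() forms dispatched
# above, assuming `da` has a dimension coordinate "x":
#
#     da.rename("temperature")            # only the array's name changes
#     da.rename({"x": "lon"})             # dict: rename dims/coords
#     da.rename("temperature", x="lon")   # new name plus renamed dims/coords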
def swap_dims(
self,
dims_dict: Mapping[Any, Hashable] | None = None,
**dims_kwargs,
) -> Self:
"""Returns a new DataArray with swapped dimensions.
Parameters
----------
dims_dict : dict-like
Dictionary whose keys are current dimension names and whose values
are new names.
**dims_kwargs : {existing_dim: new_dim, ...}, optional
The keyword arguments form of ``dims_dict``.
One of dims_dict or dims_kwargs must be provided.
Returns
-------
swapped : DataArray
DataArray with swapped dimensions.
Examples
--------
>>> arr = xr.DataArray(
... data=[0, 1],
... dims="x",
... coords={"x": ["a", "b"], "y": ("x", [0, 1])},
... )
>>> arr
<xarray.DataArray (x: 2)> Size: 16B
array([0, 1])
Coordinates:
* x (x) <U1 8B 'a' 'b'
y (x) int64 16B 0 1
>>> arr.swap_dims({"x": "y"})
<xarray.DataArray (y: 2)> Size: 16B
array([0, 1])
Coordinates:
* y (y) int64 16B 0 1
x (y) <U1 8B 'a' 'b'
>>> arr.swap_dims({"x": "z"})
<xarray.DataArray (z: 2)> Size: 16B
array([0, 1])
Coordinates:
x (z) <U1 8B 'a' 'b'
y (z) int64 16B 0 1
Dimensions without coordinates: z
See Also
--------
DataArray.rename
Dataset.swap_dims
"""
dims_dict = either_dict_or_kwargs(dims_dict, dims_kwargs, "swap_dims")
ds = self._to_temp_dataset().swap_dims(dims_dict)
return self._from_temp_dataset(ds)
def expand_dims(
self,
dim: Hashable | Sequence[Hashable] | Mapping[Any, Any] | None = None,
axis: int | Sequence[int] | None = None,
create_index_for_new_dim: bool = True,
**dim_kwargs: Any,
) -> Self:
"""Return a new object with an additional axis (or axes) inserted at
the corresponding position in the array shape. The new object is a
view into the underlying array, not a copy.
If dim is already a scalar coordinate, it will be promoted to a 1D
coordinate consisting of a single value.
The automatic creation of indexes to back new 1D coordinate variables
is controlled by the ``create_index_for_new_dim`` kwarg.
Parameters
----------
dim : Hashable, sequence of Hashable, dict, or None, optional
Dimensions to include on the new variable.
If provided as str or sequence of str, then dimensions are inserted
with length 1. If provided as a dict, then the keys are the new
dimensions and the values are either integers (giving the length of
the new dimensions) or sequence/ndarray (giving the coordinates of
the new dimensions).
axis : int, sequence of int, or None, default: None
Axis position(s) where new axis is to be inserted (position(s) on
the result array). If a sequence of integers is passed,
multiple axes are inserted. In this case, dim arguments should be
same length list. If axis=None is passed, all the axes will be
inserted to the start of the result array.
create_index_for_new_dim : bool, default: True
Whether to create new ``PandasIndex`` objects when the object being expanded contains scalar variables with names in ``dim``.
**dim_kwargs : int or sequence or ndarray
The keywords are arbitrary dimensions being inserted and the values
are either the lengths of the new dims (if int is given), or their
coordinates. Note, this is an alternative to passing a dict to the
dim kwarg and will only be used if dim is None.
Returns
-------
expanded : DataArray
This object, but with additional dimension(s).
See Also
--------
Dataset.expand_dims
Examples
--------
>>> da = xr.DataArray(np.arange(5), dims=("x"))
>>> da
<xarray.DataArray (x: 5)> Size: 40B
array([0, 1, 2, 3, 4])
Dimensions without coordinates: x
Add new dimension of length 2:
>>> da.expand_dims(dim={"y": 2})
<xarray.DataArray (y: 2, x: 5)> Size: 80B
array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Dimensions without coordinates: y, x
>>> da.expand_dims(dim={"y": 2}, axis=1)
<xarray.DataArray (x: 5, y: 2)> Size: 80B
array([[0, 0],
[1, 1],
[2, 2],
[3, 3],
[4, 4]])
Dimensions without coordinates: x, y
Add a new dimension with coordinates from array:
>>> da.expand_dims(dim={"y": np.arange(5)}, axis=0)
<xarray.DataArray (y: 5, x: 5)> Size: 200B
array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Coordinates:
* y (y) int64 40B 0 1 2 3 4
Dimensions without coordinates: x
"""
if isinstance(dim, int):
raise TypeError("dim should be Hashable or sequence/mapping of Hashables")
elif isinstance(dim, Sequence) and not isinstance(dim, str):
if len(dim) != len(set(dim)):
raise ValueError("dims should not contain duplicate values.")
dim = dict.fromkeys(dim, 1)
elif dim is not None and not isinstance(dim, Mapping):
dim = {dim: 1}
dim = either_dict_or_kwargs(dim, dim_kwargs, "expand_dims")
ds = self._to_temp_dataset().expand_dims(
dim, axis, create_index_for_new_dim=create_index_for_new_dim
)
return self._from_temp_dataset(ds)
def set_index(
self,
indexes: Mapping[Any, Hashable | Sequence[Hashable]] | None = None,
append: bool = False,
**indexes_kwargs: Hashable | Sequence[Hashable],
) -> Self:
"""Set DataArray (multi-)indexes using one or more existing
coordinates.
This legacy method is limited to pandas (multi-)indexes and
1-dimensional "dimension" coordinates. See
:py:meth:`~DataArray.set_xindex` for setting a pandas or a custom
Xarray-compatible index from one or more arbitrary coordinates.
Parameters
----------
indexes : {dim: index, ...}
Mapping from names matching dimensions and values given
by (lists of) the names of existing coordinates or variables to set
as new (multi-)index.
append : bool, default: False
If True, append the supplied index(es) to the existing index(es).
Otherwise replace the existing index(es).
**indexes_kwargs : optional
The keyword arguments form of ``indexes``.
One of indexes or indexes_kwargs must be provided.
Returns
-------
obj : DataArray
Another DataArray, with this data but replaced coordinates.
Examples
--------
>>> arr = xr.DataArray(
... data=np.ones((2, 3)),
... dims=["x", "y"],
... coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])},
... )
>>> arr
<xarray.DataArray (x: 2, y: 3)> Size: 48B
array([[1., 1., 1.],
[1., 1., 1.]])
Coordinates:
* x (x) int64 16B 0 1
* y (y) int64 24B 0 1 2
a (x) int64 16B 3 4
>>> arr.set_index(x="a")
<xarray.DataArray (x: 2, y: 3)> Size: 48B
array([[1., 1., 1.],
[1., 1., 1.]])
Coordinates:
* x (x) int64 16B 3 4
* y (y) int64 24B 0 1 2
See Also
--------
DataArray.reset_index
DataArray.set_xindex
"""
ds = self._to_temp_dataset().set_index(indexes, append=append, **indexes_kwargs)
return self._from_temp_dataset(ds)
def reset_index(
self,
dims_or_levels: Hashable | Sequence[Hashable],
drop: bool = False,
) -> Self:
"""Reset the specified index(es) or multi-index level(s).
This legacy method is specific to pandas (multi-)indexes and
1-dimensional "dimension" coordinates. See the more generic
:py:meth:`~DataArray.drop_indexes` and :py:meth:`~DataArray.set_xindex`
methods to respectively drop and set pandas or custom indexes for
arbitrary coordinates.
Parameters
----------
dims_or_levels : Hashable or sequence of Hashable
Name(s) of the dimension(s) and/or multi-index level(s) that will
be reset.
drop : bool, default: False
If True, remove the specified indexes and/or multi-index levels
instead of extracting them as new coordinates (default: False).
Returns
-------
obj : DataArray
Another dataarray, with this dataarray's data but replaced
coordinates.
See Also
--------
DataArray.set_index
DataArray.set_xindex
DataArray.drop_indexes
"""
ds = self._to_temp_dataset().reset_index(dims_or_levels, drop=drop)
return self._from_temp_dataset(ds)
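# Illustrative usage sketch (comments only, not executed). Builds on the
# ``set_index`` example above; the name ``arr`` is assumed from that example.
#
#     arr2 = arr.set_index(x="a")              # "a" becomes the index along "x"
#     arr3 = arr2.reset_index("x")             # remove the index, keep the coordinate data
#     arr4 = arr2.reset_index("x", drop=True)  # remove the index and the coordinate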
def set_xindex(
self,
coord_names: str | Sequence[Hashable],
index_cls: type[Index] | None = None,
**options,
) -> Self:
"""Set a new, Xarray-compatible index from one or more existing
coordinate(s).
Parameters
----------
coord_names : str or list
Name(s) of the coordinate(s) used to build the index.
If several names are given, their order matters.
index_cls : subclass of :class:`~xarray.indexes.Index`
The type of index to create. By default, try setting
a pandas (multi-)index from the supplied coordinates.
**options
Options passed to the index constructor.
Returns
-------
obj : DataArray
Another dataarray, with this dataarray's data and with a new index.
"""
ds = self._to_temp_dataset().set_xindex(coord_names, index_cls, **options)
return self._from_temp_dataset(ds)
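# Illustrative usage sketch (comments only, not executed). ``da`` is a
# hypothetical DataArray with a 1-d coordinate "a"; ``MyIndex`` stands in for
# any custom ``xarray.indexes.Index`` subclass and ``option`` for its options.
#
#     indexed = da.set_xindex("a")                            # default: pandas index over "a"
#     custom = da.set_xindex(["a", "b"], MyIndex, option=1)   # custom index with options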
def reorder_levels(
self,
dim_order: Mapping[Any, Sequence[int | Hashable]] | None = None,
**dim_order_kwargs: Sequence[int | Hashable],
) -> Self:
"""Rearrange index levels using input order.
Parameters
----------
dim_order : dict-like of Hashable to sequence of int or Hashable, optional
Mapping from names matching dimensions and values given
by lists representing new level orders. Every given dimension
must have a multi-index.
**dim_order_kwargs : optional
The keyword arguments form of ``dim_order``.
One of dim_order or dim_order_kwargs must be provided.
Returns
-------
obj : DataArray
Another dataarray, with this dataarray's data but replaced
coordinates.
"""
ds = self._to_temp_dataset().reorder_levels(dim_order, **dim_order_kwargs)
return self._from_temp_dataset(ds)
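# Illustrative usage sketch (comments only, not executed). Uses a stacked
# multi-index like the one built in the ``stack`` example further below.
#
#     stacked = arr.stack(z=("x", "y"))               # "z" is a multi-index of (x, y)
#     swapped = stacked.reorder_levels(z=["y", "x"])  # reorder the levels of "z"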
@partial(deprecate_dims, old_name="dimensions")
def stack(
self,
dim: Mapping[Any, Sequence[Hashable]] | None = None,
create_index: bool | None = True,
index_cls: type[Index] = PandasMultiIndex,
**dim_kwargs: Sequence[Hashable | EllipsisType],
) -> Self:
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the corresponding
coordinate variables will be combined into a MultiIndex.
Parameters
----------
dim : mapping of Hashable to sequence of Hashable
Mapping of the form `new_name=(dim1, dim2, ...)`.
Names of new dimensions, and the existing dimensions that they
replace. An ellipsis (`...`) will be replaced by all unlisted dimensions.
Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over
all dimensions.
create_index : bool or None, default: True
If True, create a multi-index for each of the stacked dimensions.
If False, don't create any index.
If None, create a multi-index only if exactly one single (1-d) coordinate
index is found for every dimension to stack.
index_cls: class, optional
Can be used to pass a custom multi-index type. Must be an Xarray index that
implements `.stack()`. By default, a pandas multi-index wrapper is used.
**dim_kwargs
The keyword arguments form of ``dim``.
One of dim or dim_kwargs must be provided.
Returns
-------
stacked : DataArray
DataArray with stacked data.
Examples
--------
>>> arr = xr.DataArray(
... np.arange(6).reshape(2, 3),
... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])],
... )
>>> arr
<xarray.DataArray (x: 2, y: 3)> Size: 48B
array([[0, 1, 2],
[3, 4, 5]])
Coordinates:
* x (x) <U1 8B 'a' 'b'
* y (y) int64 24B 0 1 2
>>> stacked = arr.stack(z=("x", "y"))
>>> stacked.indexes["z"]
MultiIndex([('a', 0),
('a', 1),
('a', 2),
('b', 0),
('b', 1),
('b', 2)],
name='z')
See Also
--------
DataArray.unstack
"""
ds = self._to_temp_dataset().stack(
dim,
create_index=create_index,
index_cls=index_cls,
**dim_kwargs,
)
return self._from_temp_dataset(ds)
def unstack(
self,
dim: Dims = None,
*,
fill_value: Any = dtypes.NA,
sparse: bool = False,
) -> Self:
"""
Unstack existing dimensions corresponding to MultiIndexes into
multiple new dimensions.
New dimensions will be added at the end.
Parameters
----------
dim : str, Iterable of Hashable or None, optional
Dimension(s) over which to unstack. By default unstacks all
MultiIndexes.
fill_value : scalar or dict-like, default: nan
Value to be filled. If a dict-like, maps variable names to
fill values. Use the data array's name to refer to it.
If not provided or if the dict-like does not contain
all variables, the dtype's NA value will be used.
sparse : bool, default: False
Use sparse-array if True
Returns
-------
unstacked : DataArray
Array with unstacked data.
Examples
--------
>>> arr = xr.DataArray(
... np.arange(6).reshape(2, 3),
... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])],
... )
>>> arr
<xarray.DataArray (x: 2, y: 3)> Size: 48B
array([[0, 1, 2],
[3, 4, 5]])
Coordinates:
* x (x) <U1 8B 'a' 'b'
* y (y) int64 24B 0 1 2
>>> stacked = arr.stack(z=("x", "y"))
>>> stacked.indexes["z"]
MultiIndex([('a', 0),
('a', 1),
('a', 2),
('b', 0),
('b', 1),
('b', 2)],
name='z')
>>> roundtripped = stacked.unstack()
>>> arr.identical(roundtripped)
True
See Also
--------
DataArray.stack
"""
ds = self._to_temp_dataset().unstack(dim, fill_value=fill_value, sparse=sparse)
return self._from_temp_dataset(ds)
def to_unstacked_dataset(self, dim: Hashable, level: int | Hashable = 0) -> Dataset:
"""Unstack DataArray expanding to Dataset along a given level of a
stacked coordinate.
This is the inverse operation of Dataset.to_stacked_array.
Parameters
----------
dim : Hashable
Name of existing dimension to unstack
level : int or Hashable, default: 0
The MultiIndex level to expand to a dataset along. Can either be
the integer index of the level or its name.
Returns
-------
unstacked: Dataset
Examples
--------
>>> arr = xr.DataArray(
... np.arange(6).reshape(2, 3),
... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])],
... )
>>> data = xr.Dataset({"a": arr, "b": arr.isel(y=0)})
>>> data
<xarray.Dataset> Size: 96B
Dimensions: (x: 2, y: 3)
Coordinates:
* x (x) <U1 8B 'a' 'b'
* y (y) int64 24B 0 1 2
Data variables:
a (x, y) int64 48B 0 1 2 3 4 5
b (x) int64 16B 0 3
>>> stacked = data.to_stacked_array("z", ["x"])
>>> stacked.indexes["z"]
MultiIndex([('a', 0),
('a', 1),
('a', 2),
('b', nan)],
name='z')
>>> roundtripped = stacked.to_unstacked_dataset(dim="z")
>>> data.identical(roundtripped)
True
See Also
--------
Dataset.to_stacked_array
"""
idx = self._indexes[dim].to_pandas_index()
if not isinstance(idx, pd.MultiIndex):
raise ValueError(f"'{dim}' is not a stacked coordinate")
level_number = idx._get_level_number(level) # type: ignore[attr-defined]
variables = idx.levels[level_number]
variable_dim = idx.names[level_number]
# pull variables out of datarray
data_dict = {}
for k in variables:
data_dict[k] = self.sel({variable_dim: k}, drop=True).squeeze(drop=True)
# unstacked dataset
return Dataset(data_dict)
@deprecate_dims
def transpose(
self,
*dim: Hashable,
transpose_coords: bool = True,
missing_dims: ErrorOptionsWithWarn = "raise",
) -> Self:
"""Return a new DataArray object with transposed dimensions.
Parameters
----------
*dim : Hashable, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
transpose_coords : bool, default: True
If True, also transpose the coordinates of this DataArray.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
DataArray:
- "raise": raise an exception
- "warn": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
Returns
-------
transposed : DataArray
The returned DataArray's array is transposed.
Notes
-----
This operation returns a view of this array's data. It is
lazy for dask-backed DataArrays but not for numpy-backed DataArrays
-- the data will be fully loaded.
See Also
--------
numpy.transpose
Dataset.transpose
"""
if dim:
dim = tuple(infix_dims(dim, self.dims, missing_dims))
variable = self.variable.transpose(*dim)
if transpose_coords:
coords: dict[Hashable, Variable] = {}
for name, coord in self.coords.items():
coord_dims = tuple(d for d in dim if d in coord.dims)
coords[name] = coord.variable.transpose(*coord_dims)
return self._replace(variable, coords)
else:
return self._replace(variable)
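# Illustrative usage sketch (comments only, not executed), assuming a 2-d
# DataArray ``arr`` with dimensions ("x", "y").
#
#     rev = arr.transpose()        # reverse all dimensions (same as arr.T)
#     yx = arr.transpose("y", "x")  # explicit dimension order
#     data_only = arr.transpose("y", "x", transpose_coords=False)  # leave coords as-is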
@property
def T(self) -> Self:
return self.transpose()
def drop_vars(
self,
names: str | Iterable[Hashable] | Callable[[Self], str | Iterable[Hashable]],
*,
errors: ErrorOptions = "raise",
) -> Self:
"""Returns an array with dropped variables.
Parameters
----------
names : Hashable or iterable of Hashable or Callable
Name(s) of variables to drop. If a Callable, this object is passed as its
only argument and its result is used.
errors : {"raise", "ignore"}, default: "raise"
If 'raise', raises a ValueError if any of the variables
passed are not in the dataset. If 'ignore', any given names that are in the
DataArray are dropped and no error is raised.
Returns
-------
dropped : Dataset
New Dataset copied from `self` with variables removed.
Examples
--------
>>> data = np.arange(12).reshape(4, 3)
>>> da = xr.DataArray(
... data=data,
... dims=["x", "y"],
... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]},
... )
>>> da
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Coordinates:
* x (x) int64 32B 10 20 30 40
* y (y) int64 24B 70 80 90
Removing a single variable:
>>> da.drop_vars("x")
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Coordinates:
* y (y) int64 24B 70 80 90
Dimensions without coordinates: x
Removing a list of variables:
>>> da.drop_vars(["x", "y"])
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Dimensions without coordinates: x, y
>>> da.drop_vars(lambda x: x.coords)
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Dimensions without coordinates: x, y
"""
if callable(names):
names = names(self)
ds = self._to_temp_dataset().drop_vars(names, errors=errors)
return self._from_temp_dataset(ds)
def drop_indexes(
self,
coord_names: Hashable | Iterable[Hashable],
*,
errors: ErrorOptions = "raise",
) -> Self:
"""Drop the indexes assigned to the given coordinates.
Parameters
----------
coord_names : hashable or iterable of hashable
Name(s) of the coordinate(s) for which to drop the index.
errors : {"raise", "ignore"}, default: "raise"
If 'raise', raises a ValueError if any of the coordinates
passed have no index or are not in the dataset.
If 'ignore', no error is raised.
Returns
-------
dropped : DataArray
A new dataarray with dropped indexes.
"""
ds = self._to_temp_dataset().drop_indexes(coord_names, errors=errors)
return self._from_temp_dataset(ds)
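# Illustrative usage sketch (comments only, not executed), assuming a
# DataArray ``da`` with an indexed dimension coordinate "x".
#
#     no_index = da.drop_indexes("x")   # "x" remains as a coordinate, but it is
#                                       # no longer backed by an index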
def drop(
self,
labels: Mapping[Any, Any] | None = None,
dim: Hashable | None = None,
*,
errors: ErrorOptions = "raise",
**labels_kwargs,
) -> Self:
"""Backward compatible method based on `drop_vars` and `drop_sel`
Using either `drop_vars` or `drop_sel` is encouraged
See Also
--------
DataArray.drop_vars
DataArray.drop_sel
"""
ds = self._to_temp_dataset().drop(labels, dim, errors=errors, **labels_kwargs)
return self._from_temp_dataset(ds)
def drop_sel(
self,
labels: Mapping[Any, Any] | None = None,
*,
errors: ErrorOptions = "raise",
**labels_kwargs,
) -> Self:
"""Drop index labels from this DataArray.
Parameters
----------
labels : mapping of Hashable to Any
Index labels to drop
errors : {"raise", "ignore"}, default: "raise"
If 'raise', raises a ValueError if
any of the index labels passed are not
in the dataset. If 'ignore', any given labels that are in the
dataset are dropped and no error is raised.
**labels_kwargs : {dim: label, ...}, optional
The keyword arguments form of ``dim`` and ``labels``
Returns
-------
dropped : DataArray
Examples
--------
>>> da = xr.DataArray(
... np.arange(25).reshape(5, 5),
... coords={"x": np.arange(0, 9, 2), "y": np.arange(0, 13, 3)},
... dims=("x", "y"),
... )
>>> da
<xarray.DataArray (x: 5, y: 5)> Size: 200B
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Coordinates:
* x (x) int64 40B 0 2 4 6 8
* y (y) int64 40B 0 3 6 9 12
>>> da.drop_sel(x=[0, 2], y=9)
<xarray.DataArray (x: 3, y: 4)> Size: 96B
array([[10, 11, 12, 14],
[15, 16, 17, 19],
[20, 21, 22, 24]])
Coordinates:
* x (x) int64 24B 4 6 8
* y (y) int64 32B 0 3 6 12
>>> da.drop_sel({"x": 6, "y": [0, 3]})
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 2, 3, 4],
[ 7, 8, 9],
[12, 13, 14],
[22, 23, 24]])
Coordinates:
* x (x) int64 32B 0 2 4 8
* y (y) int64 24B 6 9 12
"""
if labels_kwargs or isinstance(labels, dict):
labels = either_dict_or_kwargs(labels, labels_kwargs, "drop")
ds = self._to_temp_dataset().drop_sel(labels, errors=errors)
return self._from_temp_dataset(ds)
def drop_isel(
self, indexers: Mapping[Any, Any] | None = None, **indexers_kwargs
) -> Self:
"""Drop index positions from this DataArray.
Parameters
----------
indexers : mapping of Hashable to Any or None, default: None
Index locations to drop
**indexers_kwargs : {dim: position, ...}, optional
The keyword arguments form of ``dim`` and ``positions``
Returns
-------
dropped : DataArray
Raises
------
IndexError
Examples
--------
>>> da = xr.DataArray(np.arange(25).reshape(5, 5), dims=("X", "Y"))
>>> da
<xarray.DataArray (X: 5, Y: 5)> Size: 200B
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Dimensions without coordinates: X, Y
>>> da.drop_isel(X=[0, 4], Y=2)
<xarray.DataArray (X: 3, Y: 4)> Size: 96B
array([[ 5, 6, 8, 9],
[10, 11, 13, 14],
[15, 16, 18, 19]])
Dimensions without coordinates: X, Y
>>> da.drop_isel({"X": 3, "Y": 3})
<xarray.DataArray (X: 4, Y: 4)> Size: 128B
array([[ 0, 1, 2, 4],
[ 5, 6, 7, 9],
[10, 11, 12, 14],
[20, 21, 22, 24]])
Dimensions without coordinates: X, Y
"""
dataset = self._to_temp_dataset()
dataset = dataset.drop_isel(indexers=indexers, **indexers_kwargs)
return self._from_temp_dataset(dataset)
def dropna(
self,
dim: Hashable,
*,
how: Literal["any", "all"] = "any",
thresh: int | None = None,
) -> Self:
"""Returns a new array with dropped labels for missing values along
the provided dimension.
Parameters
----------
dim : Hashable
Dimension along which to drop missing values. Dropping along
multiple dimensions simultaneously is not yet supported.
how : {"any", "all"}, default: "any"
- any : if any NA values are present, drop that label
- all : if all values are NA, drop that label
thresh : int or None, default: None
If supplied, require this many non-NA values.
Returns
-------
dropped : DataArray
Examples
--------
>>> temperature = [
... [0, 4, 2, 9],
... [np.nan, np.nan, np.nan, np.nan],
... [np.nan, 4, 2, 0],
... [3, 1, 0, 0],
... ]
>>> da = xr.DataArray(
... data=temperature,
... dims=["Y", "X"],
... coords=dict(
... lat=("Y", np.array([-20.0, -20.25, -20.50, -20.75])),
... lon=("X", np.array([10.0, 10.25, 10.5, 10.75])),
... ),
... )
>>> da
<xarray.DataArray (Y: 4, X: 4)> Size: 128B
array([[ 0., 4., 2., 9.],
[nan, nan, nan, nan],
[nan, 4., 2., 0.],
[ 3., 1., 0., 0.]])
Coordinates:
lat (Y) float64 32B -20.0 -20.25 -20.5 -20.75
lon (X) float64 32B 10.0 10.25 10.5 10.75
Dimensions without coordinates: Y, X
>>> da.dropna(dim="Y", how="any")
<xarray.DataArray (Y: 2, X: 4)> Size: 64B
array([[0., 4., 2., 9.],
[3., 1., 0., 0.]])
Coordinates:
lat (Y) float64 16B -20.0 -20.75
lon (X) float64 32B 10.0 10.25 10.5 10.75
Dimensions without coordinates: Y, X
Drop values only if all values along the dimension are NaN:
>>> da.dropna(dim="Y", how="all")
<xarray.DataArray (Y: 3, X: 4)> Size: 96B
array([[ 0., 4., 2., 9.],
[nan, 4., 2., 0.],
[ 3., 1., 0., 0.]])
Coordinates:
lat (Y) float64 24B -20.0 -20.5 -20.75
lon (X) float64 32B 10.0 10.25 10.5 10.75
Dimensions without coordinates: Y, X
"""
ds = self._to_temp_dataset().dropna(dim, how=how, thresh=thresh)
return self._from_temp_dataset(ds)
def fillna(self, value: Any) -> Self:
"""Fill missing values in this object.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : scalar, ndarray or DataArray
Used to fill all matching missing values in this array. If the
argument is a DataArray, it is first aligned with (reindexed to)
this array.
Returns
-------
filled : DataArray
Examples
--------
>>> da = xr.DataArray(
... np.array([1, 4, np.nan, 0, 3, np.nan]),
... dims="Z",
... coords=dict(
... Z=("Z", np.arange(6)),
... height=("Z", np.array([0, 10, 20, 30, 40, 50])),
... ),
... )
>>> da
<xarray.DataArray (Z: 6)> Size: 48B
array([ 1., 4., nan, 0., 3., nan])
Coordinates:
* Z (Z) int64 48B 0 1 2 3 4 5
height (Z) int64 48B 0 10 20 30 40 50
Fill all NaN values with 0:
>>> da.fillna(0)
<xarray.DataArray (Z: 6)> Size: 48B
array([1., 4., 0., 0., 3., 0.])
Coordinates:
* Z (Z) int64 48B 0 1 2 3 4 5
height (Z) int64 48B 0 10 20 30 40 50
Fill NaN values with corresponding values in array:
>>> da.fillna(np.array([2, 9, 4, 2, 8, 9]))
<xarray.DataArray (Z: 6)> Size: 48B
array([1., 4., 4., 0., 3., 9.])
Coordinates:
* Z (Z) int64 48B 0 1 2 3 4 5
height (Z) int64 48B 0 10 20 30 40 50
"""
if utils.is_dict_like(value):
raise TypeError(
"cannot provide fill value as a dictionary with fillna on a DataArray"
)
out = ops.fillna(self, value)
return out
def interpolate_na(
self,
dim: Hashable | None = None,
method: InterpOptions = "linear",
limit: int | None = None,
use_coordinate: bool | str = True,
max_gap: (
None
| int
| float
| str
| pd.Timedelta
| np.timedelta64
| datetime.timedelta
) = None,
keep_attrs: bool | None = None,
**kwargs: Any,
) -> Self:
"""Fill in NaNs by interpolating according to different methods.
Parameters
----------
dim : Hashable or None, optional
Specifies the dimension along which to interpolate.
method : {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", \
"barycentric", "krogh", "pchip", "spline", "akima"}, default: "linear"
String indicating which method to use for interpolation:
- 'linear': linear interpolation. Additional keyword
arguments are passed to :py:func:`numpy.interp`
- 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial':
are passed to :py:func:`scipy.interpolate.interp1d`. If
``method='polynomial'``, the ``order`` keyword argument must also be
provided.
- 'barycentric', 'krogh', 'pchip', 'spline', 'akima': use their
respective :py:class:`scipy.interpolate` classes.
use_coordinate : bool or str, default: True
Specifies which index to use as the x values in the interpolation
formulated as `y = f(x)`. If False, values are treated as if
equally-spaced along ``dim``. If True, the IndexVariable `dim` is
used. If ``use_coordinate`` is a string, it specifies the name of a
coordinate variable to use as the index.
limit : int or None, default: None
Maximum number of consecutive NaNs to fill. Must be greater than 0
or None for no limit. This filling is done regardless of the size of
the gap in the data. To only interpolate over gaps less than a given length,
see ``max_gap``.
max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None
Maximum size of gap, a continuous sequence of NaNs, that will be filled.
Use None for no limit. When interpolating along a datetime64 dimension
and ``use_coordinate=True``, ``max_gap`` can be one of the following:
- a string that is valid input for pandas.to_timedelta
- a :py:class:`numpy.timedelta64` object
- a :py:class:`pandas.Timedelta` object
- a :py:class:`datetime.timedelta` object
Otherwise, ``max_gap`` must be an int or a float. Use of ``max_gap`` with unlabeled
dimensions has not been implemented yet. Gap length is defined as the difference
between coordinate values at the first data point after a gap and the last value
before a gap. For gaps at the beginning (end), gap length is defined as the difference
between coordinate values at the first (last) valid data point and the first (last) NaN.
For example, consider::
<xarray.DataArray (x: 9)>
array([nan, nan, nan, 1., nan, nan, 4., nan, nan])
Coordinates:
* x (x) int64 0 1 2 3 4 5 6 7 8
The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively
keep_attrs : bool or None, default: None
If True, the dataarray's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new
object will be returned without attributes.
**kwargs : dict, optional
parameters passed verbatim to the underlying interpolation function
Returns
-------
interpolated: DataArray
Filled in DataArray.
See Also
--------
numpy.interp
scipy.interpolate
Examples
--------
>>> da = xr.DataArray(
... [np.nan, 2, 3, np.nan, 0], dims="x", coords={"x": [0, 1, 2, 3, 4]}
... )
>>> da
<xarray.DataArray (x: 5)> Size: 40B
array([nan, 2., 3., nan, 0.])
Coordinates:
* x (x) int64 40B 0 1 2 3 4
>>> da.interpolate_na(dim="x", method="linear")
<xarray.DataArray (x: 5)> Size: 40B
array([nan, 2. , 3. , 1.5, 0. ])
Coordinates:
* x (x) int64 40B 0 1 2 3 4
>>> da.interpolate_na(dim="x", method="linear", fill_value="extrapolate")
<xarray.DataArray (x: 5)> Size: 40B
array([1. , 2. , 3. , 1.5, 0. ])
Coordinates:
* x (x) int64 40B 0 1 2 3 4
"""
from xarray.core.missing import interp_na
return interp_na(
self,
dim=dim,
method=method,
limit=limit,
use_coordinate=use_coordinate,
max_gap=max_gap,
keep_attrs=keep_attrs,
**kwargs,
)
def ffill(self, dim: Hashable, limit: int | None = None) -> Self:
"""Fill NaN values by propagating values forward
*Requires bottleneck.*
Parameters
----------
dim : Hashable
Specifies the dimension along which to propagate values when
filling.
limit : int or None, default: None
The maximum number of consecutive NaN values to forward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit. Must be None or greater than or equal
to axis length if filling along chunked axes (dimensions).
Returns
-------
filled : DataArray
Examples
--------
>>> temperature = np.array(
... [
... [np.nan, 1, 3],
... [0, np.nan, 5],
... [5, np.nan, np.nan],
... [3, np.nan, np.nan],
... [0, 2, 0],
... ]
... )
>>> da = xr.DataArray(
... data=temperature,
... dims=["Y", "X"],
... coords=dict(
... lat=("Y", np.array([-20.0, -20.25, -20.50, -20.75, -21.0])),
... lon=("X", np.array([10.0, 10.25, 10.5])),
... ),
... )
>>> da
<xarray.DataArray (Y: 5, X: 3)> Size: 120B
array([[nan, 1., 3.],
[ 0., nan, 5.],
[ 5., nan, nan],
[ 3., nan, nan],
[ 0., 2., 0.]])
Coordinates:
lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0
lon (X) float64 24B 10.0 10.25 10.5
Dimensions without coordinates: Y, X
Fill all NaN values:
>>> da.ffill(dim="Y", limit=None)
<xarray.DataArray (Y: 5, X: 3)> Size: 120B
array([[nan, 1., 3.],
[ 0., 1., 5.],
[ 5., 1., 5.],
[ 3., 1., 5.],
[ 0., 2., 0.]])
Coordinates:
lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0
lon (X) float64 24B 10.0 10.25 10.5
Dimensions without coordinates: Y, X
Fill only the first of consecutive NaN values:
>>> da.ffill(dim="Y", limit=1)
<xarray.DataArray (Y: 5, X: 3)> Size: 120B
array([[nan, 1., 3.],
[ 0., 1., 5.],
[ 5., nan, 5.],
[ 3., nan, nan],
[ 0., 2., 0.]])
Coordinates:
lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0
lon (X) float64 24B 10.0 10.25 10.5
Dimensions without coordinates: Y, X
"""
from xarray.core.missing import ffill
return ffill(self, dim, limit=limit)
def bfill(self, dim: Hashable, limit: int | None = None) -> Self:
"""Fill NaN values by propagating values backward
*Requires bottleneck.*
Parameters
----------
dim : Hashable
Specifies the dimension along which to propagate values when
filling.
limit : int or None, default: None
The maximum number of consecutive NaN values to backward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit. Must be None or greater than or equal
to axis length if filling along chunked axes (dimensions).
Returns
-------
filled : DataArray
Examples
--------
>>> temperature = np.array(
... [
... [0, 1, 3],
... [0, np.nan, 5],
... [5, np.nan, np.nan],
... [3, np.nan, np.nan],
... [np.nan, 2, 0],
... ]
... )
>>> da = xr.DataArray(
... data=temperature,
... dims=["Y", "X"],
... coords=dict(
... lat=("Y", np.array([-20.0, -20.25, -20.50, -20.75, -21.0])),
... lon=("X", np.array([10.0, 10.25, 10.5])),
... ),
... )
>>> da
<xarray.DataArray (Y: 5, X: 3)> Size: 120B
array([[ 0., 1., 3.],
[ 0., nan, 5.],
[ 5., nan, nan],
[ 3., nan, nan],
[nan, 2., 0.]])
Coordinates:
lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0
lon (X) float64 24B 10.0 10.25 10.5
Dimensions without coordinates: Y, X
Fill all NaN values:
>>> da.bfill(dim="Y", limit=None)
<xarray.DataArray (Y: 5, X: 3)> Size: 120B
array([[ 0., 1., 3.],
[ 0., 2., 5.],
[ 5., 2., 0.],
[ 3., 2., 0.],
[nan, 2., 0.]])
Coordinates:
lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0
lon (X) float64 24B 10.0 10.25 10.5
Dimensions without coordinates: Y, X
Fill only the first of consecutive NaN values:
>>> da.bfill(dim="Y", limit=1)
<xarray.DataArray (Y: 5, X: 3)> Size: 120B
array([[ 0., 1., 3.],
[ 0., nan, 5.],
[ 5., nan, nan],
[ 3., 2., 0.],
[nan, 2., 0.]])
Coordinates:
lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0
lon (X) float64 24B 10.0 10.25 10.5
Dimensions without coordinates: Y, X
"""
from xarray.core.missing import bfill
return bfill(self, dim, limit=limit)
def combine_first(self, other: Self) -> Self:
"""Combine two DataArray objects, with union of coordinates.
This operation follows the normal broadcasting and alignment rules of
``join='outer'``. Default to non-null values of array calling the
method. Use np.nan to fill in vacant cells after alignment.
Parameters
----------
other : DataArray
Used to fill all matching missing values in this array.
Returns
-------
DataArray
"""
return ops.fillna(self, other, join="outer")
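# Illustrative usage sketch (comments only, not executed); the arrays and
# coordinate values are hypothetical.
#
#     a = xr.DataArray([1.0, np.nan], dims="x", coords={"x": [0, 1]})
#     b = xr.DataArray([10.0, 20.0, 30.0], dims="x", coords={"x": [1, 2, 3]})
#     c = a.combine_first(b)   # outer join on "x"; non-null values of ``a`` take priority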
def reduce(
self,
func: Callable[..., Any],
dim: Dims = None,
*,
axis: int | Sequence[int] | None = None,
keep_attrs: bool | None = None,
keepdims: bool = False,
**kwargs: Any,
) -> Self:
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : callable
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : "...", str, Iterable of Hashable or None, optional
Dimension(s) over which to apply `func`. By default `func` is
applied over all dimensions.
axis : int or sequence of int, optional
Axis(es) over which to repeatedly apply `func`. Only one of the
'dim' and 'axis' arguments can be supplied. If neither are
supplied, then the reduction is calculated over the flattened array
(by calling `f(x)` without an axis argument).
keep_attrs : bool or None, optional
If True (default), the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new
object will be returned without attributes.
keepdims : bool, default: False
If True, the dimensions which are reduced are left in the result
as dimensions of size one. Coordinates that use these dimensions
are removed.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : DataArray
DataArray with this object's array replaced with an array with
summarized data and the indicated dimension(s) removed.
"""
var = self.variable.reduce(func, dim, axis, keep_attrs, keepdims, **kwargs)
return self._replace_maybe_drop_dims(var)
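# Illustrative usage sketch (comments only, not executed), assuming a
# DataArray ``da`` with a dimension "x".
#
#     total = da.reduce(np.sum, dim="x")   # sum along "x"
#     overall = da.reduce(np.max)          # reduce over all dimensions (flattened)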
def to_pandas(self) -> Self | pd.Series | pd.DataFrame:
"""Convert this array into a pandas object with the same shape.
The type of the returned object depends on the number of DataArray
dimensions:
* 0D -> `xarray.DataArray`
* 1D -> `pandas.Series`
* 2D -> `pandas.DataFrame`
Only works for arrays with 2 or fewer dimensions.
The DataArray constructor performs the inverse transformation.
Returns
-------
result : DataArray | Series | DataFrame
DataArray, pandas Series or pandas DataFrame.
"""
# TODO: consolidate the info about pandas constructors and the
# attributes that correspond to their indexes into a separate module?
constructors: dict[int, Callable] = {
0: lambda x: x,
1: pd.Series,
2: pd.DataFrame,
}
try:
constructor = constructors[self.ndim]
except KeyError as err:
raise ValueError(
f"Cannot convert arrays with {self.ndim} dimensions into "
"pandas objects. Requires 2 or fewer dimensions."
) from err
indexes = [self.get_index(dim) for dim in self.dims]
if isinstance(self._variable._data, PandasExtensionArray):
values = self._variable._data.array
else:
values = self.values
pandas_object = constructor(values, *indexes)
if isinstance(pandas_object, pd.Series):
pandas_object.name = self.name
return pandas_object
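# Illustrative usage sketch (comments only, not executed); the array is
# hypothetical.
#
#     s = xr.DataArray([1, 2, 3], dims="x", coords={"x": [10, 20, 30]}).to_pandas()
#     # 1D -> pandas.Series indexed by "x"; a 2D array would yield a DataFrame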
def to_dataframe(
self, name: Hashable | None = None, dim_order: Sequence[Hashable] | None = None
) -> pd.DataFrame:
"""Convert this array and its coordinates into a tidy pandas.DataFrame.
The DataFrame is indexed by the Cartesian product of index coordinates
(in the form of a :py:class:`pandas.MultiIndex`). Other coordinates are
included as columns in the DataFrame.
For 1D and 2D DataArrays, see also :py:func:`DataArray.to_pandas` which
doesn't rely on a MultiIndex to build the DataFrame.
Parameters
----------
name: Hashable or None, optional
Name to give to this array (required if unnamed).
dim_order: Sequence of Hashable or None, optional
Hierarchical dimension order for the resulting dataframe.
Array content is transposed to this order and then written out as flat
vectors in contiguous order, so the last dimension in this list
will be contiguous in the resulting DataFrame. This has a major
influence on which operations are efficient on the resulting
dataframe.
If provided, must include all dimensions of this DataArray. By default,
dimensions are sorted according to the DataArray dimensions order.
Returns
-------
result: DataFrame
DataArray as a pandas DataFrame.
See Also
--------
DataArray.to_pandas
DataArray.to_series
"""
if name is None:
name = self.name
if name is None:
raise ValueError(
"cannot convert an unnamed DataArray to a "
"DataFrame: use the ``name`` parameter"
)
if self.ndim == 0:
raise ValueError("cannot convert a scalar to a DataFrame")
# By using a unique name, we can convert a DataArray into a DataFrame
# even if it shares a name with one of its coordinates.
# I would normally use unique_name = object() but that results in a
# dataframe with columns in the wrong order, for reasons I have not
# been able to debug (possibly a pandas bug?).
unique_name = "__unique_name_identifier_z98xfz98xugfg73ho__"
ds = self._to_dataset_whole(name=unique_name)
if dim_order is None:
ordered_dims = dict(zip(self.dims, self.shape, strict=True))
else:
ordered_dims = ds._normalize_dim_order(dim_order=dim_order)
df = ds._to_dataframe(ordered_dims)
df.columns = [name if c == unique_name else c for c in df.columns]
return df
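# Illustrative usage sketch (comments only, not executed), assuming an
# unnamed 2-d DataArray ``da`` with dimensions ("x", "y"); the column name
# "value" is hypothetical.
#
#     df = da.to_dataframe(name="value")                        # name required if unnamed
#     df = da.to_dataframe(name="value", dim_order=["y", "x"])  # "x" varies fastest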
def to_series(self) -> pd.Series:
"""Convert this array into a pandas.Series.
The Series is indexed by the Cartesian product of index coordinates
(in the form of a :py:class:`pandas.MultiIndex`).
Returns
-------
result : Series
DataArray as a pandas Series.
See Also
--------
DataArray.to_pandas
DataArray.to_dataframe
"""
index = self.coords.to_index()
return pd.Series(self.values.reshape(-1), index=index, name=self.name)
def to_masked_array(self, copy: bool = True) -> np.ma.MaskedArray:
"""Convert this array into a numpy.ma.MaskedArray
Parameters
----------
copy : bool, default: True
If True make a copy of the array in the result. If False,
a MaskedArray view of DataArray.values is returned.
Returns
-------
result : MaskedArray
Masked where invalid values (nan or inf) occur.
"""
values = self.to_numpy() # only compute lazy arrays once
isnull = pd.isnull(values)
return np.ma.MaskedArray(data=values, mask=isnull, copy=copy)
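# Illustrative usage sketch (comments only, not executed), assuming a
# DataArray ``da`` that may contain NaNs.
#
#     masked = da.to_masked_array()           # copy of the data with NaN/inf masked
#     view = da.to_masked_array(copy=False)   # masked view of da.values instead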
# path=None writes to bytes
@overload
def to_netcdf(
self,
path: None = None,
mode: NetcdfWriteModes = "w",
format: T_NetcdfTypes | None = None,
group: str | None = None,
engine: T_NetcdfEngine | None = None,
encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
unlimited_dims: Iterable[Hashable] | None = None,
compute: bool = True,
invalid_netcdf: bool = False,
auto_complex: bool | None = None,
) -> memoryview: ...
# compute=False returns dask.Delayed
@overload
def to_netcdf(
self,
path: str | PathLike,
mode: NetcdfWriteModes = "w",
format: T_NetcdfTypes | None = None,
group: str | None = None,
engine: T_NetcdfEngine | None = None,
encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
unlimited_dims: Iterable[Hashable] | None = None,
*,
compute: Literal[False],
invalid_netcdf: bool = False,
auto_complex: bool | None = None,
) -> Delayed: ...
# default return None
@overload
def to_netcdf(
self,
path: str | PathLike,
mode: NetcdfWriteModes = "w",
format: T_NetcdfTypes | None = None,
group: str | None = None,
engine: T_NetcdfEngine | None = None,
encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
unlimited_dims: Iterable[Hashable] | None = None,
compute: Literal[True] = True,
invalid_netcdf: bool = False,
auto_complex: bool | None = None,
) -> None: ...
# if compute cannot be evaluated at type check time
# we may get back either Delayed or None
@overload
def to_netcdf(
self,
path: str | PathLike,
mode: NetcdfWriteModes = "w",
format: T_NetcdfTypes | None = None,
group: str | None = None,
engine: T_NetcdfEngine | None = None,
encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
unlimited_dims: Iterable[Hashable] | None = None,
compute: bool = True,
invalid_netcdf: bool = False,
auto_complex: bool | None = None,
) -> Delayed | None: ...
def to_netcdf(
self,
path: str | PathLike | None = None,
mode: NetcdfWriteModes = "w",
format: T_NetcdfTypes | None = None,
group: str | None = None,
engine: T_NetcdfEngine | None = None,
encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
unlimited_dims: Iterable[Hashable] | None = None,
compute: bool = True,
invalid_netcdf: bool = False,
auto_complex: bool | None = None,
) -> memoryview | Delayed | None:
"""Write DataArray contents to a netCDF file.
Parameters
----------
path : str, path-like, file-like or None, optional
Path to which to save this datatree, or a file-like object to write
it to (which must support read and write and be seekable) or None
(default) to return in-memory bytes as a memoryview.
mode : {"w", "a"}, default: "w"
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten. If mode='a', existing variables
will be overwritten.
format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \
"NETCDF3_CLASSIC"}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {"netcdf4", "h5netcdf", "scipy"}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, by default
preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via
``netcdf_engine_order`` in ``xarray.set_options()``).
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,
"zlib": True}, ...}``
The `h5netcdf` engine supports both the NetCDF4-style compression
encoding parameters ``{"zlib": True, "complevel": 9}`` and the h5py
ones ``{"compression": "gzip", "compression_opts": 9}``.
This allows using any compression plugin installed in the HDF5
library, e.g. LZF.
unlimited_dims : iterable of Hashable, optional
Dimension(s) that should be serialized as unlimited dimensions.
By default, no dimensions are treated as unlimited dimensions.
Note that unlimited_dims may also be set via
``dataset.encoding["unlimited_dims"]``.
compute : bool, default: True
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
invalid_netcdf : bool, default: False
Only valid along with ``engine="h5netcdf"``. If True, allow writing
hdf5 files which are invalid netcdf as described in
https://github.com/h5netcdf/h5netcdf.
Returns
-------
* ``memoryview`` if path is None
* ``dask.delayed.Delayed`` if compute is False
* None otherwise
Notes
-----
Only xarray.Dataset objects can be written to netCDF files, so
the xarray.DataArray is converted to a xarray.Dataset object
containing a single variable. If the DataArray has no name, or if the
name is the same as a coordinate name, then it is given the name
``"__xarray_dataarray_variable__"``.
[netCDF4 backend only] netCDF4 enums are decoded into the
dataarray dtype metadata.
See Also
--------
Dataset.to_netcdf
"""
from xarray.backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE
from xarray.backends.writers import to_netcdf
if self.name is None:
# If no name is set then use a generic xarray name
dataset = self.to_dataset(name=DATAARRAY_VARIABLE)
elif self.name in self.coords or self.name in self.dims:
# The name is the same as one of the coords names, which netCDF
# doesn't support, so rename it but keep track of the old name
dataset = self.to_dataset(name=DATAARRAY_VARIABLE)
dataset.attrs[DATAARRAY_NAME] = self.name
else:
# No problems with the name - so we're fine!
dataset = self.to_dataset()
return to_netcdf( # type: ignore[return-value] # mypy cannot resolve the overloads:(
dataset,
path,
mode=mode,
format=format,
group=group,
engine=engine,
encoding=encoding,
unlimited_dims=unlimited_dims,
compute=compute,
multifile=False,
invalid_netcdf=invalid_netcdf,
auto_complex=auto_complex,
)
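# Illustrative usage sketch (comments only, not executed); file names are
# hypothetical and the Delayed variant requires dask.
#
#     da.to_netcdf("out.nc")                           # write to disk, returns None
#     buf = da.to_netcdf()                             # no path -> in-memory memoryview
#     delayed = da.to_netcdf("out.nc", compute=False)  # dask.delayed.Delayed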
# compute=True (default) returns ZarrStore
@overload
def to_zarr(
self,
store: ZarrStoreLike | None = None,
chunk_store: MutableMapping | str | PathLike | None = None,
mode: ZarrWriteModes | None = None,
synchronizer=None,
group: str | None = None,
*,
encoding: Mapping | None = None,
compute: Literal[True] = True,
consolidated: bool | None = None,
append_dim: Hashable | None = None,
region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
safe_chunks: bool = True,
align_chunks: bool = False,
storage_options: dict[str, str] | None = None,
zarr_version: int | None = None,
zarr_format: int | None = None,
write_empty_chunks: bool | None = None,
chunkmanager_store_kwargs: dict[str, Any] | None = None,
) -> ZarrStore: ...
# compute=False returns dask.Delayed
@overload
def to_zarr(
self,
store: ZarrStoreLike | None = None,
chunk_store: MutableMapping | str | PathLike | None = None,
mode: ZarrWriteModes | None = None,
synchronizer=None,
group: str | None = None,
encoding: Mapping | None = None,
*,
compute: Literal[False],
consolidated: bool | None = None,
append_dim: Hashable | None = None,
region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
safe_chunks: bool = True,
align_chunks: bool = False,
storage_options: dict[str, str] | None = None,
zarr_version: int | None = None,
zarr_format: int | None = None,
write_empty_chunks: bool | None = None,
chunkmanager_store_kwargs: dict[str, Any] | None = None,
) -> Delayed: ...
def to_zarr(
self,
store: ZarrStoreLike | None = None,
chunk_store: MutableMapping | str | PathLike | None = None,
mode: ZarrWriteModes | None = None,
synchronizer=None,
group: str | None = None,
encoding: Mapping | None = None,
*,
compute: bool = True,
consolidated: bool | None = None,
append_dim: Hashable | None = None,
region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
safe_chunks: bool = True,
align_chunks: bool = False,
storage_options: dict[str, str] | None = None,
zarr_version: int | None = None,
zarr_format: int | None = None,
write_empty_chunks: bool | None = None,
chunkmanager_store_kwargs: dict[str, Any] | None = None,
) -> ZarrStore | Delayed:
"""Write DataArray contents to a Zarr store
Zarr chunks are determined in the following way:
- From the ``chunks`` attribute in each variable's ``encoding``
(can be set via `DataArray.chunk`).
- If the variable is a Dask array, from the dask chunks
- If neither Dask chunks nor encoding chunks are present, chunks will
be determined automatically by Zarr
- If both Dask chunks and encoding chunks are present, encoding chunks
will be used, provided that there is a many-to-one relationship between
encoding chunks and dask chunks (i.e. Dask chunks are bigger than and
evenly divide encoding chunks); otherwise raise a ``ValueError``.
This restriction ensures that no synchronization / locks are required
when writing. To disable this restriction, use ``safe_chunks=False``.
Parameters
----------
store : zarr.storage.StoreLike, optional
Store or path to directory in local or remote file system.
chunk_store : MutableMapping, str or path-like, optional
Store or path to directory in local or remote file system only for Zarr
array chunks. Requires zarr-python v2.4.0 or later.
mode : {"w", "w-", "a", "a-", r+", None}, optional
Persistence mode: "w" means create (overwrite if exists);
"w-" means create (fail if exists);
"a" means override all existing variables including dimension coordinates (create if does not exist);
"a-" means only append those variables that have ``append_dim``.
"r+" means modify existing array *values* only (raise an error if
any metadata or shapes would change).
The default mode is "a" if ``append_dim`` is set. Otherwise, it is
"r+" if ``region`` is set and ``w-`` otherwise.
synchronizer : object, optional
Zarr array synchronizer.
group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,}, ...}``
compute : bool, default: True
If True write array data immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed to write
array data later. Metadata is always updated eagerly.
consolidated : bool, optional
If True, apply zarr's `consolidate_metadata` function to the store
after writing metadata and read existing stores with consolidated
metadata; if False, do not. The default (`consolidated=None`) means
write consolidated metadata and attempt to read consolidated
metadata for existing stores (falling back to non-consolidated).
When using the experimental ``zarr_version=3``, ``consolidated`` must be
either ``None`` or ``False``.
append_dim : hashable, optional
If set, the dimension along which the data will be appended. All
other dimensions on overridden variables must remain the same size.
region : dict, optional
Optional mapping from dimension names to integer slices along
dataarray dimensions to indicate the region of existing zarr array(s)
in which to write this dataarray's data. For example,
``{'x': slice(0, 1000), 'y': slice(10000, 11000)}`` would indicate
that values should be written to the region ``0:1000`` along ``x``
and ``10000:11000`` along ``y``.
Two restrictions apply to the use of ``region``:
- If ``region`` is set, _all_ variables in a dataarray must have at
least one dimension in common with the region. Other variables
should be written in a separate call to ``to_zarr()``.
- Dimensions cannot be included in both ``region`` and
``append_dim`` at the same time. To create empty arrays to fill
in with ``region``, use a separate call to ``to_zarr()`` with
``compute=False``. See "Modifying existing Zarr stores" in
the reference documentation for full details.
Users are expected to ensure that the specified region aligns with
Zarr chunk boundaries, and that dask chunks are also aligned.
Xarray makes limited checks that these multiple chunk boundaries line up.
It is possible to write incomplete chunks and corrupt the data with this
option if you are not careful.
safe_chunks : bool, default: True
If True, only allow writes when there is a many-to-one relationship
between Zarr chunks (specified in encoding) and Dask chunks.
Set False to override this restriction; however, data may become corrupted
if Zarr arrays are written in parallel. This option may be useful in combination
with ``compute=False`` to initialize a Zarr store from an existing
DataArray with arbitrary chunk structure.
In addition to the many-to-one relationship validation, it also detects
partial chunk writes when using the ``region`` parameter; such partial
chunks are considered unsafe in mode "r+" but safe in mode "a".
Note: Even with these validations it can still be unsafe to write
two or more chunked arrays in the same location in parallel if they are
not writing in independent regions, for those cases it is better to use
a synchronizer.
align_chunks : bool, default: False
If True, rechunks the Dask array to align with Zarr chunks before writing.
This ensures each Dask chunk maps to one or more contiguous Zarr chunks,
which avoids race conditions.
Internally, the process sets safe_chunks=False and tries to preserve
the original Dask chunking as much as possible.
Note: While this alignment avoids write conflicts stemming from chunk
boundary misalignment, it does not protect against race conditions
if multiple uncoordinated processes write to the same
Zarr array concurrently.
storage_options : dict, optional
Any additional parameters for the storage backend (ignored for local
paths).
zarr_version : int or None, optional
.. deprecated:: 2024.9.1
Use ``zarr_format`` instead.
zarr_format : int or None, optional
The desired zarr format to target (currently 2 or 3). The default
of None will attempt to determine the zarr version from ``store`` when
possible, otherwise defaulting to the default version used by
the zarr-python library installed.
write_empty_chunks : bool or None, optional
If True, all chunks will be stored regardless of their
contents. If False, each chunk is compared to the array's fill value
prior to storing. If a chunk is uniformly equal to the fill value, then
that chunk is not stored, and the store entry for that chunk's key
is deleted. This setting enables sparser storage, as only chunks with
non-fill-value data are stored, at the expense of overhead associated
with checking the data of each chunk. If None (default) fall back to
specification(s) in ``encoding`` or Zarr defaults. A ``ValueError``
will be raised if this value (when not None) differs from the value
specified in ``encoding``.
chunkmanager_store_kwargs : dict, optional
Additional keyword arguments passed on to the `ChunkManager.store` method used to store
chunked arrays. For example for a dask array additional kwargs will be passed eventually to
:py:func:`dask.array.store()`. Experimental API that should not be relied upon.
Returns
-------
* ``dask.delayed.Delayed`` if compute is False
* ZarrStore otherwise
References
----------
https://zarr.readthedocs.io/
Notes
-----
Zarr chunking behavior:
If chunks are found in the encoding argument or attribute
corresponding to any DataArray, those chunks are used.
If a DataArray is a dask array, it is written with those chunks.
If no other chunks are found, Zarr uses its own heuristics to
choose automatic chunk sizes.
encoding:
The encoding attribute (if exists) of the DataArray(s) will be
used. Override any existing encodings by providing the ``encoding`` kwarg.
``fill_value`` handling:
There exists a subtlety in interpreting zarr's ``fill_value`` property. For zarr v2 format
arrays, ``fill_value`` is *always* interpreted as an invalid value similar to the ``_FillValue`` attribute
in CF/netCDF. For Zarr v3 format arrays, only an explicit ``_FillValue`` attribute will be used
to mask the data if requested using ``mask_and_scale=True``. See this `Github issue <https://github.com/pydata/xarray/issues/5475>`_
for more.
See Also
--------
Dataset.to_zarr
:ref:`io.zarr`
The I/O user guide, with more details and examples.
"""
from xarray.backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE
from xarray.backends.writers import to_zarr
if self.name is None:
# If no name is set then use a generic xarray name
dataset = self.to_dataset(name=DATAARRAY_VARIABLE)
elif self.name in self.coords or self.name in self.dims:
# The name is the same as one of the coords names, which the netCDF data model
# does not support, so rename it but keep track of the old name
dataset = self.to_dataset(name=DATAARRAY_VARIABLE)
dataset.attrs[DATAARRAY_NAME] = self.name
else:
# No problems with the name - so we're fine!
dataset = self.to_dataset()
return to_zarr( # type: ignore[call-overload,misc]
dataset,
store=store,
chunk_store=chunk_store,
mode=mode,
synchronizer=synchronizer,
group=group,
encoding=encoding,
compute=compute,
consolidated=consolidated,
append_dim=append_dim,
region=region,
safe_chunks=safe_chunks,
align_chunks=align_chunks,
storage_options=storage_options,
zarr_version=zarr_version,
zarr_format=zarr_format,
write_empty_chunks=write_empty_chunks,
chunkmanager_store_kwargs=chunkmanager_store_kwargs,
)
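# Illustrative usage sketch (comments only, not executed); the store path and
# the "time" dimension are hypothetical, and the Delayed variant requires dask.
#
#     da.to_zarr("out.zarr", mode="w")           # create or overwrite a local store
#     da.to_zarr("out.zarr", append_dim="time")  # append along an existing dimension
#     delayed = da.to_zarr("out.zarr", mode="w", compute=False)  # write array data later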
def to_dict(
self, data: bool | Literal["list", "array"] = "list", encoding: bool = False
) -> dict[str, Any]:
"""
Convert this xarray.DataArray into a dictionary following xarray
naming conventions.
Converts all variables and attributes to native Python objects.
Useful for converting to json. To avoid datetime incompatibility
use decode_times=False kwarg in xarray.open_dataset.
Parameters
----------
data : bool or {"list", "array"}, default: "list"
Whether to include the actual data in the dictionary. When set to
False, returns just the schema. If set to "array", returns data as
underlying array type. If set to "list" (or True for backwards
compatibility), returns data in lists of Python data types. Note
that for obtaining the "list" output efficiently, use
`da.compute().to_dict(data="list")`.
encoding : bool, default: False
Whether to include the Dataset's encoding in the dictionary.
Returns
-------
dict: dict
See Also
--------
DataArray.from_dict
Dataset.to_dict
"""
d = self.variable.to_dict(data=data)
d.update({"coords": {}, "name": self.name})
for k, coord in self.coords.items():
d["coords"][k] = coord.variable.to_dict(data=data)
if encoding:
d["encoding"] = dict(self.encoding)
return d
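# Illustrative usage sketch (comments only, not executed), assuming a
# DataArray ``da``.
#
#     d = da.to_dict()                  # data as nested Python lists
#     schema = da.to_dict(data=False)   # structure only, no data
#     roundtrip = xr.DataArray.from_dict(d)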
@classmethod
def from_dict(cls, d: Mapping[str, Any]) -> Self:
"""Convert a dictionary into an xarray.DataArray
Parameters
----------
d : dict
Mapping with a minimum structure of {"dims": [...], "data": [...]}
Returns
-------
obj : xarray.DataArray
See Also
--------
DataArray.to_dict
Dataset.from_dict
Examples
--------
>>> d = {"dims": "t", "data": [1, 2, 3]}
>>> da = xr.DataArray.from_dict(d)
>>> da
<xarray.DataArray (t: 3)> Size: 24B
array([1, 2, 3])
Dimensions without coordinates: t
>>> d = {
... "coords": {
... "t": {"dims": "t", "data": [0, 1, 2], "attrs": {"units": "s"}}
... },
... "attrs": {"title": "air temperature"},
... "dims": "t",
... "data": [10, 20, 30],
... "name": "a",
... }
>>> da = xr.DataArray.from_dict(d)
>>> da
<xarray.DataArray 'a' (t: 3)> Size: 24B
array([10, 20, 30])
Coordinates:
* t (t) int64 24B 0 1 2
Attributes:
title: air temperature
"""
coords = None
if "coords" in d:
try:
coords = {
k: (v["dims"], v["data"], v.get("attrs"))
for k, v in d["coords"].items()
}
except KeyError as e:
raise ValueError(
f"cannot convert dict when coords are missing the key '{e.args[0]}'"
) from e
try:
data = d["data"]
except KeyError as err:
raise ValueError("cannot convert dict without the key 'data''") from err
else:
obj = cls(data, coords, d.get("dims"), d.get("name"), d.get("attrs"))
obj.encoding.update(d.get("encoding", {}))
return obj
@classmethod
def from_series(cls, series: pd.Series, sparse: bool = False) -> DataArray:
"""Convert a pandas.Series into an xarray.DataArray.
If the series's index is a MultiIndex, it will be expanded into a
tensor product of one-dimensional coordinates (filling in missing
values with NaN). Thus this operation should be the inverse of the
`to_series` method.
Parameters
----------
series : Series
Pandas Series object to convert.
sparse : bool, default: False
If sparse=True, creates a sparse array instead of a dense NumPy array.
Requires the pydata/sparse package.
See Also
--------
DataArray.to_series
Dataset.from_dataframe
"""
temp_name = "__temporary_name"
df = pd.DataFrame({temp_name: series})
ds = Dataset.from_dataframe(df, sparse=sparse)
result = ds[temp_name]
result.name = series.name
return result
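# Illustrative usage sketch (comments only, not executed); the series is
# hypothetical and sparse=True requires the pydata/sparse package.
#
#     s = pd.Series([1, 2, 3], index=pd.Index([10, 20, 30], name="x"))
#     da = xr.DataArray.from_series(s)
#     sparse_da = xr.DataArray.from_series(s, sparse=True)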
def to_iris(self) -> iris_Cube:
"""Convert this array into a iris.cube.Cube"""
from xarray.convert import to_iris
return to_iris(self)
@classmethod
def from_iris(cls, cube: iris_Cube) -> Self:
"""Convert a iris.cube.Cube into an xarray.DataArray"""
from xarray.convert import from_iris
return from_iris(cube)
def _all_compat(self, other: Self, compat_str: str) -> bool:
"""Helper function for equals, broadcast_equals, and identical"""
def compat(x, y):
return getattr(x.variable, compat_str)(y.variable)
return utils.dict_equiv(self.coords, other.coords, compat=compat) and compat(
self, other
)
def broadcast_equals(self, other: Self) -> bool:
"""Two DataArrays are broadcast equal if they are equal after
broadcasting them against each other such that they have the same
dimensions.
Parameters
----------
other : DataArray
DataArray to compare to.
Returns
-------
equal : bool
True if the two DataArrays are broadcast equal.
See Also
--------
DataArray.equals
DataArray.identical
Examples
--------
>>> a = xr.DataArray([1, 2], dims="X")
>>> b = xr.DataArray([[1, 1], [2, 2]], dims=["X", "Y"])
>>> a
<xarray.DataArray (X: 2)> Size: 16B
array([1, 2])
Dimensions without coordinates: X
>>> b
<xarray.DataArray (X: 2, Y: 2)> Size: 32B
array([[1, 1],
[2, 2]])
Dimensions without coordinates: X, Y
.equals returns True if two DataArrays have the same values, dimensions, and
coordinates. .broadcast_equals returns True if the results of broadcasting two
DataArrays against each other have the same values, dimensions, and coordinates.
>>> a.equals(b)
False
>>> a2, b2 = xr.broadcast(a, b)
>>> a2.equals(b2)
True
>>> a.broadcast_equals(b)
True
"""
try:
return self._all_compat(other, "broadcast_equals")
except (TypeError, AttributeError):
return False
def equals(self, other: Self) -> bool:
"""True if two DataArrays have the same dimensions, coordinates and
values; otherwise False.
DataArrays can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for ``DataArray``
does element-wise comparisons (like numpy.ndarrays).
Parameters
----------
other : DataArray
DataArray to compare to.
Returns
-------
equal : bool
True if the two DataArrays are equal.
See Also
--------
DataArray.broadcast_equals
DataArray.identical
Examples
--------
>>> a = xr.DataArray([1, 2, 3], dims="X")
>>> b = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="m"))
>>> c = xr.DataArray([1, 2, 3], dims="Y")
>>> d = xr.DataArray([3, 2, 1], dims="X")
>>> a
<xarray.DataArray (X: 3)> Size: 24B
array([1, 2, 3])
Dimensions without coordinates: X
>>> b
<xarray.DataArray (X: 3)> Size: 24B
array([1, 2, 3])
Dimensions without coordinates: X
Attributes:
units: m
>>> c
<xarray.DataArray (Y: 3)> Size: 24B
array([1, 2, 3])
Dimensions without coordinates: Y
>>> d
<xarray.DataArray (X: 3)> Size: 24B
array([3, 2, 1])
Dimensions without coordinates: X
>>> a.equals(b)
True
>>> a.equals(c)
False
>>> a.equals(d)
False
"""
try:
return self._all_compat(other, "equals")
except (TypeError, AttributeError):
return False
def identical(self, other: Self) -> bool:
"""Like equals, but also checks the array name and attributes, and
attributes on all coordinates.
Parameters
----------
other : DataArray
DataArray to compare to.
Returns
-------
equal : bool
True if the two DataArrays are identical.
See Also
--------
DataArray.broadcast_equals
DataArray.equals
Examples
--------
>>> a = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="m"), name="Width")
>>> b = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="m"), name="Width")
>>> c = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="ft"), name="Width")
>>> a
<xarray.DataArray 'Width' (X: 3)> Size: 24B
array([1, 2, 3])
Dimensions without coordinates: X
Attributes:
units: m
>>> b
<xarray.DataArray 'Width' (X: 3)> Size: 24B
array([1, 2, 3])
Dimensions without coordinates: X
Attributes:
units: m
>>> c
<xarray.DataArray 'Width' (X: 3)> Size: 24B
array([1, 2, 3])
Dimensions without coordinates: X
Attributes:
units: ft
>>> a.equals(b)
True
>>> a.identical(b)
True
>>> a.equals(c)
True
>>> a.identical(c)
False
"""
try:
return self.name == other.name and self._all_compat(other, "identical")
except (TypeError, AttributeError):
return False
def __array_wrap__(self, obj, context=None, return_scalar=False) -> Self:
new_var = self.variable.__array_wrap__(obj, context, return_scalar)
return self._replace(new_var)
def __matmul__(self, obj: T_Xarray) -> T_Xarray:
return self.dot(obj)
def __rmatmul__(self, other: T_Xarray) -> T_Xarray:
# currently somewhat duplicative, as only other DataArrays are
# compatible with matmul
return computation.dot(other, self)
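# NOTE: the @ operator is thin sugar over dot(): __matmul__ delegates to
# self.dot(obj) and __rmatmul__ to computation.dot(other, self), so
# ``a @ b`` behaves like ``a.dot(b)``.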
def _unary_op(self, f: Callable, *args, **kwargs) -> Self:
keep_attrs = kwargs.pop("keep_attrs", None)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
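# Silence numpy's "All-NaN slice" / "Mean of empty slice" RuntimeWarnings
# and floating point warnings (np.errstate) while ``f`` is applied to the
# underlying data, so they do not leak out to user code.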
with warnings.catch_warnings():
warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
warnings.filterwarnings(
"ignore", r"Mean of empty slice", category=RuntimeWarning
)
with np.errstate(all="ignore"):
da = self.__array_wrap__(f(self.variable.data, *args, **kwargs))
if keep_attrs:
da.attrs = self.attrs
return da
def _binary_op(
self, other: DaCompatible, f: Callable, reflexive: bool = False
) -> Self:
from xarray.core.datatree import DataTree
from xarray.core.groupby import GroupBy
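# Returning NotImplemented defers binary ops with Dataset, DataTree and
# GroupBy operands to those classes, whose own (reflected) methods know how
# to combine themselves with a DataArray.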
if isinstance(other, DataTree | Dataset | GroupBy):
return NotImplemented
if isinstance(other, DataArray):
align_type = OPTIONS["arithmetic_join"]
self, other = align(self, other, join=align_type, copy=False)
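# Illustrative sketch (not part of the implementation): with the default
# ``arithmetic_join="inner"``, something like
#     xr.DataArray([1, 2, 3], dims="x", coords={"x": [0, 1, 2]})
#     + xr.DataArray([10, 20], dims="x", coords={"x": [1, 2]})
# aligns on the shared labels first, so the sum only covers x=1 and x=2.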
other_variable_or_arraylike: DaCompatible = getattr(other, "variable", other)
other_coords = getattr(other, "coords", None)
variable = (
f(self.variable, other_variable_or_arraylike)
if not reflexive
else f(other_variable_or_arraylike, self.variable)
)
coords, indexes = self.coords._merge_raw(other_coords, reflexive)
name = result_name([self, other])
return self._replace(variable, coords, name, indexes=indexes)
def _inplace_binary_op(self, other: DaCompatible, f: Callable) -> Self:
from xarray.core.groupby import GroupBy
if isinstance(other, GroupBy):
raise TypeError(
"in-place operations between a DataArray and "
"a grouped object are not permitted"
)
# n.b. we can't align other to self (with other.reindex_like(self))
# because `other` may be converted into floats, which would cause
# in-place arithmetic to fail unpredictably. Instead, we simply
# don't support automatic alignment with in-place arithmetic.
other_coords = getattr(other, "coords", None)
other_variable = getattr(other, "variable", other)
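# Apply the in-place ufunc to the underlying Variable without realigning
# indexes; conflicting coordinates surface as a MergeError, which is
# re-raised below with a more helpful message.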
try:
with self.coords._merge_inplace(other_coords):
f(self.variable, other_variable)
except MergeError as exc:
raise MergeError(
"Automatic alignment is not supported for in-place operations.\n"
"Consider aligning the indices manually or using a not-in-place operation.\n"
"See https://github.com/pydata/xarray/issues/3910 for more explanations."
) from exc
return self
def _copy_attrs_from(self, other: DataArray | Dataset | Variable) -> None:
self.attrs = other.attrs
plot = utils.UncachedAccessor(DataArrayPlotAccessor)
def _title_for_slice(self, truncate: int = 50) -> str:
"""
If the dataarray has 1-dimensional coordinates or comes from a slice,
we can show that info in the title.
Parameters
----------
truncate : int, default: 50
maximum number of characters for title
Returns
-------
title : string
Can be used for plot titles
"""
one_dims = []
for dim, coord in self.coords.items():
if coord.size == 1:
one_dims.append(
f"{dim} = {format_item(coord.values)}{_get_units_from_attrs(coord)}"
)
title = ", ".join(one_dims)
if len(title) > truncate:
title = title[: (truncate - 3)] + "..."
return title
def diff(
self,
dim: Hashable,
n: int = 1,
*,
label: Literal["upper", "lower"] = "upper",
) -> Self:
"""Calculate the n-th order discrete difference along given axis.
Parameters
----------
dim : Hashable
Dimension over which to calculate the finite difference.
n : int, default: 1
The number of times values are differenced.
label : {"upper", "lower"}, default: "upper"
The new coordinate in dimension ``dim`` will have the
values of either the minuend's or subtrahend's coordinate
for values 'upper' and 'lower', respectively.
Returns
-------
difference : DataArray
The n-th order finite difference of this object.
Notes
-----
`n` matches numpy's behavior and is different from pandas' first argument named
`periods`.
Examples
--------
>>> arr = xr.DataArray([5, 5, 6, 6], [[1, 2, 3, 4]], ["x"])
>>> arr.diff("x")
<xarray.DataArray (x: 3)> Size: 24B
array([0, 1, 0])
Coordinates:
* x (x) int64 24B 2 3 4
>>> arr.diff("x", 2)
<xarray.DataArray (x: 2)> Size: 16B
array([ 1, -1])
Coordinates:
* x (x) int64 16B 3 4
See Also
--------
DataArray.differentiate
"""
ds = self._to_temp_dataset().diff(n=n, dim=dim, label=label)
return self._from_temp_dataset(ds)
def shift(
self,
shifts: Mapping[Any, int] | None = None,
fill_value: Any = dtypes.NA,
**shifts_kwargs: int,
) -> Self:
"""Shift this DataArray by an offset along one or more dimensions.
Only the data is moved; coordinates stay in place. This is consistent
with the behavior of ``shift`` in pandas.
Values shifted from beyond array bounds will appear at one end of
each dimension, and the newly vacated positions are filled according to
`fill_value`. For periodic offsets see `roll` instead.
Parameters
----------
shifts : mapping of Hashable to int or None, optional
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
fill_value : scalar, optional
Value to use for newly missing values
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : DataArray
DataArray with the same coordinates and attributes but shifted
data.
See Also
--------
roll
Examples
--------
>>> arr = xr.DataArray([5, 6, 7], dims="x")
>>> arr.shift(x=1)
<xarray.DataArray (x: 3)> Size: 24B
array([nan, 5., 6.])
Dimensions without coordinates: x
"""
variable = self.variable.shift(
shifts=shifts, fill_value=fill_value, **shifts_kwargs
)
return self._replace(variable=variable)
def roll(
self,
shifts: Mapping[Hashable, int] | None = None,
roll_coords: bool = False,
**shifts_kwargs: int,
) -> Self:
"""Roll this array by an offset along one or more dimensions.
Unlike shift, roll treats the given dimensions as periodic, so will not
create any missing values to be filled.
Unlike shift, roll may rotate all variables, including coordinates
if specified. The direction of rotation is consistent with
:py:func:`numpy.roll`.
Parameters
----------
shifts : mapping of Hashable to int, optional
Integer offset to rotate each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
roll_coords : bool, default: False
Indicates whether to roll the coordinates by the offset too.
**shifts_kwargs : {dim: offset, ...}, optional
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
rolled : DataArray
DataArray with the same attributes but rolled data and coordinates.
See Also
--------
shift
Examples
--------
>>> arr = xr.DataArray([5, 6, 7], dims="x")
>>> arr.roll(x=1)
<xarray.DataArray (x: 3)> Size: 24B
array([7, 5, 6])
Dimensions without coordinates: x
"""
ds = self._to_temp_dataset().roll(
shifts=shifts, roll_coords=roll_coords, **shifts_kwargs
)
return self._from_temp_dataset(ds)
@property
def real(self) -> Self:
"""
The real part of the array.
See Also
--------
numpy.ndarray.real
"""
return self._replace(self.variable.real)
@property
def imag(self) -> Self:
"""
The imaginary part of the array.
See Also
--------
numpy.ndarray.imag
"""
return self._replace(self.variable.imag)
@deprecate_dims
def dot(
self,
other: T_Xarray,
dim: Dims = None,
) -> T_Xarray:
"""Perform dot product of two DataArrays along their shared dims.
Equivalent to taking the tensordot over all shared dims.
Parameters
----------
other : DataArray
The other array with which the dot product is performed.
dim : ..., str, Iterable of Hashable or None, optional
Which dimensions to sum over. Ellipsis (`...`) sums over all dimensions.
If not specified, then all the common dimensions are summed over.
Returns
-------
result : DataArray
Array resulting from the dot product over all shared dimensions.
See Also
--------
dot
numpy.tensordot
Examples
--------
>>> da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))
>>> da = xr.DataArray(da_vals, dims=["x", "y", "z"])
>>> dm_vals = np.arange(4)
>>> dm = xr.DataArray(dm_vals, dims=["z"])
>>> dm.dims
('z',)
>>> da.dims
('x', 'y', 'z')
>>> dot_result = da.dot(dm)
>>> dot_result.dims
('x', 'y')
"""
if isinstance(other, Dataset):
raise NotImplementedError(
"dot products are not yet supported with Dataset objects."
)
if not isinstance(other, DataArray):
raise TypeError("dot only operates on DataArrays.")
return computation.dot(self, other, dim=dim)
def sortby(
self,
variables: (
Hashable
| DataArray
| Sequence[Hashable | DataArray]
| Callable[[Self], Hashable | DataArray | Sequence[Hashable | DataArray]]
),
ascending: bool = True,
) -> Self:
"""Sort object by labels or values (along an axis).
Sorts the dataarray, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarrays, then the dataarrays are aligned
(via left-join) to the calling object prior to sorting by cell values.
NaNs are sorted to the end, following Numpy convention.
If multiple sorts along the same dimension are
given, numpy's lexsort is performed along that dimension:
https://numpy.org/doc/stable/reference/generated/numpy.lexsort.html
and the FIRST key in the sequence is used as the primary sort key,
followed by the 2nd key, etc.
Parameters
----------
variables : Hashable, DataArray, sequence of Hashable or DataArray, or Callable
1D DataArray objects or name(s) of 1D variable(s) in coords whose values are
used to sort this array. If a callable, the callable is passed this object,
and the result is used as the values to sort by.
ascending : bool, default: True
Whether to sort by ascending or descending order.
Returns
-------
sorted : DataArray
A new dataarray where all the specified dims are sorted by dim
labels.
See Also
--------
Dataset.sortby
numpy.sort
pandas.sort_values
pandas.sort_index
Examples
--------
>>> da = xr.DataArray(
... np.arange(5, 0, -1),
... coords=[pd.date_range("1/1/2000", periods=5)],
... dims="time",
... )
>>> da
<xarray.DataArray (time: 5)> Size: 40B
array([5, 4, 3, 2, 1])
Coordinates:
* time (time) datetime64[ns] 40B 2000-01-01 2000-01-02 ... 2000-01-05
>>> da.sortby(da)
<xarray.DataArray (time: 5)> Size: 40B
array([1, 2, 3, 4, 5])
Coordinates:
* time (time) datetime64[ns] 40B 2000-01-05 2000-01-04 ... 2000-01-01
>>> da.sortby(lambda x: x)
<xarray.DataArray (time: 5)> Size: 40B
array([1, 2, 3, 4, 5])
Coordinates:
* time (time) datetime64[ns] 40B 2000-01-05 2000-01-04 ... 2000-01-01
"""
# We need to convert the callable here rather than pass it through to the
# dataset method, since otherwise the dataset method would try to call the
# callable with the dataset as the object
if callable(variables):
variables = variables(self)
ds = self._to_temp_dataset().sortby(variables, ascending=ascending)
return self._from_temp_dataset(ds)
def quantile(
self,
q: ArrayLike,
dim: Dims = None,
*,
method: QuantileMethods = "linear",
keep_attrs: bool | None = None,
skipna: bool | None = None,
interpolation: QuantileMethods | None = None,
) -> Self:
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements.
Parameters
----------
q : float or array-like of float
Quantile to compute, which must be between 0 and 1 inclusive.
dim : str or Iterable of Hashable, optional
Dimension(s) over which to apply quantile.
method : str, default: "linear"
This optional parameter specifies the interpolation method to use when the
desired quantile lies between two data points. The options sorted by their R
type as summarized in the H&F paper [1]_ are:
1. "inverted_cdf"
2. "averaged_inverted_cdf"
3. "closest_observation"
4. "interpolated_inverted_cdf"
5. "hazen"
6. "weibull"
7. "linear" (default)
8. "median_unbiased"
9. "normal_unbiased"
The first three methods are discontinuous. The following discontinuous
variations of the default "linear" (7.) option are also available:
* "lower"
* "higher"
* "midpoint"
* "nearest"
See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument
was previously called "interpolation", renamed in accordance with numpy
version 1.22.0.
keep_attrs : bool or None, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
quantiles : DataArray
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile
Examples
--------
>>> da = xr.DataArray(
... data=[[0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]],
... coords={"x": [7, 9], "y": [1, 1.5, 2, 2.5]},
... dims=("x", "y"),
... )
>>> da.quantile(0) # or da.quantile(0, dim=...)
<xarray.DataArray ()> Size: 8B
array(0.7)
Coordinates:
quantile float64 8B 0.0
>>> da.quantile(0, dim="x")
<xarray.DataArray (y: 4)> Size: 32B
array([0.7, 4.2, 2.6, 1.5])
Coordinates:
* y (y) float64 32B 1.0 1.5 2.0 2.5
quantile float64 8B 0.0
>>> da.quantile([0, 0.5, 1])
<xarray.DataArray (quantile: 3)> Size: 24B
array([0.7, 3.4, 9.4])
Coordinates:
* quantile (quantile) float64 24B 0.0 0.5 1.0
>>> da.quantile([0, 0.5, 1], dim="x")
<xarray.DataArray (quantile: 3, y: 4)> Size: 96B
array([[0.7 , 4.2 , 2.6 , 1.5 ],
[3.6 , 5.75, 6. , 1.7 ],
[6.5 , 7.3 , 9.4 , 1.9 ]])
Coordinates:
* quantile (quantile) float64 24B 0.0 0.5 1.0
* y (y) float64 32B 1.0 1.5 2.0 2.5
References
----------
.. [1] R. J. Hyndman and Y. Fan,
"Sample quantiles in statistical packages,"
The American Statistician, 50(4), pp. 361-365, 1996
"""
ds = self._to_temp_dataset().quantile(
q,
dim=dim,
keep_attrs=keep_attrs,
method=method,
skipna=skipna,
interpolation=interpolation,
)
return self._from_temp_dataset(ds)
def rank(
self,
dim: Hashable,
*,
pct: bool = False,
keep_attrs: bool | None = None,
) -> Self:
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If pct, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : Hashable
Dimension over which to compute rank.
pct : bool, default: False
If True, compute percentage ranks, otherwise compute integer ranks.
keep_attrs : bool or None, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
ranked : DataArray
DataArray with the same coordinates and dtype 'float64'.
Examples
--------
>>> arr = xr.DataArray([5, 6, 7], dims="x")
>>> arr.rank("x")
<xarray.DataArray (x: 3)> Size: 24B
array([1., 2., 3.])
Dimensions without coordinates: x
"""
ds = self._to_temp_dataset().rank(dim, pct=pct, keep_attrs=keep_attrs)
return self._from_temp_dataset(ds)
def differentiate(
self,
coord: Hashable,
edge_order: Literal[1, 2] = 1,
datetime_unit: DatetimeUnitOptions = None,
) -> Self:
"""Differentiate the array with the second order accurate central
differences.
.. note::
This feature is limited to simple cartesian geometry, i.e. coord
must be one dimensional.
Parameters
----------
coord : Hashable
The coordinate to be used to compute the gradient.
edge_order : {1, 2}, default: 1
N-th order accurate differences at the boundaries.
datetime_unit : {"W", "D", "h", "m", "s", "ms", \
"us", "ns", "ps", "fs", "as", None}, optional
Unit to compute gradient. Only valid for datetime coordinate. "Y" and "M" are not available as
datetime_unit.
Returns
-------
differentiated: DataArray
See also
--------
numpy.gradient: corresponding numpy function
Examples
--------
>>> da = xr.DataArray(
... np.arange(12).reshape(4, 3),
... dims=["x", "y"],
... coords={"x": [0, 0.1, 1.1, 1.2]},
... )
>>> da
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Coordinates:
* x (x) float64 32B 0.0 0.1 1.1 1.2
Dimensions without coordinates: y
>>>
>>> da.differentiate("x")
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[30. , 30. , 30. ],
[27.54545455, 27.54545455, 27.54545455],
[27.54545455, 27.54545455, 27.54545455],
[30. , 30. , 30. ]])
Coordinates:
* x (x) float64 32B 0.0 0.1 1.1 1.2
Dimensions without coordinates: y
"""
ds = self._to_temp_dataset().differentiate(coord, edge_order, datetime_unit)
return self._from_temp_dataset(ds)
def integrate(
self,
coord: Hashable | Sequence[Hashable] = None,
datetime_unit: DatetimeUnitOptions = None,
) -> Self:
"""Integrate along the given coordinate using the trapezoidal rule.
.. note::
This feature is limited to simple cartesian geometry, i.e. coord
must be one dimensional.
Parameters
----------
coord : Hashable, or sequence of Hashable
Coordinate(s) used for the integration.
datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
'ps', 'fs', 'as', None}, optional
Specify the unit if a datetime coordinate is used.
Returns
-------
integrated : DataArray
See also
--------
Dataset.integrate
numpy.trapz : corresponding numpy function
Examples
--------
>>> da = xr.DataArray(
... np.arange(12).reshape(4, 3),
... dims=["x", "y"],
... coords={"x": [0, 0.1, 1.1, 1.2]},
... )
>>> da
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Coordinates:
* x (x) float64 32B 0.0 0.1 1.1 1.2
Dimensions without coordinates: y
>>>
>>> da.integrate("x")
<xarray.DataArray (y: 3)> Size: 24B
array([5.4, 6.6, 7.8])
Dimensions without coordinates: y
"""
ds = self._to_temp_dataset().integrate(coord, datetime_unit)
return self._from_temp_dataset(ds)
def cumulative_integrate(
self,
coord: Hashable | Sequence[Hashable] = None,
datetime_unit: DatetimeUnitOptions = None,
) -> Self:
"""Integrate cumulatively along the given coordinate using the trapezoidal rule.
.. note::
This feature is limited to simple cartesian geometry, i.e. coord
must be one dimensional.
The first entry of the cumulative integral is always 0, in order to keep the
length of the dimension unchanged between input and output.
Parameters
----------
coord : Hashable, or sequence of Hashable
Coordinate(s) used for the integration.
datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
'ps', 'fs', 'as', None}, optional
Specify the unit if a datetime coordinate is used.
Returns
-------
integrated : DataArray
See also
--------
Dataset.cumulative_integrate
scipy.integrate.cumulative_trapezoid : corresponding scipy function
Examples
--------
>>> da = xr.DataArray(
... np.arange(12).reshape(4, 3),
... dims=["x", "y"],
... coords={"x": [0, 0.1, 1.1, 1.2]},
... )
>>> da
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
Coordinates:
* x (x) float64 32B 0.0 0.1 1.1 1.2
Dimensions without coordinates: y
>>>
>>> da.cumulative_integrate("x")
<xarray.DataArray (x: 4, y: 3)> Size: 96B
array([[0. , 0. , 0. ],
[0.15, 0.25, 0.35],
[4.65, 5.75, 6.85],
[5.4 , 6.6 , 7.8 ]])
Coordinates:
* x (x) float64 32B 0.0 0.1 1.1 1.2
Dimensions without coordinates: y
"""
ds = self._to_temp_dataset().cumulative_integrate(coord, datetime_unit)
return self._from_temp_dataset(ds)
def unify_chunks(self) -> Self:
"""Unify chunk size along all chunked dimensions of this DataArray.
Returns
-------
DataArray with consistent chunk sizes for all dask-array variables
See Also
--------
dask.array.core.unify_chunks
"""
return unify_chunks(self)[0]
def map_blocks(
self,
func: Callable[..., T_Xarray],
args: Sequence[Any] = (),
kwargs: Mapping[str, Any] | None = None,
template: DataArray | Dataset | None = None,
) -> T_Xarray:
"""
Apply a function to each block of this DataArray.
.. warning::
This method is experimental and its signature may change.
Parameters
----------
func : callable
User-provided function that accepts a DataArray as its first
parameter. The function will receive a subset or 'block' of this DataArray (see below),
corresponding to one chunk along each chunked dimension. ``func`` will be
executed as ``func(subset_dataarray, *subset_args, **kwargs)``.
This function must return either a single DataArray or a single Dataset.
This function cannot add a new chunked dimension.
args : sequence
Passed to func after unpacking and subsetting any xarray objects by blocks.
xarray objects in args must be aligned with this object, otherwise an error is raised.
kwargs : mapping
Passed verbatim to func after unpacking. xarray objects, if any, will not be
subset to blocks. Passing dask collections in kwargs is not allowed.
template : DataArray or Dataset, optional
xarray object representing the final result after compute is called. If not provided,
the function will first be run on mocked-up data that looks like this object but
has zero-sized dimensions, to determine properties of the returned object such as dtype,
variable names, attributes, new dimensions and new indexes (if any).
``template`` must be provided if the function changes the size of existing dimensions.
When provided, ``attrs`` on variables in `template` are copied over to the result. Any
``attrs`` set by ``func`` will be ignored.
Returns
-------
A single DataArray or Dataset with dask backend, reassembled from the outputs of the
function.
Notes
-----
This function is designed for when ``func`` needs to manipulate a whole xarray object
subset to each block. Each block is loaded into memory. In the more common case where
``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``.
If none of the variables in this object is backed by dask arrays, calling this function is
equivalent to calling ``func(obj, *args, **kwargs)``.
See Also
--------
:func:`dask.array.map_blocks <dask.array.map_blocks>`
:func:`xarray.apply_ufunc <xarray.apply_ufunc>`
:func:`xarray.Dataset.map_blocks <xarray.Dataset.map_blocks>`
:doc:`xarray-tutorial:advanced/map_blocks/map_blocks`
Advanced Tutorial on map_blocks with dask
Examples
--------
Calculate an anomaly from climatology using ``.groupby()``. Using
``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``,
its indices, and its methods like ``.groupby()``.
>>> def calculate_anomaly(da, groupby_type="time.month"):
... gb = da.groupby(groupby_type)
... clim = gb.mean(dim="time")
... return gb - clim
...
>>> time = xr.date_range("1990-01", "1992-01", freq="ME", use_cftime=True)
>>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"])
>>> np.random.seed(123)
>>> array = xr.DataArray(
... np.random.rand(len(time)),
... dims=["time"],
... coords={"time": time, "month": month},
... ).chunk()
>>> array.map_blocks(calculate_anomaly, template=array).compute()
<xarray.DataArray (time: 24)> Size: 192B
array([ 0.12894847, 0.11323072, -0.0855964 , -0.09334032, 0.26848862,
0.12382735, 0.22460641, 0.07650108, -0.07673453, -0.22865714,
-0.19063865, 0.0590131 , -0.12894847, -0.11323072, 0.0855964 ,
0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108,
0.07673453, 0.22865714, 0.19063865, -0.0590131 ])
Coordinates:
* time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12
Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments
to the function being applied in ``xr.map_blocks()``:
>>> array.map_blocks(
... calculate_anomaly, kwargs={"groupby_type": "time.year"}, template=array
... ) # doctest: +ELLIPSIS
<xarray.DataArray (time: 24)> Size: 192B
dask.array<<this-array>-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray>
Coordinates:
* time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 192B dask.array<chunksize=(24,), meta=np.ndarray>
"""
from xarray.core.parallel import map_blocks
return map_blocks(func, self, args, kwargs, template)
def polyfit(
self,
dim: Hashable,
deg: int,
skipna: bool | None = None,
rcond: float | None = None,
w: Hashable | Any | None = None,
full: bool = False,
cov: bool | Literal["unscaled"] = False,
) -> Dataset:
"""
Least squares polynomial fit.
This replicates the behaviour of `numpy.polyfit` but differs by skipping
invalid values when `skipna = True`.
Parameters
----------
dim : Hashable
Coordinate along which to fit the polynomials.
deg : int
Degree of the fitting polynomial.
skipna : bool or None, optional
If True, removes all invalid values before fitting each 1D slice of the array.
Default is True if data is stored in a dask.array or if there are any
invalid values, False otherwise.
rcond : float or None, optional
Relative condition number of the fit.
w : Hashable, array-like or None, optional
Weights to apply to the y-coordinate of the sample points.
Can be an array-like object or the name of a coordinate in the dataset.
full : bool, default: False
Whether to return the residuals, matrix rank and singular values in addition
to the coefficients.
cov : bool or "unscaled", default: False
Whether to return the covariance matrix in addition to the coefficients.
The matrix is not scaled if `cov='unscaled'`.
Returns
-------
polyfit_results : Dataset
A single dataset which contains:
polyfit_coefficients
The coefficients of the best fit.
polyfit_residuals
The residuals of the least-square computation (only included if `full=True`).
When the matrix rank is deficient, np.nan is returned.
[dim]_matrix_rank
The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`)
[dim]_singular_value
The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`)
polyfit_covariance
The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`)
See Also
--------
numpy.polyfit
numpy.polyval
xarray.polyval
DataArray.curvefit
"""
# For DataArray, use the original implementation by converting to a dataset
return self._to_temp_dataset().polyfit(
dim, deg, skipna=skipna, rcond=rcond, w=w, full=full, cov=cov
)
def pad(
self,
pad_width: Mapping[Any, int | tuple[int, int]] | None = None,
mode: PadModeOptions = "constant",
stat_length: (
int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None
) = None,
constant_values: (
float | tuple[float, float] | Mapping[Any, tuple[float, float]] | None
) = None,
end_values: int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None = None,
reflect_type: PadReflectOptions = None,
keep_attrs: bool | None = None,
**pad_width_kwargs: Any,
) -> Self:
"""Pad this array along one or more dimensions.
.. warning::
This function is experimental and its behaviour is likely to change
especially regarding padding of dimension coordinates (or IndexVariables).
When using one of the modes ("edge", "reflect", "symmetric", "wrap"),
coordinates will be padded with the same mode; otherwise coordinates
are padded using the "constant" mode with fill_value ``dtypes.NA``.
Parameters
----------
pad_width : mapping of Hashable to tuple of int
Mapping with the form of {dim: (pad_before, pad_after)}
describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
mode : {"constant", "edge", "linear_ramp", "maximum", "mean", "median", \
"minimum", "reflect", "symmetric", "wrap"}, default: "constant"
How to pad the DataArray (taken from numpy docs):
- "constant": Pads with a constant value.
- "edge": Pads with the edge values of array.
- "linear_ramp": Pads with the linear ramp between end_value and the
array edge value.
- "maximum": Pads with the maximum value of all or part of the
vector along each axis.
- "mean": Pads with the mean value of all or part of the
vector along each axis.
- "median": Pads with the median value of all or part of the
vector along each axis.
- "minimum": Pads with the minimum value of all or part of the
vector along each axis.
- "reflect": Pads with the reflection of the vector mirrored on
the first and last values of the vector along each axis.
- "symmetric": Pads with the reflection of the vector mirrored
along the edge of the array.
- "wrap": Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the
end values are used to pad the beginning.
stat_length : int, tuple or mapping of Hashable to tuple, default: None
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique
statistic lengths along each dimension.
((before, after),) yields same before and after statistic lengths
for each dimension.
(stat_length,) or int is a shortcut for before = after = statistic
length for all axes.
Default is ``None``, to use the entire axis.
constant_values : scalar, tuple or mapping of Hashable to tuple, default: 0
Used in 'constant'. The values to set the padded values for each
axis.
``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique
pad constants along each dimension.
``((before, after),)`` yields same before and after constants for each
dimension.
``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for
all dimensions.
Default is 0.
end_values : scalar, tuple or mapping of Hashable to tuple, default: 0
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique
end values along each dimension.
``((before, after),)`` yields same before and after end values for each
axis.
``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for
all axes.
Default is 0.
reflect_type : {"even", "odd", None}, optional
Used in "reflect", and "symmetric". The "even" style is the
default with an unaltered reflection around the edge value. For
the "odd" style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
keep_attrs : bool or None, optional
If True, the attributes (``attrs``) will be copied from the
original object to the new one. If False, the new object
will be returned without attributes.
**pad_width_kwargs
The keyword arguments form of ``pad_width``.
One of ``pad_width`` or ``pad_width_kwargs`` must be provided.
Returns
-------
padded : DataArray
DataArray with the padded coordinates and data.
See Also
--------
DataArray.shift, DataArray.roll, DataArray.bfill, DataArray.ffill, numpy.pad, dask.array.pad
Notes
-----
For ``mode="constant"`` and ``constant_values=None``, integer types will be
promoted to ``float`` and padded with ``np.nan``.
Padding coordinates will drop their corresponding index (if any) and will reset default
indexes for dimension coordinates.
Examples
--------
>>> arr = xr.DataArray([5, 6, 7], coords=[("x", [0, 1, 2])])
>>> arr.pad(x=(1, 2), constant_values=0)
<xarray.DataArray (x: 6)> Size: 48B
array([0, 5, 6, 7, 0, 0])
Coordinates:
* x (x) float64 48B nan 0.0 1.0 2.0 nan nan
>>> da = xr.DataArray(
... [[0, 1, 2, 3], [10, 11, 12, 13]],
... dims=["x", "y"],
... coords={"x": [0, 1], "y": [10, 20, 30, 40], "z": ("x", [100, 200])},
... )
>>> da.pad(x=1)
<xarray.DataArray (x: 4, y: 4)> Size: 128B
array([[nan, nan, nan, nan],
[ 0., 1., 2., 3.],
[10., 11., 12., 13.],
[nan, nan, nan, nan]])
Coordinates:
* x (x) float64 32B nan 0.0 1.0 nan
* y (y) int64 32B 10 20 30 40
z (x) float64 32B nan 100.0 200.0 nan
Careful, ``constant_values`` are coerced to the data type of the array which may
lead to a loss of precision:
>>> da.pad(x=1, constant_values=1.23456789)
<xarray.DataArray (x: 4, y: 4)> Size: 128B
array([[ 1, 1, 1, 1],
[ 0, 1, 2, 3],
[10, 11, 12, 13],
[ 1, 1, 1, 1]])
Coordinates:
* x (x) float64 32B nan 0.0 1.0 nan
* y (y) int64 32B 10 20 30 40
z (x) float64 32B nan 100.0 200.0 nan
"""
ds = self._to_temp_dataset().pad(
pad_width=pad_width,
mode=mode,
stat_length=stat_length,
constant_values=constant_values,
end_values=end_values,
reflect_type=reflect_type,
keep_attrs=keep_attrs,
**pad_width_kwargs,
)
return self._from_temp_dataset(ds)
def idxmin(
self,
dim: Hashable | None = None,
*,
skipna: bool | None = None,
fill_value: Any = dtypes.NA,
keep_attrs: bool | None = None,
) -> Self:
"""Return the coordinate label of the minimum value along a dimension.
Returns a new `DataArray` named after the dimension with the values of
the coordinate labels along that dimension corresponding to minimum
values along that dimension.
In comparison to :py:meth:`~DataArray.argmin`, this returns the
coordinate label while :py:meth:`~DataArray.argmin` returns the index.
Parameters
----------
dim : str, optional
Dimension over which to apply `idxmin`. This is optional for 1D
arrays, but required for arrays with 2 or more dimensions.
skipna : bool or None, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for ``float``, ``complex``, and ``object``
dtypes; other dtypes either do not have a sentinel missing value
(``int``) or ``skipna=True`` has not been implemented
(``datetime64`` or ``timedelta64``).
fill_value : Any, default: NaN
Value to be filled in case all of the values along a dimension are
null. By default this is NaN. The fill value and result are
automatically converted to a compatible dtype if possible.
Ignored if ``skipna`` is False.
keep_attrs : bool or None, optional
If True, the attributes (``attrs``) will be copied from the
original object to the new one. If False, the new object
will be returned without attributes.
Returns
-------
reduced : DataArray
New `DataArray` object with `idxmin` applied to its data and the
indicated dimension removed.
See Also
--------
Dataset.idxmin, DataArray.idxmax, DataArray.min, DataArray.argmin
Examples
--------
>>> array = xr.DataArray(
... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]}
... )
>>> array.min()
<xarray.DataArray ()> Size: 8B
array(-2)
>>> array.argmin(...)
{'x': <xarray.DataArray ()> Size: 8B
array(4)}
>>> array.idxmin()
<xarray.DataArray 'x' ()> Size: 4B
array('e', dtype='<U1')
>>> array = xr.DataArray(
... [
... [2.0, 1.0, 2.0, 0.0, -2.0],
... [-4.0, np.nan, 2.0, np.nan, -2.0],
... [np.nan, np.nan, 1.0, np.nan, np.nan],
... ],
... dims=["y", "x"],
... coords={"y": [-1, 0, 1], "x": np.arange(5.0) ** 2},
... )
>>> array.min(dim="x")
<xarray.DataArray (y: 3)> Size: 24B
array([-2., -4., 1.])
Coordinates:
* y (y) int64 24B -1 0 1
>>> array.argmin(dim="x")
<xarray.DataArray (y: 3)> Size: 24B
array([4, 0, 2])
Coordinates:
* y (y) int64 24B -1 0 1
>>> array.idxmin(dim="x")
<xarray.DataArray 'x' (y: 3)> Size: 24B
array([16., 0., 4.])
Coordinates:
* y (y) int64 24B -1 0 1
"""
return computation._calc_idxminmax(
array=self,
func=lambda x, *args, **kwargs: x.argmin(*args, **kwargs),
dim=dim,
skipna=skipna,
fill_value=fill_value,
keep_attrs=keep_attrs,
)
def idxmax(
self,
dim: Hashable = None,
*,
skipna: bool | None = None,
fill_value: Any = dtypes.NA,
keep_attrs: bool | None = None,
) -> Self:
"""Return the coordinate label of the maximum value along a dimension.
Returns a new `DataArray` named after the dimension with the values of
the coordinate labels along that dimension corresponding to maximum
values along that dimension.
In comparison to :py:meth:`~DataArray.argmax`, this returns the
coordinate label while :py:meth:`~DataArray.argmax` returns the index.
Parameters
----------
dim : Hashable, optional
Dimension over which to apply `idxmax`. This is optional for 1D
arrays, but required for arrays with 2 or more dimensions.
skipna : bool or None, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for ``float``, ``complex``, and ``object``
dtypes; other dtypes either do not have a sentinel missing value
(``int``) or ``skipna=True`` has not been implemented
(``datetime64`` or ``timedelta64``).
fill_value : Any, default: NaN
Value to be filled in case all of the values along a dimension are
null. By default this is NaN. The fill value and result are
automatically converted to a compatible dtype if possible.
Ignored if ``skipna`` is False.
keep_attrs : bool or None, optional
If True, the attributes (``attrs``) will be copied from the
original object to the new one. If False, the new object
will be returned without attributes.
Returns
-------
reduced : DataArray
New `DataArray` object with `idxmax` applied to its data and the
indicated dimension removed.
See Also
--------
Dataset.idxmax, DataArray.idxmin, DataArray.max, DataArray.argmax
Examples
--------
>>> array = xr.DataArray(
... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]}
... )
>>> array.max()
<xarray.DataArray ()> Size: 8B
array(2)
>>> array.argmax(...)
{'x': <xarray.DataArray ()> Size: 8B
array(1)}
>>> array.idxmax()
<xarray.DataArray 'x' ()> Size: 4B
array('b', dtype='<U1')
>>> array = xr.DataArray(
... [
... [2.0, 1.0, 2.0, 0.0, -2.0],
... [-4.0, np.nan, 2.0, np.nan, -2.0],
... [np.nan, np.nan, 1.0, np.nan, np.nan],
... ],
... dims=["y", "x"],
... coords={"y": [-1, 0, 1], "x": np.arange(5.0) ** 2},
... )
>>> array.max(dim="x")
<xarray.DataArray (y: 3)> Size: 24B
array([2., 2., 1.])
Coordinates:
* y (y) int64 24B -1 0 1
>>> array.argmax(dim="x")
<xarray.DataArray (y: 3)> Size: 24B
array([0, 2, 2])
Coordinates:
* y (y) int64 24B -1 0 1
>>> array.idxmax(dim="x")
<xarray.DataArray 'x' (y: 3)> Size: 24B
array([0., 4., 4.])
Coordinates:
* y (y) int64 24B -1 0 1
"""
return computation._calc_idxminmax(
array=self,
func=lambda x, *args, **kwargs: x.argmax(*args, **kwargs),
dim=dim,
skipna=skipna,
fill_value=fill_value,
keep_attrs=keep_attrs,
)
def argmin(
self,
dim: Dims = None,
*,
axis: int | None = None,
keep_attrs: bool | None = None,
skipna: bool | None = None,
) -> Self | dict[Hashable, Self]:
"""Index or indices of the minimum of the DataArray over one or more dimensions.
If a sequence is passed to 'dim', the result is returned as a dict of DataArrays,
which can be passed directly to isel(). If a single str is passed to 'dim' then
a DataArray with dtype int is returned.
If there are multiple minima, the indices of the first one found will be
returned.
Parameters
----------
dim : "...", str, Iterable of Hashable or None, optional
The dimensions over which to find the minimum. By default, finds minimum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated; in the future it will return a dict with indices for all
dimensions. To return a dict with all dimensions now, pass '...'.
axis : int or None, optional
Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool or None, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : DataArray or dict of DataArray
See Also
--------
Variable.argmin, DataArray.idxmin
Examples
--------
>>> array = xr.DataArray([0, 2, -1, 3], dims="x")
>>> array.min()
<xarray.DataArray ()> Size: 8B
array(-1)
>>> array.argmin(...)
{'x': <xarray.DataArray ()> Size: 8B
array(2)}
>>> array.isel(array.argmin(...))
<xarray.DataArray ()> Size: 8B
array(-1)
>>> array = xr.DataArray(
... [[[3, 2, 1], [3, 1, 2], [2, 1, 3]], [[1, 3, 2], [2, -5, 1], [2, 3, 1]]],
... dims=("x", "y", "z"),
... )
>>> array.min(dim="x")
<xarray.DataArray (y: 3, z: 3)> Size: 72B
array([[ 1, 2, 1],
[ 2, -5, 1],
[ 2, 1, 1]])
Dimensions without coordinates: y, z
>>> array.argmin(dim="x")
<xarray.DataArray (y: 3, z: 3)> Size: 72B
array([[1, 0, 0],
[1, 1, 1],
[0, 0, 1]])
Dimensions without coordinates: y, z
>>> array.argmin(dim=["x"])
{'x': <xarray.DataArray (y: 3, z: 3)> Size: 72B
array([[1, 0, 0],
[1, 1, 1],
[0, 0, 1]])
Dimensions without coordinates: y, z}
>>> array.min(dim=("x", "z"))
<xarray.DataArray (y: 3)> Size: 24B
array([ 1, -5, 1])
Dimensions without coordinates: y
>>> array.argmin(dim=["x", "z"])
{'x': <xarray.DataArray (y: 3)> Size: 24B
array([0, 1, 0])
Dimensions without coordinates: y, 'z': <xarray.DataArray (y: 3)> Size: 24B
array([2, 1, 1])
Dimensions without coordinates: y}
>>> array.isel(array.argmin(dim=["x", "z"]))
<xarray.DataArray (y: 3)> Size: 24B
array([ 1, -5, 1])
Dimensions without coordinates: y
"""
result = self.variable.argmin(dim, axis, keep_attrs, skipna)
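# When ``dim`` was given as a sequence, Variable.argmin returns one index
# array per dimension; wrap each back into a DataArray so the dict can be
# passed straight to ``isel``.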
if isinstance(result, dict):
return {k: self._replace_maybe_drop_dims(v) for k, v in result.items()}
else:
return self._replace_maybe_drop_dims(result)
def argmax(
self,
dim: Dims = None,
*,
axis: int | None = None,
keep_attrs: bool | None = None,
skipna: bool | None = None,
) -> Self | dict[Hashable, Self]:
"""Index or indices of the maximum of the DataArray over one or more dimensions.
If a sequence is passed to 'dim', the result is returned as a dict of DataArrays,
which can be passed directly to isel(). If a single str is passed to 'dim' then
a DataArray with dtype int is returned.
If there are multiple maxima, the indices of the first one found will be
returned.
Parameters
----------
dim : "...", str, Iterable of Hashable or None, optional
The dimensions over which to find the maximum. By default, finds maximum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated; in the future it will return a dict with indices for all
dimensions. To return a dict with all dimensions now, pass '...'.
axis : int or None, optional
Axis over which to apply `argmax`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool or None, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False, the new object will be
returned without attributes.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : DataArray or dict of DataArray
See Also
--------
Variable.argmax, DataArray.idxmax
Examples
--------
>>> array = xr.DataArray([0, 2, -1, 3], dims="x")
>>> array.max()
<xarray.DataArray ()> Size: 8B
array(3)
>>> array.argmax(...)
{'x': <xarray.DataArray ()> Size: 8B
array(3)}
>>> array.isel(array.argmax(...))
<xarray.DataArray ()> Size: 8B
array(3)
>>> array = xr.DataArray(
... [[[3, 2, 1], [3, 1, 2], [2, 1, 3]], [[1, 3, 2], [2, 5, 1], [2, 3, 1]]],
... dims=("x", "y", "z"),
... )
>>> array.max(dim="x")
<xarray.DataArray (y: 3, z: 3)> Size: 72B
array([[3, 3, 2],
[3, 5, 2],
[2, 3, 3]])
Dimensions without coordinates: y, z
>>> array.argmax(dim="x")
<xarray.DataArray (y: 3, z: 3)> Size: 72B
array([[0, 1, 1],
[0, 1, 0],
[0, 1, 0]])
Dimensions without coordinates: y, z
>>> array.argmax(dim=["x"])
{'x': <xarray.DataArray (y: 3, z: 3)> Size: 72B
array([[0, 1, 1],
[0, 1, 0],
[0, 1, 0]])
Dimensions without coordinates: y, z}
>>> array.max(dim=("x", "z"))
<xarray.DataArray (y: 3)> Size: 24B
array([3, 5, 3])
Dimensions without coordinates: y
>>> array.argmax(dim=["x", "z"])
{'x': <xarray.DataArray (y: 3)> Size: 24B
array([0, 1, 0])
Dimensions without coordinates: y, 'z': <xarray.DataArray (y: 3)> Size: 24B
array([0, 1, 2])
Dimensions without coordinates: y}
>>> array.isel(array.argmax(dim=["x", "z"]))
<xarray.DataArray (y: 3)> Size: 24B
array([3, 5, 3])
Dimensions without coordinates: y
"""
result = self.variable.argmax(dim, axis, keep_attrs, skipna)
if isinstance(result, dict):
return {k: self._replace_maybe_drop_dims(v) for k, v in result.items()}
else:
return self._replace_maybe_drop_dims(result)
def query(
self,
queries: Mapping[Any, Any] | None = None,
parser: QueryParserOptions = "pandas",
engine: QueryEngineOptions = None,
missing_dims: ErrorOptionsWithWarn = "raise",
**queries_kwargs: Any,
) -> DataArray:
"""Return a new data array indexed along the specified
dimension(s), where the indexers are given as strings containing
Python expressions to be evaluated against the values in the array.
Parameters
----------
queries : dict-like or None, optional
A dict-like with keys matching dimensions and values given by strings
containing Python expressions to be evaluated against the values in the
array. The expressions will be evaluated using the pandas
eval() function, and can contain any valid Python expressions but cannot
contain any Python statements.
parser : {"pandas", "python"}, default: "pandas"
The parser to use to construct the syntax tree from the expression.
The default of 'pandas' parses code slightly different than standard
Python. Alternatively, you can parse an expression using the 'python'
parser to retain strict Python semantics.
engine : {"python", "numexpr", None}, default: None
The engine used to evaluate the expression. Supported engines are:
- None: tries to use numexpr, falls back to python
- "numexpr": evaluates expressions using numexpr
- "python": performs operations as if you had eval’d in top level python
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
DataArray:
- "raise": raise an exception
- "warn": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
**queries_kwargs : {dim: query, ...}, optional
The keyword arguments form of ``queries``.
One of queries or queries_kwargs must be provided.
Returns
-------
obj : DataArray
A new DataArray with the same contents as this array, indexed by
the results of the appropriate queries.
See Also
--------
DataArray.isel
Dataset.query
pandas.eval
Examples
--------
>>> da = xr.DataArray(np.arange(0, 5, 1), dims="x", name="a")
>>> da
<xarray.DataArray 'a' (x: 5)> Size: 40B
array([0, 1, 2, 3, 4])
Dimensions without coordinates: x
>>> da.query(x="a > 2")
<xarray.DataArray 'a' (x: 2)> Size: 16B
array([3, 4])
Dimensions without coordinates: x
"""
ds = self._to_dataset_whole(shallow_copy=True)
ds = ds.query(
queries=queries,
parser=parser,
engine=engine,
missing_dims=missing_dims,
**queries_kwargs,
)
return ds[self.name]
def curvefit(
self,
coords: str | DataArray | Iterable[str | DataArray],
func: Callable[..., Any],
reduce_dims: Dims = None,
skipna: bool = True,
p0: Mapping[str, float | DataArray] | None = None,
bounds: Mapping[str, tuple[float | DataArray, float | DataArray]] | None = None,
param_names: Sequence[str] | None = None,
errors: ErrorOptions = "raise",
kwargs: dict[str, Any] | None = None,
) -> Dataset:
"""
Curve fitting optimization for arbitrary functions.
Wraps :py:func:`scipy.optimize.curve_fit` with :py:func:`~xarray.apply_ufunc`.
Parameters
----------
coords : Hashable, DataArray, or sequence of DataArray or Hashable
Independent coordinate(s) over which to perform the curve fitting. Must share
at least one dimension with the calling object. When fitting multi-dimensional
functions, supply `coords` as a sequence in the same order as arguments in
`func`. To fit along existing dimensions of the calling object, `coords` can
also be specified as a str or sequence of strs.
func : callable
User specified function in the form `f(x, *params)` which returns a numpy
array of length `len(x)`. `params` are the fittable parameters which are optimized
by scipy curve_fit. `x` can also be specified as a sequence containing multiple
coordinates, e.g. `f((x0, x1), *params)`.
reduce_dims : str, Iterable of Hashable or None, optional
Additional dimension(s) over which to aggregate while fitting. For example,
calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will
aggregate all lat and lon points and fit the specified function along the
time dimension.
skipna : bool, default: True
Whether to skip missing values when fitting. Default is True.
p0 : dict-like or None, optional
Optional dictionary of parameter names to initial guesses passed to the
`curve_fit` `p0` arg. If the values are DataArrays, they will be appropriately
broadcast to the coordinates of the array. If none or only some parameters are
passed, the rest will be assigned initial values following the default scipy
behavior.
bounds : dict-like, optional
Optional dictionary of parameter names to tuples of bounding values passed to the
`curve_fit` `bounds` arg. If any of the bounds are DataArrays, they will be
appropriately broadcast to the coordinates of the array. If none or only some
parameters are passed, the rest will be unbounded following the default scipy
behavior.
param_names : sequence of Hashable or None, optional
Sequence of names for the fittable parameters of `func`. If not supplied,
this will be automatically determined by arguments of `func`. `param_names`
should be manually supplied when fitting a function that takes a variable
number of parameters.
errors : {"raise", "ignore"}, default: "raise"
If 'raise', any errors from the `scipy.optimize.curve_fit` optimization will
raise an exception. If 'ignore', the coefficients and covariances for the
coordinates where the fitting failed will be NaN.
kwargs : dict, optional
Additional keyword arguments passed to scipy curve_fit.
Returns
-------
curvefit_results : Dataset
A single dataset which contains:
[var]_curvefit_coefficients
The coefficients of the best fit.
[var]_curvefit_covariance
The covariance matrix of the coefficient estimates.
Examples
--------
Generate some exponentially decaying data, where the decay constant and amplitude are
different for different values of the coordinate ``x``:
>>> rng = np.random.default_rng(seed=0)
>>> def exp_decay(t, time_constant, amplitude):
... return np.exp(-t / time_constant) * amplitude
...
>>> t = np.arange(11)
>>> da = xr.DataArray(
... np.stack(
... [
... exp_decay(t, 1, 0.1),
... exp_decay(t, 2, 0.2),
... exp_decay(t, 3, 0.3),
... ]
... )
... + rng.normal(size=(3, t.size)) * 0.01,
... coords={"x": [0, 1, 2], "time": t},
... )
>>> da
<xarray.DataArray (x: 3, time: 11)> Size: 264B
array([[ 0.1012573 , 0.0354669 , 0.01993775, 0.00602771, -0.00352513,
0.00428975, 0.01328788, 0.009562 , -0.00700381, -0.01264187,
-0.0062282 ],
[ 0.20041326, 0.09805582, 0.07138797, 0.03216692, 0.01974438,
0.01097441, 0.00679441, 0.01015578, 0.01408826, 0.00093645,
0.01501222],
[ 0.29334805, 0.21847449, 0.16305984, 0.11130396, 0.07164415,
0.04744543, 0.03602333, 0.03129354, 0.01074885, 0.01284436,
0.00910995]])
Coordinates:
* x (x) int64 24B 0 1 2
* time (time) int64 88B 0 1 2 3 4 5 6 7 8 9 10
Fit the exponential decay function to the data along the ``time`` dimension:
>>> fit_result = da.curvefit("time", exp_decay)
>>> fit_result["curvefit_coefficients"].sel(
... param="time_constant"
... ) # doctest: +NUMBER
<xarray.DataArray 'curvefit_coefficients' (x: 3)> Size: 24B
array([1.05692036, 1.73549638, 2.94215771])
Coordinates:
* x (x) int64 24B 0 1 2
param <U13 52B 'time_constant'
>>> fit_result["curvefit_coefficients"].sel(param="amplitude")
<xarray.DataArray 'curvefit_coefficients' (x: 3)> Size: 24B
array([0.1005489 , 0.19631423, 0.30003579])
Coordinates:
* x (x) int64 24B 0 1 2
param <U13 52B 'amplitude'
An initial guess can also be given with the ``p0`` arg (although it does not make much
of a difference in this simple example). To have a different guess for different
coordinate points, the guess can be a DataArray. Here we use the same initial guess
for the amplitude but different guesses for the time constant:
>>> fit_result = da.curvefit(
... "time",
... exp_decay,
... p0={
... "amplitude": 0.2,
... "time_constant": xr.DataArray([1, 2, 3], coords=[da.x]),
... },
... )
>>> fit_result["curvefit_coefficients"].sel(param="time_constant")
<xarray.DataArray 'curvefit_coefficients' (x: 3)> Size: 24B
array([1.0569213 , 1.73550052, 2.94215733])
Coordinates:
* x (x) int64 24B 0 1 2
param <U13 52B 'time_constant'
>>> fit_result["curvefit_coefficients"].sel(param="amplitude")
<xarray.DataArray 'curvefit_coefficients' (x: 3)> Size: 24B
array([0.10054889, 0.1963141 , 0.3000358 ])
Coordinates:
* x (x) int64 24B 0 1 2
param <U13 52B 'amplitude'
See Also
--------
DataArray.polyfit
scipy.optimize.curve_fit
xarray.DataArray.xlm.modelfit
External method from `xarray-lmfit <https://xarray-lmfit.readthedocs.io/>`_
with more curve fitting functionality.
"""
# For DataArray, use the original implementation by converting to a dataset first
return self._to_temp_dataset().curvefit(
coords,
func,
reduce_dims=reduce_dims,
skipna=skipna,
p0=p0,
bounds=bounds,
param_names=param_names,
errors=errors,
kwargs=kwargs,
)
def drop_duplicates(
self,
dim: Hashable | Iterable[Hashable],
*,
keep: Literal["first", "last", False] = "first",
) -> Self:
"""Returns a new DataArray with duplicate dimension values removed.
Parameters
----------
dim : dimension label or labels
Pass `...` to drop duplicates along all dimensions.
keep : {"first", "last", False}, default: "first"
Determines which duplicates (if any) to keep.
- ``"first"`` : Drop duplicates except for the first occurrence.
- ``"last"`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
Returns
-------
DataArray
See Also
--------
Dataset.drop_duplicates
Examples
--------
>>> da = xr.DataArray(
... np.arange(25).reshape(5, 5),
... dims=("x", "y"),
... coords={"x": np.array([0, 0, 1, 2, 3]), "y": np.array([0, 1, 2, 3, 3])},
... )
>>> da
<xarray.DataArray (x: 5, y: 5)> Size: 200B
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Coordinates:
* x (x) int64 40B 0 0 1 2 3
* y (y) int64 40B 0 1 2 3 3
>>> da.drop_duplicates(dim="x")
<xarray.DataArray (x: 4, y: 5)> Size: 160B
array([[ 0, 1, 2, 3, 4],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Coordinates:
* x (x) int64 32B 0 1 2 3
* y (y) int64 40B 0 1 2 3 3
>>> da.drop_duplicates(dim="x", keep="last")
<xarray.DataArray (x: 4, y: 5)> Size: 160B
array([[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
Coordinates:
* x (x) int64 32B 0 1 2 3
* y (y) int64 40B 0 1 2 3 3
Drop all duplicate dimension values:
>>> da.drop_duplicates(dim=...)
<xarray.DataArray (x: 4, y: 4)> Size: 128B
array([[ 0, 1, 2, 3],
[10, 11, 12, 13],
[15, 16, 17, 18],
[20, 21, 22, 23]])
Coordinates:
* x (x) int64 32B 0 1 2 3
* y (y) int64 32B 0 1 2 3
"""
deduplicated = self._to_temp_dataset().drop_duplicates(dim, keep=keep)
return self._from_temp_dataset(deduplicated)
def convert_calendar(
self,
calendar: str,
dim: str = "time",
align_on: str | None = None,
missing: Any | None = None,
use_cftime: bool | None = None,
) -> Self:
"""Convert the DataArray to another calendar.
Only converts the individual timestamps; it does not modify any data except
by dropping invalid/surplus dates or inserting missing dates.
If the source and target calendars are either no_leap, all_leap or a
standard type, only the type of the time array is modified.
When converting from a calendar with leap years to one without, the 29th
of February is removed from the array. In the other direction the 29th of
February will be missing in the output, unless `missing` is specified,
in which case that value is inserted.
For conversions involving `360_day` calendars, see Notes.
This method is safe to use with sub-daily data as it doesn't touch the
time part of the timestamps.
Parameters
----------
calendar : str
The target calendar name.
dim : str
Name of the time coordinate.
align_on : {None, 'date', 'year'}
Must be specified when either source or target is a `360_day` calendar,
ignored otherwise. See Notes.
missing : any, optional
By default, i.e. if the value is None, this method will simply attempt
to convert the dates in the source calendar to the same dates in the
target calendar, and drop any of those that are not possible to
represent. If a value is provided, a new time coordinate will be
created in the target calendar with the same frequency as the original
time coordinate; for any dates that are not present in the source, the
data will be filled with this value. Note that using this mode requires
that the source data have an inferable frequency; for more information
see :py:func:`xarray.infer_freq`. For certain frequency, source, and
target calendar combinations, this could result in many missing values, see notes.
use_cftime : boolean, optional
Whether to use cftime objects in the output, only used if `calendar`
is one of {"proleptic_gregorian", "gregorian" or "standard"}.
If True, the new time axis uses cftime objects.
If None (default), it uses :py:class:`numpy.datetime64` values if the
date range permits it, and :py:class:`cftime.datetime` objects if not.
If False, it uses :py:class:`numpy.datetime64` or fails.
Returns
-------
DataArray
Copy of the dataarray with the time coordinate converted to the
target calendar. If 'missing' was None (default), invalid dates in
the new calendar are dropped, but missing dates are not inserted.
If `missing` was given, the new data is reindexed to have a time axis
with the same frequency as the source, but in the new calendar; any
missing datapoints are filled with `missing`.
Notes
-----
        Passing a value to `missing` is only usable if the source's time coordinate has an
        inferable frequency (see :py:func:`~xarray.infer_freq`) and is only appropriate
        if the target coordinate, generated from this frequency, has dates equivalent to the
        source. It is usually **not** appropriate to use this mode with:
        - Period-end frequencies: 'A', 'Y', 'Q' or 'M', as opposed to 'AS', 'YS', 'QS' and 'MS'.
        - Sub-monthly frequencies that do not divide a day evenly: 'W', 'nD' where `n != 1`,
          or 'mH' where `24 % m != 0`.
If one of the source or target calendars is `"360_day"`, `align_on` must
be specified and two options are offered.
- "year"
The dates are translated according to their relative position in the year,
ignoring their original month and day information, meaning that the
missing/surplus days are added/removed at regular intervals.
From a `360_day` to a standard calendar, the output will be missing the
following dates (day of year in parentheses):
To a leap year:
January 31st (31), March 31st (91), June 1st (153), July 31st (213),
September 31st (275) and November 30th (335).
To a non-leap year:
February 6th (36), April 19th (109), July 2nd (183),
September 12th (255), November 25th (329).
From a standard calendar to a `"360_day"`, the following dates in the
source array will be dropped:
From a leap year:
January 31st (31), April 1st (92), June 1st (153), August 1st (214),
September 31st (275), December 1st (336)
From a non-leap year:
February 6th (37), April 20th (110), July 2nd (183),
September 13th (256), November 25th (329)
This option is best used on daily and subdaily data.
- "date"
The month/day information is conserved and invalid dates are dropped
from the output. This means that when converting from a `"360_day"` to a
standard calendar, all 31st (Jan, March, May, July, August, October and
December) will be missing as there is no equivalent dates in the
`"360_day"` calendar and the 29th (on non-leap years) and 30th of February
will be dropped as there are no equivalent dates in a standard calendar.
This option is best used with data on a frequency coarser than daily.
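        Examples
        --------
        A minimal sketch (``da`` is illustrative, assumes ``cftime`` is installed,
        and no output is shown):
        >>> da = xr.DataArray(
        ...     np.arange(4),
        ...     coords={"time": pd.date_range("2000-02-27", periods=4)},
        ...     dims="time",
        ... )
        >>> converted = da.convert_calendar("noleap")  # 2000-02-29 is dropped
        >>> back = converted.convert_calendar("standard", missing=np.nan)  # comes back as NaN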
"""
return convert_calendar(
self,
calendar,
dim=dim,
align_on=align_on,
missing=missing,
use_cftime=use_cftime,
)
def interp_calendar(
self,
target: pd.DatetimeIndex | CFTimeIndex | DataArray,
dim: str = "time",
) -> Self:
"""Interpolates the DataArray to another calendar based on decimal year measure.
Each timestamp in `source` and `target` are first converted to their decimal
year equivalent then `source` is interpolated on the target coordinate.
The decimal year of a timestamp is its year plus its sub-year component
converted to the fraction of its year. For example "2000-03-01 12:00" is
2000.1653 in a standard calendar or 2000.16301 in a `"noleap"` calendar.
This method should only be used when the time (HH:MM:SS) information of
time coordinate is not important.
Parameters
----------
target: DataArray or DatetimeIndex or CFTimeIndex
The target time coordinate of a valid dtype
(np.datetime64 or cftime objects)
dim : str
The time coordinate name.
        Returns
        -------
        DataArray
            The source interpolated on the decimal years of target.
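        Examples
        --------
        A minimal sketch (``da`` and ``target`` are illustrative, assumes ``cftime``
        is installed, and no output is shown):
        >>> da = xr.DataArray(
        ...     np.arange(12),
        ...     coords={"time": xr.date_range("2000-01-01", periods=12, freq="MS", calendar="noleap")},
        ...     dims="time",
        ... )
        >>> target = pd.date_range("2000-01-01", periods=12, freq="MS")
        >>> on_standard = da.interp_calendar(target)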
"""
return interp_calendar(self, target, dim=dim)
@_deprecate_positional_args("v2024.07.0")
def groupby(
self,
group: GroupInput = None,
*,
squeeze: Literal[False] = False,
restore_coord_dims: bool = False,
eagerly_compute_group: Literal[False] | None = None,
**groupers: Grouper,
) -> DataArrayGroupBy:
"""Returns a DataArrayGroupBy object for performing grouped operations.
Parameters
----------
group : str or DataArray or IndexVariable or sequence of hashable or mapping of hashable to Grouper
Array whose unique values should be used to group this array. If a
Hashable, must be the name of a coordinate contained in this dataarray. If a dictionary,
must map an existing variable name to a :py:class:`Grouper` instance.
squeeze : False
This argument is deprecated.
restore_coord_dims : bool, default: False
If True, also restore the dimension order of multi-dimensional
coordinates.
eagerly_compute_group: bool, optional
This argument is deprecated.
**groupers : Mapping of str to Grouper or Resampler
Mapping of variable name to group by to :py:class:`Grouper` or :py:class:`Resampler` object.
One of ``group`` or ``groupers`` must be provided.
            Multiple groupers may be combined, as shown in the examples below.
Returns
-------
grouped : DataArrayGroupBy
A `DataArrayGroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
Examples
--------
Calculate daily anomalies for daily data:
>>> da = xr.DataArray(
... np.linspace(0, 1826, num=1827),
... coords=[pd.date_range("2000-01-01", "2004-12-31", freq="D")],
... dims="time",
... )
>>> da
<xarray.DataArray (time: 1827)> Size: 15kB
array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03,
1.826e+03], shape=(1827,))
Coordinates:
* time (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31
>>> da.groupby("time.dayofyear") - da.groupby("time.dayofyear").mean("time")
<xarray.DataArray (time: 1827)> Size: 15kB
array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5], shape=(1827,))
Coordinates:
* time (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31
dayofyear (time) int64 15kB 1 2 3 4 5 6 7 8 ... 360 361 362 363 364 365 366
Use a ``Grouper`` object to be more explicit
>>> da.coords["dayofyear"] = da.time.dt.dayofyear
>>> da.groupby(dayofyear=xr.groupers.UniqueGrouper()).mean()
<xarray.DataArray (dayofyear: 366)> Size: 3kB
array([ 730.8, 731.8, 732.8, ..., 1093.8, 1094.8, 1095.5])
Coordinates:
* dayofyear (dayofyear) int64 3kB 1 2 3 4 5 6 7 ... 361 362 363 364 365 366
>>> da = xr.DataArray(
... data=np.arange(12).reshape((4, 3)),
... dims=("x", "y"),
... coords={"x": [10, 20, 30, 40], "letters": ("x", list("abba"))},
... )
Grouping by a single variable is easy
>>> da.groupby("letters")
<DataArrayGroupBy, grouped over 1 grouper(s), 2 groups in total:
'letters': UniqueGrouper('letters'), 2/2 groups with labels 'a', 'b'>
Execute a reduction
>>> da.groupby("letters").sum()
<xarray.DataArray (letters: 2, y: 3)> Size: 48B
array([[ 9, 11, 13],
[ 9, 11, 13]])
Coordinates:
* letters (letters) object 16B 'a' 'b'
Dimensions without coordinates: y
Grouping by multiple variables
>>> da.groupby(["letters", "x"])
<DataArrayGroupBy, grouped over 2 grouper(s), 8 groups in total:
'letters': UniqueGrouper('letters'), 2/2 groups with labels 'a', 'b'
'x': UniqueGrouper('x'), 4/4 groups with labels 10, 20, 30, 40>
Use Grouper objects to express more complicated GroupBy operations
>>> from xarray.groupers import BinGrouper, UniqueGrouper
>>>
>>> da.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper()).sum()
<xarray.DataArray (x_bins: 2, letters: 2, y: 3)> Size: 96B
array([[[ 0., 1., 2.],
[nan, nan, nan]],
<BLANKLINE>
[[nan, nan, nan],
[ 3., 4., 5.]]])
Coordinates:
* x_bins (x_bins) interval[int64, right] 32B (5, 15] (15, 25]
* letters (letters) object 16B 'a' 'b'
Dimensions without coordinates: y
See Also
--------
:ref:`groupby`
Users guide explanation of how to group and bin data.
:doc:`xarray-tutorial:intermediate/computation/01-high-level-computation-patterns`
            Tutorial on :py:func:`~xarray.DataArray.groupby` for windowed computation
        :doc:`xarray-tutorial:fundamentals/03.2_groupby_with_xarray`
            Tutorial on :py:func:`~xarray.DataArray.groupby` demonstrating reductions, transformation and comparison with :py:func:`~xarray.DataArray.resample`
:external:py:meth:`pandas.DataFrame.groupby <pandas.DataFrame.groupby>`
:func:`DataArray.groupby_bins <DataArray.groupby_bins>`
:func:`Dataset.groupby <Dataset.groupby>`
:func:`core.groupby.DataArrayGroupBy <core.groupby.DataArrayGroupBy>`
:func:`DataArray.coarsen <DataArray.coarsen>`
:func:`Dataset.resample <Dataset.resample>`
:func:`DataArray.resample <DataArray.resample>`
"""
from xarray.core.groupby import (
DataArrayGroupBy,
_parse_group_and_groupers,
_validate_groupby_squeeze,
)
_validate_groupby_squeeze(squeeze)
rgroupers = _parse_group_and_groupers(
self, group, groupers, eagerly_compute_group=eagerly_compute_group
)
return DataArrayGroupBy(self, rgroupers, restore_coord_dims=restore_coord_dims)
@_deprecate_positional_args("v2024.07.0")
def groupby_bins(
self,
group: Hashable | DataArray | IndexVariable,
bins: Bins,
right: bool = True,
labels: ArrayLike | Literal[False] | None = None,
precision: int = 3,
include_lowest: bool = False,
squeeze: Literal[False] = False,
restore_coord_dims: bool = False,
duplicates: Literal["raise", "drop"] = "raise",
eagerly_compute_group: Literal[False] | None = None,
) -> DataArrayGroupBy:
"""Returns a DataArrayGroupBy object for performing grouped operations.
Rather than using all unique values of `group`, the values are discretized
first by applying `pandas.cut` [1]_ to `group`.
Parameters
----------
group : Hashable, DataArray or IndexVariable
Array whose binned values should be used to group this array. If a
Hashable, must be the name of a coordinate contained in this dataarray.
bins : int or array-like
If bins is an int, it defines the number of equal-width bins in the
range of x. However, in this case, the range of x is extended by .1%
on each side to include the min or max values of x. If bins is a
sequence it defines the bin edges allowing for non-uniform bin
width. No extension of the range of x is done in this case.
right : bool, default: True
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array-like, False or None, default: None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, string bin labels are assigned by
`pandas.cut`.
precision : int, default: 3
The precision at which to store and display the bins labels.
include_lowest : bool, default: False
Whether the first interval should be left-inclusive or not.
squeeze : False
This argument is deprecated.
restore_coord_dims : bool, default: False
If True, also restore the dimension order of multi-dimensional
coordinates.
duplicates : {"raise", "drop"}, default: "raise"
If bin edges are not unique, raise ValueError or drop non-uniques.
eagerly_compute_group: bool, optional
This argument is deprecated.
Returns
-------
grouped : DataArrayGroupBy
A `DataArrayGroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
The name of the group has the added suffix `_bins` in order to
distinguish it from the original variable.
See Also
--------
:ref:`groupby`
Users guide explanation of how to group and bin data.
DataArray.groupby
Dataset.groupby_bins
core.groupby.DataArrayGroupBy
pandas.DataFrame.groupby
References
----------
.. [1] https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.cut.html
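        Examples
        --------
        A minimal sketch (``da`` is illustrative; no output is shown):
        >>> da = xr.DataArray(np.arange(5.0), dims="x", coords={"x": [1, 4, 6, 8, 10]})
        >>> binned_mean = da.groupby_bins("x", bins=[0, 5, 10]).mean()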
"""
from xarray.core.groupby import (
DataArrayGroupBy,
ResolvedGrouper,
_validate_groupby_squeeze,
)
from xarray.groupers import BinGrouper
_validate_groupby_squeeze(squeeze)
grouper = BinGrouper(
bins=bins,
right=right,
labels=labels,
precision=precision,
include_lowest=include_lowest,
)
rgrouper = ResolvedGrouper(
grouper, group, self, eagerly_compute_group=eagerly_compute_group
)
return DataArrayGroupBy(
self,
(rgrouper,),
restore_coord_dims=restore_coord_dims,
)
def weighted(self, weights: DataArray) -> DataArrayWeighted:
"""
Weighted DataArray operations.
Parameters
----------
weights : DataArray
An array of weights associated with the values in this Dataset.
Each value in the data contributes to the reduction operation
according to its associated weight.
Notes
-----
``weights`` must be a DataArray and cannot contain missing values.
Missing values can be replaced by ``weights.fillna(0)``.
Returns
-------
computation.weighted.DataArrayWeighted
See Also
--------
:func:`Dataset.weighted <Dataset.weighted>`
:ref:`compute.weighted`
User guide on weighted array reduction using :py:func:`~xarray.DataArray.weighted`
:doc:`xarray-tutorial:fundamentals/03.4_weighted`
Tutorial on Weighted Reduction using :py:func:`~xarray.DataArray.weighted`
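        Examples
        --------
        A minimal sketch (the array and weights are illustrative; no output is shown):
        >>> da = xr.DataArray([1.0, 2.0, 3.0], dims="x")
        >>> weights = xr.DataArray([0.5, 0.25, 0.25], dims="x")
        >>> weighted_mean = da.weighted(weights).mean("x")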
"""
from xarray.computation.weighted import DataArrayWeighted
return DataArrayWeighted(self, weights)
def rolling(
self,
dim: Mapping[Any, int] | None = None,
min_periods: int | None = None,
center: bool | Mapping[Any, bool] = False,
**window_kwargs: int,
) -> DataArrayRolling:
"""
Rolling window object for DataArrays.
Parameters
----------
dim : dict, optional
Mapping from the dimension name to create the rolling iterator
along (e.g. `time`) to its moving window size.
min_periods : int or None, default: None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
        center : bool or mapping of hashable to bool, default: False
Set the labels at the center of the window. The default, False,
sets the labels at the right edge of the window.
**window_kwargs : optional
The keyword arguments form of ``dim``.
One of dim or window_kwargs must be provided.
Returns
-------
computation.rolling.DataArrayRolling
Examples
--------
Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:
>>> da = xr.DataArray(
... np.linspace(0, 11, num=12),
... coords=[
... pd.date_range(
... "1999-12-15",
... periods=12,
... freq=pd.DateOffset(months=1),
... )
... ],
... dims="time",
... )
>>> da
<xarray.DataArray (time: 12)> Size: 96B
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])
Coordinates:
* time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15
>>> da.rolling(time=3, center=True).mean()
<xarray.DataArray (time: 12)> Size: 96B
array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])
Coordinates:
* time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15
Remove the NaNs using ``dropna()``:
>>> da.rolling(time=3, center=True).mean().dropna("time")
<xarray.DataArray (time: 10)> Size: 80B
array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
Coordinates:
* time (time) datetime64[ns] 80B 2000-01-15 2000-02-15 ... 2000-10-15
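        A sketch using ``min_periods`` so edge windows still produce values
        (illustrative; the result is not shown):
        >>> partial_mean = da.rolling(time=3, center=True, min_periods=1).mean()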
See Also
--------
DataArray.cumulative
Dataset.rolling
computation.rolling.DataArrayRolling
"""
from xarray.computation.rolling import DataArrayRolling
dim = either_dict_or_kwargs(dim, window_kwargs, "rolling")
return DataArrayRolling(self, dim, min_periods=min_periods, center=center)
def cumulative(
self,
dim: str | Iterable[Hashable],
min_periods: int = 1,
) -> DataArrayRolling:
"""
Accumulating object for DataArrays.
Parameters
----------
        dim : str or iterable of hashable
The name(s) of the dimensions to create the cumulative window along
min_periods : int, default: 1
Minimum number of observations in window required to have a value
(otherwise result is NA). The default is 1 (note this is different
from ``Rolling``, whose default is the size of the window).
Returns
-------
computation.rolling.DataArrayRolling
Examples
--------
Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:
>>> da = xr.DataArray(
... np.linspace(0, 11, num=12),
... coords=[
... pd.date_range(
... "1999-12-15",
... periods=12,
... freq=pd.DateOffset(months=1),
... )
... ],
... dims="time",
... )
>>> da
<xarray.DataArray (time: 12)> Size: 96B
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])
Coordinates:
* time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15
>>> da.cumulative("time").sum()
<xarray.DataArray (time: 12)> Size: 96B
array([ 0., 1., 3., 6., 10., 15., 21., 28., 36., 45., 55., 66.])
Coordinates:
* time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15
See Also
--------
DataArray.rolling
Dataset.cumulative
computation.rolling.DataArrayRolling
"""
from xarray.computation.rolling import DataArrayRolling
# Could we abstract this "normalize and check 'dim'" logic? It's currently shared
# with the same method in Dataset.
if isinstance(dim, str):
if dim not in self.dims:
raise ValueError(
f"Dimension {dim} not found in data dimensions: {self.dims}"
)
dim = {dim: self.sizes[dim]}
else:
missing_dims = set(dim) - set(self.dims)
if missing_dims:
raise ValueError(
f"Dimensions {missing_dims} not found in data dimensions: {self.dims}"
)
dim = {d: self.sizes[d] for d in dim}
return DataArrayRolling(self, dim, min_periods=min_periods, center=False)
def coarsen(
self,
dim: Mapping[Any, int] | None = None,
boundary: CoarsenBoundaryOptions = "exact",
side: SideOptions | Mapping[Any, SideOptions] = "left",
coord_func: str | Callable | Mapping[Any, str | Callable] = "mean",
**window_kwargs: int,
) -> DataArrayCoarsen:
"""
Coarsen object for DataArrays.
Parameters
----------
dim : mapping of hashable to int, optional
Mapping from the dimension name to the window size.
boundary : {"exact", "trim", "pad"}, default: "exact"
If 'exact', a ValueError will be raised if dimension size is not a
multiple of the window size. If 'trim', the excess entries are
dropped. If 'pad', NA will be padded.
side : {"left", "right"} or mapping of str to {"left", "right"}, default: "left"
coord_func : str or mapping of hashable to str, default: "mean"
function (name) that is applied to the coordinates,
or a mapping from coordinate name to function (name).
Returns
-------
computation.rolling.DataArrayCoarsen
Examples
--------
Coarsen the long time series by averaging over every three days.
>>> da = xr.DataArray(
... np.linspace(0, 364, num=364),
... dims="time",
... coords={"time": pd.date_range("1999-12-15", periods=364)},
... )
>>> da # +doctest: ELLIPSIS
<xarray.DataArray (time: 364)> Size: 3kB
array([ 0. , 1.00275482, 2.00550964, 3.00826446,
4.01101928, 5.0137741 , 6.01652893, 7.01928375,
8.02203857, 9.02479339, 10.02754821, 11.03030303,
12.03305785, 13.03581267, 14.03856749, 15.04132231,
16.04407713, 17.04683196, 18.04958678, 19.0523416 ,
20.05509642, 21.05785124, 22.06060606, 23.06336088,
24.0661157 , 25.06887052, 26.07162534, 27.07438017,
28.07713499, 29.07988981, 30.08264463, 31.08539945,
32.08815427, 33.09090909, 34.09366391, 35.09641873,
36.09917355, 37.10192837, 38.1046832 , 39.10743802,
40.11019284, 41.11294766, 42.11570248, 43.1184573 ,
44.12121212, 45.12396694, 46.12672176, 47.12947658,
48.1322314 , 49.13498623, 50.13774105, 51.14049587,
52.14325069, 53.14600551, 54.14876033, 55.15151515,
56.15426997, 57.15702479, 58.15977961, 59.16253444,
60.16528926, 61.16804408, 62.1707989 , 63.17355372,
64.17630854, 65.17906336, 66.18181818, 67.184573 ,
68.18732782, 69.19008264, 70.19283747, 71.19559229,
72.19834711, 73.20110193, 74.20385675, 75.20661157,
76.20936639, 77.21212121, 78.21487603, 79.21763085,
...
284.78236915, 285.78512397, 286.78787879, 287.79063361,
288.79338843, 289.79614325, 290.79889807, 291.80165289,
292.80440771, 293.80716253, 294.80991736, 295.81267218,
296.815427 , 297.81818182, 298.82093664, 299.82369146,
300.82644628, 301.8292011 , 302.83195592, 303.83471074,
304.83746556, 305.84022039, 306.84297521, 307.84573003,
308.84848485, 309.85123967, 310.85399449, 311.85674931,
312.85950413, 313.86225895, 314.86501377, 315.8677686 ,
316.87052342, 317.87327824, 318.87603306, 319.87878788,
320.8815427 , 321.88429752, 322.88705234, 323.88980716,
324.89256198, 325.8953168 , 326.89807163, 327.90082645,
328.90358127, 329.90633609, 330.90909091, 331.91184573,
332.91460055, 333.91735537, 334.92011019, 335.92286501,
336.92561983, 337.92837466, 338.93112948, 339.9338843 ,
340.93663912, 341.93939394, 342.94214876, 343.94490358,
344.9476584 , 345.95041322, 346.95316804, 347.95592287,
348.95867769, 349.96143251, 350.96418733, 351.96694215,
352.96969697, 353.97245179, 354.97520661, 355.97796143,
356.98071625, 357.98347107, 358.9862259 , 359.98898072,
360.99173554, 361.99449036, 362.99724518, 364. ])
Coordinates:
* time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-12-12
>>> da.coarsen(time=3, boundary="trim").mean() # +doctest: ELLIPSIS
<xarray.DataArray (time: 121)> Size: 968B
array([ 1.00275482, 4.01101928, 7.01928375, 10.02754821,
13.03581267, 16.04407713, 19.0523416 , 22.06060606,
25.06887052, 28.07713499, 31.08539945, 34.09366391,
37.10192837, 40.11019284, 43.1184573 , 46.12672176,
49.13498623, 52.14325069, 55.15151515, 58.15977961,
61.16804408, 64.17630854, 67.184573 , 70.19283747,
73.20110193, 76.20936639, 79.21763085, 82.22589532,
85.23415978, 88.24242424, 91.25068871, 94.25895317,
97.26721763, 100.27548209, 103.28374656, 106.29201102,
109.30027548, 112.30853994, 115.31680441, 118.32506887,
121.33333333, 124.3415978 , 127.34986226, 130.35812672,
133.36639118, 136.37465565, 139.38292011, 142.39118457,
145.39944904, 148.4077135 , 151.41597796, 154.42424242,
157.43250689, 160.44077135, 163.44903581, 166.45730028,
169.46556474, 172.4738292 , 175.48209366, 178.49035813,
181.49862259, 184.50688705, 187.51515152, 190.52341598,
193.53168044, 196.5399449 , 199.54820937, 202.55647383,
205.56473829, 208.57300275, 211.58126722, 214.58953168,
217.59779614, 220.60606061, 223.61432507, 226.62258953,
229.63085399, 232.63911846, 235.64738292, 238.65564738,
241.66391185, 244.67217631, 247.68044077, 250.68870523,
253.6969697 , 256.70523416, 259.71349862, 262.72176309,
265.73002755, 268.73829201, 271.74655647, 274.75482094,
277.7630854 , 280.77134986, 283.77961433, 286.78787879,
289.79614325, 292.80440771, 295.81267218, 298.82093664,
301.8292011 , 304.83746556, 307.84573003, 310.85399449,
313.86225895, 316.87052342, 319.87878788, 322.88705234,
325.8953168 , 328.90358127, 331.91184573, 334.92011019,
337.92837466, 340.93663912, 343.94490358, 346.95316804,
349.96143251, 352.96969697, 355.97796143, 358.9862259 ,
361.99449036])
Coordinates:
* time (time) datetime64[ns] 968B 1999-12-16 1999-12-19 ... 2000-12-10
>>>
See Also
--------
:class:`computation.rolling.DataArrayCoarsen <computation.rolling.DataArrayCoarsen>`
:func:`Dataset.coarsen <Dataset.coarsen>`
:ref:`reshape.coarsen`
User guide describing :py:func:`~xarray.DataArray.coarsen`
:ref:`compute.coarsen`
User guide on block aggregation :py:func:`~xarray.DataArray.coarsen`
:doc:`xarray-tutorial:fundamentals/03.3_windowed`
Tutorial on windowed computation using :py:func:`~xarray.DataArray.coarsen`
"""
from xarray.computation.rolling import DataArrayCoarsen
dim = either_dict_or_kwargs(dim, window_kwargs, "coarsen")
return DataArrayCoarsen(
self,
dim,
boundary=boundary,
side=side,
coord_func=coord_func,
)
@_deprecate_positional_args("v2024.07.0")
def resample(
self,
indexer: Mapping[Hashable, ResampleCompatible | Resampler] | None = None,
*,
skipna: bool | None = None,
closed: SideOptions | None = None,
label: SideOptions | None = None,
offset: pd.Timedelta | datetime.timedelta | str | None = None,
origin: str | DatetimeLike = "start_day",
restore_coord_dims: bool | None = None,
**indexer_kwargs: ResampleCompatible | Resampler,
) -> DataArrayResample:
"""Returns a Resample object for performing resampling operations.
Handles both downsampling and upsampling. The resampled
dimension must be a datetime-like coordinate. If any intervals
contain no values from the original object, they will be given
the value ``NaN``.
Parameters
----------
indexer : Mapping of Hashable to str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler, optional
Mapping from the dimension name to resample frequency [1]_. The
dimension must be datetime-like.
skipna : bool, optional
Whether to skip missing values when aggregating in downsampling.
closed : {"left", "right"}, optional
Side of each interval to treat as closed.
label : {"left", "right"}, optional
Side of each interval to use for labeling.
origin : {'epoch', 'start', 'start_day', 'end', 'end_day'}, pd.Timestamp, datetime.datetime, np.datetime64, or cftime.datetime, default 'start_day'
The datetime on which to adjust the grouping. The timezone of origin
must match the timezone of the index.
If a datetime is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
offset : pd.Timedelta, datetime.timedelta, or str, default is None
An offset timedelta added to the origin.
restore_coord_dims : bool, optional
If True, also restore the dimension order of multi-dimensional
coordinates.
**indexer_kwargs : str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler
The keyword arguments form of ``indexer``.
One of indexer or indexer_kwargs must be provided.
Returns
-------
resampled : core.resample.DataArrayResample
This object resampled.
Examples
--------
Downsample monthly time-series data to seasonal data:
>>> da = xr.DataArray(
... np.linspace(0, 11, num=12),
... coords=[
... pd.date_range(
... "1999-12-15",
... periods=12,
... freq=pd.DateOffset(months=1),
... )
... ],
... dims="time",
... )
>>> da
<xarray.DataArray (time: 12)> Size: 96B
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])
Coordinates:
* time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15
>>> da.resample(time="QS-DEC").mean()
<xarray.DataArray (time: 4)> Size: 32B
array([ 1., 4., 7., 10.])
Coordinates:
* time (time) datetime64[ns] 32B 1999-12-01 2000-03-01 ... 2000-09-01
Upsample monthly time-series data to daily data:
>>> da.resample(time="1D").interpolate("linear") # +doctest: ELLIPSIS
<xarray.DataArray (time: 337)> Size: 3kB
array([ 0. , 0.03225806, 0.06451613, 0.09677419, 0.12903226,
0.16129032, 0.19354839, 0.22580645, 0.25806452, 0.29032258,
0.32258065, 0.35483871, 0.38709677, 0.41935484, 0.4516129 ,
0.48387097, 0.51612903, 0.5483871 , 0.58064516, 0.61290323,
0.64516129, 0.67741935, 0.70967742, 0.74193548, 0.77419355,
0.80645161, 0.83870968, 0.87096774, 0.90322581, 0.93548387,
0.96774194, 1. , ...,
9. , 9.03333333, 9.06666667, 9.1 , 9.13333333,
9.16666667, 9.2 , 9.23333333, 9.26666667, 9.3 ,
9.33333333, 9.36666667, 9.4 , 9.43333333, 9.46666667,
9.5 , 9.53333333, 9.56666667, 9.6 , 9.63333333,
9.66666667, 9.7 , 9.73333333, 9.76666667, 9.8 ,
9.83333333, 9.86666667, 9.9 , 9.93333333, 9.96666667,
10. , 10.03225806, 10.06451613, 10.09677419, 10.12903226,
10.16129032, 10.19354839, 10.22580645, 10.25806452, 10.29032258,
10.32258065, 10.35483871, 10.38709677, 10.41935484, 10.4516129 ,
10.48387097, 10.51612903, 10.5483871 , 10.58064516, 10.61290323,
10.64516129, 10.67741935, 10.70967742, 10.74193548, 10.77419355,
10.80645161, 10.83870968, 10.87096774, 10.90322581, 10.93548387,
10.96774194, 11. ])
Coordinates:
* time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-11-15
Limit scope of upsampling method
>>> da.resample(time="1D").nearest(tolerance="1D")
<xarray.DataArray (time: 337)> Size: 3kB
array([ 0., 0., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, 1., 1., 1., nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, 2., 2., 2., nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 3.,
3., 3., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, 4., 4., 4., nan, nan, nan, nan, nan, ...,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, 10., 10., 10., nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 11., 11.])
Coordinates:
* time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-11-15
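        A sketch of shifting bin edges with ``origin`` and ``offset`` (illustrative;
        the resampler is not reduced here, so no output is shown):
        >>> shifted = da.resample(time="MS", origin="start", offset="3h")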
See Also
--------
Dataset.resample
pandas.Series.resample
pandas.DataFrame.resample
References
----------
.. [1] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
"""
from xarray.core.resample import DataArrayResample
return self._resample(
resample_cls=DataArrayResample,
indexer=indexer,
skipna=skipna,
closed=closed,
label=label,
offset=offset,
origin=origin,
restore_coord_dims=restore_coord_dims,
**indexer_kwargs,
)
def to_dask_dataframe(
self,
dim_order: Sequence[Hashable] | None = None,
set_index: bool = False,
) -> DaskDataFrame:
"""Convert this array into a dask.dataframe.DataFrame.
Parameters
----------
        dim_order : Sequence of Hashable or None, optional
Hierarchical dimension order for the resulting dataframe.
Array content is transposed to this order and then written out as flat
vectors in contiguous order, so the last dimension in this list
will be contiguous in the resulting DataFrame. This has a major influence
on which operations are efficient on the resulting dask dataframe.
set_index : bool, default: False
            If set_index=True, the dask DataFrame is indexed by this array's
            coordinate. Since dask DataFrames do not support multi-indexes,
            set_index only works if the array contains only one dimension.
Returns
-------
dask.dataframe.DataFrame
Examples
--------
>>> da = xr.DataArray(
... np.arange(4 * 2 * 2).reshape(4, 2, 2),
... dims=("time", "lat", "lon"),
... coords={
... "time": np.arange(4),
... "lat": [-30, -20],
... "lon": [120, 130],
... },
... name="eg_dataarray",
... attrs={"units": "Celsius", "description": "Random temperature data"},
... )
>>> da.to_dask_dataframe(["lat", "lon", "time"]).compute()
lat lon time eg_dataarray
0 -30 120 0 0
1 -30 120 1 4
2 -30 120 2 8
3 -30 120 3 12
4 -30 130 0 1
5 -30 130 1 5
6 -30 130 2 9
7 -30 130 3 13
8 -20 120 0 2
9 -20 120 1 6
10 -20 120 2 10
11 -20 120 3 14
12 -20 130 0 3
13 -20 130 1 7
14 -20 130 2 11
15 -20 130 3 15
"""
if self.name is None:
raise ValueError(
"Cannot convert an unnamed DataArray to a "
"dask dataframe : use the ``.rename`` method to assign a name."
)
name = self.name
ds = self._to_dataset_whole(name, shallow_copy=False)
return ds.to_dask_dataframe(dim_order, set_index)
# this needs to be at the end, or mypy will confuse with `str`
# https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names
str = utils.UncachedAccessor(StringAccessor["DataArray"])
def drop_attrs(self, *, deep: bool = True) -> Self:
"""
Removes all attributes from the DataArray.
Parameters
----------
        deep : bool, default: True
Removes attributes from coordinates.
Returns
-------
DataArray
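        Examples
        --------
        A small sketch (the attribute is illustrative; no output is shown):
        >>> da = xr.DataArray([1, 2], dims="x", attrs={"units": "m"})
        >>> bare = da.drop_attrs()  # ``bare.attrs`` is now an empty dict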
"""
if not deep:
return self._replace(attrs={})
else:
return (
self._to_temp_dataset()
.drop_attrs(deep=deep)
.pipe(self._from_temp_dataset)
)
|
DataArray
|
python
|
dask__dask
|
dask/array/_array_expr/_rechunk.py
|
{
"start": 442,
"end": 2605
}
|
class ____(ArrayExpr):
_parameters = [
"array",
"_chunks",
"threshold",
"block_size_limit",
"balance",
"method",
]
_defaults = {
"_chunks": "auto",
"threshold": None,
"block_size_limit": None,
"balance": None,
"method": None,
}
@property
def _meta(self):
return self.array._meta
@property
def _name(self):
return "rechunk-merge-" + tokenize(*self.operands)
@cached_property
def chunks(self):
x = self.array
chunks = self.operand("_chunks")
# don't rechunk if array is empty
if x.ndim > 0 and all(s == 0 for s in x.shape):
return x.chunks
if isinstance(chunks, dict):
chunks = {validate_axis(c, x.ndim): v for c, v in chunks.items()}
for i in range(x.ndim):
if i not in chunks:
chunks[i] = x.chunks[i]
elif chunks[i] is None:
chunks[i] = x.chunks[i]
if isinstance(chunks, (tuple, list)):
chunks = tuple(
lc if lc is not None else rc for lc, rc in zip(chunks, x.chunks)
)
chunks = normalize_chunks(
chunks,
x.shape,
limit=self.block_size_limit,
dtype=x.dtype,
previous_chunks=x.chunks,
)
if not len(chunks) == x.ndim:
raise ValueError("Provided chunks are not consistent with shape")
if self.balance:
chunks = tuple(_balance_chunksizes(chunk) for chunk in chunks)
_validate_rechunk(x.chunks, chunks)
return chunks
def _lower(self):
if not self.balance and (self.chunks == self.array.chunks):
return self.array
method = self.method or _choose_rechunk_method(
self.array.chunks, self.chunks, threshold=self.threshold
)
if method == "p2p":
raise NotImplementedError
else:
return TasksRechunk(
self.array, self.chunks, self.threshold, self.block_size_limit
)
|
Rechunk
|
python
|
psf__black
|
gallery/gallery.py
|
{
"start": 781,
"end": 9065
}
|
class ____(NamedTuple):
version: str
config: str | None = None
def get_pypi_download_url(package: str, version: str | None) -> str:
with urlopen(PYPI_INSTANCE + f"/{package}/json") as page:
metadata = json.load(page)
if version is None:
sources = metadata["urls"]
else:
if version in metadata["releases"]:
sources = metadata["releases"][version]
else:
raise ValueError(
f"No releases found with version ('{version}') tag. "
f"Found releases: {metadata['releases'].keys()}"
)
for source in sources:
if source["python_version"] == "source":
break
else:
raise ValueError(f"Couldn't find any sources for {package}")
return cast(str, source["url"])
def get_top_packages() -> list[str]:
with urlopen(PYPI_TOP_PACKAGES) as page:
result = json.load(page)
return [package["project"] for package in result["rows"]]
def get_package_source(package: str, version: str | None) -> str:
if package == "cpython":
if version is None:
version = "main"
return f"https://github.com/python/cpython/archive/{version}.zip"
elif package == "pypy":
if version is None:
version = "branch/default"
return (
f"https://foss.heptapod.net/pypy/pypy/repository/{version}/archive.tar.bz2"
)
else:
return get_pypi_download_url(package, version)
def get_archive_manager(local_file: str) -> ArchiveKind:
if tarfile.is_tarfile(local_file):
return tarfile.open(local_file)
elif zipfile.is_zipfile(local_file):
return zipfile.ZipFile(local_file)
else:
raise ValueError("Unknown archive kind.")
def get_first_archive_member(archive: ArchiveKind) -> str:
if isinstance(archive, tarfile.TarFile):
return archive.getnames()[0]
elif isinstance(archive, zipfile.ZipFile):
return archive.namelist()[0]
def download_and_extract(package: str, version: str | None, directory: Path) -> Path:
source = get_package_source(package, version)
local_file, _ = urlretrieve(source, directory / f"{package}-src")
with get_archive_manager(local_file) as archive:
archive.extractall(path=directory)
result_dir = get_first_archive_member(archive)
return directory / result_dir
def get_package(package: str, version: str | None, directory: Path) -> Path | None:
try:
return download_and_extract(package, version, directory)
except Exception:
print(f"Caught an exception while downloading {package}.")
traceback.print_exc()
return None
DEFAULT_SLICE = slice(None) # for flake8
def download_and_extract_top_packages(
directory: Path,
workers: int = 8,
limit: slice = DEFAULT_SLICE,
) -> Generator[Path, None, None]:
with ThreadPoolExecutor(max_workers=workers) as executor:
bound_downloader = partial(get_package, version=None, directory=directory)
for package in executor.map(bound_downloader, get_top_packages()[limit]):
if package is not None:
yield package
def git_create_repository(repo: Path) -> None:
subprocess.run(["git", "init"], cwd=repo)
git_add_and_commit(msg="Initial commit", repo=repo)
def git_add_and_commit(msg: str, repo: Path) -> None:
subprocess.run(["git", "add", "."], cwd=repo)
subprocess.run(["git", "commit", "-m", msg, "--allow-empty"], cwd=repo)
def git_switch_branch(
branch: str, repo: Path, new: bool = False, from_branch: str | None = None
) -> None:
args = ["git", "checkout"]
if new:
args.append("-b")
args.append(branch)
if from_branch:
args.append(from_branch)
subprocess.run(args, cwd=repo)
def init_repos(options: Namespace) -> tuple[Path, ...]:
options.output.mkdir(exist_ok=True)
if options.top_packages:
source_directories = tuple(
download_and_extract_top_packages(
directory=options.output,
workers=options.workers,
limit=slice(None, options.top_packages),
)
)
else:
source_directories = (
download_and_extract(
package=options.pypi_package,
version=options.version,
directory=options.output,
),
)
for source_directory in source_directories:
git_create_repository(source_directory)
if options.black_repo is None:
subprocess.run(
["git", "clone", "https://github.com/psf/black.git", INTERNAL_BLACK_REPO],
cwd=options.output,
)
options.black_repo = options.output / INTERNAL_BLACK_REPO
return source_directories
@lru_cache(8)
def black_runner(version: str, black_repo: Path) -> Path:
directory = tempfile.TemporaryDirectory()
venv.create(directory.name, with_pip=True)
python = Path(directory.name) / "bin" / "python"
subprocess.run([python, "-m", "pip", "install", "-e", black_repo])
atexit.register(directory.cleanup)
return python
def format_repo_with_version(
repo: Path,
from_branch: str | None,
black_repo: Path,
black_version: BlackVersion,
input_directory: Path,
) -> str:
current_branch = f"black-{black_version.version}"
git_switch_branch(black_version.version, repo=black_repo)
git_switch_branch(current_branch, repo=repo, new=True, from_branch=from_branch)
format_cmd: list[Path | str] = [
black_runner(black_version.version, black_repo),
(black_repo / "black.py").resolve(),
".",
]
if black_version.config:
format_cmd.extend(["--config", input_directory / black_version.config])
subprocess.run(format_cmd, cwd=repo, check=False) # ensure the process
    # continues to run even if it can't format some files. Reporting those
# should be enough
git_add_and_commit(f"Format with black:{black_version.version}", repo=repo)
return current_branch
def format_repos(repos: tuple[Path, ...], options: Namespace) -> None:
black_versions = tuple(
BlackVersion(*version.split(":")) for version in options.versions
)
for repo in repos:
from_branch = None
for black_version in black_versions:
from_branch = format_repo_with_version(
repo=repo,
from_branch=from_branch,
black_repo=options.black_repo,
black_version=black_version,
input_directory=options.input,
)
git_switch_branch("main", repo=repo)
git_switch_branch("main", repo=options.black_repo)
def main() -> None:
parser = ArgumentParser(description="""Black Gallery is a script that
automates the process of applying different Black versions to a selected
PyPI package and seeing the results between versions.""")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-p", "--pypi-package", help="PyPI package to download.")
group.add_argument(
"-t", "--top-packages", help="Top n PyPI packages to download.", type=int
)
parser.add_argument("-b", "--black-repo", help="Black's Git repository.", type=Path)
parser.add_argument(
"-v",
"--version",
help=(
"Version for given PyPI package. Will be discarded if used with -t option."
),
)
parser.add_argument(
"-w",
"--workers",
help=(
"Maximum number of threads to download with at the same time. "
"Will be discarded if used with -p option."
),
)
parser.add_argument(
"-i",
"--input",
default=Path("/input"),
type=Path,
help="Input directory to read configuration.",
)
parser.add_argument(
"-o",
"--output",
default=Path("/output"),
type=Path,
help="Output directory to download and put result artifacts.",
)
parser.add_argument("versions", nargs="*", default=("main",), help="")
options = parser.parse_args()
repos = init_repos(options)
format_repos(repos, options)
if __name__ == "__main__":
main()
|
BlackVersion
|
python
|
PrefectHQ__prefect
|
src/prefect/server/events/schemas/events.py
|
{
"start": 11561,
"end": 12137
}
|
class ____(PrefectBaseModel):
"""The count of events with the given filter value"""
value: str = Field(..., description="The value to use for filtering")
label: str = Field(..., description="The value to display for this count")
count: int = Field(..., description="The count of matching events")
start_time: prefect.types._datetime.DateTime = Field(
..., description="The start time of this group of events"
)
end_time: prefect.types._datetime.DateTime = Field(
..., description="The end time of this group of events"
)
|
EventCount
|
python
|
django__django
|
tests/forms_tests/tests/test_forms.py
|
{
"start": 232853,
"end": 233051
}
|
class ____(DjangoTemplates):
bound_field_class = BoundFieldWithoutColon
@override_settings(
FORM_RENDERER="forms_tests.tests.test_forms.BoundFieldOverrideRenderer"
)
|
BoundFieldOverrideRenderer
|
python
|
getsentry__sentry
|
tests/sentry/integrations/slack/webhooks/commands/test_link_team.py
|
{
"start": 5161,
"end": 8118
}
|
class ____(SlackCommandsLinkTeamTestBase):
def setUp(self) -> None:
super().setUp()
self.link_team()
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_unlink_team(self, mock_record: MagicMock) -> None:
data = self.send_slack_message(
"unlink team",
channel_name=self.channel_name,
channel_id=self.channel_id,
)
assert "Click here to unlink your team from this channel" in get_response_text(data)
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_unlink_team_as_team_admin(self, mock_record: MagicMock) -> None:
"""
Test that when a user who is a team admin attempts to unlink a team we allow it.
"""
self.login_as(self.team_admin_user)
link_user(self.team_admin_user, self.idp, slack_id=OTHER_SLACK_ID)
data = self.send_slack_message(
"unlink team",
channel_name=self.channel_name,
channel_id=self.channel_id,
)
assert "Click here to unlink your team from this channel" in get_response_text(data)
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_unlink_no_team(self, mock_record: MagicMock) -> None:
"""
Test for when a user attempts to remove a link between a Slack channel
and a Sentry team that does not exist.
"""
data = self.send_slack_message(
"unlink team",
channel_name="specific",
channel_id=OTHER_SLACK_ID,
)
assert TEAM_NOT_LINKED_MESSAGE in get_response_text(data)
assert_slo_metric(mock_record, EventLifecycleOutcome.HALTED)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_unlink_multiple_orgs(self, mock_record: MagicMock) -> None:
# Create another organization and team for this user that is linked through `self.integration`.
organization2 = self.create_organization(owner=self.user)
team2 = self.create_team(organization=organization2, members=[self.user])
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_organization_integration(
organization_id=organization2.id, integration=self.integration
)
self.link_team(team2)
data = self.send_slack_message(
"unlink team",
channel_name=self.channel_name,
channel_id=self.channel_id,
)
assert "Click here to unlink your team from this channel" in get_response_text(data)
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
|
SlackCommandsUnlinkTeamTest
|
python
|
davidhalter__jedi
|
jedi/api/completion.py
|
{
"start": 1003,
"end": 4385
}
|
class ____(ParamNameWrapper):
def get_public_name(self):
return self.string_name + '='
def _get_signature_param_names(signatures, positional_count, used_kwargs):
# Add named params
for call_sig in signatures:
for i, p in enumerate(call_sig.params):
kind = p.kind
if i < positional_count and kind == Parameter.POSITIONAL_OR_KEYWORD:
continue
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) \
and p.name not in used_kwargs:
yield ParamNameWithEquals(p._name)
def _must_be_kwarg(signatures, positional_count, used_kwargs):
if used_kwargs:
return True
must_be_kwarg = True
for signature in signatures:
for i, p in enumerate(signature.params):
kind = p.kind
if kind is Parameter.VAR_POSITIONAL:
# In case there were not already kwargs, the next param can
# always be a normal argument.
return False
if i >= positional_count and kind in (Parameter.POSITIONAL_OR_KEYWORD,
Parameter.POSITIONAL_ONLY):
must_be_kwarg = False
break
if not must_be_kwarg:
break
return must_be_kwarg
def filter_names(inference_state, completion_names, stack, like_name, fuzzy,
imported_names, cached_name):
comp_dct = set()
if settings.case_insensitive_completion:
like_name = like_name.lower()
for name in completion_names:
string = name.string_name
if string in imported_names and string != like_name:
continue
if settings.case_insensitive_completion:
string = string.lower()
if helpers.match(string, like_name, fuzzy=fuzzy):
new = classes.Completion(
inference_state,
name,
stack,
len(like_name),
is_fuzzy=fuzzy,
cached_name=cached_name,
)
k = (new.name, new.complete) # key
if k not in comp_dct:
comp_dct.add(k)
tree_name = name.tree_name
if tree_name is not None:
definition = tree_name.get_definition()
if definition is not None and definition.type == 'del_stmt':
continue
yield new
def _remove_duplicates(completions, other_completions):
names = {d.name for d in other_completions}
return [c for c in completions if c.name not in names]
def get_user_context(module_context, position):
"""
Returns the scope in which the user resides. This includes flows.
"""
leaf = module_context.tree_node.get_leaf_for_position(position, include_prefixes=True)
return module_context.create_context(leaf)
def get_flow_scope_node(module_node, position):
node = module_node.get_leaf_for_position(position, include_prefixes=True)
while not isinstance(node, (tree.Scope, tree.Flow)):
node = node.parent
return node
@plugin_manager.decorate()
def complete_param_names(context, function_name, decorator_nodes):
# Basically there's no way to do param completion. The plugins are
# responsible for this.
return []
|
ParamNameWithEquals
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/host_profiles.py
|
{
"start": 7536,
"end": 13858
}
|
class ____[THostConfig: HostConfig](HostProfile[THostConfig], DebuggerProfile, metaclass=abc.ABCMeta):
"""Base class for profiles remote debugging."""
__DEBUGGING_PORT_KEY = 'debugging_port'
__DEBUGGING_FORWARDER_KEY = 'debugging_forwarder'
@property
def debugger(self) -> DebuggerSettings | None:
"""The debugger settings for this host if present and enabled, otherwise None."""
return self.args.metadata.debugger_settings
@property
def debugging_enabled(self) -> bool:
"""Returns `True` if debugging is enabled for this profile, otherwise `False`."""
if self.controller:
return self.args.metadata.debugger_flags.enable
return self.args.metadata.debugger_flags.ansiballz
@property
def debugger_host(self) -> str:
"""The debugger host to use."""
return 'localhost'
@property
def debugger_port(self) -> int:
"""The debugger port to use."""
return self.state.get(self.__DEBUGGING_PORT_KEY) or self.origin_debugger_port
@property
def debugging_forwarder(self) -> SshProcess | None:
"""The SSH forwarding process, if enabled."""
return self.cache.get(self.__DEBUGGING_FORWARDER_KEY)
@debugging_forwarder.setter
def debugging_forwarder(self, value: SshProcess) -> None:
"""The SSH forwarding process, if enabled."""
self.cache[self.__DEBUGGING_FORWARDER_KEY] = value
@property
def origin_debugger_port(self) -> int:
"""The debugger port on the origin."""
return self.debugger.port
def enable_debugger_forwarding(self, ssh: SshConnectionDetail) -> None:
"""Enable debugger port forwarding from the origin."""
if not self.debugging_enabled:
return
endpoint = ('localhost', self.origin_debugger_port)
forwards = [endpoint]
self.debugging_forwarder = create_ssh_port_forwards(self.args, ssh, forwards)
port_forwards = self.debugging_forwarder.collect_port_forwards()
self.state[self.__DEBUGGING_PORT_KEY] = port = port_forwards[endpoint]
display.info(f'Remote debugging of {self.name!r} is available on port {port}.', verbosity=1)
def deprovision(self) -> None:
"""Deprovision the host after delegation has completed."""
super().deprovision()
if not self.debugging_forwarder:
return # forwarding not in use
self.debugging_forwarder.terminate()
display.info(f'Waiting for the {self.name!r} remote debugging SSH port forwarding process to terminate.', verbosity=1)
self.debugging_forwarder.wait()
def get_source_mapping(self) -> dict[str, str]:
"""Get the source mapping from the given metadata."""
from . import data_context
if collection := data_context().content.collection:
source_mapping = {
f"{self.args.metadata.ansible_test_root}/": f'{ANSIBLE_TEST_ROOT}/',
f"{self.args.metadata.ansible_lib_root}/": f'{ANSIBLE_LIB_ROOT}/',
f'{self.args.metadata.collection_root}/ansible_collections/': f'{collection.root}/ansible_collections/',
}
else:
ansible_source_root = pathlib.Path(self.args.metadata.ansible_lib_root).parent.parent
source_mapping = {
f"{ansible_source_root}/": f'{ANSIBLE_SOURCE_ROOT}/',
}
source_mapping = {key: value for key, value in source_mapping.items() if key != value}
return source_mapping
def activate_debugger(self) -> None:
"""Activate the debugger after delegation."""
if not self.args.metadata.loaded or not self.args.metadata.debugger_flags.self:
return
display.info('Activating remote debugging of ansible-test.', verbosity=1)
os.environ.update(self.debugger.get_environment_variables(self))
self.debugger.activate_debugger(self)
pass # pylint: disable=unnecessary-pass # when suspend is True, execution pauses here -- it's also a convenient place to put a breakpoint
def get_ansiballz_inventory_variables(self) -> dict[str, t.Any]:
"""
Return inventory variables for remote debugging of AnsiballZ modules.
When delegating, this function must be called after delegation.
"""
if not self.args.metadata.debugger_flags.ansiballz:
return {}
debug_type = self.debugger.get_debug_type()
return {
f"_ansible_ansiballz_{debug_type}_config": json.dumps(self.get_ansiballz_debugger_config()),
}
def get_ansiballz_environment_variables(self) -> dict[str, t.Any]:
"""
Return environment variables for remote debugging of AnsiballZ modules.
When delegating, this function must be called after delegation.
"""
if not self.args.metadata.debugger_flags.ansiballz:
return {}
debug_type = self.debugger.get_debug_type().upper()
return {
f"_ANSIBLE_ANSIBALLZ_{debug_type}_CONFIG": json.dumps(self.get_ansiballz_debugger_config()),
}
def get_ansiballz_debugger_config(self) -> dict[str, t.Any]:
"""
Return config for remote debugging of AnsiballZ modules.
When delegating, this function must be called after delegation.
"""
debugger_config = self.debugger.get_ansiballz_config(self)
display.info(f'>>> Debugger Config ({self.name} AnsiballZ)\n{json.dumps(debugger_config, indent=4)}', verbosity=3)
return debugger_config
def get_ansible_cli_environment_variables(self) -> dict[str, t.Any]:
"""
Return environment variables for remote debugging of the Ansible CLI.
When delegating, this function must be called after delegation.
"""
if not self.args.metadata.debugger_flags.cli:
return {}
debugger_config = dict(
args=self.debugger.get_cli_arguments(self),
env=self.debugger.get_environment_variables(self),
)
display.info(f'>>> Debugger Config ({self.name} Ansible CLI)\n{json.dumps(debugger_config, indent=4)}', verbosity=3)
return dict(
ANSIBLE_TEST_DEBUGGER_CONFIG=json.dumps(debugger_config),
)
|
DebuggableProfile
|
python
|
gevent__gevent
|
src/greentest/3.13/test_queue.py
|
{
"start": 22138,
"end": 22224
}
|
class ____(QueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
|
PyQueueTest
|
python
|
Pylons__pyramid
|
tests/test_view.py
|
{
"start": 16218,
"end": 25523
}
|
class ____(unittest.TestCase):
def setUp(self):
testing.setUp()
def tearDown(self):
testing.tearDown()
def _getTargetClass(self):
from pyramid.view import view_config
return view_config
def _makeOne(self, *arg, **kw):
return self._getTargetClass()(*arg, **kw)
def test_create_defaults(self):
decorator = self._makeOne()
self.assertEqual(list(decorator.__dict__.keys()), ['_info'])
def test_create_info(self):
target = self._getTargetClass()
decorator = target()
info = decorator._info
self.assertEqual(info[2], 'test_create_info')
self.assertEqual(info[3], 'decorator = target()')
def test_create_info_depth(self):
target = self._getTargetClass()
def make():
return target(_depth=1)
decorator = make()
info = decorator._info
self.assertEqual(info[2], 'test_create_info_depth')
self.assertEqual(info[3], 'decorator = make()')
def test_create_context_trumps_for(self):
decorator = self._makeOne(context='123', for_='456')
self.assertEqual(decorator.context, '123')
def test_create_for_trumps_context_None(self):
decorator = self._makeOne(context=None, for_='456')
self.assertEqual(decorator.context, '456')
def test_create_nondefaults(self):
decorator = self._makeOne(
name=None,
request_type=None,
for_=None,
permission='foo',
mapper='mapper',
decorator='decorator',
match_param='match_param',
)
self.assertEqual(decorator.name, None)
self.assertEqual(decorator.request_type, None)
self.assertEqual(decorator.context, None)
self.assertEqual(decorator.permission, 'foo')
self.assertEqual(decorator.mapper, 'mapper')
self.assertEqual(decorator.decorator, 'decorator')
self.assertEqual(decorator.match_param, 'match_param')
def test_create_with_other_predicates(self):
decorator = self._makeOne(foo=1)
self.assertEqual(decorator.foo, 1)
def test_create_decorator_tuple(self):
decorator = self._makeOne(decorator=('decorator1', 'decorator2'))
self.assertEqual(decorator.decorator, ('decorator1', 'decorator2'))
def test_call_function(self):
decorator = self._makeOne()
venusian = DummyVenusian()
decorator.venusian = venusian
def foo(): # pragma: no cover
pass
wrapped = decorator(foo)
self.assertTrue(wrapped is foo)
config = call_venusian(venusian)
settings = config.settings
self.assertEqual(len(settings), 1)
self.assertEqual(len(settings), 1)
self.assertEqual(len(settings[0]), 3)
self.assertEqual(settings[0]['venusian'], venusian)
self.assertEqual(settings[0]['view'], None) # comes from call_venusian
def test_call_class(self):
decorator = self._makeOne()
venusian = DummyVenusian()
decorator.venusian = venusian
decorator.venusian.info.scope = 'class'
class foo:
pass
wrapped = decorator(foo)
self.assertTrue(wrapped is foo)
config = call_venusian(venusian)
settings = config.settings
self.assertEqual(len(settings), 1)
self.assertEqual(len(settings[0]), 4)
self.assertEqual(settings[0]['venusian'], venusian)
self.assertEqual(settings[0]['view'], None) # comes from call_venusian
self.assertEqual(settings[0]['attr'], 'foo')
def test_call_class_attr_already_set(self):
decorator = self._makeOne(attr='abc')
venusian = DummyVenusian()
decorator.venusian = venusian
decorator.venusian.info.scope = 'class'
class foo:
pass
wrapped = decorator(foo)
self.assertTrue(wrapped is foo)
config = call_venusian(venusian)
settings = config.settings
self.assertEqual(len(settings), 1)
self.assertEqual(len(settings[0]), 4)
self.assertEqual(settings[0]['venusian'], venusian)
self.assertEqual(settings[0]['view'], None) # comes from call_venusian
self.assertEqual(settings[0]['attr'], 'abc')
def test_stacking(self):
decorator1 = self._makeOne(name='1')
venusian1 = DummyVenusian()
decorator1.venusian = venusian1
venusian2 = DummyVenusian()
decorator2 = self._makeOne(name='2')
decorator2.venusian = venusian2
def foo(): # pragma: no cover
pass
wrapped1 = decorator1(foo)
wrapped2 = decorator2(wrapped1)
self.assertTrue(wrapped1 is foo)
self.assertTrue(wrapped2 is foo)
config1 = call_venusian(venusian1)
self.assertEqual(len(config1.settings), 1)
self.assertEqual(config1.settings[0]['name'], '1')
config2 = call_venusian(venusian2)
self.assertEqual(len(config2.settings), 1)
self.assertEqual(config2.settings[0]['name'], '2')
def test_call_as_method(self):
decorator = self._makeOne()
venusian = DummyVenusian()
decorator.venusian = venusian
decorator.venusian.info.scope = 'class'
def foo(self): # pragma: no cover
pass
def bar(self): # pragma: no cover
pass
class foo:
foomethod = decorator(foo)
barmethod = decorator(bar)
config = call_venusian(venusian)
settings = config.settings
self.assertEqual(len(settings), 2)
self.assertEqual(settings[0]['attr'], 'foo')
self.assertEqual(settings[1]['attr'], 'bar')
def test_with_custom_predicates(self):
decorator = self._makeOne(custom_predicates=(1,))
venusian = DummyVenusian()
decorator.venusian = venusian
def foo(context, request): # pragma: no cover
pass
decorated = decorator(foo)
self.assertTrue(decorated is foo)
config = call_venusian(venusian)
settings = config.settings
self.assertEqual(settings[0]['custom_predicates'], (1,))
def test_call_with_renderer_string(self):
import tests
decorator = self._makeOne(renderer='fixtures/minimal.pt')
venusian = DummyVenusian()
decorator.venusian = venusian
def foo(): # pragma: no cover
pass
wrapped = decorator(foo)
self.assertTrue(wrapped is foo)
config = call_venusian(venusian)
settings = config.settings
self.assertEqual(len(settings), 1)
renderer = settings[0]['renderer']
self.assertEqual(renderer, 'fixtures/minimal.pt')
self.assertEqual(config.pkg, tests)
def test_call_with_renderer_dict(self):
import tests
decorator = self._makeOne(renderer={'a': 1})
venusian = DummyVenusian()
decorator.venusian = venusian
def foo(): # pragma: no cover
pass
wrapped = decorator(foo)
self.assertTrue(wrapped is foo)
config = call_venusian(venusian)
settings = config.settings
self.assertEqual(len(settings), 1)
self.assertEqual(settings[0]['renderer'], {'a': 1})
self.assertEqual(config.pkg, tests)
def test_call_with_renderer_IRendererInfo(self):
from pyramid.interfaces import IRendererInfo
import tests
@implementer(IRendererInfo)
class DummyRendererHelper:
pass
renderer_helper = DummyRendererHelper()
decorator = self._makeOne(renderer=renderer_helper)
venusian = DummyVenusian()
decorator.venusian = venusian
def foo(): # pragma: no cover
pass
wrapped = decorator(foo)
self.assertTrue(wrapped is foo)
context = DummyVenusianContext()
config = call_venusian(venusian, context)
settings = config.settings
self.assertEqual(len(settings), 1)
renderer = settings[0]['renderer']
self.assertTrue(renderer is renderer_helper)
self.assertEqual(config.pkg, tests)
def test_call_withdepth(self):
decorator = self._makeOne(_depth=1)
venusian = DummyVenusian()
decorator.venusian = venusian
def foo(): # pragma: no cover
pass
decorator(foo)
attachments = venusian.attachments
depth = attachments[0][3]
self.assertEqual(depth, 2)
def test_call_withoutcategory(self):
decorator = self._makeOne()
venusian = DummyVenusian()
decorator.venusian = venusian
def foo(): # pragma: no cover
pass
decorator(foo)
attachments = venusian.attachments
category = attachments[0][2]
self.assertEqual(category, 'pyramid')
def test_call_withcategory(self):
decorator = self._makeOne(_category='not_pyramid')
venusian = DummyVenusian()
decorator.venusian = venusian
def foo(): # pragma: no cover
pass
decorator(foo)
attachments = venusian.attachments
category = attachments[0][2]
self.assertEqual(category, 'not_pyramid')
|
TestViewConfigDecorator
|
python
|
davidhalter__jedi
|
jedi/inference/value/iterable.py
|
{
"start": 8521,
"end": 8597
}
|
class ____(_BaseComprehension, GeneratorBase):
pass
|
GeneratorComprehension
|
python
|
numba__numba
|
numba/core/codegen.py
|
{
"start": 2393,
"end": 22372
}
|
class ____(object):
"""
Wraps the CFG graph for different display method.
Instance of the class can be stringified (``__repr__`` is defined) to get
the graph in DOT format. The ``.display()`` method plots the graph in
PDF. If in IPython notebook, the returned image can be inlined.
"""
def __init__(self, cres, name, py_func, **kwargs):
self.cres = cres
self.name = name
self.py_func = py_func
fn = cres.get_function(name)
self.dot = ll.get_function_cfg(fn)
self.kwargs = kwargs
def pretty_printer(self, filename=None, view=None, render_format=None,
highlight=True,
interleave=False, strip_ir=False, show_key=True,
fontsize=10):
"""
"Pretty" prints the DOT graph of the CFG.
For explanation of the parameters see the docstring for
numba.core.dispatcher::inspect_cfg.
"""
import graphviz as gv
import re
import json
import inspect
from llvmlite import binding as ll
from numba.typed import List
from types import SimpleNamespace
from collections import defaultdict
_default = False
_highlight = SimpleNamespace(incref=_default,
decref=_default,
returns=_default,
raises=_default,
meminfo=_default,
branches=_default,
llvm_intrin_calls=_default,
function_calls=_default,)
_interleave = SimpleNamespace(python=_default, lineinfo=_default)
def parse_config(_config, kwarg):
""" Parses the kwarg into a consistent format for use in configuring
the Digraph rendering. _config is the configuration instance to
update, kwarg is the kwarg on which to base the updates.
"""
if isinstance(kwarg, bool):
for attr in _config.__dict__:
setattr(_config, attr, kwarg)
elif isinstance(kwarg, dict):
for k, v in kwarg.items():
if k not in _config.__dict__:
raise ValueError("Unexpected key in kwarg: %s" % k)
if isinstance(v, bool):
setattr(_config, k, v)
else:
msg = "Unexpected value for key: %s, got:%s"
raise ValueError(msg % (k, v))
elif isinstance(kwarg, set):
for item in kwarg:
if item not in _config.__dict__:
raise ValueError("Unexpected key in kwarg: %s" % item)
else:
setattr(_config, item, True)
else:
msg = "Unhandled configuration type for kwarg %s"
raise ValueError(msg % type(kwarg))
parse_config(_highlight, highlight)
parse_config(_interleave, interleave)
# This is the colour scheme. The graphviz HTML label renderer only takes
# names for colours: https://www.graphviz.org/doc/info/shapes.html#html
cs = defaultdict(lambda: 'white') # default bg colour is white
cs['marker'] = 'orange'
cs['python'] = 'yellow'
cs['truebr'] = 'green'
cs['falsebr'] = 'red'
cs['incref'] = 'cyan'
cs['decref'] = 'turquoise'
cs['raise'] = 'lightpink'
cs['meminfo'] = 'lightseagreen'
cs['return'] = 'purple'
cs['llvm_intrin_calls'] = 'rosybrown'
cs['function_calls'] = 'tomato'
# Get the raw dot format information from LLVM and the LLVM IR
fn = self.cres.get_function(self.name)
#raw_dot = ll.get_function_cfg(fn).replace('\\l...', '')
llvm_str = self.cres.get_llvm_str()
def get_metadata(llvm_str):
""" Gets the metadata entries from the LLVM IR, these look something
like '!123 = INFORMATION'. Returns a map of metadata key to metadata
value, i.e. from the example {'!123': INFORMATION}"""
md = {}
metadata_entry = re.compile(r'(^[!][0-9]+)(\s+=\s+.*)')
for x in llvm_str.splitlines():
match = metadata_entry.match(x)
if match is not None:
g = match.groups()
if g is not None:
assert len(g) == 2
md[g[0]] = g[1]
return md
md = get_metadata(llvm_str)
# setup digraph with initial properties
def init_digraph(name, fname, fontsize):
# name and fname are arbitrary graph and file names, they appear in
# some rendering formats, the fontsize determines the output
# fontsize.
# truncate massive mangled names as file names as it causes OSError
# when trying to render to pdf
cmax = 200
if len(fname) > cmax:
wstr = (f'CFG output filename "{fname}" exceeds maximum '
f'supported length, it will be truncated.')
warnings.warn(wstr, NumbaInvalidConfigWarning)
fname = fname[:cmax]
f = gv.Digraph(name, filename=fname)
f.attr(rankdir='TB')
f.attr('node', shape='none', fontsize='%s' % str(fontsize))
return f
f = init_digraph(self.name, self.name, fontsize)
# A lot of regex is needed to parse the raw dot output. This output
# contains a mix of LLVM IR in the labels, and also DOT markup.
# DOT syntax, matches a "port" (where the tail of an edge starts)
port_match = re.compile('.*{(.*)}.*')
# DOT syntax, matches the "port" value from a found "port_match"
port_jmp_match = re.compile('.*<(.*)>(.*)')
# LLVM syntax, matches a LLVM debug marker
metadata_marker = re.compile(r'.*!dbg\s+(![0-9]+).*')
# LLVM syntax, matches a location entry
location_expr = (r'.*!DILocation\(line:\s+([0-9]+),'
r'\s+column:\s+([0-9]),.*')
location_entry = re.compile(location_expr)
# LLVM syntax, matches LLVMs internal debug value calls
dbg_value = re.compile(r'.*call void @llvm.dbg.value.*')
# LLVM syntax, matches tokens for highlighting
nrt_incref = re.compile(r"@NRT_incref\b")
nrt_decref = re.compile(r"@NRT_decref\b")
nrt_meminfo = re.compile("@NRT_MemInfo")
ll_intrin_calls = re.compile(r".*call.*@llvm\..*")
ll_function_call = re.compile(r".*call.*@.*")
ll_raise = re.compile(r"store .*\!numba_exception_output.*")
ll_return = re.compile("ret i32 [^1],?.*")
# wrapper function for line wrapping LLVM lines
def wrap(s):
return textwrap.wrap(s, width=120, subsequent_indent='... ')
# function to fix (sometimes escaped for DOT!) LLVM IR etc that needs to
# be HTML escaped
def clean(s):
# Grab first 300 chars only, 1. this should be enough to identify
# the token and it keeps names short. 2. graphviz/dot has a maximum
# buffer size near 585?!, with additional transforms it's hard to
# know if this would be exceeded. 3. hash of the token string is
# written into the rendering to permit exact identification against
# e.g. LLVM IR dump if necessary.
n = 300
if len(s) > n:
hs = str(hash(s))
s = '{}...<hash={}>'.format(s[:n], hs)
s = html.escape(s) # deals with &, < and >
s = s.replace('\\{', "{")
s = s.replace('\\}', "}")
s = s.replace('\\', "\")
s = s.replace('%', "%")
s = s.replace('!', "!")
return s
# These hold the node and edge ids from the raw dot information. They
# are used later to wire up a new DiGraph that has the same structure
# as the raw dot but with new nodes.
node_ids = {}
edge_ids = {}
# Python source lines, used if python source interleave is requested
if _interleave.python:
src_code, firstlineno = inspect.getsourcelines(self.py_func)
# This is the dot info from LLVM, it's in DOT form and has continuation
# lines, strip them and then re-parse into `dot_json` form for use in
# producing a formatted output.
raw_dot = ll.get_function_cfg(fn).replace('\\l...', '')
json_bytes = gv.Source(raw_dot).pipe(format='dot_json')
jzon = json.loads(json_bytes.decode('utf-8'))
idc = 0
# Walk the "objects" (nodes) in the DOT output
for obj in jzon['objects']:
# These are used to keep tabs on the current line and column numbers
# as per the markers. They are tracked so as to make sure a marker
# is only emitted if there's a change in the marker.
cur_line, cur_col = -1, -1
label = obj['label']
name = obj['name']
gvid = obj['_gvid']
node_ids[gvid] = name
# Label is DOT format, it needs the head and tail removing and then
# splitting for walking.
label = label[1:-1]
lines = label.split('\\l')
# Holds the new lines
new_lines = []
# Aim is to produce an HTML table a bit like this:
#
# |------------|
# | HEADER | <-- this is the block header
# |------------|
# | LLVM SRC | <--
# | Marker? | < this is the label/block body
# | Python src?| <--
# |------------|
# | T | F | <-- this is the "ports", also determines col_span
# --------------
#
# This is HTML syntax, its the column span. If there's a switch or a
# branch at the bottom of the node this is rendered as multiple
# columns in a table. First job is to go and render that and work
# out how many columns are needed as that dictates how many columns
# the rest of the source lines must span. In DOT syntax the places
# that edges join nodes are referred to as "ports". Syntax in DOT
# is like `node:port`.
col_span = 1
# First see if there is a port entry for this node
port_line = ''
matched = port_match.match(lines[-1])
sliced_lines = lines
if matched is not None:
# There is a port
ports = matched.groups()[0]
ports_tokens = ports.split('|')
col_span = len(ports_tokens)
# Generate HTML table data cells, one for each port. If the
# ports correspond to a branch then they can optionally
# highlighted based on T/F.
tdfmt = ('<td BGCOLOR="{}" BORDER="1" ALIGN="center" '
'PORT="{}">{}</td>')
tbl_data = []
if _highlight.branches:
colors = {'T': cs['truebr'], 'F': cs['falsebr']}
else:
colors = {}
for tok in ports_tokens:
target, value = port_jmp_match.match(tok).groups()
color = colors.get(value, 'white')
tbl_data.append(tdfmt.format(color, target, value))
port_line = ''.join(tbl_data)
# Drop the last line from the rest of the parse as it's the port
# and just been dealt with.
sliced_lines = lines[:-1]
# loop peel the block header, it needs a HTML border
fmtheader = ('<tr><td BGCOLOR="{}" BORDER="1" ALIGN="left" '
'COLSPAN="{}">{}</td></tr>')
new_lines.append(fmtheader.format(cs['default'], col_span,
clean(sliced_lines[0].strip())))
# process rest of block creating the table row at a time.
fmt = ('<tr><td BGCOLOR="{}" BORDER="0" ALIGN="left" '
'COLSPAN="{}">{}</td></tr>')
def metadata_interleave(l, new_lines):
"""
Search line `l` for metadata associated with python or line info
and inject it into `new_lines` if requested.
"""
matched = metadata_marker.match(l)
if matched is not None:
# there's a metadata marker
g = matched.groups()
if g is not None:
assert len(g) == 1, g
marker = g[0]
debug_data = md.get(marker, None)
if debug_data is not None:
# and the metadata marker has a corresponding piece
# of metadata
ld = location_entry.match(debug_data)
if ld is not None:
# and the metadata is line info... proceed
assert len(ld.groups()) == 2, ld
line, col = ld.groups()
# only emit a new marker if the line number in
# the metadata is "new".
if line != cur_line or col != cur_col:
if _interleave.lineinfo:
mfmt = 'Marker %s, Line %s, column %s'
mark_line = mfmt % (marker, line, col)
ln = fmt.format(cs['marker'], col_span,
clean(mark_line))
new_lines.append(ln)
if _interleave.python:
# TODO:
# +1 for decorator, this probably needs
# the same thing doing as for the
# error messages where the decorator
# is scanned for, its not always +1!
lidx = int(line) - (firstlineno + 1)
source_line = src_code[lidx + 1]
ln = fmt.format(cs['python'], col_span,
clean(source_line))
new_lines.append(ln)
return line, col
for l in sliced_lines[1:]:
# Drop LLVM debug call entries
if dbg_value.match(l):
continue
# if requested generate interleaving of markers or python from
# metadata
if _interleave.lineinfo or _interleave.python:
updated_lineinfo = metadata_interleave(l, new_lines)
if updated_lineinfo is not None:
cur_line, cur_col = updated_lineinfo
# Highlight other LLVM features if requested, HTML BGCOLOR
# property is set by this.
if _highlight.incref and nrt_incref.search(l):
colour = cs['incref']
elif _highlight.decref and nrt_decref.search(l):
colour = cs['decref']
elif _highlight.meminfo and nrt_meminfo.search(l):
colour = cs['meminfo']
elif _highlight.raises and ll_raise.search(l):
# search for raise as its more specific than exit
colour = cs['raise']
elif _highlight.returns and ll_return.search(l):
colour = cs['return']
elif _highlight.llvm_intrin_calls and ll_intrin_calls.search(l):
colour = cs['llvm_intrin_calls']
elif _highlight.function_calls and ll_function_call.search(l):
colour = cs['function_calls']
else:
colour = cs['default']
# Use the default coloring as a flag to force printing if a
# special token print was requested AND LLVM ir stripping is
# required
if colour is not cs['default'] or not strip_ir:
for x in wrap(clean(l)):
new_lines.append(fmt.format(colour, col_span, x))
# add in the port line at the end of the block if it was present
# (this was built right at the top of the parse)
if port_line:
new_lines.append('<tr>{}</tr>'.format(port_line))
# If there was data, create a table, else don't!
dat = ''.join(new_lines)
if dat:
tab = (('<table id="%s" BORDER="1" CELLBORDER="0" '
'CELLPADDING="0" CELLSPACING="0">%s</table>') % (idc,
dat))
label = '<{}>'.format(tab)
else:
label = ''
# finally, add a replacement node for the original with a new marked
# up label.
f.node(name, label=label)
# Parse the edge data
if 'edges' in jzon: # might be a single block, no edges
for edge in jzon['edges']:
gvid = edge['_gvid']
tp = edge.get('tailport', None)
edge_ids[gvid] = (edge['head'], edge['tail'], tp)
# Write in the edge wiring with respect to the new nodes:ports.
for gvid, edge in edge_ids.items():
tail = node_ids[edge[1]]
head = node_ids[edge[0]]
port = edge[2]
if port is not None:
tail += ':%s' % port
f.edge(tail, head)
# Add a key to the graph if requested.
if show_key:
key_tab = []
for k, v in cs.items():
key_tab.append(('<tr><td BGCOLOR="{}" BORDER="0" ALIGN="center"'
'>{}</td></tr>').format(v, k))
# The first < and last > are DOT syntax, rest is DOT HTML.
f.node("Key", label=('<<table BORDER="1" CELLBORDER="1" '
'CELLPADDING="2" CELLSPACING="1"><tr><td BORDER="0">'
'Key:</td></tr>{}</table>>').format(''.join(key_tab)))
# Render if required
if filename is not None or view is not None:
f.render(filename=filename, view=view, format=render_format)
# Else pipe out a SVG
return f.pipe(format='svg')
def display(self, filename=None, format='pdf', view=False):
"""
Plot the CFG. In IPython notebook, the return image object can be
inlined.
The *filename* option can be set to a specific path for the rendered
output to write to. If *view* option is True, the plot is opened by
the system default application for the image format (PDF). *format* can
be any valid format string accepted by graphviz, default is 'pdf'.
"""
rawbyt = self.pretty_printer(filename=filename, view=view,
render_format=format, **self.kwargs)
return rawbyt.decode('utf-8')
def _repr_svg_(self):
return self.pretty_printer(**self.kwargs).decode('utf-8')
def __repr__(self):
return self.dot
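# Hedged usage sketch (added for illustration, not part of the original
# module). ____ is the CFG wrapper class defined above; `cres` is assumed to
# be a compile result exposing get_function()/get_llvm_str(), `mangled_name`
# a symbol defined in it, and `py_func` the original Python function. The
# highlight/interleave values mirror the kwargs parsed by pretty_printer.
def _example_render_cfg(cres, mangled_name, py_func):
    cfg = ____(cres, mangled_name, py_func,
               highlight={'incref', 'decref', 'returns'},
               interleave={'python': True})
    # Renders my_cfg.pdf on disk and returns the SVG text of the graph.
    return cfg.display(filename='my_cfg', format='pdf', view=False)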
|
_CFG
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 63353,
"end": 63838
}
|
class ____:
xlButtonControl = 0 # from enum XlFormControl
xlCheckBox = 1 # from enum XlFormControl
xlDropDown = 2 # from enum XlFormControl
xlEditBox = 3 # from enum XlFormControl
xlGroupBox = 4 # from enum XlFormControl
xlLabel = 5 # from enum XlFormControl
xlListBox = 6 # from enum XlFormControl
xlOptionButton = 7 # from enum XlFormControl
xlScrollBar = 8 # from enum XlFormControl
xlSpinner = 9 # from enum XlFormControl
|
FormControl
|
python
|
numpy__numpy
|
numpy/f2py/tests/test_string.py
|
{
"start": 518,
"end": 979
}
|
class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "string", "string.f")]
def test_example(self):
a = np.array(b"123\0\0")
b = np.array(b"123\0\0")
c = np.array(b"123")
d = np.array(b"123")
self.module.foo(a, b, c, d)
assert a.tobytes() == b"123\0\0"
assert b.tobytes() == b"B23\0\0"
assert c.tobytes() == b"123"
assert d.tobytes() == b"D23"
|
TestDocStringArguments
|
python
|
sympy__sympy
|
sympy/polys/matrices/exceptions.py
|
{
"start": 725,
"end": 818
}
|
class ____(DMError):
"""The matrix in not invertible"""
pass
|
DMNonInvertibleMatrixError
|
python
|
realpython__materials
|
python-tic-tac-toe-game-tkinter/source_code_final/tic_tac_toe.py
|
{
"start": 163,
"end": 221
}
|
class ____(NamedTuple):
label: str
color: str
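# Hedged usage sketch (illustrative only; the labels and colors below are
# assumptions, not taken from the tutorial code shown here). ____ is the
# NamedTuple defined above.
def _example_default_players():
    return ____(label="X", color="blue"), ____(label="O", color="green")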
|
Player
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/metadata.py
|
{
"start": 3992,
"end": 4198
}
|
class ____(graphene.ObjectType):
assetKey = graphene.NonNull(GrapheneAssetKey)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "AssetMetadataEntry"
|
GrapheneAssetMetadataEntry
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs.py
|
{
"start": 3036,
"end": 5319
}
|
class ____(transformer.Base):
"""AST visitor that annotates each symbol name with its reaching definitions.
Simultaneously, the visitor runs the dataflow analysis on each function node,
accounting for the effect of closures. For example:
def foo():
def f():
pass
def g():
# `def f` reaches here
"""
def __init__(self, source_info, graphs):
super(TreeAnnotator, self).__init__(source_info)
self.graphs = graphs
self.allow_skips = False
self.current_analyzer = None
def _proces_function(self, node):
parent_analyzer = self.current_analyzer
subgraph = self.graphs[node]
if (self.current_analyzer is not None
and node in self.current_analyzer.graph.index):
cfg_node = self.current_analyzer.graph.index[node]
defined_in = self.current_analyzer.in_[cfg_node].value
else:
defined_in = ()
analyzer = Analyzer(subgraph, defined_in)
analyzer.visit_forward()
self.current_analyzer = analyzer
node = self.generic_visit(node)
self.current_analyzer = parent_analyzer
return node
def visit_FunctionDef(self, node):
return self._proces_function(node)
def visit_Lambda(self, node):
return self._proces_function(node)
def visit(self, node):
# This can happen before entering the top level function
if (self.current_analyzer is not None
and node in self.current_analyzer.graph.index):
cfg_node = self.current_analyzer.graph.index[node]
anno.setanno(node, anno.Static.DEFINED_FNS_IN,
self.current_analyzer.in_[cfg_node].value)
extra_node = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST, default=None)
if extra_node is not None:
cfg_node = self.current_analyzer.graph.index[extra_node]
anno.setanno(extra_node, anno.Static.DEFINED_FNS_IN,
self.current_analyzer.in_[cfg_node].value)
return super(TreeAnnotator, self).visit(node)
def resolve(node, source_info, graphs):
"""Resolves reaching definitions for each symbol.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
Returns:
ast.AST
"""
visitor = TreeAnnotator(source_info, graphs)
node = visitor.visit(node)
return node
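# Hedged usage sketch (illustrative, not part of this module): resolve() is
# normally driven with CFGs built per function. The pyct.cfg import path and
# its build() helper are assumptions about the sibling module, which is not
# shown here.
def _example_annotate(node, source_info):
    from tensorflow.python.autograph.pyct import cfg
    graphs = cfg.build(node)  # maps each function node to its CFG
    return resolve(node, source_info, graphs)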
|
TreeAnnotator
|
python
|
mlflow__mlflow
|
mlflow/gateway/config.py
|
{
"start": 15819,
"end": 17404
}
|
class ____(AliasedConfigModel):
endpoints: list[EndpointConfig]
routes: list[TrafficRouteConfig] | None = None
def _load_gateway_config(path: str | Path) -> GatewayConfig:
"""
Reads the gateway configuration yaml file from the storage location and returns an instance
of the configuration RouteConfig class
"""
if isinstance(path, str):
path = Path(path)
try:
configuration = yaml.safe_load(path.read_text())
except Exception as e:
raise MlflowException.invalid_parameter_value(
f"The file at {path} is not a valid yaml file"
) from e
check_configuration_deprecated_fields(configuration)
check_configuration_route_name_collisions(configuration)
try:
return GatewayConfig(**configuration)
except ValidationError as e:
raise MlflowException.invalid_parameter_value(
f"The gateway configuration is invalid: {e}"
) from e
def _save_route_config(config: GatewayConfig, path: str | Path) -> None:
if isinstance(path, str):
path = Path(path)
path.write_text(
yaml.safe_dump(json.loads(json.dumps(config.model_dump(), default=pydantic_encoder)))
)
def _validate_config(config_path: str) -> GatewayConfig:
if not os.path.exists(config_path):
raise MlflowException.invalid_parameter_value(f"{config_path} does not exist")
try:
return _load_gateway_config(config_path)
except Exception as e:
raise MlflowException.invalid_parameter_value(f"Invalid gateway configuration: {e}") from e
|
GatewayConfig
|
python
|
pytorch__pytorch
|
test/distributed/_composable/fsdp/test_fully_shard_frozen.py
|
{
"start": 825,
"end": 11095
}
|
class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(4, torch.get_device_module(device_type).device_count())
@skip_if_lt_x_gpu(2)
def test_train_mixed_requires_grad_per_group(self):
"""
Tests training parity with DDP when mixing frozen and non-frozen
parameters in the same FSDP communication group. This checks that
the reduce-scatters reduce the expected numel and that they are called
via the custom autograd function backward (i.e. that they are not
delayed until the end of backward).
"""
self.run_subtests(
{
"reshard_after_forward": [False, True, 2],
"use_activation_checkpointing": [False, True],
"freeze_after_init": [False, True],
},
self._test_train_mixed_requires_grad_per_group,
)
def _test_train_mixed_requires_grad_per_group(
self,
reshard_after_forward: Union[bool, int],
use_activation_checkpointing: bool,
freeze_after_init: bool,
):
torch.manual_seed(42)
num_mlps, lin_dim = (3, 32)
model = nn.Sequential(
*[MLP(lin_dim, torch.device("cpu")) for _ in range(num_mlps)]
)
# Train biases only (e.g. like BitFit)
if not freeze_after_init:
for param_name, param in model.named_parameters():
if "bias" not in param_name:
param.requires_grad_(False)
ref_model = replicate(
copy.deepcopy(model).to(device_type),
device_ids=[self.rank],
find_unused_parameters=freeze_after_init,
)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
for mlp in model:
if use_activation_checkpointing:
checkpoint(mlp)
fully_shard(mlp, reshard_after_forward=reshard_after_forward)
fully_shard(model, reshard_after_forward=reshard_after_forward)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
orig_reduce_scatter = dist.reduce_scatter_tensor
if freeze_after_init:
for param_name, param in itertools.chain(
model.named_parameters(), ref_model.named_parameters()
):
if "bias" not in param_name:
param.requires_grad_(False)
for mlp in model:
assert isinstance(mlp, MLP), (
"The reduce-scatter numel check assumes the model consists of "
f"only the same MLP class but got {type(mlp)}"
)
expected_numel = sum(
p._local_tensor.numel()
for n, p in model[0].named_parameters()
if "bias" in n
)
def assert_fn(output: torch.Tensor):
self.assertEqual(output.numel(), expected_numel)
reduce_scatter = functools.partial(
reduce_scatter_with_assert, self, orig_reduce_scatter, assert_fn
)
orig_backward = RegisterPostBackwardFunction.backward
backward_count = 0
def backward_with_count(*args, **kwargs):
nonlocal backward_count
backward_count += 1
return orig_backward(*args, **kwargs)
torch.manual_seed(42 + self.rank + 1)
device = device_type
with (
patch_reduce_scatter(reduce_scatter),
patch_register_post_backward_hook_backward(backward_with_count),
):
for iter_idx in range(10):
inp = torch.randn((8, lin_dim), device=device)
losses: list[torch.Tensor] = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
losses.append(_model(inp).sum())
losses[-1].backward()
_optim.step()
check_sharded_parity(self, ref_model, model)
self.assertEqual(losses[0], losses[1])
# Check that the post-backward hooks ran through the autograd
# backward, not the final callback (except possibly that of the
# first MLP, which does not have an input that requires grad)
self.assertTrue(backward_count >= num_mlps - 1)
@skip_if_lt_x_gpu(2)
def test_train_mixed_requires_grad_across_groups(self):
"""
Tests training parity with DDP when mixing frozen and non-frozen
parameters across different FSDP communication groups, including
possibly unfreezing parameters.
"""
self.run_subtests(
{
"reshard_after_forward": [False, True, 2],
"unfreeze_params": [False, True],
},
self._test_train_mixed_requires_grad_across_groups,
)
def _test_train_mixed_requires_grad_across_groups(
self,
reshard_after_forward: Union[bool, int],
unfreeze_params: bool,
):
torch.manual_seed(42)
num_linears, lin_dim = (6, 32)
modules: list[nn.Module] = []
for _ in range(num_linears):
modules += [nn.Linear(lin_dim, lin_dim), nn.ReLU()]
model = nn.Sequential(*modules)
ref_model = replicate(
copy.deepcopy(model).to(device_type),
device_ids=[self.rank],
find_unused_parameters=True,
)
for module in model.modules():
if isinstance(module, nn.Linear):
fully_shard(module, reshard_after_forward=reshard_after_forward)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
orig_backward = RegisterPostBackwardFunction.backward
backward_count = 0
def _set_requires_grad(seq: nn.Module, requires_grad: bool):
for i in range(num_linears):
# Interleave frozen -> non-frozen -> ... linears
if i % 2 == 0:
for param in seq[i % 2].parameters():
param.requires_grad_(requires_grad)
def backward_with_count(*args, **kwargs):
nonlocal backward_count
backward_count += 1
return orig_backward(*args, **kwargs)
_set_requires_grad(model, False)
_set_requires_grad(ref_model, False)
num_iters, no_grad_iter_idx = (3, 1)
torch.manual_seed(42 + self.rank)
inp = torch.randn((8, lin_dim), device=device_type)
with patch_register_post_backward_hook_backward(backward_with_count):
for iter_idx in range(num_iters):
losses: list[torch.Tensor] = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
# Unfreeze the parameters on the last step to emulate some
# kinds of fine-tuning
if unfreeze_params and iter_idx == num_iters - 1:
_set_requires_grad(model, True)
if iter_idx == no_grad_iter_idx:
with torch.no_grad():
losses.append(_model(inp).sum())
else:
losses.append(_model(inp).sum())
losses[-1].backward()
_optim.step()
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
self.assertEqual(losses[0], losses[1])
# Check that the post-backward hooks ran through the autograd
# backward, not the final callback (except possibly that of the
# first linear, which does not have an input that requires grad)
self.assertTrue(backward_count >= num_linears - 1)
@skip_if_lt_x_gpu(2)
def test_multi_forward_mixed_requires_grad(self):
"""
Tests training parity with DDP when having trainable and frozen modules
that participate multiple times in forward.
"""
self.run_subtests(
{"reshard_after_forward": [True, False, 2]},
self._test_multi_forward_mixed_requires_grad,
)
def _test_multi_forward_mixed_requires_grad(
self,
reshard_after_forward: Union[bool, int],
):
class MultiForwardModule(nn.Module):
def __init__(self, device: torch.device):
super().__init__()
self.layer_0 = nn.Linear(5, 5, device=device)
self.layer_no_grad = nn.Linear(5, 5, device=device)
self.layer_with_grad = nn.Linear(5, 5, device=device)
self.layer_no_grad.requires_grad_(False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.layer_0(x)
for _ in range(3):
x = self.layer_no_grad(F.relu(self.layer_with_grad(x)))
# Make sure that calling the same layer multiple times
# works regardless whether gradient is enabled
with torch.no_grad():
x += F.relu(self.layer_with_grad(x))
return x
torch.manual_seed(42)
model = MultiForwardModule(torch.device("cpu"))
ref_model = replicate(
copy.deepcopy(model).to(device_type), device_ids=[self.rank]
)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
for module in model.modules():
if isinstance(module, nn.Linear):
fully_shard(module, reshard_after_forward=reshard_after_forward)
fully_shard(model, reshard_after_forward=reshard_after_forward)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
for iter_idx in range(10):
inp = torch.randn((8, 5), device=device_type)
losses: list[torch.Tensor] = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
losses.append(_model(inp).sum())
losses[-1].backward()
_optim.step()
self.assertEqual(losses[0], losses[1])
if __name__ == "__main__":
run_tests()
|
TestFullyShardFrozen
|
python
|
huggingface__transformers
|
tests/models/dac/test_modeling_dac.py
|
{
"start": 1340,
"end": 4292
}
|
class ____:
# Ignore copy
def __init__(
self,
parent,
batch_size=3,
num_channels=1,
is_training=False,
intermediate_size=1024,
encoder_hidden_size=16,
downsampling_ratios=[2, 4, 4],
decoder_hidden_size=16,
n_codebooks=6,
codebook_size=512,
codebook_dim=4,
quantizer_dropout=0.0,
commitment_loss_weight=0.25,
codebook_loss_weight=1.0,
sample_rate=16000,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.is_training = is_training
self.intermediate_size = intermediate_size
self.sample_rate = sample_rate
self.encoder_hidden_size = encoder_hidden_size
self.downsampling_ratios = downsampling_ratios
self.decoder_hidden_size = decoder_hidden_size
self.n_codebooks = n_codebooks
self.codebook_size = codebook_size
self.codebook_dim = codebook_dim
self.quantizer_dropout = quantizer_dropout
self.commitment_loss_weight = commitment_loss_weight
self.codebook_loss_weight = codebook_loss_weight
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0)
config = self.get_config()
inputs_dict = {"input_values": input_values}
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def prepare_config_and_inputs_for_model_class(self, model_class):
input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0)
config = self.get_config()
inputs_dict = {"input_values": input_values}
return config, inputs_dict
# Ignore copy
def get_config(self):
return DacConfig(
encoder_hidden_size=self.encoder_hidden_size,
downsampling_ratios=self.downsampling_ratios,
decoder_hidden_size=self.decoder_hidden_size,
n_codebooks=self.n_codebooks,
codebook_size=self.codebook_size,
codebook_dim=self.codebook_dim,
quantizer_dropout=self.quantizer_dropout,
commitment_loss_weight=self.commitment_loss_weight,
codebook_loss_weight=self.codebook_loss_weight,
)
# Ignore copy
def create_and_check_model_forward(self, config, inputs_dict):
model = DacModel(config=config).to(torch_device).eval()
input_values = inputs_dict["input_values"]
result = model(input_values)
self.parent.assertEqual(result.audio_values.shape, (self.batch_size, self.intermediate_size))
@require_torch
# Copied from transformers.tests.encodec.test_modeling_encodec.EncodecModelTest with Encodec->Dac
|
DacModelTester
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/frames.py
|
{
"start": 41275,
"end": 46046
}
|
class ____(NonStrictDataModel):
"""
:param cls: Augmentation class (see global definitions)
:type cls: str
:param type: Augmentation type (see global definitions)
:type type: str
:param trans_mat: Transform matrix (list of lists). Required for affine
transforms.
:type trans_mat: Sequence[Sequence[float]]
:param params: Transform parameters, an array of 3 randomly generated values.
Fixed values are passed in case of affine reflect augmentation.
:type params: Sequence[float]
:param arguments: Arguments dictionary, passed to custom augmentations.
:type arguments: dict
:param strength: Transform strength. Required for pixel transforms.
:type strength: float
"""
_schema = {
"properties": {
"arguments": {
"additionalProperties": True,
"description": "Arguments dictionary, passed to custom augmentations.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class (see global definitions)",
"type": ["string", "null"],
},
"params": {
"description": (
"Transform parameters, an array ot 3 randomly generated values. Fixed values are passed in case of"
" affine reflect augmentation."
),
"items": {"type": "number"},
"type": ["array", "null"],
},
"strength": {
"description": "Transform strength. Required for pixel transforms.",
"type": ["number", "null"],
},
"trans_mat": {
"description": "Transform matrix (list of lists). Required for affine transforms.",
"items": {"items": {"type": "number"}, "type": "array"},
"type": ["array", "null"],
},
"type": {
"description": "Augmentation type (see global definitions)",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self,
cls=None,
type=None,
trans_mat=None,
params=None,
arguments=None,
strength=None,
**kwargs
):
super(Augmentation, self).__init__(**kwargs)
self.cls = cls
self.type = type
self.trans_mat = trans_mat
self.params = params
self.arguments = arguments
self.strength = strength
@schema_property("cls")
def cls(self):
return self._property_cls
@cls.setter
def cls(self, value):
if value is None:
self._property_cls = None
return
self.assert_isinstance(value, "cls", six.string_types)
self._property_cls = value
@schema_property("type")
def type(self):
return self._property_type
@type.setter
def type(self, value):
if value is None:
self._property_type = None
return
self.assert_isinstance(value, "type", six.string_types)
self._property_type = value
@schema_property("trans_mat")
def trans_mat(self):
return self._property_trans_mat
@trans_mat.setter
def trans_mat(self, value):
if value is None:
self._property_trans_mat = None
return
self.assert_isinstance(value, "trans_mat", (list, tuple))
self.assert_isinstance(value, "trans_mat", (list, tuple), is_array=True)
self._property_trans_mat = value
@schema_property("params")
def params(self):
return self._property_params
@params.setter
def params(self, value):
if value is None:
self._property_params = None
return
self.assert_isinstance(value, "params", (list, tuple))
self.assert_isinstance(
value, "params", six.integer_types + (float,), is_array=True
)
self._property_params = value
@schema_property("arguments")
def arguments(self):
return self._property_arguments
@arguments.setter
def arguments(self, value):
if value is None:
self._property_arguments = None
return
self.assert_isinstance(value, "arguments", (dict,))
self._property_arguments = value
@schema_property("strength")
def strength(self):
return self._property_strength
@strength.setter
def strength(self, value):
if value is None:
self._property_strength = None
return
self.assert_isinstance(value, "strength", six.integer_types + (float,))
self._property_strength = value
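# Hedged usage sketch (illustrative only; the class/type names and matrix
# values below are assumptions, not taken from the schema above):
def _example_affine_augmentation():
    return Augmentation(
        cls="affine",
        type="reflect",
        trans_mat=[[1.0, 0.0], [0.0, -1.0]],
        params=[0.0, 1.0, 0.5],
    )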
|
Augmentation
|
python
|
charliermarsh__ruff
|
crates/ruff_python_formatter/resources/test/fixtures/ruff/preview.py
|
{
"start": 441,
"end": 1682
}
|
class ____:
"""Black's `Preview.no_blank_line_before_class_docstring`"""
def f():
"""Black's `Preview.prefer_splitting_right_hand_side_of_assignments`"""
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
] = cccccccc.ccccccccccccc.cccccccc
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
] = cccccccc.ccccccccccccc().cccccccc
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
] = cccccccc.ccccccccccccc(d).cccccccc
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb] = (
cccccccc.ccccccccccccc(d).cccccccc + e
)
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb] = (
cccccccc.ccccccccccccc.cccccccc + e
)
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb] = (
cccccccc.ccccccccccccc.cccccccc
+ eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee
)
self._cache: dict[
DependencyCacheKey, list[list[DependencyPackage]]
] = collections.defaultdict(list)
self._cached_dependencies_by_level: dict[
int, list[DependencyCacheKey]
] = collections.defaultdict(list)
|
RemoveNewlineBeforeClassDocstring
|
python
|
PrefectHQ__prefect
|
tests/test_tasks.py
|
{
"start": 37661,
"end": 40727
}
|
class ____:
@pytest.mark.parametrize("persist_result", [True, False])
def test_persist_result_set_to_bool(self, persist_result):
@task(persist_result=persist_result)
def my_task():
pass
@task
def base():
pass
new_task = base.with_options(persist_result=persist_result)
assert my_task.persist_result is persist_result
assert new_task.persist_result is persist_result
@pytest.mark.parametrize(
"cache_policy",
[policy for policy in CachePolicy.__subclasses__() if policy != NO_CACHE],
)
def test_setting_cache_policy_sets_persist_result_to_true(self, cache_policy):
@task(cache_policy=cache_policy)
def my_task():
pass
@task
def base():
pass
new_task = base.with_options(cache_policy=cache_policy)
assert my_task.persist_result is True
assert new_task.persist_result is True
def test_setting_cache_key_fn_sets_persist_result_to_true(self):
@task(cache_key_fn=lambda *_: "test-key")
def my_task():
pass
@task
def base():
pass
new_task = base.with_options(cache_key_fn=lambda *_: "test-key")
assert my_task.persist_result is True
assert new_task.persist_result is True
def test_setting_result_storage_sets_persist_result_to_true(self, tmpdir):
block = LocalFileSystem(basepath=str(tmpdir))
block.save("test-name", _sync=True)
@task(result_storage=block)
def my_task():
pass
@task
def base():
pass
new_task = base.with_options(result_storage=block)
assert my_task.persist_result is True
assert new_task.persist_result is True
def test_setting_result_serializer_sets_persist_result_to_true(self):
@task(result_serializer="json")
def my_task():
pass
@task
def base():
pass
new_task = base.with_options(result_serializer="json")
assert my_task.persist_result is True
assert new_task.persist_result is True
def test_setting_result_storage_key_sets_persist_result_to_true(self):
@task(result_storage_key="test-key")
def my_task():
pass
@task
def base():
pass
new_task = base.with_options(result_storage_key="test-key")
assert my_task.persist_result is True
assert new_task.persist_result is True
def test_logs_warning_on_serialization_error(self, caplog):
@task(result_serializer="json")
def my_task():
return lambda: 1
my_task()
record = next(
(
record
for record in caplog.records
if "Encountered an error while serializing result" in record.message
),
None,
)
assert record is not None
assert record.levelname == "WARNING"
|
TestResultPersistence
|
python
|
networkx__networkx
|
networkx/readwrite/json_graph/tests/test_adjacency.py
|
{
"start": 178,
"end": 2456
}
|
class ____:
def test_graph(self):
G = nx.path_graph(4)
H = adjacency_graph(adjacency_data(G))
assert graphs_equal(G, H)
def test_graph_attributes(self):
G = nx.path_graph(4)
G.add_node(1, color="red")
G.add_edge(1, 2, width=7)
G.graph["foo"] = "bar"
G.graph[1] = "one"
H = adjacency_graph(adjacency_data(G))
assert graphs_equal(G, H)
assert H.graph["foo"] == "bar"
assert H.nodes[1]["color"] == "red"
assert H[1][2]["width"] == 7
d = json.dumps(adjacency_data(G))
H = adjacency_graph(json.loads(d))
assert graphs_equal(G, H)
assert H.graph["foo"] == "bar"
assert H.graph[1] == "one"
assert H.nodes[1]["color"] == "red"
assert H[1][2]["width"] == 7
def test_digraph(self):
G = nx.DiGraph()
nx.add_path(G, [1, 2, 3])
H = adjacency_graph(adjacency_data(G))
assert H.is_directed()
assert graphs_equal(G, H)
def test_multidigraph(self):
G = nx.MultiDiGraph()
nx.add_path(G, [1, 2, 3])
H = adjacency_graph(adjacency_data(G))
assert H.is_directed()
assert H.is_multigraph()
assert graphs_equal(G, H)
def test_multigraph(self):
G = nx.MultiGraph()
G.add_edge(1, 2, key="first")
G.add_edge(1, 2, key="second", color="blue")
H = adjacency_graph(adjacency_data(G))
assert graphs_equal(G, H)
assert H[1][2]["second"]["color"] == "blue"
def test_input_data_is_not_modified_when_building_graph(self):
G = nx.path_graph(4)
input_data = adjacency_data(G)
orig_data = copy.deepcopy(input_data)
# Ensure input is unmodified by deserialisation
assert graphs_equal(G, adjacency_graph(input_data))
assert input_data == orig_data
def test_adjacency_form_json_serialisable(self):
G = nx.path_graph(4)
H = adjacency_graph(json.loads(json.dumps(adjacency_data(G))))
assert graphs_equal(G, H)
def test_exception(self):
with pytest.raises(nx.NetworkXError):
G = nx.MultiDiGraph()
attrs = {"id": "node", "key": "node"}
adjacency_data(G, attrs)
|
TestAdjacency
|
python
|
sqlalchemy__sqlalchemy
|
test/dialect/postgresql/test_types.py
|
{
"start": 156955,
"end": 177818
}
|
class ____(_RangeTests):
def _step_value_up(self, value):
"""given a value, return a step up
this is a value that given the lower end of the sample range,
would be less than the upper value of the range
"""
raise NotImplementedError()
def _step_value_down(self, value):
"""given a value, return a step down
this is a value that given the upper end of the sample range,
would be greater than the lower value of the range
"""
raise NotImplementedError()
def _value_values(self):
"""Return a series of values related to the base range
le = left equal
ll = lower than left
re = right equal
rh = higher than right
il = inside lower
ih = inside higher
"""
spec = self._data_obj()
le, re_ = spec.lower, spec.upper
ll = self._step_value_down(le)
il = self._step_value_up(le)
rh = self._step_value_up(re_)
ih = self._step_value_down(re_)
return {"le": le, "re_": re_, "ll": ll, "il": il, "rh": rh, "ih": ih}
@testing.fixture(
params=[
lambda **kw: Range(empty=True),
lambda **kw: Range(bounds="[)"),
lambda le, **kw: Range(upper=le, bounds="[)"),
lambda le, re_, **kw: Range(lower=le, upper=re_, bounds="[)"),
lambda le, re_, **kw: Range(lower=le, upper=re_, bounds="[)"),
lambda le, re_, **kw: Range(lower=le, upper=re_, bounds="[]"),
lambda le, re_, **kw: Range(lower=le, upper=re_, bounds="(]"),
lambda le, re_, **kw: Range(lower=le, upper=re_, bounds="()"),
lambda ll, le, **kw: Range(lower=ll, upper=le, bounds="[)"),
lambda il, ih, **kw: Range(lower=il, upper=ih, bounds="[)"),
lambda ll, le, **kw: Range(lower=ll, upper=le, bounds="(]"),
lambda ll, rh, **kw: Range(lower=ll, upper=rh, bounds="[)"),
]
)
def contains_range_obj_combinations(self, request):
"""ranges that are used for range contains() contained_by() tests"""
data = self._value_values()
range_ = request.param(**data)
yield range_
@testing.fixture(
params=[
lambda l, r: Range(empty=True),
lambda l, r: Range(bounds="()"),
lambda l, r: Range(upper=r, bounds="(]"),
lambda l, r: Range(lower=l, bounds="[)"),
lambda l, r: Range(lower=l, upper=r, bounds="[)"),
lambda l, r: Range(lower=l, upper=r, bounds="[]"),
lambda l, r: Range(lower=l, upper=r, bounds="(]"),
lambda l, r: Range(lower=l, upper=r, bounds="()"),
]
)
def bounds_obj_combinations(self, request):
"""sample ranges used for value and range contains()/contained_by()
tests"""
obj = self._data_obj()
l, r = obj.lower, obj.upper
template = request.param
value = template(l=l, r=r)
yield value
@testing.fixture(params=["ll", "le", "il", "ih", "re_", "rh"])
def value_combinations(self, request):
"""sample values used for value contains() tests"""
data = self._value_values()
return data[request.param]
def test_basic_py_sanity(self):
values = self._value_values()
range_ = self._data_obj()
is_true(range_.contains(Range(lower=values["il"], upper=values["ih"])))
is_true(
range_.contained_by(Range(lower=values["ll"], upper=values["rh"]))
)
is_true(range_.contains(values["il"]))
is_true(values["il"] in range_)
is_false(
range_.contains(Range(lower=values["ll"], upper=values["ih"]))
)
is_false(range_.contains(values["rh"]))
is_false(values["rh"] in range_)
is_true(range_ == range_)
is_false(range_ != range_)
is_false(range_ == None)
def test_compatibility_accessors(self):
range_ = self._data_obj()
is_true(range_.lower_inc)
is_false(range_.upper_inc)
is_false(Range(lower=range_.lower, bounds="()").lower_inc)
is_true(Range(upper=range_.upper, bounds="(]").upper_inc)
is_false(range_.lower_inf)
is_false(range_.upper_inf)
is_false(Range(empty=True).lower_inf)
is_false(Range(empty=True).upper_inf)
is_true(Range().lower_inf)
is_true(Range().upper_inf)
is_false(range_.isempty)
is_true(Range(empty=True).isempty)
def test_contains_value(
self, connection, bounds_obj_combinations, value_combinations
):
range_ = bounds_obj_combinations
range_typ = self._col_str
strvalue = range_._stringify()
v = value_combinations
RANGE = self._col_type
q = select(
literal_column(f"'{strvalue}'::{range_typ}", RANGE).label("r1"),
cast(range_, RANGE).label("r2"),
)
literal_range, cast_range = connection.execute(q).first()
eq_(literal_range, cast_range)
q = select(
cast(range_, RANGE),
cast(range_, RANGE).contains(v),
)
r, expected = connection.execute(q).first()
eq_(r.contains(v), expected)
eq_(v in r, expected)
_common_ranges_to_test = (
lambda r, e: Range(empty=True),
lambda r, e: Range(None, None, bounds="()"),
lambda r, e: Range(r.lower, None, bounds="[)"),
lambda r, e: Range(None, r.upper, bounds="(]"),
lambda r, e: r,
lambda r, e: Range(r.lower, r.upper, bounds="[]"),
lambda r, e: Range(r.lower, r.upper, bounds="(]"),
lambda r, e: Range(r.lower, r.upper, bounds="()"),
)
@testing.combinations(
*_common_ranges_to_test,
argnames="r1t",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower + e, r.upper + e, bounds="[)"),
lambda r, e: Range(r.lower - e, r.upper - e, bounds="(]"),
lambda r, e: Range(r.lower + e, r.upper - e, bounds="[]"),
lambda r, e: Range(r.lower + e, r.upper - e, bounds="(]"),
lambda r, e: Range(r.lower + e, r.upper, bounds="(]"),
lambda r, e: Range(r.lower + e, r.upper, bounds="[]"),
lambda r, e: Range(r.lower + e, r.upper + e, bounds="[)"),
lambda r, e: Range(r.lower - e, r.upper - e, bounds="[]"),
lambda r, e: Range(r.lower - 2 * e, r.lower - e, bounds="(]"),
lambda r, e: Range(r.lower - 4 * e, r.lower, bounds="[)"),
lambda r, e: Range(r.upper + 4 * e, r.upper + 6 * e, bounds="()"),
argnames="r2t",
)
def test_contains_range(self, connection, r1t, r2t):
r1 = r1t(self._data_obj(), self._epsilon)
r2 = r2t(self._data_obj(), self._epsilon)
RANGE = self._col_type
range_typ = self._col_str
q = select(
cast(r1, RANGE).contains(r2),
cast(r1, RANGE).contained_by(r2),
)
validate_q = select(
literal_column(f"'{r1}'::{range_typ} @> '{r2}'::{range_typ}"),
literal_column(f"'{r1}'::{range_typ} <@ '{r2}'::{range_typ}"),
)
row = connection.execute(q).first()
validate_row = connection.execute(validate_q).first()
eq_(row, validate_row)
pg_contains, pg_contained = row
py_contains = r1.contains(r2)
eq_(
py_contains,
pg_contains,
f"{r1}.contains({r2}): got {py_contains},"
f" expected {pg_contains}",
)
r2_in_r1 = r2 in r1
eq_(
r2_in_r1,
pg_contains,
f"{r2} in {r1}: got {r2_in_r1}, expected {pg_contains}",
)
py_contained = r1.contained_by(r2)
eq_(
py_contained,
pg_contained,
f"{r1}.contained_by({r2}): got {py_contained},"
f" expected {pg_contained}",
)
eq_(
r2.contains(r1),
pg_contained,
f"{r2}.contains({r1}: got {r2.contains(r1)},"
f" expected {pg_contained})",
)
r1_in_r2 = r1 in r2
eq_(
r1_in_r2,
pg_contained,
f"{r1} in {r2}: got {r1_in_r2}, expected {pg_contained}",
)
@testing.combinations(
*_common_ranges_to_test,
argnames="r1t",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower - 2 * e, r.lower - e, bounds="(]"),
lambda r, e: Range(r.upper + e, r.upper + 2 * e, bounds="[)"),
argnames="r2t",
)
def test_overlaps(self, connection, r1t, r2t):
r1 = r1t(self._data_obj(), self._epsilon)
r2 = r2t(self._data_obj(), self._epsilon)
RANGE = self._col_type
range_typ = self._col_str
q = select(
cast(r1, RANGE).overlaps(r2),
)
validate_q = select(
literal_column(f"'{r1}'::{range_typ} && '{r2}'::{range_typ}"),
)
row = connection.execute(q).first()
validate_row = connection.execute(validate_q).first()
eq_(row, validate_row)
pg_res = row[0]
py_res = r1.overlaps(r2)
eq_(
py_res,
pg_res,
f"{r1}.overlaps({r2}): got {py_res}, expected {pg_res}",
)
@testing.combinations(
*_common_ranges_to_test,
argnames="r1t",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.upper, r.upper + 2 * e, bounds="[]"),
lambda r, e: Range(r.upper, r.upper + 2 * e, bounds="(]"),
lambda r, e: Range(r.lower - 2 * e, r.lower, bounds="[]"),
lambda r, e: Range(r.lower - 2 * e, r.lower, bounds="[)"),
argnames="r2t",
)
def test_strictly_left_or_right_of(self, connection, r1t, r2t):
r1 = r1t(self._data_obj(), self._epsilon)
r2 = r2t(self._data_obj(), self._epsilon)
RANGE = self._col_type
range_typ = self._col_str
q = select(
cast(r1, RANGE).strictly_left_of(r2),
cast(r1, RANGE).strictly_right_of(r2),
)
validate_q = select(
literal_column(f"'{r1}'::{range_typ} << '{r2}'::{range_typ}"),
literal_column(f"'{r1}'::{range_typ} >> '{r2}'::{range_typ}"),
)
row = connection.execute(q).first()
validate_row = connection.execute(validate_q).first()
eq_(row, validate_row)
pg_left, pg_right = row
py_left = r1.strictly_left_of(r2)
eq_(
py_left,
pg_left,
f"{r1}.strictly_left_of({r2}): got {py_left}, expected {pg_left}",
)
py_left = r1 << r2
eq_(
py_left,
pg_left,
f"{r1} << {r2}: got {py_left}, expected {pg_left}",
)
py_right = r1.strictly_right_of(r2)
eq_(
py_right,
pg_right,
f"{r1}.strictly_right_of({r2}): got {py_left},"
f" expected {pg_right}",
)
py_right = r1 >> r2
eq_(
py_right,
pg_right,
f"{r1} >> {r2}: got {py_left}, expected {pg_right}",
)
@testing.combinations(
*_common_ranges_to_test,
argnames="r1t",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.upper, r.upper + 2 * e, bounds="[]"),
lambda r, e: Range(r.upper, r.upper + 2 * e, bounds="(]"),
lambda r, e: Range(r.lower - 2 * e, r.lower, bounds="[]"),
lambda r, e: Range(r.lower - 2 * e, r.lower, bounds="[)"),
argnames="r2t",
)
def test_not_extend_left_or_right_of(self, connection, r1t, r2t):
r1 = r1t(self._data_obj(), self._epsilon)
r2 = r2t(self._data_obj(), self._epsilon)
RANGE = self._col_type
range_typ = self._col_str
q = select(
cast(r1, RANGE).not_extend_left_of(r2),
cast(r1, RANGE).not_extend_right_of(r2),
)
validate_q = select(
literal_column(f"'{r1}'::{range_typ} &> '{r2}'::{range_typ}"),
literal_column(f"'{r1}'::{range_typ} &< '{r2}'::{range_typ}"),
)
row = connection.execute(q).first()
validate_row = connection.execute(validate_q).first()
eq_(row, validate_row)
pg_left, pg_right = row
py_left = r1.not_extend_left_of(r2)
eq_(
py_left,
pg_left,
f"{r1}.not_extend_left_of({r2}): got {py_left},"
f" expected {pg_left}",
)
py_right = r1.not_extend_right_of(r2)
eq_(
py_right,
pg_right,
f"{r1}.not_extend_right_of({r2}): got {py_right},"
f" expected {pg_right}",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower - e, r.lower + e, bounds="[)"),
lambda r, e: Range(r.lower - e, r.lower - e, bounds="[]"),
argnames="r1t",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower - e, r.lower + e, bounds="[)"),
lambda r, e: Range(r.lower - e, r.lower - e, bounds="[]"),
lambda r, e: Range(r.lower + e, r.upper - e, bounds="(]"),
lambda r, e: Range(r.lower + e, r.upper - e, bounds="[]"),
lambda r, e: Range(r.lower + e, r.upper, bounds="(]"),
lambda r, e: Range(r.lower + e, r.upper, bounds="[]"),
lambda r, e: Range(r.lower + e, r.upper + e, bounds="[)"),
lambda r, e: Range(r.lower - e, r.lower - e, bounds="[]"),
lambda r, e: Range(r.lower - 2 * e, r.lower - e, bounds="(]"),
lambda r, e: Range(r.lower - 4 * e, r.lower, bounds="[)"),
lambda r, e: Range(r.upper + 4 * e, r.upper + 6 * e, bounds="()"),
argnames="r2t",
)
def test_adjacent(self, connection, r1t, r2t):
r1 = r1t(self._data_obj(), self._epsilon)
r2 = r2t(self._data_obj(), self._epsilon)
RANGE = self._col_type
range_typ = self._col_str
q = select(
cast(r1, RANGE).adjacent_to(r2),
)
validate_q = select(
literal_column(f"'{r1}'::{range_typ} -|- '{r2}'::{range_typ}"),
)
row = connection.execute(q).first()
validate_row = connection.execute(validate_q).first()
eq_(row, validate_row)
pg_res = row[0]
py_res = r1.adjacent_to(r2)
eq_(
py_res,
pg_res,
f"{r1}.adjacent_to({r2}): got {py_res}, expected {pg_res}",
)
@testing.combinations(
*_common_ranges_to_test,
argnames="r1t",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower, r.lower + e, bounds="[]"),
lambda r, e: Range(r.upper + 4 * e, r.upper + 6 * e, bounds="()"),
argnames="r2t",
)
def test_union(self, connection, r1t, r2t):
r1 = r1t(self._data_obj(), self._epsilon)
r2 = r2t(self._data_obj(), self._epsilon)
RANGE = self._col_type
range_typ = self._col_str
q = select(
cast(r1, RANGE).union(r2),
)
validate_q = select(
literal_column(f"'{r1}'::{range_typ}+'{r2}'::{range_typ}", RANGE),
)
try:
pg_res = connection.execute(q).scalar()
except DBAPIError:
connection.rollback()
with expect_raises(DBAPIError):
connection.execute(validate_q).scalar()
with expect_raises(ValueError):
r1.union(r2)
else:
validate_union = connection.execute(validate_q).scalar()
eq_(pg_res, validate_union)
py_res = r1.union(r2)
eq_(
py_res,
pg_res,
f"{r1}.union({r2}): got {py_res}, expected {pg_res}",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower, r.lower, bounds="[]"),
lambda r, e: Range(r.lower - e, r.upper - e, bounds="[]"),
lambda r, e: Range(r.lower - e, r.upper + e, bounds="[)"),
lambda r, e: Range(r.lower - e, r.upper + e, bounds="[]"),
argnames="r1t",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower, r.lower, bounds="[]"),
lambda r, e: Range(r.lower, r.upper - e, bounds="(]"),
lambda r, e: Range(r.lower, r.lower + e, bounds="[)"),
lambda r, e: Range(r.lower - e, r.lower, bounds="(]"),
lambda r, e: Range(r.lower - e, r.lower + e, bounds="()"),
lambda r, e: Range(r.lower, r.upper, bounds="[]"),
lambda r, e: Range(r.lower, r.upper, bounds="()"),
argnames="r2t",
)
def test_difference(self, connection, r1t, r2t):
r1 = r1t(self._data_obj(), self._epsilon)
r2 = r2t(self._data_obj(), self._epsilon)
RANGE = self._col_type
range_typ = self._col_str
q = select(
cast(r1, RANGE).difference(r2),
)
validate_q = select(
literal_column(f"'{r1}'::{range_typ}-'{r2}'::{range_typ}", RANGE),
)
try:
pg_res = connection.execute(q).scalar()
except DBAPIError:
connection.rollback()
with expect_raises(DBAPIError):
connection.execute(validate_q).scalar()
with expect_raises(ValueError):
r1.difference(r2)
else:
validate_difference = connection.execute(validate_q).scalar()
eq_(pg_res, validate_difference)
py_res = r1.difference(r2)
eq_(
py_res,
pg_res,
f"{r1}.difference({r2}): got {py_res}, expected {pg_res}",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower, r.lower, bounds="[]"),
lambda r, e: Range(r.lower - e, r.upper - e, bounds="[]"),
lambda r, e: Range(r.lower - e, r.upper + e, bounds="[)"),
lambda r, e: Range(r.lower - e, r.upper + e, bounds="[]"),
argnames="r1t",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower, r.lower, bounds="[]"),
lambda r, e: Range(r.lower, r.upper - e, bounds="(]"),
lambda r, e: Range(r.lower, r.lower + e, bounds="[)"),
lambda r, e: Range(r.lower - e, r.lower, bounds="(]"),
lambda r, e: Range(r.lower - e, r.lower + e, bounds="()"),
lambda r, e: Range(r.lower, r.upper, bounds="[]"),
lambda r, e: Range(r.lower, r.upper, bounds="()"),
argnames="r2t",
)
def test_intersection(self, connection, r1t, r2t):
r1 = r1t(self._data_obj(), self._epsilon)
r2 = r2t(self._data_obj(), self._epsilon)
RANGE = self._col_type
range_typ = self._col_str
q = select(
cast(r1, RANGE).intersection(r2),
)
validate_q = select(
literal_column(f"'{r1}'::{range_typ}*'{r2}'::{range_typ}", RANGE),
)
pg_res = connection.execute(q).scalar()
validate_intersection = connection.execute(validate_q).scalar()
eq_(pg_res, validate_intersection)
py_res = r1.intersection(r2)
eq_(
py_res,
pg_res,
f"{r1}.intersection({r2}): got {py_res}, expected {pg_res}",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower, r.lower, bounds="[]"),
argnames="r1t",
)
@testing.combinations(
*_common_ranges_to_test,
lambda r, e: Range(r.lower, r.lower, bounds="[]"),
lambda r, e: Range(r.lower, r.lower + e, bounds="[)"),
lambda r, e: Range(r.lower - e, r.lower, bounds="(]"),
lambda r, e: Range(r.lower - e, r.lower + e, bounds="()"),
argnames="r2t",
)
def test_equality(self, connection, r1t, r2t):
r1 = r1t(self._data_obj(), self._epsilon)
r2 = r2t(self._data_obj(), self._epsilon)
range_typ = self._col_str
q = select(
literal_column(f"'{r1}'::{range_typ} = '{r2}'::{range_typ}")
)
equal = connection.execute(q).scalar()
eq_(r1 == r2, equal, f"{r1} == {r2}: got {r1 == r2}, expected {equal}")
q = select(
literal_column(f"'{r1}'::{range_typ} <> '{r2}'::{range_typ}")
)
different = connection.execute(q).scalar()
eq_(
r1 != r2,
different,
f"{r1} != {r2}: got {r1 != r2}, expected {different}",
)
def test_bool(self):
is_false(bool(Range(empty=True)))
is_true(bool(Range(1, 2)))
|
_RangeComparisonFixtures
|
python
|
scipy__scipy
|
scipy/stats/tests/test_distributions.py
|
{
"start": 300967,
"end": 301714
}
|
class ____:
def test_sf_tail(self):
# Expected value computed with mpmath:
# import mpmath
# mpmath.mp.dps = 80
# x = mpmath.mpf(800.0)
# c = mpmath.mpf(2.5)
# s = float(1 - mpmath.ncdf(1/c * (mpmath.sqrt(x)
# - 1/mpmath.sqrt(x))))
# print(s)
# Output:
# 6.593376447038406e-30
s = stats.fatiguelife.sf(800.0, 2.5)
assert_allclose(s, 6.593376447038406e-30, rtol=1e-13)
def test_isf_tail(self):
# See test_sf_tail for the mpmath code.
p = 6.593376447038406e-30
q = stats.fatiguelife.isf(p, 2.5)
assert_allclose(q, 800.0, rtol=1e-13)
|
TestFatigueLife
|
python
|
walkccc__LeetCode
|
solutions/443. String Compression/443.py
|
{
"start": 0,
"end": 387
}
|
class ____:
def compress(self, chars: list[str]) -> int:
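    # Two pointers: `i` scans runs of equal characters while `ans` is the
    # write index for the compressed result, built in place.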
ans = 0
i = 0
while i < len(chars):
letter = chars[i]
count = 0
while i < len(chars) and chars[i] == letter:
count += 1
i += 1
chars[ans] = letter
ans += 1
if count > 1:
for c in str(count):
chars[ans] = c
ans += 1
return ans
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-deepset/destination_deepset/models.py
|
{
"start": 2403,
"end": 3711
}
|
class ____(BaseModel):
name: str = Field(title="Name", description="File Name")
content: bytes | str = Field(title="Content", description="File Content")
    meta: dict[str, Any] = Field(default_factory=dict, title="Meta Data", description="File Meta Data")
@property
def meta_as_string(self) -> str:
"""Return metadata as a string."""
return json.dumps(self.meta or {})
@classmethod
def from_record(cls, record: AirbyteRecordMessage) -> DeepsetCloudFile:
data = FileData.model_validate(record.data)
name = Path(util.generate_name(data.document_key, record.stream, namespace=record.namespace))
return cls(
name=f"{name.stem}.md",
content=data.content,
meta={
"airbyte": {
"stream": record.stream,
"emitted_at": record.emitted_at,
**({"namespace": record.namespace} if record.namespace else {}),
**({"file_parse_error": data.file_parse_error} if data.file_parse_error else {}),
},
**({"source_file_extension": name.suffix} if name.suffix else {}),
**data.model_dump(exclude={"content", "file_parse_error"}, exclude_none=True),
},
)
|
DeepsetCloudFile
|
python
|
scrapy__scrapy
|
tests/mockserver/http_resources.py
|
{
"start": 10241,
"end": 10667
}
|
class ____(resource.Resource):
"""Return a response with a Set-Cookie header for each request url parameter"""
def render(self, request):
for cookie_name, cookie_values in request.args.items():
for cookie_value in cookie_values:
cookie = (cookie_name.decode() + "=" + cookie_value.decode()).encode()
request.setHeader(b"Set-Cookie", cookie)
return b""
|
SetCookie
|
python
|
pytorch__pytorch
|
torch/nn/utils/_named_member_accessor.py
|
{
"start": 3796,
"end": 14210
}
|
class ____:
"""
A class that provides a way to access the submodules and parameters/buffers of a module.
It provides caching mechanism to speed up submodule lookups.
This is useful for functional programming to manipulate the module state.
"""
def __init__(self, module: "torch.nn.Module") -> None:
self.module = module
self.memo: dict[str, torch.nn.Module] = {}
# Nested attribute access
def get_submodule(self, name: str) -> "torch.nn.Module":
"""
Return the submodule specified by the given path.
For example, to get the submodule mod.layer1.conv1,
use accessor.get_submodule("layer1.conv1")
Compare to mod.get_submodule("layer1.conv1"), this method will cache the
intermediate submodule access to speed up future lookups.
"""
if not name:
return self.module
if name in self.memo:
return self.memo[name]
else:
prefix, dot, attr = name.rpartition(".")
if dot:
module = self.get_submodule(prefix)
else:
module = self.module
try:
submodule = getattr(module, attr)
except AttributeError as ex:
raise AttributeError(
f"{module._get_name()} has no attribute `{attr}`"
) from ex
if not isinstance(submodule, torch.nn.Module):
raise TypeError(
f"submodule `{name}`: {submodule} is not an instance of torch.nn.Module"
)
self.memo[name] = submodule
return submodule
def swap_submodule(self, path: str, value: "torch.nn.Module") -> "torch.nn.Module":
"""
Swap the submodule specified by the given ``path`` to ``value``.
For example, to swap the attribute mod.layer1.conv1 use
``accessor.swap_submodule("layer1.conv1", conv2)``.
"""
prefix, _, attr = path.rpartition(".")
return swap_submodule(self.get_submodule(prefix), attr, value)
def get_tensor(self, name: str) -> torch.Tensor:
"""
Get the tensor specified by the given path to value.
For example, to get the attribute mod.layer1.conv1.weight,
use accessor.get_tensor('layer1.conv1.weight')
Compare to mod.get_parameter("layer1.conv1.weight"), this method will
cache the intermediate submodule access to speed up future lookups.
"""
prefix, _, attr = name.rpartition(".")
submodule = self.get_submodule(prefix)
try:
tensor = getattr(submodule, attr)
except AttributeError as ex:
raise AttributeError(
f"{submodule._get_name()} has no attribute `{name}`"
) from ex
if not isinstance(tensor, torch.Tensor) and tensor is not None:
raise TypeError(f"{tensor} is not an instance of torch.Tensor")
return tensor # type: ignore[return-value]
def set_tensor(self, name: str, value: torch.Tensor) -> None:
"""
Set the attribute specified by the given path to value.
For example, to set the attribute mod.layer1.conv1.weight,
use accessor.set_tensor("layer1.conv1.weight", value)
"""
prefix, _, attr = name.rpartition(".")
set_tensor(self.get_submodule(prefix), attr, value)
def del_tensor(self, name: str) -> None:
"""
Delete the attribute specified by the given path.
For example, to delete the attribute mod.layer1.conv1.weight,
use accessor.del_tensor("layer1.conv1.weight")
"""
prefix, _, attr = name.rpartition(".")
submodule = self.get_submodule(prefix)
try:
delattr(submodule, attr)
except AttributeError as ex:
raise AttributeError(
f"{submodule._get_name()} has no attribute `{name}`"
) from ex
def swap_tensor(
self, name: str, value: torch.Tensor, allow_missing: bool = False
) -> torch.Tensor:
"""
Swap the attribute specified by the given path to value.
For example, to swap the attribute mod.layer1.conv1.weight,
use accessor.swap_tensor("layer1.conv1.weight", value)
"""
prefix, _, attr = name.rpartition(".")
return swap_tensor(
self.get_submodule(prefix), attr, value, allow_missing=allow_missing
)
# Batched operations
def get_tensors(self, names: Iterable[str]) -> list[torch.Tensor]:
"""
Get the tensors specified by the given paths.
For example, to get the attributes mod.layer1.conv1.weight and
mod.layer1.conv1.bias, use accessor.get_tensors(["layer1.conv1.weight",
"layer1.conv1.bias"])
"""
return [self.get_tensor(name) for name in names]
def set_tensors(self, names: Iterable[str], values: Iterable[torch.Tensor]) -> None:
"""
Set the attributes specified by the given paths to values.
For example, to set the attributes mod.layer1.conv1.weight and
mod.layer1.conv1.bias, use accessor.set_tensors(["layer1.conv1.weight",
"layer1.conv1.bias"], [weight, bias])
"""
if not isinstance(names, (list, tuple)):
names = list(names)
if not isinstance(values, (list, tuple)):
values = list(values)
assert len(names) == len(values), "names and values must have the same length"
for name, value in zip(names, values, strict=True):
self.set_tensor(name, value)
def set_tensors_dict(self, named_tensors: dict[str, torch.Tensor]) -> None:
"""
Set the attributes specified by the given paths to values.
For example, to set the attributes mod.layer1.conv1.weight and
mod.layer1.conv1.bias, use accessor.set_tensors_dict({
"layer1.conv1.weight": weight,
"layer1.conv1.bias": bias,
})
"""
for name, value in named_tensors.items():
self.set_tensor(name, value)
def del_tensors(self, names: Iterable[str]) -> None:
"""
Delete the attributes specified by the given paths.
For example, to delete the attributes mod.layer1.conv1.weight and
mod.layer1.conv1.bias, use accessor.del_tensors(["layer1.conv1.weight",
"layer1.conv1.bias"])
"""
for name in names:
self.del_tensor(name)
def swap_tensors(
self,
names: Iterable[str],
values: Iterable[torch.Tensor],
allow_missing: bool = False,
) -> list[torch.Tensor]:
"""
Swap the attributes specified by the given paths to values.
For example, to swap the attributes mod.layer1.conv1.weight and
mod.layer1.conv1.bias, use accessor.swap_tensors(["layer1.conv1.weight",
"layer1.conv1.bias"], [weight, bias])
"""
if not isinstance(names, (list, tuple)):
names = list(names)
if not isinstance(values, (list, tuple)):
values = list(values)
assert len(names) == len(values), "names and values must have the same length"
return [
self.swap_tensor(name, value, allow_missing=allow_missing)
for name, value in zip(names, values, strict=True)
]
def swap_tensors_dict(
self, named_tensors: dict[str, torch.Tensor], allow_missing: bool = False
) -> tuple[dict[str, torch.Tensor], list[str]]:
"""
Swap the attributes specified by the given paths to values.
For example, to swap the attributes mod.layer1.conv1.weight and
mod.layer1.conv1.bias, use accessor.swap_tensors_dict({
"layer1.conv1.weight": weight,
"layer1.conv1.bias": bias,
})
"""
orig_named_tensors = {}
missing_keys = []
try:
for name, tensor in named_tensors.items():
orig_tensor = self.swap_tensor(name, tensor, allow_missing=True)
if orig_tensor is _MISSING:
missing_keys.append(name)
orig_named_tensors[name] = orig_tensor
except Exception:
# Swap back if any exception occurs
for name, orig_tensor in orig_named_tensors.items():
self.swap_tensor(name, orig_tensor, allow_missing=True)
raise
if missing_keys and not allow_missing:
# Swap back if any key is missing when allow_missing is False
for name, orig_tensor in orig_named_tensors.items():
self.swap_tensor(name, orig_tensor, allow_missing=True)
raise RuntimeError(f"Missing key(s): {', '.join(map(repr, missing_keys))}.")
return orig_named_tensors, missing_keys
def check_keys(self, keys: Iterable[str]) -> tuple[list[str], list[str]]:
"""Check that the given keys are valid."""
keys = set(keys)
valid_keys = {name for name, _ in self.named_tensors(remove_duplicate=False)}
missing_keys = valid_keys - keys
unexpected_keys = keys - valid_keys
return sorted(missing_keys), sorted(unexpected_keys)
# Shortcut methods
def named_parameters(
self,
remove_duplicate: bool = True,
) -> Iterable[tuple[str, torch.Tensor]]:
"""Iterate over all the parameters in the module."""
yield from self.module.named_parameters(remove_duplicate=remove_duplicate)
def named_buffers(
self,
remove_duplicate: bool = True,
) -> Iterable[tuple[str, torch.Tensor]]:
"""Iterate over all the buffers in the module."""
yield from self.module.named_buffers(remove_duplicate=remove_duplicate)
def named_tensors(
self,
remove_duplicate: bool = True,
) -> Iterable[tuple[str, torch.Tensor]]:
"""Iterate over all the tensors in the module."""
yield from self.module.named_parameters(remove_duplicate=remove_duplicate)
yield from self.module.named_buffers(remove_duplicate=remove_duplicate)
def named_modules(
self,
remove_duplicate: bool = True,
) -> Iterable[tuple[str, "torch.nn.Module"]]:
"""Iterate over all the modules in the module."""
yield from self.module.named_modules(remove_duplicate=remove_duplicate)
|
NamedMemberAccessor
|
python
|
lazyprogrammer__machine_learning_examples
|
ab_testing/server_starter.py
|
{
"start": 605,
"end": 1306
}
|
class ____:
def __init__(self, name):
self.name = name
def sample(self):
# TODO
return 1
# TODO - what else does the Bandit need to do?
# initialize bandits
banditA = Bandit('A')
banditB = Bandit('B')
@app.route('/get_ad')
def get_ad():
# TODO
return jsonify({'advertisement_id': 'A'})
@app.route('/click_ad', methods=['POST'])
def click_ad():
result = 'OK'
if request.form['advertisement_id'] == 'A':
# TODO
pass
elif request.form['advertisement_id'] == 'B':
# TODO
pass
else:
result = 'Invalid Input.'
# nothing to return really
return jsonify({'result': result})
if __name__ == '__main__':
app.run(host='127.0.0.1', port='8888')
|
Bandit
|
python
|
weaviate__weaviate-python-client
|
weaviate/client.py
|
{
"start": 957,
"end": 4689
}
|
class ____(_WeaviateClientExecutor[ConnectionAsync]):
"""The v4 Python-native Weaviate Client class that encapsulates Weaviate functionalities in one object.
WARNING: This client is only compatible with Weaviate v1.23.6 and higher!
A Client instance creates all the needed objects to interact with Weaviate, and connects all of
them to the same Weaviate instance. See below the Attributes of the Client instance. For the
per attribute functionality see that attribute's documentation.
Attributes:
backup (_BackupAsync): Backup object instance connected to the same Weaviate instance as the Client.
        This namespace contains all the functionality to back up and restore data in the connected Weaviate instance.
cluster (_ClusterAsync): Cluster object instance connected to the same Weaviate instance as the Client.
This namespace contains all functionality to inspect the connected Weaviate cluster.
collections (_CollectionsAsync): Collections object instance connected to the same Weaviate instance as the Client.
This namespace contains all the functionality to manage Weaviate data collections. It is your main entry point for all
collection-related functionality. Use it to retrieve collection objects using `client.collections.get("MyCollection")`
or to create new collections using `client.collections.create("MyCollection", ...)`.
debug (_DebugAsync): Debug object instance connected to the same Weaviate instance as the Client.
This namespace contains functionality used to debug Weaviate clusters. As such, it is deemed experimental and is subject to change.
We can make no guarantees about the stability of this namespace nor the potential for future breaking changes. Use at your own risk.
roles (_RolesAsync): Roles object instance connected to the same Weaviate instance as the Client.
This namespace contains all functionality to manage Weaviate's RBAC functionality.
users (_UsersAsync): Users object instance connected to the same Weaviate instance as the Client.
This namespace contains all functionality to manage Weaviate users.
"""
def __init__(
self,
connection_params: Optional[ConnectionParams] = None,
embedded_options: Optional[EmbeddedOptions] = None,
auth_client_secret: Optional[AuthCredentials] = None,
additional_headers: Optional[dict] = None,
additional_config: Optional[AdditionalConfig] = None,
skip_init_checks: bool = False,
) -> None:
self._connection_type = ConnectionAsync
super().__init__(
connection_params=connection_params,
embedded_options=embedded_options,
auth_client_secret=auth_client_secret,
additional_headers=additional_headers,
additional_config=additional_config,
skip_init_checks=skip_init_checks,
)
self.alias = _AliasAsync(self._connection)
self.backup = _BackupAsync(self._connection)
self.cluster = _ClusterAsync(self._connection)
self.collections = _CollectionsAsync(self._connection)
self.debug = _DebugAsync(self._connection)
self.groups = _GroupsAsync(self._connection)
self.roles = _RolesAsync(self._connection)
self.users = _UsersAsync(self._connection)
async def __aenter__(self) -> "WeaviateAsyncClient":
await executor.aresult(self.connect())
return self
async def __aexit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
await executor.aresult(self.close())
@executor.wrap("sync")
|
WeaviateAsyncClient
|
python
|
django-mptt__django-mptt
|
tests/myapp/models.py
|
{
"start": 298,
"end": 378
}
|
class ____(QuerySet):
def custom_method(self):
pass
|
CustomTreeQueryset
|
python
|
doocs__leetcode
|
solution/1700-1799/1718.Construct the Lexicographically Largest Valid Sequence/Solution.py
|
{
"start": 0,
"end": 846
}
|
class ____:
def constructDistancedSequence(self, n: int) -> List[int]:
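        # Greedy backtracking: fill positions left to right, trying the largest
        # unused number first so the first completed sequence is the
        # lexicographically largest valid one.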
def dfs(u):
if u == n * 2:
return True
if path[u]:
return dfs(u + 1)
for i in range(n, 1, -1):
if cnt[i] and u + i < n * 2 and path[u + i] == 0:
cnt[i] = 0
path[u] = path[u + i] = i
if dfs(u + 1):
return True
path[u] = path[u + i] = 0
cnt[i] = 2
if cnt[1]:
cnt[1], path[u] = 0, 1
if dfs(u + 1):
return True
path[u], cnt[1] = 0, 1
return False
path = [0] * (n * 2)
cnt = [2] * (n * 2)
cnt[1] = 1
dfs(1)
return path[1:]
|
Solution
|
python
|
altair-viz__altair
|
tools/generate_schema_wrapper.py
|
{
"start": 15323,
"end": 15714
}
|
class ____(SchemaGenerator):
schema_class_template = textwrap.dedent(
'''
@with_property_setters
class {classname}(ValueChannelMixin, core.{basename}):
"""{docstring}"""
_class_is_valid_at_instantiation = False
_encoding_name = "{encodingname}"
{method_code}
{init_code}
'''
)
haspropsetters = True
|
ValueSchemaGenerator
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/openid_connect/provider.py
|
{
"start": 894,
"end": 1047
}
|
class ____(ProviderAccount):
def get_user_data(self) -> Optional[Dict]:
return _pick_data(self.account.extra_data)
|
OpenIDConnectProviderAccount
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/newType1.py
|
{
"start": 1277,
"end": 1982
}
|
class ____(TypedDict):
x: int
# This should generate an error because type cannot be a TypedDict.
NewTypeBad7 = NewType("NewTypeBad7", TD1)
NewTypeGood8 = NewType("NewTypeGood8", MyString)
# This should generate an error because the name doesn't match.
NewTypeBad9 = NewType("NewTypeBad9Not", int)
def func2(x: MyString):
# This should generate two errors because isinstance can't be used
    # with a NewType and it violates the isinstance signature.
if isinstance(x, MyString):
pass
# This should generate two errors because issubclass can't be used
# with a NewType and it violates the issubclass signature.
if issubclass(type(x), (MyString, int)):
pass
|
TD1
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/static_methods.py
|
{
"start": 228,
"end": 415
}
|
class ____:
@staticmethod
def sink(oops):
_test_sink(oops)
def test(source):
return StaticClass.sink(source)
def run_test(source):
test(_test_source())
|
StaticClass
|
python
|
aimacode__aima-python
|
learning4e.py
|
{
"start": 29412,
"end": 30538
}
|
class ____:
"""Given a list of learning algorithms, have them vote."""
def __init__(self, learners):
self.learners = learners
def train(self, dataset):
self.predictors = [learner(dataset) for learner in self.learners]
def predict(self, example):
return mode(predictor.predict(example) for predictor in self.predictors)
def ada_boost(dataset, L, K):
"""[Figure 18.34]"""
examples, target = dataset.examples, dataset.target
n = len(examples)
eps = 1 / (2 * n)
w = [1 / n] * n
h, z = [], []
for k in range(K):
h_k = L(dataset, w)
h.append(h_k)
error = sum(weight for example, weight in zip(examples, w) if example[target] != h_k.predict(example[:-1]))
# avoid divide-by-0 from either 0% or 100% error rates
error = np.clip(error, eps, 1 - eps)
for j, example in enumerate(examples):
if example[target] == h_k.predict(example[:-1]):
w[j] *= error / (1 - error)
w = normalize(w)
z.append(np.log((1 - error) / error))
return weighted_majority(h, z)
|
EnsembleLearner
|
python
|
google__pytype
|
pytype/test_data/pytree.py
|
{
"start": 1295,
"end": 8429
}
|
class ____:
"""Abstract base class for Node and Leaf.
This provides some default functionality and boilerplate using the
template pattern.
A node may be a subnode of at most one parent.
Each subclass of Base must provide a __str__ implementation that returns
exactly the input that was used to create the item in the tree.
Each subclass of Base must provide a value attribute, and the ranges of
values must be distinct for each subclass. This isn't a strict requirement,
but it's convenient so that instead of writing
if isinstance(node, Leaf) and node.type == TOKEN.name:
you can just write
if node.type == TOKEN.name:
"""
# Default values for instance variables
type = None # int: token number (< 256) or symbol number (>= 256)
parent = None # Parent node pointer, or None
children = () # Tuple of subnodes
was_changed = False
was_checked = False
def __new__(cls, *args, **kwds):
"""Constructor that prevents Base from being instantiated."""
assert cls is not Base, "Cannot instantiate Base"
return object.__new__(cls)
def __eq__(self, other):
"""Compare two nodes for equality.
This calls the method _eq().
"""
if self is other:
return True
# We assume that it doesn't make sense to compare a class with a
# subclass ... if that changes, then the following test needs to be
    # changed to something that uses issubclass.
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other) # Implemented by subclass
__hash__ = None # For Py3 compatibility.
def __ne__(self, other):
"""Compare two nodes for inequality.
This calls the method _eq().
"""
if self.__class__ is not other.__class__:
return NotImplemented
return not self._eq(other)
def _eq(self, other):
"""Compare two nodes for equality.
This is called by __eq__ and __ne__. It is only called if the two nodes
have the same type. This must be implemented by the concrete subclass.
Nodes should be considered equal if they have the same structure,
ignoring the prefix string and other context information.
"""
raise NotImplementedError
def clone(self):
"""Return a cloned (deep) copy of self.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def post_order(self):
"""Post-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def pre_order(self):
"""Pre-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def set_prefix(self, prefix):
"""Set the prefix for the node (see Leaf class).
DEPRECATED; use the prefix property directly.
"""
warnings.warn(
"set_prefix() is deprecated; use the prefix property",
DeprecationWarning,
stacklevel=2,
)
self.prefix = prefix
def get_prefix(self):
"""Return the prefix for the node (see Leaf class).
DEPRECATED; use the prefix property directly.
"""
warnings.warn(
"get_prefix() is deprecated; use the prefix property",
DeprecationWarning,
stacklevel=2,
)
return self.prefix
def replace(self, new):
"""Replace this node with a new one in the parent."""
assert self.parent is not None, str(self)
assert new is not None
if not isinstance(new, list):
new = [new]
l_children = []
found = False
for ch in self.parent.children:
if ch is self:
assert not found, (self.parent.children, self, new)
if new is not None:
l_children.extend(new)
found = True
else:
l_children.append(ch)
assert found, (self.children, self, new)
self.parent.changed()
self.parent.children = l_children
for x in new:
x.parent = self.parent
self.parent = None
def get_line(self):
"""Return the line number which generated the invocant node."""
node = self
while not isinstance(node, Leaf):
if not node.children:
return
node = node.children[0]
return node.line
def changed(self):
if self.parent:
self.parent.changed()
self.was_changed = True
def remove(self):
"""Remove the node from the tree.
Returns the position of the node in its parent's children before it was
removed.
"""
if self.parent:
for i, node in enumerate(self.parent.children):
if node is self:
self.parent.changed()
del self.parent.children[i]
self.parent = None
return i
@property
def next_sibling(self):
"""The node immediately following the invocant in their parent's children
list. If the invocant does not have a next sibling, it is None
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i + 1]
except IndexError:
return None
@property
def prev_sibling(self):
"""The node immediately preceding the invocant in their parent's children
list. If the invocant does not have a previous sibling, it is None.
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i - 1]
def leaves(self):
for child in self.children:
yield from child.leaves()
def depth(self):
if self.parent is None:
return 0
return 1 + self.parent.depth()
def get_suffix(self):
"""Return the string immediately following the invocant node.
This is effectively equivalent to node.next_sibling.prefix
"""
next_sib = self.next_sibling
if next_sib is None:
return ""
return next_sib.prefix
if sys.version_info < (3, 0):
def __str__(self):
return str(self).encode("ascii")
@property
def type_repr(self):
"""Get the type as a human-readable string."""
return type_repr(self.type)
def descend_to(self, indexes):
"""Takes a sequence of integers and descends via children.
For example,
descend_to([]) returns self;
descend_to([0]) returns self.children[0];
      descend_to([2,5,3]) returns self.children[2].children[5].children[3].
In effect, this gives each node a unique number, which is the list of
child # that is needed to get to it.
"""
node = self
for i in indexes:
node = node.children[i]
return node
def label_nodes(self, indexes=None):
"""Create 'label' attritbute for each Node/Leaf.
Args: indexes is used internally to keep track of the path to here.
"""
indexes = indexes or []
self.label = indexes
try:
for i, ch in enumerate(self.children):
ch.label_nodes(indexes + [i])
except AttributeError:
pass # Leaf has no children
|
Base
|
python
|
pandas-dev__pandas
|
pandas/tests/series/indexing/test_setitem.py
|
{
"start": 42844,
"end": 43216
}
|
class ____(CoercionTest):
@pytest.fixture
def obj(self):
return Series(["a", "b", "c", "d"], dtype=StringDtype(na_value=np.nan))
@pytest.mark.parametrize(
"val,exp_dtype,raises",
[
(1, np.complex128, False),
(1.1, np.complex128, False),
(1 + 1j, np.complex128, False),
(True, object, True),
],
)
|
TestCoercionString
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-aws/prefect_aws/observers/ecs.py
|
{
"start": 4628,
"end": 8664
}
|
class ____:
def __init__(self, queue_name: str, queue_region: str | None = None):
self.queue_name = queue_name
self.queue_region = queue_region
async def stream_messages(
self,
) -> AsyncGenerator["MessageTypeDef", None]:
session = aiobotocore.session.get_session()
async with session.create_client(
"sqs", region_name=self.queue_region
) as sqs_client:
try:
queue_url = (await sqs_client.get_queue_url(QueueName=self.queue_name))[
"QueueUrl"
]
except ClientError as e:
if (
e.response.get("Error", {}).get("Code")
== "AWS.SimpleQueueService.NonExistentQueue"
):
logger.warning(
(
"SQS queue '%s' does not exist in region '%s'. "
"This worker will continue to submit ECS tasks, but event replication "
"and crash detection will not work. To enable ECS event replication and "
"crash detection, deploy an SQS queue using "
"`prefect-aws ecs-worker deploy-events` and configure the "
"PREFECT_INTEGRATIONS_AWS_ECS_OBSERVER_SQS_QUEUE_NAME environment "
"variable on your worker to point to the deployed queue."
),
self.queue_name,
self.queue_region or "default",
)
return
raise
track_record: deque[bool] = deque(
[True] * SQS_CONSECUTIVE_FAILURES, maxlen=SQS_CONSECUTIVE_FAILURES
)
failures: deque[tuple[Exception, TracebackType | None]] = deque(
maxlen=SQS_MEMORY
)
backoff_count = 0
while True:
try:
messages = await sqs_client.receive_message(
QueueUrl=queue_url,
MaxNumberOfMessages=10,
WaitTimeSeconds=20,
)
for message in messages.get("Messages", []):
if not (receipt_handle := message.get("ReceiptHandle")):
continue
yield message
await sqs_client.delete_message(
QueueUrl=queue_url,
ReceiptHandle=receipt_handle,
)
backoff_count = 0
except Exception as e:
track_record.append(False)
failures.append((e, e.__traceback__))
logger.debug("Failed to receive messages from SQS", exc_info=e)
if not any(track_record):
backoff_count += 1
if backoff_count > SQS_MAX_BACKOFF_ATTEMPTS:
logger.error(
"SQS polling exceeded maximum backoff attempts (%s). "
"Last %s errors: %s",
SQS_MAX_BACKOFF_ATTEMPTS,
len(failures),
[str(e) for e, _ in failures],
)
raise RuntimeError(
f"SQS polling failed after {SQS_MAX_BACKOFF_ATTEMPTS} backoff attempts"
)
track_record.extend([True] * SQS_CONSECUTIVE_FAILURES)
failures.clear()
backoff_seconds = SQS_BACKOFF * 2**backoff_count
logger.debug(
"Backing off due to consecutive errors, using increased interval of %s seconds.",
backoff_seconds,
)
await asyncio.sleep(backoff_seconds)
|
SqsSubscriber
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-dbt/prefect_dbt/cloud/credentials.py
|
{
"start": 330,
"end": 6315
}
|
class ____(CredentialsBlock):
"""
Credentials block for credential use across dbt Cloud tasks and flows.
Attributes:
api_key (SecretStr): API key to authenticate with the dbt Cloud
administrative API. Refer to the [Authentication docs](
https://docs.getdbt.com/dbt-cloud/api-v2#section/Authentication)
for retrieving the API key.
account_id (int): ID of dbt Cloud account with which to interact.
domain (Optional[str]): Domain at which the dbt Cloud API is hosted.
Examples:
Load stored dbt Cloud credentials:
```python
from prefect_dbt.cloud import DbtCloudCredentials
dbt_cloud_credentials = DbtCloudCredentials.load("BLOCK_NAME")
```
Use DbtCloudCredentials instance to trigger a job run:
```python
from prefect_dbt.cloud import DbtCloudCredentials
credentials = DbtCloudCredentials(api_key="my_api_key", account_id=123456789)
    async with credentials.get_administrative_client() as client:
client.trigger_job_run(job_id=1)
```
Load saved dbt Cloud credentials within a flow:
```python
from prefect import flow
from prefect_dbt.cloud import DbtCloudCredentials
from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run
@flow
def trigger_dbt_cloud_job_run_flow():
credentials = DbtCloudCredentials.load("my-dbt-credentials")
trigger_dbt_cloud_job_run(dbt_cloud_credentials=credentials, job_id=1)
trigger_dbt_cloud_job_run_flow()
```
"""
_block_type_name = "dbt Cloud Credentials"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
api_key: SecretStr = Field(
default=...,
title="API Key",
description="A dbt Cloud API key to use for authentication.",
)
account_id: int = Field(
default=..., title="Account ID", description="The ID of your dbt Cloud account."
)
domain: str = Field(
default="cloud.getdbt.com",
description="The base domain of your dbt Cloud instance.",
)
def get_administrative_client(self) -> DbtCloudAdministrativeClient:
"""
Returns a newly instantiated client for working with the dbt Cloud
administrative API.
Returns:
An authenticated dbt Cloud administrative API client.
"""
return DbtCloudAdministrativeClient(
api_key=self.api_key.get_secret_value(),
account_id=self.account_id,
domain=self.domain,
)
def get_metadata_client(self) -> DbtCloudMetadataClient:
"""
Returns a newly instantiated client for working with the dbt Cloud
metadata API.
Example:
Sending queries via the returned metadata client:
```python
from prefect_dbt import DbtCloudCredentials
credentials_block = DbtCloudCredentials.load("test-account")
metadata_client = credentials_block.get_metadata_client()
query = \"\"\"
{
metrics(jobId: 123) {
uniqueId
name
packageName
tags
label
runId
description
type
sql
timestamp
timeGrains
dimensions
meta
resourceType
filters {
field
operator
value
}
model {
name
}
}
}
\"\"\"
metadata_client.query(query)
# Result:
# {
# "data": {
# "metrics": [
# {
# "uniqueId": "metric.tpch.total_revenue",
# "name": "total_revenue",
# "packageName": "tpch",
# "tags": [],
# "label": "Total Revenue ($)",
# "runId": 108952046,
# "description": "",
# "type": "sum",
# "sql": "net_item_sales_amount",
# "timestamp": "order_date",
# "timeGrains": ["day", "week", "month"],
# "dimensions": ["status_code", "priority_code"],
# "meta": {},
# "resourceType": "metric",
# "filters": [],
# "model": { "name": "fct_orders" }
# }
# ]
# }
# }
```
Returns:
An authenticated dbt Cloud metadata API client.
"""
return DbtCloudMetadataClient(
api_key=self.api_key.get_secret_value(),
domain=f"metadata.{self.domain}",
)
def get_client(
self, client_type: Literal["administrative", "metadata"]
) -> Union[DbtCloudAdministrativeClient, DbtCloudMetadataClient]:
"""
Returns a newly instantiated client for working with the dbt Cloud API.
Args:
client_type: Type of client to return. Accepts either 'administrative'
or 'metadata'.
Returns:
The authenticated client of the requested type.
"""
get_client_method = getattr(self, f"get_{client_type}_client", None)
if get_client_method is None:
raise ValueError(f"'{client_type}' is not a supported client type.")
return get_client_method()
|
DbtCloudCredentials
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 87817,
"end": 88258
}
|
class ____(sgqlc.types.Enum):
"""Possible directions in which to order a list of repository
migrations when provided an `orderBy` argument.
Enumeration Choices:
* `ASC`: Specifies an ascending order for a given `orderBy`
argument.
* `DESC`: Specifies a descending order for a given `orderBy`
argument.
"""
__schema__ = github_schema
__choices__ = ("ASC", "DESC")
|
RepositoryMigrationOrderDirection
|
python
|
protocolbuffers__protobuf
|
python/google/protobuf/descriptor.py
|
{
"start": 36600,
"end": 39594
}
|
class ____(_NestedDescriptorBase):
"""Descriptor for a service.
Attributes:
name (str): Name of the service.
full_name (str): Full name of the service, including package name.
index (int): 0-indexed index giving the order that this services definition
appears within the .proto file.
methods (list[MethodDescriptor]): List of methods provided by this service.
methods_by_name (dict(str, MethodDescriptor)): Same
      :class:`MethodDescriptor` objects as in :attr:`methods`, but
indexed by "name" attribute in each :class:`MethodDescriptor`.
options (descriptor_pb2.ServiceOptions): Service options message or None to
use default service options.
file (FileDescriptor): Reference to file info.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.ServiceDescriptor
def __new__(
cls,
name=None,
full_name=None,
index=None,
methods=None,
options=None,
serialized_options=None,
file=None, # pylint: disable=redefined-builtin
serialized_start=None,
serialized_end=None,
create_key=None,
):
_message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access
return _message.default_pool.FindServiceByName(full_name)
def __init__(
self,
name,
full_name,
index,
methods,
options=None,
serialized_options=None,
file=None, # pylint: disable=redefined-builtin
serialized_start=None,
serialized_end=None,
create_key=None,
):
if create_key is not _internal_create_key:
_Deprecated('create function ServiceDescriptor()')
super(ServiceDescriptor, self).__init__(
options,
'ServiceOptions',
name,
full_name,
file,
None,
serialized_start=serialized_start,
serialized_end=serialized_end,
serialized_options=serialized_options,
)
self.index = index
self.methods = methods
self.methods_by_name = dict((m.name, m) for m in methods)
# Set the containing service for each method in this service.
for method in self.methods:
method.file = self.file
method.containing_service = self
@property
def _parent(self):
return self.file
def FindMethodByName(self, name):
"""Searches for the specified method, and returns its descriptor.
Args:
name (str): Name of the method.
Returns:
MethodDescriptor: The descriptor for the requested method.
Raises:
KeyError: if the method cannot be found in the service.
"""
return self.methods_by_name[name]
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.ServiceDescriptorProto.
Args:
proto (descriptor_pb2.ServiceDescriptorProto): An empty descriptor proto.
"""
# This function is overridden to give a better doc comment.
super(ServiceDescriptor, self).CopyToProto(proto)
|
ServiceDescriptor
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
|
{
"start": 3824,
"end": 3989
}
|
class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "RunSuccessEvent"
|
GrapheneRunSuccessEvent
|
python
|
readthedocs__readthedocs.org
|
readthedocs/core/models.py
|
{
"start": 394,
"end": 1554
}
|
class ____(TimeStampedModel):
"""Additional information about a User."""
user = AutoOneToOneField(
User,
verbose_name=_("User"),
related_name="profile",
on_delete=models.CASCADE,
)
# Shown on the users profile
homepage = models.CharField(_("Homepage"), max_length=100, blank=True)
# User configuration options
allow_ads = models.BooleanField(
_("See paid advertising"),
help_text=_("If unchecked, you will still see community ads."),
default=True,
)
mailing_list = models.BooleanField(
default=False,
help_text=_("Subscribe to our mailing list, and get helpful onboarding suggestions."),
)
# Internal tracking
whitelisted = models.BooleanField(_("Whitelisted"), default=False)
banned = models.BooleanField(_("Banned"), default=False)
# Model history
history = ExtraHistoricalRecords()
def get_absolute_url(self):
return reverse(
"profiles_profile_detail",
kwargs={"username": self.user.username},
)
register(User, records_class=ExtraHistoricalRecords, app=__package__)
|
UserProfile
|
python
|
doocs__leetcode
|
solution/1100-1199/1150.Check If a Number Is Majority Element in a Sorted Array/Solution.py
|
{
"start": 0,
"end": 216
}
|
class ____:
def isMajorityElement(self, nums: List[int], target: int) -> bool:
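        # Binary search for the first and last positions of target; it is a
        # majority element iff its count exceeds half the array length.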
left = bisect_left(nums, target)
right = bisect_right(nums, target)
return right - left > len(nums) // 2
|
Solution
|
python
|
django__django
|
django/urls/resolvers.py
|
{
"start": 4130,
"end": 5199
}
|
class ____:
def __get__(self, instance, cls=None):
"""
Return a compiled regular expression based on the active language.
"""
if instance is None:
return self
# As a performance optimization, if the given regex string is a regular
# string (not a lazily-translated string proxy), compile it once and
# avoid per-language compilation.
pattern = instance._regex
if isinstance(pattern, str):
instance.__dict__["regex"] = self._compile(pattern)
return instance.__dict__["regex"]
language_code = get_language()
if language_code not in instance._regex_dict:
instance._regex_dict[language_code] = self._compile(str(pattern))
return instance._regex_dict[language_code]
def _compile(self, regex):
try:
return re.compile(regex)
except re.error as e:
raise ImproperlyConfigured(
f'"{regex}" is not a valid regular expression: {e}'
) from e
|
LocaleRegexDescriptor
|
python
|
huggingface__transformers
|
src/transformers/models/bert_generation/modeling_bert_generation.py
|
{
"start": 25988,
"end": 26468
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
logits = self.decoder(hidden_states)
return logits
@auto_docstring(
custom_intro="""
BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.
"""
)
|
BertGenerationOnlyLMHead
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/web_search_result_block_param.py
|
{
"start": 253,
"end": 476
}
|
class ____(TypedDict, total=False):
encrypted_content: Required[str]
title: Required[str]
type: Required[Literal["web_search_result"]]
url: Required[str]
page_age: Optional[str]
|
WebSearchResultBlockParam
|
python
|
joke2k__faker
|
faker/providers/phone_number/el_GR/__init__.py
|
{
"start": 49,
"end": 523
}
|
class ____(PhoneNumberProvider):
formats = (
"69########",
"69## ######",
"69## ### ###",
"210#######",
"210 #######",
"210 ### ####",
"2##0######",
"2##0 ######",
"2##0 ### ###",
"2###0#####",
"2###0 ## ###",
"(+30) 69## ######",
"+30 69## ######",
"+3069########",
"(+30) 2### ######",
"+30 2### ######",
"+302#########",
)
|
Provider
|
python
|
huggingface__transformers
|
src/transformers/models/xlm_roberta/modular_xlm_roberta.py
|
{
"start": 1652,
"end": 1830
}
|
class ____(RobertaModel):
pass
@auto_docstring(
custom_intro="""
XLM-RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.
"""
)
|
XLMRobertaModel
|
python
|
PyCQA__pydocstyle
|
src/pydocstyle/parser.py
|
{
"start": 599,
"end": 976
}
|
class ____(ParseError):
def __init__(self, token, expected_kind):
self.token = token
self.expected_kind = expected_kind
def __str__(self):
return "Unexpected token {}, expected {}".format(
self.token, self.expected_kind
)
def humanize(string):
return re(r'(.)([A-Z]+)').sub(r'\1 \2', string).lower()
|
UnexpectedTokenError
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/single_string_slots.py
|
{
"start": 390,
"end": 547
}
|
class ____:
__slots__: tuple[str, ...] = ("bar",)
def __init__(self, bar):
self.bar = bar
# This is a type error, out of scope for the rule
|
Foo
|
python
|
MongoEngine__mongoengine
|
tests/fixtures.py
|
{
"start": 1465,
"end": 1505
}
|
class ____:
name = StringField()
|
Mixin
|
python
|
wandb__wandb
|
wandb/sdk/artifacts/_generated/fetch_linked_artifacts.py
|
{
"start": 284,
"end": 378
}
|
class ____(GQLResult):
artifact: Optional[FetchLinkedArtifactsArtifact]
|
FetchLinkedArtifacts
|
python
|
google__jax
|
docs/autodidax.py
|
{
"start": 32311,
"end": 32434
}
|
class ____(NamedTuple):
primitive: Primitive
inputs: list[Atom]
params: dict[str, Any]
out_binders: list[Var]
|
JaxprEqn
|
python
|
tox-dev__tox
|
src/tox/tox_env/python/virtual_env/package/pyproject.py
|
{
"start": 17281,
"end": 17506
}
|
class ____(Pep517VenvPackager, VirtualEnv):
"""local file system python virtual environment via the virtualenv package."""
@staticmethod
def id() -> str:
return "virtualenv-pep-517"
|
Pep517VirtualEnvPackager
|
python
|
realpython__materials
|
python-isinstance/balls_v2.py
|
{
"start": 208,
"end": 481
}
|
class ____(Ball):
def __init__(self, color, number):
super().__init__(color, shape="sphere")
self.number = number
def get_state(self):
print(
f"Color = {self.color}, Number = {self.number}, Shape = {self.shape}"
)
|
PoolBall
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/colors.py
|
{
"start": 97231,
"end": 108376
}
|
class ____(Normalize):
def __init__(self, vcenter=0, halfrange=None, clip=False):
"""
Normalize symmetrical data around a center (0 by default).
Unlike `TwoSlopeNorm`, `CenteredNorm` applies an equal rate of change
around the center.
Useful when mapping symmetrical data around a conceptual center
e.g., data that range from -2 to 4, with 0 as the midpoint, and
with equal rates of change around that midpoint.
Parameters
----------
vcenter : float, default: 0
The data value that defines ``0.5`` in the normalization.
halfrange : float, optional
The range of data values that defines a range of ``0.5`` in the
normalization, so that *vcenter* - *halfrange* is ``0.0`` and
*vcenter* + *halfrange* is ``1.0`` in the normalization.
Defaults to the largest absolute difference to *vcenter* for
the values in the dataset.
clip : bool, default: False
Determines the behavior for mapping values outside the range
``[vmin, vmax]``.
If clipping is off, values outside the range ``[vmin, vmax]`` are
also transformed, resulting in values outside ``[0, 1]``. This
behavior is usually desirable, as colormaps can mark these *under*
and *over* values with specific colors.
If clipping is on, values below *vmin* are mapped to 0 and values
above *vmax* are mapped to 1. Such values become indistinguishable
from regular boundary values, which may cause misinterpretation of
the data.
Examples
--------
This maps data values -2 to 0.25, 0 to 0.5, and 4 to 1.0
(assuming equal rates of change above and below 0.0):
>>> import matplotlib.colors as mcolors
>>> norm = mcolors.CenteredNorm(halfrange=4.0)
>>> data = [-2., 0., 4.]
>>> norm(data)
array([0.25, 0.5 , 1. ])
"""
super().__init__(vmin=None, vmax=None, clip=clip)
self._vcenter = vcenter
# calling the halfrange setter to set vmin and vmax
self.halfrange = halfrange
def autoscale(self, A):
"""
Set *halfrange* to ``max(abs(A-vcenter))``, then set *vmin* and *vmax*.
"""
A = np.asanyarray(A)
self.halfrange = max(self._vcenter-A.min(),
A.max()-self._vcenter)
def autoscale_None(self, A):
"""Set *vmin* and *vmax*."""
A = np.asanyarray(A)
if self.halfrange is None and A.size:
self.autoscale(A)
@property
def vmin(self):
return self._vmin
@vmin.setter
def vmin(self, value):
value = _sanitize_extrema(value)
if value != self._vmin:
self._vmin = value
self._vmax = 2*self.vcenter - value
self._changed()
@property
def vmax(self):
return self._vmax
@vmax.setter
def vmax(self, value):
value = _sanitize_extrema(value)
if value != self._vmax:
self._vmax = value
self._vmin = 2*self.vcenter - value
self._changed()
@property
def vcenter(self):
return self._vcenter
@vcenter.setter
def vcenter(self, vcenter):
if vcenter != self._vcenter:
self._vcenter = vcenter
# Trigger an update of the vmin/vmax values through the setter
self.halfrange = self.halfrange
self._changed()
@property
def halfrange(self):
if self.vmin is None or self.vmax is None:
return None
return (self.vmax - self.vmin) / 2
@halfrange.setter
def halfrange(self, halfrange):
if halfrange is None:
self.vmin = None
self.vmax = None
else:
self.vmin = self.vcenter - abs(halfrange)
self.vmax = self.vcenter + abs(halfrange)
def make_norm_from_scale(scale_cls, base_norm_cls=None, *, init=None):
"""
Decorator for building a `.Normalize` subclass from a `~.scale.ScaleBase`
subclass.
After ::
@make_norm_from_scale(scale_cls)
class norm_cls(Normalize):
...
*norm_cls* is filled with methods so that normalization computations are
forwarded to *scale_cls* (i.e., *scale_cls* is the scale that would be used
for the colorbar of a mappable normalized with *norm_cls*).
If *init* is not passed, then the constructor signature of *norm_cls*
will be ``norm_cls(vmin=None, vmax=None, clip=False)``; these three
parameters will be forwarded to the base class (``Normalize.__init__``),
and a *scale_cls* object will be initialized with no arguments (other than
a dummy axis).
If the *scale_cls* constructor takes additional parameters, then *init*
should be passed to `make_norm_from_scale`. It is a callable which is
*only* used for its signature. First, this signature will become the
signature of *norm_cls*. Second, the *norm_cls* constructor will bind the
parameters passed to it using this signature, extract the bound *vmin*,
*vmax*, and *clip* values, pass those to ``Normalize.__init__``, and
forward the remaining bound values (including any defaults defined by the
signature) to the *scale_cls* constructor.
"""
if base_norm_cls is None:
return functools.partial(make_norm_from_scale, scale_cls, init=init)
if isinstance(scale_cls, functools.partial):
scale_args = scale_cls.args
scale_kwargs_items = tuple(scale_cls.keywords.items())
scale_cls = scale_cls.func
else:
scale_args = scale_kwargs_items = ()
if init is None:
def init(vmin=None, vmax=None, clip=False): pass
return _make_norm_from_scale(
scale_cls, scale_args, scale_kwargs_items,
base_norm_cls, inspect.signature(init))
@functools.cache
def _make_norm_from_scale(
scale_cls, scale_args, scale_kwargs_items,
base_norm_cls, bound_init_signature,
):
"""
Helper for `make_norm_from_scale`.
This function is split out to enable caching (in particular so that
different unpickles reuse the same class). In order to do so,
- ``functools.partial`` *scale_cls* is expanded into ``func, args, kwargs``
to allow memoizing returned norms (partial instances always compare
unequal, but we can check identity based on ``func, args, kwargs``;
- *init* is replaced by *init_signature*, as signatures are picklable,
      unlike arbitrary lambdas.
"""
class ScaleNorm(base_norm_cls):
def __reduce__(self):
cls = type(self)
# If the class is toplevel-accessible, it is possible to directly
# pickle it "by name". This is required to support norm classes
# defined at a module's toplevel, as the inner base_norm_cls is
# otherwise unpicklable (as it gets shadowed by the generated norm
# class). If either import or attribute access fails, fall back to
# the general path.
try:
if cls is getattr(importlib.import_module(cls.__module__),
cls.__qualname__):
return (_create_empty_object_of_class, (cls,), vars(self))
except (ImportError, AttributeError):
pass
return (_picklable_norm_constructor,
(scale_cls, scale_args, scale_kwargs_items,
base_norm_cls, bound_init_signature),
vars(self))
def __init__(self, *args, **kwargs):
ba = bound_init_signature.bind(*args, **kwargs)
ba.apply_defaults()
super().__init__(
**{k: ba.arguments.pop(k) for k in ["vmin", "vmax", "clip"]})
self._scale = functools.partial(
scale_cls, *scale_args, **dict(scale_kwargs_items))(
axis=None, **ba.arguments)
self._trf = self._scale.get_transform()
__init__.__signature__ = bound_init_signature.replace(parameters=[
inspect.Parameter("self", inspect.Parameter.POSITIONAL_OR_KEYWORD),
*bound_init_signature.parameters.values()])
def __call__(self, value, clip=None):
value, is_scalar = self.process_value(value)
if self.vmin is None or self.vmax is None:
self.autoscale_None(value)
if self.vmin > self.vmax:
raise ValueError("vmin must be less or equal to vmax")
if self.vmin == self.vmax:
return np.full_like(value, 0)
if clip is None:
clip = self.clip
if clip:
value = np.clip(value, self.vmin, self.vmax)
t_value = self._trf.transform(value).reshape(np.shape(value))
t_vmin, t_vmax = self._trf.transform([self.vmin, self.vmax])
if not np.isfinite([t_vmin, t_vmax]).all():
raise ValueError("Invalid vmin or vmax")
t_value -= t_vmin
t_value /= (t_vmax - t_vmin)
t_value = np.ma.masked_invalid(t_value, copy=False)
return t_value[0] if is_scalar else t_value
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
if self.vmin > self.vmax:
raise ValueError("vmin must be less or equal to vmax")
t_vmin, t_vmax = self._trf.transform([self.vmin, self.vmax])
if not np.isfinite([t_vmin, t_vmax]).all():
raise ValueError("Invalid vmin or vmax")
value, is_scalar = self.process_value(value)
rescaled = value * (t_vmax - t_vmin)
rescaled += t_vmin
value = (self._trf
.inverted()
.transform(rescaled)
.reshape(np.shape(value)))
return value[0] if is_scalar else value
def autoscale_None(self, A):
# i.e. A[np.isfinite(...)], but also for non-array A's
in_trf_domain = np.extract(np.isfinite(self._trf.transform(A)), A)
if in_trf_domain.size == 0:
in_trf_domain = np.ma.masked
return super().autoscale_None(in_trf_domain)
if base_norm_cls is Normalize:
ScaleNorm.__name__ = f"{scale_cls.__name__}Norm"
ScaleNorm.__qualname__ = f"{scale_cls.__qualname__}Norm"
else:
ScaleNorm.__name__ = base_norm_cls.__name__
ScaleNorm.__qualname__ = base_norm_cls.__qualname__
ScaleNorm.__module__ = base_norm_cls.__module__
ScaleNorm.__doc__ = base_norm_cls.__doc__
return ScaleNorm
def _create_empty_object_of_class(cls):
return cls.__new__(cls)
def _picklable_norm_constructor(*args):
return _create_empty_object_of_class(_make_norm_from_scale(*args))
@make_norm_from_scale(
scale.FuncScale,
init=lambda functions, vmin=None, vmax=None, clip=False: None)
|
CenteredNorm
|
python
|
ray-project__ray
|
python/ray/tune/tests/test_trial_scheduler.py
|
{
"start": 8370,
"end": 10403
}
|
class ____:
def __init__(self, scheduler):
self._scheduler_alg = scheduler
self.search_alg = None
self.trials = []
def process_action(self, trial, action):
if action == TrialScheduler.CONTINUE:
pass
elif action == TrialScheduler.PAUSE:
self.pause_trial(trial)
elif action == TrialScheduler.STOP:
self.stop_trial(trial)
def pause_trial(self, trial, should_checkpoint: bool = True):
if should_checkpoint:
self._schedule_trial_save(trial, None)
trial.status = Trial.PAUSED
def stop_trial(self, trial, error=False, error_msg=None):
if trial.status in [Trial.ERROR, Trial.TERMINATED]:
return
elif trial.status in [Trial.PENDING, Trial.PAUSED]:
self._scheduler_alg.on_trial_remove(self, trial)
else:
self._scheduler_alg.on_trial_complete(self, trial, result(100, 10))
trial.status = Trial.ERROR if error else Trial.TERMINATED
def add_trial(self, trial):
self.trials.append(trial)
self._scheduler_alg.on_trial_add(self, trial)
def get_trials(self):
return self.trials
def get_live_trials(self):
return {t for t in self.trials if t.status != Trial.TERMINATED}
def _launch_trial(self, trial):
trial.status = Trial.RUNNING
def _set_trial_status(self, trial, status):
trial.status = status
def start_trial(self, trial, checkpoint_obj=None, train=True):
trial.logger_running = True
if checkpoint_obj:
trial.restored_checkpoint = checkpoint_obj.dir_or_data
trial.status = Trial.RUNNING
return True
def _schedule_trial_restore(self, trial):
pass
def _schedule_trial_save(self, trial, result=None):
return _FakeFutureResult(
_TrainingResult(
checkpoint=Checkpoint.from_directory(trial.trainable_name),
metrics=result,
)
)
|
_MockTrialRunner
|
python
|
django__django
|
tests/template_tests/filter_tests/test_truncatechars_html.py
|
{
"start": 103,
"end": 1495
}
|
class ____(SimpleTestCase):
def test_truncate_zero(self):
self.assertEqual(
truncatechars_html(
'<p>one <a href="#">two - three <br>four</a> five</p>', 0
),
"",
)
def test_truncate(self):
self.assertEqual(
truncatechars_html(
'<p>one <a href="#">two - three <br>four</a> five</p>', 4
),
"<p>one…</p>",
)
def test_truncate2(self):
self.assertEqual(
truncatechars_html(
'<p>one <a href="#">two - three <br>four</a> five</p>', 9
),
'<p>one <a href="#">two …</a></p>',
)
def test_truncate3(self):
self.assertEqual(
truncatechars_html(
'<p>one <a href="#">two - three <br>four</a> five</p>', 100
),
'<p>one <a href="#">two - three <br>four</a> five</p>',
)
def test_truncate_unicode(self):
self.assertEqual(
truncatechars_html("<b>\xc5ngstr\xf6m</b> was here", 3), "<b>\xc5n…</b>"
)
def test_truncate_something(self):
self.assertEqual(truncatechars_html("a<b>b</b>c", 3), "a<b>b</b>c")
def test_invalid_arg(self):
html = '<p>one <a href="#">two - three <br>four</a> five</p>'
self.assertEqual(truncatechars_html(html, "a"), html)
|
FunctionTests
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/eq_without_hash.py
|
{
"start": 507,
"end": 582
}
|
class ____:
for _ in ...:
def __eq__(self, other): ...
|
MaybeEqFor
|
python
|
huggingface__transformers
|
src/transformers/models/xlm/modeling_xlm.py
|
{
"start": 8918,
"end": 11712
}
|
class ____(nn.Module):
"""
Compute SQuAD 2.0 answer class from classification and start tokens hidden states.
Args:
config ([`XLMConfig`]):
The config used by the model, will be used to grab the `hidden_size` of the model.
"""
def __init__(self, config: XLMConfig):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
cls_index: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):
The hidden states of the first tokens for the labeled span.
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
The position of the first token for the labeled span.
cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Position of the CLS token for each sentence in the batch. If `None`, takes the last token.
<Tip>
One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides
`start_states`.
</Tip>
Returns:
`torch.FloatTensor`: The SQuAD 2.0 answer class.
"""
# No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
hsz = hidden_states.shape[-1]
assert start_states is not None or start_positions is not None, (
"One of start_states, start_positions should be not None"
)
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
|
XLMPoolerAnswerClass
|
python
|
mamba-org__mamba
|
docs/source/tools/mermaid_inheritance.py
|
{
"start": 1657,
"end": 4464
}
|
class ____(InheritanceGraph):
"""
Given a list of classes, determines the set of classes that they inherit
from all the way to the root "object", and then is able to generate a
mermaid graph from them.
"""
# These are the default attrs
default_graph_attrs = {}
# 'rankdir': 'LR',
# 'size': '"8.0, 12.0"',
# 'bgcolor': 'transparent',
# }
default_node_attrs = {}
# 'shape': 'box',
# 'fontsize': 10,
# 'height': 0.25,
# 'fontname': '"Vera Sans, DejaVu Sans, Liberation Sans, '
# 'Arial, Helvetica, sans"',
# 'style': '"setlinewidth(0.5),filled"',
# 'fillcolor': 'white',
# }
default_edge_attrs = {}
# 'arrowsize': 0.5,
# 'style': '"setlinewidth(0.5)"',
# }
def _format_node_attrs(self, attrs: dict) -> str:
# return ','.join(['%s=%s' % x for x in sorted(attrs.items())])
return ""
def _format_graph_attrs(self, attrs: dict) -> str:
# return ''.join(['%s=%s;\n' % x for x in sorted(attrs.items())])
return ""
def generate_dot(
self,
name: str,
urls: dict = {}, # noqa
env: BuildEnvironment = None,
graph_attrs: dict = {}, # noqa
node_attrs: dict = {}, # noqa
edge_attrs: dict = {}, # noqa
) -> str:
"""Generate a mermaid graph from the classes that were passed in
to __init__.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
*graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
key/value pairs to pass on as graphviz properties.
"""
# g_attrs = self.default_graph_attrs.copy()
# n_attrs = self.default_node_attrs.copy()
# e_attrs = self.default_edge_attrs.copy()
# g_attrs.update(graph_attrs)
# n_attrs.update(node_attrs)
# e_attrs.update(edge_attrs)
# if env:
# g_attrs.update(env.config.inheritance_graph_attrs)
# n_attrs.update(env.config.inheritance_node_attrs)
# e_attrs.update(env.config.inheritance_edge_attrs)
res = [] # type: List[str]
res.append("classDiagram\n")
for name, fullname, bases, tooltip in sorted(self.class_info):
# Write the node
res.append(f" class {name!s}\n")
if fullname in urls:
res.append(
' link {!s} "./{!s}" {!s}\n'.format(
name, urls[fullname], tooltip or f'"{name}"'
)
)
# Write the edges
for base_name in bases:
res.append(f" {base_name!s} <|-- {name!s}\n")
return "".join(res)
|
MermaidGraph
|
python
|
PyCQA__pylint
|
tests/functional/u/used/used_before_assignment_typing.py
|
{
"start": 3781,
"end": 4237
}
|
class ____: # pylint: disable=too-few-public-methods
"""Class to test conditional imports guarded by TYPE_CHECKING two levels
up then used in function annotation. See https://github.com/pylint-dev/pylint/issues/7539"""
def is_close(self, comparator: math.isclose, first, second): # <3.14:[used-before-assignment]
"""Conditional imports guarded are only valid for variable annotations."""
comparator(first, second)
|
MyFourthClass
|
python
|
openai__openai-python
|
src/openai/types/eval_create_params.py
|
{
"start": 6242,
"end": 6371
}
|
class ____(PythonGraderParam, total=False):
pass_threshold: float
"""The threshold for the score."""
|
TestingCriterionPython
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/descriptor_props.py
|
{
"start": 32181,
"end": 32648
}
|
class ____(CompositeProperty[_T], _DeclarativeMapped[_T]):
"""Declarative-compatible front-end for the :class:`.CompositeProperty`
class.
Public constructor is the :func:`_orm.composite` function.
.. versionchanged:: 2.0 Added :class:`_orm.Composite` as a Declarative
compatible subclass of :class:`_orm.CompositeProperty`.
.. seealso::
:ref:`mapper_composite`
"""
inherit_cache = True
""":meta private:"""
|
Composite
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/dicts.py
|
{
"start": 40341,
"end": 43573
}
|
class ____(ConstDictVariable):
def __init__(
self,
items: dict[VariableTracker, VariableTracker],
user_cls: type,
default_factory: Optional[VariableTracker] = None,
**kwargs: Any,
) -> None:
super().__init__(items, user_cls, **kwargs)
assert user_cls is collections.defaultdict
if default_factory is None:
default_factory = ConstantVariable.create(None)
self.default_factory = default_factory
def is_python_constant(self) -> bool:
# Return false for unsupported defaults. This ensures that a bad handler
# path is not taken in BuiltinVariable for getitem.
if self.default_factory not in [list, tuple, dict] and not self.items:
return False
return super().is_python_constant()
def debug_repr(self) -> str:
assert self.default_factory is not None
return (
f"defaultdict({self.default_factory.debug_repr()}, {super().debug_repr()})"
)
@staticmethod
def is_supported_arg(arg: VariableTracker) -> bool:
if isinstance(arg, variables.BuiltinVariable):
return arg.fn in (list, tuple, dict, set)
else:
return isinstance(
arg,
(
variables.functions.BaseUserFunctionVariable,
variables.functions.PolyfilledFunctionVariable,
),
)
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
if name == "__getitem__":
if len(args) != 1:
raise_args_mismatch(tx, name, "1 args", f"{len(args)} args")
if args[0] in self:
return self.getitem_const(tx, args[0])
else:
if (
istype(self.default_factory, ConstantVariable)
and self.default_factory.value is None
):
raise_observed_exception(KeyError, tx, args=[args[0]])
else:
default_var = self.default_factory.call_function(tx, [], {})
super().call_method(
tx, "__setitem__", [args[0], default_var], kwargs
)
return default_var
else:
return super().call_method(tx, name, args, kwargs)
def reconstruct(self, codegen: "PyCodegen") -> None:
# emit `defaultdict(default_factory, new_dict)`
codegen.add_push_null(
lambda: codegen.extend_output(
[
codegen.create_load_python_module(collections),
codegen.create_load_attr("defaultdict"),
]
)
)
codegen(self.default_factory)
self.reconstruct_kvs_into_new_dict(codegen)
codegen.extend_output(create_call_function(2, False))
# TODO: Implementing this via inheritance rather than composition is a
# footgun, because self method calls in dict will route back to the set
# implementation, which is almost assuredly wrong
|
DefaultDictVariable
|
python
|
ray-project__ray
|
python/ray/serve/_private/router.py
|
{
"start": 2028,
"end": 20301
}
|
class ____:
"""Manages metrics for the router."""
PUSH_METRICS_TO_CONTROLLER_TASK_NAME = "push_metrics_to_controller"
RECORD_METRICS_TASK_NAME = "record_metrics"
def __init__(
self,
deployment_id: DeploymentID,
handle_id: str,
self_actor_id: str,
handle_source: DeploymentHandleSource,
controller_handle: ActorHandle,
router_requests_counter: metrics.Counter,
queued_requests_gauge: metrics.Gauge,
running_requests_gauge: metrics.Gauge,
event_loop: asyncio.BaseEventLoop,
):
self._handle_id = handle_id
self._deployment_id = deployment_id
self._self_actor_id = self_actor_id
self._handle_source = handle_source
self._controller_handle = controller_handle
# Exported metrics
self.num_router_requests = router_requests_counter
self.num_router_requests.set_default_tags(
{
"deployment": deployment_id.name,
"application": deployment_id.app_name,
"handle": self._handle_id,
"actor_id": self._self_actor_id,
}
)
self.num_queued_requests = 0
self.num_queued_requests_gauge = queued_requests_gauge
self.num_queued_requests_gauge.set_default_tags(
{
"deployment": deployment_id.name,
"application": deployment_id.app_name,
"handle": self._handle_id,
"actor_id": self._self_actor_id,
}
)
self.num_queued_requests_gauge.set(0)
# Track queries sent to replicas for the autoscaling algorithm.
self.num_requests_sent_to_replicas: DefaultDict[ReplicaID, int] = defaultdict(
int
)
self.num_running_requests_gauge = running_requests_gauge
self.num_running_requests_gauge.set_default_tags(
{
"deployment": deployment_id.name,
"application": deployment_id.app_name,
"handle": self._handle_id,
"actor_id": self._self_actor_id,
}
)
# We use Ray object ref callbacks to update state when tracking
# number of requests running on replicas. The callbacks will be
# called from a C++ thread into the router's async event loop,
# so non-atomic read and write operations need to be guarded by
# this thread-safe lock.
self._queries_lock = threading.Lock()
# Regularly aggregate and push autoscaling metrics to controller
self.metrics_pusher = MetricsPusher()
self.metrics_store = InMemoryMetricsStore()
# The config for the deployment this router sends requests to will be broadcast
# by the controller. That means it is not available until we get the first
# update. This includes an optional autoscaling config.
self._deployment_config: Optional[DeploymentConfig] = None
# Track whether the metrics manager has been shutdown
self._shutdown: bool = False
# If the interval is set to 0, eagerly sets all metrics.
self._cached_metrics_enabled = RAY_SERVE_METRICS_EXPORT_INTERVAL_MS != 0
self._cached_metrics_interval_s = RAY_SERVE_METRICS_EXPORT_INTERVAL_MS / 1000
if self._cached_metrics_enabled:
self._cached_num_router_requests = defaultdict(int)
def create_metrics_task():
event_loop.create_task(self._report_cached_metrics_forever())
# The constructor is called in the user thread, but it's trying to create a task on the
# event loop, which runs in the router thread. This is not thread safe, so we need to use
# call_soon_threadsafe to schedule the task creation on the event loop thread safely.
event_loop.call_soon_threadsafe(create_metrics_task)
@contextmanager
def wrap_request_assignment(self, request_meta: RequestMetadata):
max_queued_requests = (
self._deployment_config.max_queued_requests
if self._deployment_config is not None
else -1
)
if (
max_queued_requests != -1
and self.num_queued_requests >= max_queued_requests
):
# Due to the async nature of request handling, we may reject more requests
# than strictly necessary. This is more likely to happen during
# high concurrency. Here's why:
#
# When multiple requests arrive simultaneously with max_queued_requests=1:
# 1. First request increments num_queued_requests to 1
# 2. Before that request gets assigned to a replica and decrements the counter,
# we yield to the event loop
# 3. Other requests see num_queued_requests=1 and get rejected, even though
# the first request will soon free up the queue slot
#
# For example, with max_queued_requests=1 and 4 simultaneous requests:
# - Request 1 gets queued (num_queued_requests=1)
# - Requests 2,3,4 get rejected since queue appears full
# - Request 1 gets assigned and frees queue slot (num_queued_requests=0)
# - But we already rejected Request 2 which could have been queued
e = BackPressureError(
num_queued_requests=self.num_queued_requests,
max_queued_requests=max_queued_requests,
)
logger.warning(e.message)
raise e
self.inc_num_total_requests(request_meta.route)
yield
@contextmanager
def wrap_queued_request(self, is_retry: bool, num_curr_replicas: int):
"""Increment queued requests gauge and maybe push autoscaling metrics to controller."""
try:
self.inc_num_queued_requests()
# Optimization: if there are currently zero replicas for a deployment,
# push handle metric to controller to allow for fast cold start time.
# Only do this on the first attempt to route the request.
if not is_retry and self.should_send_scaled_to_zero_optimized_push(
curr_num_replicas=num_curr_replicas
):
self.push_autoscaling_metrics_to_controller()
yield
finally:
# If the request is disconnected before assignment, this coroutine
# gets cancelled by the caller and an asyncio.CancelledError is
# raised. The finally block ensures that num_queued_requests
# is correctly decremented in this case.
self.dec_num_queued_requests()
def _update_running_replicas(self, running_replicas: List[RunningReplicaInfo]):
"""Prune list of replica ids in self.num_queries_sent_to_replicas.
We want to avoid self.num_queries_sent_to_replicas from growing
in memory as the deployment upscales and downscales over time.
"""
running_replica_set = {replica.replica_id for replica in running_replicas}
with self._queries_lock:
self.num_requests_sent_to_replicas = defaultdict(
int,
{
id: self.num_requests_sent_to_replicas[id]
for id, num_queries in self.num_requests_sent_to_replicas.items()
if num_queries or id in running_replica_set
},
)
@property
def autoscaling_config(self) -> Optional[AutoscalingConfig]:
if self._deployment_config is None:
return None
return self._deployment_config.autoscaling_config
def update_deployment_config(
self, deployment_config: DeploymentConfig, curr_num_replicas: int
):
"""Update the config for the deployment this router sends requests to."""
if self._shutdown:
return
self._deployment_config = deployment_config
# Start the metrics pusher if autoscaling is enabled.
autoscaling_config = self.autoscaling_config
if autoscaling_config:
self.metrics_pusher.start()
# Optimization for autoscaling cold start time. If there are
# currently 0 replicas for the deployment, and there is at
# least one queued request on this router, then immediately
# push handle metric to the controller.
if self.should_send_scaled_to_zero_optimized_push(curr_num_replicas):
self.push_autoscaling_metrics_to_controller()
# Record number of queued + ongoing requests at regular
# intervals into the in-memory metrics store
self.metrics_pusher.register_or_update_task(
self.RECORD_METRICS_TASK_NAME,
self._add_autoscaling_metrics_point,
min(
RAY_SERVE_HANDLE_AUTOSCALING_METRIC_RECORD_INTERVAL_S,
autoscaling_config.metrics_interval_s,
),
)
# Push metrics to the controller periodically.
self.metrics_pusher.register_or_update_task(
self.PUSH_METRICS_TO_CONTROLLER_TASK_NAME,
self.push_autoscaling_metrics_to_controller,
autoscaling_config.metrics_interval_s,
)
else:
if self.metrics_pusher:
self.metrics_pusher.stop_tasks()
def _report_cached_metrics(self):
for route, count in self._cached_num_router_requests.items():
self.num_router_requests.inc(count, tags={"route": route})
self._cached_num_router_requests.clear()
self.num_queued_requests_gauge.set(self.num_queued_requests)
self.num_running_requests_gauge.set(
sum(self.num_requests_sent_to_replicas.values())
)
async def _report_cached_metrics_forever(self):
assert self._cached_metrics_interval_s > 0
consecutive_errors = 0
while True:
try:
await asyncio.sleep(self._cached_metrics_interval_s)
self._report_cached_metrics()
consecutive_errors = 0
except Exception:
logger.exception("Unexpected error reporting metrics.")
# Exponential backoff starting at 1s and capping at 10s.
backoff_time_s = min(10, 2**consecutive_errors)
consecutive_errors += 1
await asyncio.sleep(backoff_time_s)
def inc_num_total_requests(self, route: str):
if self._cached_metrics_enabled:
self._cached_num_router_requests[route] += 1
else:
self.num_router_requests.inc(tags={"route": route})
def inc_num_queued_requests(self):
self.num_queued_requests += 1
if not self._cached_metrics_enabled:
self.num_queued_requests_gauge.set(self.num_queued_requests)
def dec_num_queued_requests(self):
self.num_queued_requests -= 1
if not self._cached_metrics_enabled:
self.num_queued_requests_gauge.set(self.num_queued_requests)
def inc_num_running_requests_for_replica(self, replica_id: ReplicaID):
with self._queries_lock:
self.num_requests_sent_to_replicas[replica_id] += 1
if not self._cached_metrics_enabled:
self.num_running_requests_gauge.set(
sum(self.num_requests_sent_to_replicas.values())
)
def dec_num_running_requests_for_replica(self, replica_id: ReplicaID):
with self._queries_lock:
self.num_requests_sent_to_replicas[replica_id] -= 1
if not self._cached_metrics_enabled:
self.num_running_requests_gauge.set(
sum(self.num_requests_sent_to_replicas.values())
)
def should_send_scaled_to_zero_optimized_push(self, curr_num_replicas: int) -> bool:
return (
self.autoscaling_config is not None
and curr_num_replicas == 0
and self.num_queued_requests > 0
)
def push_autoscaling_metrics_to_controller(self):
"""Pushes queued and running request metrics to the controller.
These metrics are used by the controller for autoscaling.
"""
self._controller_handle.record_autoscaling_metrics_from_handle.remote(
self._get_metrics_report()
)
def _add_autoscaling_metrics_point(self):
"""Adds metrics point for queued and running requests at replicas.
Also prunes keys in the in memory metrics store with outdated datapoints.
┌─────────────────────────────────────────────────────────────────┐
│ Handle-based metrics collection │
├─────────────────────────────────────────────────────────────────┤
│ │
│ Client Handle Replicas │
│ ┌──────┐ ┌────────┐ ┌─────────┐ │
│ │ App │───────────>│ Handle │─────────>│ Replica │ │
│ │ │ Requests │ │ Forwards │ 1 │ │
│ └──────┘ │ Tracks │ └─────────┘ │
│ │ Queued │ │
│ │ + │ ┌─────────┐ │
│ │Running │─────────>│ Replica │ │
│ │Requests│ Forwards │ 2 │ │
│ └────────┘ └─────────┘ │
│ │ │
│ │ Push metrics │
│ └─────────────────> Controller │
│ │
└─────────────────────────────────────────────────────────────────┘
:::{note}
The long-term plan is to deprecate handle-based metrics collection in favor of
replica-based collection. Replica-based collection will become the default in a
future release. Queued requests will continue to be tracked at the handle.
:::
"""
timestamp = time.time()
self.metrics_store.add_metrics_point(
{QUEUED_REQUESTS_KEY: self.num_queued_requests}, timestamp
)
if RAY_SERVE_COLLECT_AUTOSCALING_METRICS_ON_HANDLE:
self.metrics_store.add_metrics_point(
self.num_requests_sent_to_replicas, timestamp
)
# Prevent in memory metrics store memory from growing
start_timestamp = time.time() - self.autoscaling_config.look_back_period_s
self.metrics_store.prune_keys_and_compact_data(start_timestamp)
def _get_metrics_report(self) -> HandleMetricReport:
timestamp = time.time()
running_requests = dict()
avg_running_requests = dict()
look_back_period = self.autoscaling_config.look_back_period_s
self.metrics_store.prune_keys_and_compact_data(time.time() - look_back_period)
avg_queued_requests = self.metrics_store.aggregate_avg([QUEUED_REQUESTS_KEY])[0]
if avg_queued_requests is None:
# If the queued requests timeseries is empty, we set the
# average to the current number of queued requests.
avg_queued_requests = self.num_queued_requests
# If the queued requests timeseries is empty, we set the number of data points to 1.
# This is to avoid division by zero.
num_data_points = self.metrics_store.timeseries_count(QUEUED_REQUESTS_KEY) or 1
queued_requests = self.metrics_store.data.get(
QUEUED_REQUESTS_KEY, [TimeStampedValue(timestamp, self.num_queued_requests)]
)
if RAY_SERVE_COLLECT_AUTOSCALING_METRICS_ON_HANDLE and self.autoscaling_config:
for replica_id, num_requests in self.num_requests_sent_to_replicas.items():
# Calculate avg running requests.
# NOTE (abrar): The number of data points from queued requests is often higher than
# those from running requests. This is because replica metrics are only collected
# once a replica is up, whereas queued request metrics are collected continuously
# as long as the handle is alive. To approximate the true average of ongoing requests,
# we should normalize by using the same number of data points for both queued and
# running request time series.
running_requests_sum = self.metrics_store.aggregate_sum([replica_id])[0]
if running_requests_sum is None:
# If the running requests timeseries is empty, we set the sum
# to the current number of requests.
running_requests_sum = num_requests
avg_running_requests[replica_id] = (
running_requests_sum / num_data_points
)
# Get running requests data
running_requests[replica_id] = self.metrics_store.data.get(
replica_id, [TimeStampedValue(timestamp, num_requests)]
)
handle_metric_report = HandleMetricReport(
deployment_id=self._deployment_id,
handle_id=self._handle_id,
actor_id=self._self_actor_id,
handle_source=self._handle_source,
aggregated_queued_requests=avg_queued_requests,
queued_requests=queued_requests,
aggregated_metrics={
RUNNING_REQUESTS_KEY: avg_running_requests,
},
metrics={
RUNNING_REQUESTS_KEY: running_requests,
},
timestamp=timestamp,
)
return handle_metric_report
async def shutdown(self):
"""Shutdown metrics manager gracefully."""
if self.metrics_pusher:
await self.metrics_pusher.graceful_shutdown()
self._shutdown = True
|
RouterMetricsManager
|
python
|
scipy__scipy
|
scipy/linalg/tests/test_blas.py
|
{
"start": 26635,
"end": 27054
}
|
class ____:
@parametrize_blas(fblas, "gemm", "sdcz")
def test_gemm(self, f, dtype):
assert_array_almost_equal(f(3, [3], [-4]), [[-36]])
assert_array_almost_equal(f(3, [3], [-4], 3, [5]), [-21])
if dtype in COMPLEX_DTYPES:
assert_array_almost_equal(f(3j, [3-4j], [-4]), [[-48-36j]])
assert_array_almost_equal(f(3j, [3-4j], [-4], 3, [5j]), [-48-21j])
|
TestFBLAS3Simple
|
python
|
ray-project__ray
|
python/ray/autoscaler/v2/instance_manager/cloud_providers/kuberay/cloud_provider.py
|
{
"start": 1025,
"end": 23378
}
|
class ____(ICloudInstanceProvider):
"""
This class is a thin wrapper around the Kubernetes API client. It modifies
the RayCluster resource spec on the Kubernetes API server to scale the cluster:
It launches new instances/nodes by submitting patches to the Kubernetes API
to update the RayCluster CRD.
"""
def __init__(
self,
cluster_name: str,
provider_config: Dict[str, Any],
k8s_api_client: Optional[IKubernetesHttpApiClient] = None,
):
"""
Args:
cluster_name: The name of the RayCluster resource.
provider_config: The provider config dict; must contain the namespace of the RayCluster.
k8s_api_client: The client to the Kubernetes API server.
This could be used to mock the Kubernetes API server for testing.
"""
self._cluster_name = cluster_name
self._namespace = provider_config["namespace"]
self._k8s_api_client = k8s_api_client or KubernetesHttpApiClient(
namespace=self._namespace
)
# Below are states that are cached locally.
self._requests = set()
self._launch_errors_queue = []
self._terminate_errors_queue = []
# Below are states that are fetched from the Kubernetes API server.
self._ray_cluster = None
self._cached_instances: Dict[CloudInstanceId, CloudInstance]
@dataclass
class ScaleRequest:
"""Represents a scale request that contains the current states and go-to states
for the ray cluster.
This class will be converted to patches to be submitted to the Kubernetes API
server:
- For launching new instances, it will adjust the `replicas` field in the
workerGroupSpecs.
- For terminating instances, it will adjust the `workersToDelete` field in the
workerGroupSpecs.
"""
# The desired number of workers for each node type.
desired_num_workers: Dict[NodeType, int] = field(default_factory=dict)
# The workers to delete for each node type.
workers_to_delete: Dict[NodeType, List[CloudInstanceId]] = field(
default_factory=dict
)
# The worker groups with empty workersToDelete field.
# This is needed since we will also need to clear the workersToDelete field
# for the worker groups that have finished deletes.
worker_groups_without_pending_deletes: Set[NodeType] = field(
default_factory=set
)
# The worker groups that still have workers to be deleted.
worker_groups_with_pending_deletes: Set[NodeType] = field(default_factory=set)
################################
# Interface for ICloudInstanceProvider
################################
def get_non_terminated(self) -> Dict[CloudInstanceId, CloudInstance]:
self._sync_with_api_server()
return copy.deepcopy(dict(self._cached_instances))
def terminate(self, ids: List[CloudInstanceId], request_id: str) -> None:
if request_id in self._requests:
# This request is already processed.
logger.warning(f"Request {request_id} is already processed for: {ids}")
return
self._requests.add(request_id)
logger.info("Terminating worker pods: {}".format(ids))
scale_request = self._initialize_scale_request(
to_launch={}, to_delete_instances=ids
)
if scale_request.worker_groups_with_pending_deletes:
errors_msg = (
"There are workers to be deleted from: "
f"{scale_request.worker_groups_with_pending_deletes}. "
"Waiting for them to be deleted before adding new workers "
" to be deleted"
)
logger.warning(errors_msg)
self._add_terminate_errors(
ids,
request_id,
details=errors_msg,
)
return
try:
self._submit_scale_request(scale_request)
except Exception as e:
logger.exception(f"Error terminating nodes: {scale_request}")
self._add_terminate_errors(ids, request_id, details=str(e), e=e)
def launch(self, shape: Dict[NodeType, int], request_id: str) -> None:
if request_id in self._requests:
# This request is already processed.
return
self._requests.add(request_id)
scale_request = self._initialize_scale_request(
to_launch=shape, to_delete_instances=[]
)
if scale_request.worker_groups_with_pending_deletes:
error_msg = (
"There are workers to be deleted from: "
f"{scale_request.worker_groups_with_pending_deletes}. "
"Waiting for them to be deleted before creating new workers."
)
logger.warning(error_msg)
self._add_launch_errors(
shape,
request_id,
details=error_msg,
)
return
try:
self._submit_scale_request(scale_request)
except Exception as e:
logger.exception(f"Error launching nodes: {scale_request}")
self._add_launch_errors(shape, request_id, details=str(e), e=e)
def poll_errors(self) -> List[CloudInstanceProviderError]:
errors = []
errors += self._launch_errors_queue
errors += self._terminate_errors_queue
self._launch_errors_queue = []
self._terminate_errors_queue = []
return errors
############################
# Private
############################
def _initialize_scale_request(
self, to_launch: Dict[NodeType, int], to_delete_instances: List[CloudInstanceId]
) -> "KubeRayProvider.ScaleRequest":
"""
Initialize the scale request based on the current state of the cluster and
the desired state (to launch, to delete).
Args:
to_launch: The desired number of workers to launch for each node type.
to_delete_instances: The instances to delete.
Returns:
The scale request.
"""
# Update the cached states.
self._sync_with_api_server()
ray_cluster = self.ray_cluster
cur_instances = self.instances
# Get the worker groups that have pending deletes and the worker groups that
# have finished deletes, and the set of workers included in the workersToDelete
# field of any worker group.
(
worker_groups_with_pending_deletes,
worker_groups_without_pending_deletes,
worker_to_delete_set,
) = self._get_workers_delete_info(ray_cluster, set(cur_instances.keys()))
observed_workers_dict = defaultdict(int)
for instance in cur_instances.values():
if instance.node_kind != NodeKind.WORKER:
continue
if instance.cloud_instance_id in worker_to_delete_set:
continue
observed_workers_dict[instance.node_type] += 1
# Calculate the desired number of workers by type.
num_workers_dict = defaultdict(int)
worker_groups = ray_cluster["spec"].get("workerGroupSpecs", [])
for worker_group in worker_groups:
node_type = worker_group["groupName"]
# Handle the case where users manually increase `minReplicas`
# to scale up the number of worker Pods. In this scenario,
# `replicas` will be smaller than `minReplicas`.
# num_workers_dict should account for multi-host replicas when
# `numOfHosts` is set.
num_of_hosts = worker_group.get("numOfHosts", 1)
replicas = (
max(worker_group["replicas"], worker_group["minReplicas"])
* num_of_hosts
)
# The `replicas` field in worker group specs can be updated by users at any time.
# However, users should only increase the field (manually upscaling the worker group), not decrease it,
# because downscaling the worker group requires specifying which workers to delete explicitly in the `workersToDelete` field.
# Since we don't have a way to enforce this, we need to fix unexpected decreases on the `replicas` field by using actual observations.
# For example, if the user manually decreases the `replicas` field to 0 without specifying which workers to delete,
# we should fix the `replicas` field back to the number of observed workers excluding the workers to be deleted,
# otherwise, the `replicas` field will never converge to the actual number of workers.
num_workers_dict[node_type] = max(
replicas, observed_workers_dict[node_type]
)
# Add to launch nodes.
for node_type, count in to_launch.items():
num_workers_dict[node_type] += count
to_delete_instances_by_type = defaultdict(list)
# Update the number of workers with to_delete_instances
# and group them by type.
for to_delete_id in to_delete_instances:
to_delete_instance = cur_instances.get(to_delete_id, None)
if to_delete_instance is None:
# This instance has already been deleted.
continue
if to_delete_instance.node_kind == NodeKind.HEAD:
# Not possible to delete head node.
continue
if to_delete_instance.cloud_instance_id in worker_to_delete_set:
# If the instance is already in the workersToDelete field of
# any worker group, skip it.
continue
num_workers_dict[to_delete_instance.node_type] -= 1
assert num_workers_dict[to_delete_instance.node_type] >= 0
to_delete_instances_by_type[to_delete_instance.node_type].append(
to_delete_instance
)
scale_request = KubeRayProvider.ScaleRequest(
desired_num_workers=num_workers_dict,
workers_to_delete=to_delete_instances_by_type,
worker_groups_without_pending_deletes=worker_groups_without_pending_deletes,
worker_groups_with_pending_deletes=worker_groups_with_pending_deletes,
)
return scale_request
def _submit_scale_request(
self, scale_request: "KubeRayProvider.ScaleRequest"
) -> None:
"""Submits a scale request to the Kubernetes API server.
This method will convert the scale request to patches and submit the patches
to the Kubernetes API server.
Args:
scale_request: The scale request.
Raises:
Exception: An exception is raised if the Kubernetes API server returns an
error.
"""
# Get the current ray cluster spec.
patch_payload = []
raycluster = self.ray_cluster
# Collect patches for replica counts.
for node_type, num_workers in scale_request.desired_num_workers.items():
group_index = _worker_group_index(raycluster, node_type)
group_max_replicas = _worker_group_max_replicas(raycluster, group_index)
group_num_of_hosts = _worker_group_num_of_hosts(raycluster, group_index)
# the num_workers from the scale request is multiplied by numOfHosts, so we need to divide it back.
target_replicas = num_workers // group_num_of_hosts
# Cap the replica count to maxReplicas.
if group_max_replicas is not None and group_max_replicas < target_replicas:
logger.warning(
"Autoscaler attempted to create "
+ "more than maxReplicas pods of type {}.".format(node_type)
)
target_replicas = group_max_replicas
# Check if we need to change the target count.
if target_replicas == _worker_group_replicas(raycluster, group_index):
# No patch required.
continue
# Need to patch replica count. Format the patch and add it to the payload.
patch = worker_replica_patch(group_index, target_replicas)
patch_payload.append(patch)
# Maps node_type to nodes to delete for that group.
for (
node_type,
workers_to_delete_of_type,
) in scale_request.workers_to_delete.items():
group_index = _worker_group_index(raycluster, node_type)
worker_ids_to_delete = [
worker.cloud_instance_id for worker in workers_to_delete_of_type
]
patch = worker_delete_patch(group_index, worker_ids_to_delete)
patch_payload.append(patch)
# Clear the workersToDelete field for the worker groups that have been deleted.
for node_type in scale_request.worker_groups_without_pending_deletes:
if node_type in scale_request.workers_to_delete:
# This node type is still being deleted.
continue
group_index = _worker_group_index(raycluster, node_type)
patch = worker_delete_patch(group_index, [])
patch_payload.append(patch)
if len(patch_payload) == 0:
# No patch required.
return
logger.info(f"Submitting a scale request: {scale_request}")
self._patch(f"rayclusters/{self._cluster_name}", patch_payload)
def _add_launch_errors(
self,
shape: Dict[NodeType, int],
request_id: str,
details: str,
e: Optional[Exception] = None,
) -> None:
"""
Adds launch errors to the error queue.
Args:
shape: The shape of the nodes that failed to launch.
request_id: The request id of the launch request.
details: The details of the error.
e: The exception that caused the error.
"""
for node_type, count in shape.items():
self._launch_errors_queue.append(
LaunchNodeError(
node_type=node_type,
timestamp_ns=time.time_ns(),
count=count,
request_id=request_id,
details=details,
cause=e,
)
)
def _add_terminate_errors(
self,
ids: List[CloudInstanceId],
request_id: str,
details: str,
e: Optional[Exception] = None,
) -> None:
"""
Adds terminate errors to the error queue.
Args:
ids: The ids of the nodes that failed to terminate.
request_id: The request id of the terminate request.
details: The details of the error.
e: The exception that caused the error.
"""
for id in ids:
self._terminate_errors_queue.append(
TerminateNodeError(
cloud_instance_id=id,
timestamp_ns=time.time_ns(),
request_id=request_id,
details=details,
cause=e,
)
)
def _sync_with_api_server(self) -> None:
"""Fetches the RayCluster resource from the Kubernetes API server."""
self._ray_cluster = self._get(f"rayclusters/{self._cluster_name}")
self._cached_instances = self._fetch_instances()
@property
def ray_cluster(self) -> Dict[str, Any]:
return copy.deepcopy(self._ray_cluster)
@property
def instances(self) -> Dict[CloudInstanceId, CloudInstance]:
return copy.deepcopy(self._cached_instances)
@staticmethod
def _get_workers_delete_info(
ray_cluster_spec: Dict[str, Any], node_set: Set[CloudInstanceId]
) -> Tuple[Set[NodeType], Set[NodeType], Set[CloudInstanceId]]:
"""
Gets the worker groups that have pending deletes and the worker groups that
have finished deletes.
Returns:
worker_groups_with_pending_deletes: The worker groups that have pending
deletes.
worker_groups_with_finished_deletes: The worker groups that have finished
deletes.
worker_to_delete_set: A set of Pods that are included in the workersToDelete
field of any worker group.
"""
worker_groups_with_pending_deletes = set()
worker_groups_with_deletes = set()
worker_to_delete_set = set()
worker_groups = ray_cluster_spec["spec"].get("workerGroupSpecs", [])
for worker_group in worker_groups:
workersToDelete = worker_group.get("scaleStrategy", {}).get(
"workersToDelete", []
)
if not workersToDelete:
# No workers to delete in this group.
continue
node_type = worker_group["groupName"]
worker_groups_with_deletes.add(node_type)
for worker in workersToDelete:
worker_to_delete_set.add(worker)
if worker in node_set:
worker_groups_with_pending_deletes.add(node_type)
worker_groups_with_finished_deletes = (
worker_groups_with_deletes - worker_groups_with_pending_deletes
)
return (
worker_groups_with_pending_deletes,
worker_groups_with_finished_deletes,
worker_to_delete_set,
)
def _fetch_instances(self) -> Dict[CloudInstanceId, CloudInstance]:
"""
Fetches the pods from the Kubernetes API server and convert them to Ray
CloudInstance.
Returns:
A dict of CloudInstanceId to CloudInstance.
"""
# Get the pods resource version.
# Specifying a resource version in list requests is important for scalability:
# https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-get-and-list
resource_version = self._get_head_pod_resource_version()
if resource_version:
logger.info(
f"Listing pods for RayCluster {self._cluster_name}"
f" in namespace {self._namespace}"
f" at pods resource version >= {resource_version}."
)
# Filter pods by cluster_name.
label_selector = requests.utils.quote(f"ray.io/cluster={self._cluster_name}")
resource_path = f"pods?labelSelector={label_selector}"
if resource_version:
resource_path += (
f"&resourceVersion={resource_version}"
+ "&resourceVersionMatch=NotOlderThan"
)
pod_list = self._get(resource_path)
fetched_resource_version = pod_list["metadata"]["resourceVersion"]
logger.info(
f"Fetched pod data at resource version" f" {fetched_resource_version}."
)
# Extract node data from the pod list.
cloud_instances = {}
for pod in pod_list["items"]:
# Kubernetes sets metadata.deletionTimestamp immediately after admitting a
# request to delete an object. Full removal of the object may take some time
# after the deletion timestamp is set. See link for details:
# https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-deletion
if "deletionTimestamp" in pod["metadata"]:
# Ignore pods marked for termination.
continue
pod_name = pod["metadata"]["name"]
cloud_instance = self._cloud_instance_from_pod(pod)
if cloud_instance:
cloud_instances[pod_name] = cloud_instance
return cloud_instances
@staticmethod
def _cloud_instance_from_pod(pod: Dict[str, Any]) -> Optional[CloudInstance]:
"""
Convert a pod to a Ray CloudInstance.
Args:
pod: The pod resource dict.
"""
labels = pod["metadata"]["labels"]
if labels[KUBERAY_LABEL_KEY_KIND] == KUBERAY_KIND_HEAD:
kind = NodeKind.HEAD
type = labels[KUBERAY_LABEL_KEY_TYPE]
elif labels[KUBERAY_LABEL_KEY_KIND] == KUBERAY_KIND_WORKER:
kind = NodeKind.WORKER
type = labels[KUBERAY_LABEL_KEY_TYPE]
else:
# Other ray nodes types defined by KubeRay.
# e.g. this could also be `redis-cleanup`
# We will not track these nodes.
return None
# TODO: we should probably get this from the pod's env var (RAY_CLOUD_INSTANCE_ID)
# directly.
cloud_instance_id = pod["metadata"]["name"]
return CloudInstance(
cloud_instance_id=cloud_instance_id,
node_type=type,
node_kind=kind,
is_running=KubeRayProvider._is_running(pod),
)
@staticmethod
def _is_running(pod) -> bool:
"""Convert pod state to Ray NodeStatus
A cloud instance is considered running if the pod is in the running state,
else it could be pending/containers-terminated.
When it disappears from the list, it is considered terminated.
"""
if (
"containerStatuses" not in pod["status"]
or not pod["status"]["containerStatuses"]
):
return False
state = pod["status"]["containerStatuses"][0]["state"]
if "running" in state:
return True
return False
def _get(self, remote_path: str) -> Dict[str, Any]:
"""Get a resource from the Kubernetes API server."""
return self._k8s_api_client.get(remote_path)
def _patch(self, remote_path: str, payload: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Patch a resource on the Kubernetes API server."""
return self._k8s_api_client.patch(remote_path, payload)
def _get_head_pod_resource_version(self) -> str:
"""
Extract a recent pods resource version by reading the head pod's
metadata.resourceVersion of the response.
"""
if not RAY_HEAD_POD_NAME:
return None
pod_resp = self._get(f"pods/{RAY_HEAD_POD_NAME}")
return pod_resp["metadata"]["resourceVersion"]
|
KubeRayProvider
|
python
|
pytorch__pytorch
|
torch/fx/passes/utils/source_matcher_utils.py
|
{
"start": 940,
"end": 5781
}
|
class ____:
# Nodes in a particular partition
nodes: list[Node]
# The source these nodes decomposed from
source: Any
# Nodes in the graph that are needed as inputs to the partition
# These do not include the params of the partition
input_nodes: list[Node] = field(default_factory=list)
# Nodes in the partition that are being used by nodes outside of the
# partition
output_nodes: list[Node] = field(default_factory=list)
# Parameters that are being used
params: list[Node] = field(default_factory=list)
@compatibility(is_backward_compatible=False) # type: ignore[misc]
def get_source_partitions(
graph: Graph,
wanted_sources: list[Any],
filter_fn: Optional[Callable[[Node], bool]] = None,
) -> dict[Any, list[SourcePartition]]:
"""
Args:
graph: The graph we want to partition
wanted_sources: List of sources of nodes that were decomposed from this
source. This can be a function (ex. torch.nn.functional.linear) or a
leaf module type (ex. torch.nn.Linear).
Returns:
Dictionary mapping sources that were given to a list of SourcePartitions
that correspond to the list of nodes that were decomposed from the given
source.
"""
modules: dict[type, dict[str, list[Node]]] = {}
for node in graph.nodes:
# The metadata source_fn should contain a tuple of a unique name for the
# source, and the source function if the node is decomposed from a
# function, or the type of module if the node is decomposed from a leaf
# module
# TODO: Bypass "torch_fn" when "source_fn_stack" because now "torch_fn" can
# be different from "source_fn_stack", for example for the add_ node
# decomposed from batch norm. We should remove the check on "source_fn_stack"
# after we fix "torch_fn". T199561090
if (source_fn_st := node.meta.get("source_fn_stack", None)) is None and (
torch_fn := node.meta.get("torch_fn", None)
) is not None:
node_fqn, source_fn = torch_fn
source_fn_name = source_fn.split(".")[1]
if source_fn_name in wanted_sources:
diff_modules = modules.setdefault(source_fn_name, {})
partition = diff_modules.setdefault(node_fqn, [])
partition.append(node)
if (source_fn_st := node.meta.get("source_fn_stack", None)) is not None:
source_fn = source_fn_st[-1]
if source_fn[1] in wanted_sources:
diff_modules = modules.setdefault(source_fn[1], {})
partition = diff_modules.setdefault(source_fn[0], [])
partition.append(node)
def make_partition(nodes: list[Node], module_type: type) -> SourcePartition:
input_nodes = set()
output_nodes = set()
params = set()
for node in nodes:
for arg in node.args:
if isinstance(arg, Node) and arg not in nodes and arg.op != "get_attr":
input_nodes.add(arg)
if node.op == "get_attr":
params.add(node)
# get_attr nodes won't be output nodes
continue
for user in node.users:
if user not in nodes:
output_nodes.add(node)
return SourcePartition(
nodes,
module_type,
list(input_nodes),
list(output_nodes),
list(params), # type: ignore[arg-type]
)
ret: dict[type[Any], list[SourcePartition]] = {}
if filter_fn:
# For each partition, apply filter_fn and drop every partition whose nodes don't all
# satisfy the filter condition.
filtered_modules = {}
for tp, name_to_partition in modules.items():
filtered_name_to_partition = {
name: partition
for name, partition in name_to_partition.items()
if all(map(filter_fn, partition))
}
filtered_modules[tp] = filtered_name_to_partition
modules = filtered_modules
for k, v in modules.items():
ret[k] = [make_partition(partition, k) for partition in v.values()]
return ret
@compatibility(is_backward_compatible=False) # type: ignore[misc]
def check_subgraphs_connected(
subgraph1: SourcePartition, subgraph2: SourcePartition
) -> bool:
"""
Given two subgraphs A and B (in the form of a list of nodes), checks if
A has nodes connecting to at least one node in B -- aka there exists a node
in B that uses a node in A (not the other way around).
"""
for node in reversed(subgraph1.nodes):
for user in node.users:
if user in subgraph2.nodes:
return True
return False
|
SourcePartition
|
python
|
huggingface__transformers
|
src/transformers/models/vits/modeling_vits.py
|
{
"start": 54073,
"end": 61536
}
|
class ____(VitsPreTrainedModel):
def __init__(self, config: VitsConfig):
super().__init__(config)
self.config = config
self.text_encoder = VitsTextEncoder(config)
self.flow = VitsResidualCouplingBlock(config)
self.decoder = VitsHifiGan(config)
if config.use_stochastic_duration_prediction:
self.duration_predictor = VitsStochasticDurationPredictor(config)
else:
self.duration_predictor = VitsDurationPredictor(config)
if config.num_speakers > 1:
self.embed_speaker = nn.Embedding(config.num_speakers, config.speaker_embedding_size)
# This is used only for training.
self.posterior_encoder = VitsPosteriorEncoder(config)
# These parameters control the synthesised speech properties
self.speaking_rate = config.speaking_rate
self.noise_scale = config.noise_scale
self.noise_scale_duration = config.noise_scale_duration
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
speaker_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.FloatTensor] = None,
) -> Union[tuple[Any], VitsModelOutput]:
r"""
speaker_id (`int`, *optional*):
Which speaker embedding to use. Only used for multispeaker models.
labels (`torch.FloatTensor` of shape `(batch_size, config.spectrogram_bins, sequence_length)`, *optional*):
Float values of target spectrogram. Timesteps set to `-100.0` are ignored (masked) for the loss
computation.
Example:
```python
>>> from transformers import VitsTokenizer, VitsModel, set_seed
>>> import torch
>>> tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng")
>>> model = VitsModel.from_pretrained("facebook/mms-tts-eng")
>>> inputs = tokenizer(text="Hello - my dog is cute", return_tensors="pt")
>>> set_seed(555) # make deterministic
>>> with torch.no_grad():
... outputs = model(inputs["input_ids"])
>>> outputs.waveform.shape
torch.Size([1, 45824])
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
raise NotImplementedError("Training of VITS is not supported yet.")
mask_dtype = self.text_encoder.embed_tokens.weight.dtype
if attention_mask is not None:
input_padding_mask = attention_mask.unsqueeze(-1).to(mask_dtype)
else:
input_padding_mask = torch.ones_like(input_ids).unsqueeze(-1).to(mask_dtype)
if self.config.num_speakers > 1 and speaker_id is not None:
if not 0 <= speaker_id < self.config.num_speakers:
raise ValueError(f"Set `speaker_id` in the range 0-{self.config.num_speakers - 1}.")
if isinstance(speaker_id, int):
speaker_id = torch.full(size=(1,), fill_value=speaker_id, device=self.device)
speaker_embeddings = self.embed_speaker(speaker_id).unsqueeze(-1)
else:
speaker_embeddings = None
text_encoder_output = self.text_encoder(
input_ids=input_ids,
padding_mask=input_padding_mask,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = text_encoder_output[0] if not return_dict else text_encoder_output.last_hidden_state
hidden_states = hidden_states.transpose(1, 2)
input_padding_mask = input_padding_mask.transpose(1, 2)
prior_means = text_encoder_output[1] if not return_dict else text_encoder_output.prior_means
prior_log_variances = text_encoder_output[2] if not return_dict else text_encoder_output.prior_log_variances
if self.config.use_stochastic_duration_prediction:
log_duration = self.duration_predictor(
hidden_states,
input_padding_mask,
speaker_embeddings,
reverse=True,
noise_scale=self.noise_scale_duration,
)
else:
log_duration = self.duration_predictor(hidden_states, input_padding_mask, speaker_embeddings)
length_scale = 1.0 / self.speaking_rate
duration = torch.ceil(torch.exp(log_duration) * input_padding_mask * length_scale)
predicted_lengths = torch.clamp_min(torch.sum(duration, [1, 2]), 1).long()
# Create a padding mask for the output lengths of shape (batch, 1, max_output_length)
indices = torch.arange(predicted_lengths.max(), dtype=predicted_lengths.dtype, device=predicted_lengths.device)
output_padding_mask = indices.unsqueeze(0) < predicted_lengths.unsqueeze(1)
output_padding_mask = output_padding_mask.unsqueeze(1).to(input_padding_mask.dtype)
# Reconstruct an attention tensor of shape (batch, 1, out_length, in_length)
attn_mask = torch.unsqueeze(input_padding_mask, 2) * torch.unsqueeze(output_padding_mask, -1)
batch_size, _, output_length, input_length = attn_mask.shape
cum_duration = torch.cumsum(duration, -1).view(batch_size * input_length, 1)
indices = torch.arange(output_length, dtype=duration.dtype, device=duration.device)
valid_indices = indices.unsqueeze(0) < cum_duration
valid_indices = valid_indices.to(attn_mask.dtype).view(batch_size, input_length, output_length)
padded_indices = valid_indices - nn.functional.pad(valid_indices, [0, 0, 1, 0, 0, 0])[:, :-1]
attn = padded_indices.unsqueeze(1).transpose(2, 3) * attn_mask
# Expand prior distribution
prior_means = torch.matmul(attn.squeeze(1), prior_means).transpose(1, 2)
prior_log_variances = torch.matmul(attn.squeeze(1), prior_log_variances).transpose(1, 2)
prior_latents = prior_means + torch.randn_like(prior_means) * torch.exp(prior_log_variances) * self.noise_scale
latents = self.flow(prior_latents, output_padding_mask, speaker_embeddings, reverse=True)
spectrogram = latents * output_padding_mask
waveform = self.decoder(spectrogram, speaker_embeddings)
waveform = waveform.squeeze(1)
sequence_lengths = predicted_lengths * np.prod(self.config.upsample_rates)
if not return_dict:
outputs = (waveform, sequence_lengths, spectrogram) + text_encoder_output[3:]
return outputs
return VitsModelOutput(
waveform=waveform,
sequence_lengths=sequence_lengths,
spectrogram=spectrogram,
hidden_states=text_encoder_output.hidden_states,
attentions=text_encoder_output.attentions,
)
__all__ = ["VitsModel", "VitsPreTrainedModel"]
|
VitsModel
|
python
|
scikit-image__scikit-image
|
tests/skimage/morphology/test_extrema.py
|
{
"start": 11380,
"end": 26972
}
|
class ____(unittest.TestCase):
"""Some tests for local_minima are included as well."""
supported_dtypes = [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.int8,
np.int16,
np.int32,
np.int64,
np.float32,
np.float64,
]
image = np.array(
[
[1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 2, 0, 0, 3, 3, 0, 0, 4, 0, 2, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 4, 4, 0, 3, 0, 0, 0],
[0, 2, 0, 1, 0, 2, 1, 0, 0, 0, 0, 3, 0, 0, 0],
[0, 0, 2, 0, 2, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0],
],
dtype=np.uint8,
)
# Connectivity 2, maxima can touch border, returned with default values
expected_default = np.array(
[
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
# Connectivity 1 (cross), maxima can touch border
expected_cross = np.array(
[
[1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
def test_empty(self):
"""Test result with empty image."""
result = extrema.local_maxima(np.array([[]]), indices=False)
assert result.size == 0
assert result.dtype == bool
assert result.shape == (1, 0)
result = extrema.local_maxima(np.array([]), indices=True)
assert isinstance(result, tuple)
assert len(result) == 1
assert result[0].size == 0
assert result[0].dtype == np.intp
result = extrema.local_maxima(np.array([[]]), indices=True)
assert isinstance(result, tuple)
assert len(result) == 2
assert result[0].size == 0
assert result[0].dtype == np.intp
assert result[1].size == 0
assert result[1].dtype == np.intp
def test_dtypes(self):
"""Test results with default configuration for all supported dtypes."""
for dtype in self.supported_dtypes:
result = extrema.local_maxima(self.image.astype(dtype))
assert result.dtype == bool
assert_equal(result, self.expected_default)
def test_dtypes_old(self):
"""
Test results with default configuration and data copied from old unit
tests for all supported dtypes.
"""
data = np.array(
[
[10, 11, 13, 14, 14, 15, 14, 14, 13, 11],
[11, 13, 15, 16, 16, 16, 16, 16, 15, 13],
[13, 15, 40, 40, 18, 18, 18, 60, 60, 15],
[14, 16, 40, 40, 19, 19, 19, 60, 60, 16],
[14, 16, 18, 19, 19, 19, 19, 19, 18, 16],
[15, 16, 18, 19, 19, 20, 19, 19, 18, 16],
[14, 16, 18, 19, 19, 19, 19, 19, 18, 16],
[14, 16, 80, 80, 19, 19, 19, 100, 100, 16],
[13, 15, 80, 80, 18, 18, 18, 100, 100, 15],
[11, 13, 15, 16, 16, 16, 16, 16, 15, 13],
],
dtype=np.uint8,
)
expected = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
for dtype in self.supported_dtypes:
image = data.astype(dtype)
result = extrema.local_maxima(image)
assert result.dtype == bool
assert_equal(result, expected)
def test_connectivity(self):
"""Test results if footprint is a scalar."""
# Connectivity 1: generates cross shaped footprint
result_conn1 = extrema.local_maxima(self.image, connectivity=1)
assert result_conn1.dtype == bool
assert_equal(result_conn1, self.expected_cross)
# Connectivity 2: generates square shaped footprint
result_conn2 = extrema.local_maxima(self.image, connectivity=2)
assert result_conn2.dtype == bool
assert_equal(result_conn2, self.expected_default)
# Connectivity 3: generates square shaped footprint
result_conn3 = extrema.local_maxima(self.image, connectivity=3)
assert result_conn3.dtype == bool
assert_equal(result_conn3, self.expected_default)
def test_footprint(self):
"""Test results if footprint is given."""
footprint_cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)
result_footprint_cross = extrema.local_maxima(
self.image, footprint=footprint_cross
)
assert result_footprint_cross.dtype == bool
assert_equal(result_footprint_cross, self.expected_cross)
for footprint in [
((True,) * 3,) * 3,
np.ones((3, 3), dtype=np.float64),
np.ones((3, 3), dtype=np.uint8),
np.ones((3, 3), dtype=bool),
]:
# Test different dtypes for footprint which expects a boolean array
# but will accept and convert other types if possible
result_footprint_square = extrema.local_maxima(
self.image, footprint=footprint
)
assert result_footprint_square.dtype == bool
assert_equal(result_footprint_square, self.expected_default)
footprint_x = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]], dtype=bool)
expected_footprint_x = np.array(
[
[1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
],
dtype=bool,
)
result_footprint_x = extrema.local_maxima(self.image, footprint=footprint_x)
assert result_footprint_x.dtype == bool
assert_equal(result_footprint_x, expected_footprint_x)
def test_indices(self):
"""Test output if indices of peaks are desired."""
# Connectivity 1
expected_conn1 = np.nonzero(self.expected_cross)
result_conn1 = extrema.local_maxima(self.image, connectivity=1, indices=True)
assert_equal(result_conn1, expected_conn1)
# Connectivity 2
expected_conn2 = np.nonzero(self.expected_default)
result_conn2 = extrema.local_maxima(self.image, connectivity=2, indices=True)
assert_equal(result_conn2, expected_conn2)
def test_allow_borders(self):
"""Test maxima detection at the image border."""
# Use connectivity 1 to allow many maxima, only filtering at border is
# of interest
result_with_border = extrema.local_maxima(
self.image, connectivity=1, allow_borders=True
)
assert result_with_border.dtype == bool
assert_equal(result_with_border, self.expected_cross)
expected_without_border = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
result_without_border = extrema.local_maxima(
self.image, connectivity=1, allow_borders=False
)
assert result_without_border.dtype == bool
assert_equal(result_without_border, expected_without_border)
def test_nd(self):
"""Test one- and three-dimensional case."""
# One-dimension
x_1d = np.array([1, 1, 0, 1, 2, 3, 0, 2, 1, 2, 0])
expected_1d = np.array([1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0], dtype=bool)
result_1d = extrema.local_maxima(x_1d)
assert result_1d.dtype == bool
assert_equal(result_1d, expected_1d)
# 3-dimensions (adapted from old unit test)
x_3d = np.zeros((8, 8, 8), dtype=np.uint8)
expected_3d = np.zeros((8, 8, 8), dtype=bool)
# first maximum: only one pixel
x_3d[1, 1:3, 1:3] = 100
x_3d[2, 2, 2] = 200
x_3d[3, 1:3, 1:3] = 100
expected_3d[2, 2, 2] = 1
# second maximum: three pixels in z-direction
x_3d[5:8, 1, 1] = 200
expected_3d[5:8, 1, 1] = 1
        # third: two maxima, in planes 0 and 2.
x_3d[0, 5:8, 5:8] = 200
x_3d[1, 6, 6] = 100
x_3d[2, 5:7, 5:7] = 200
x_3d[0:3, 5:8, 5:8] += 50
expected_3d[0, 5:8, 5:8] = 1
expected_3d[2, 5:7, 5:7] = 1
        # fourth: one maximum in the corner of the square
x_3d[6:8, 6:8, 6:8] = 200
x_3d[7, 7, 7] = 255
expected_3d[7, 7, 7] = 1
result_3d = extrema.local_maxima(x_3d)
assert result_3d.dtype == bool
assert_equal(result_3d, expected_3d)
def test_constant(self):
"""Test behaviour for 'flat' images."""
const_image = np.full((7, 6), 42, dtype=np.uint8)
expected = np.zeros((7, 6), dtype=np.uint8)
for dtype in self.supported_dtypes:
const_image = const_image.astype(dtype)
# test for local maxima
result = extrema.local_maxima(const_image)
assert result.dtype == bool
assert_equal(result, expected)
# test for local minima
result = extrema.local_minima(const_image)
assert result.dtype == bool
assert_equal(result, expected)
def test_extrema_float(self):
"""Specific tests for float type."""
# Copied from old unit test for local_maxima
image = np.array(
[
[0.10, 0.11, 0.13, 0.14, 0.14, 0.15, 0.14, 0.14, 0.13, 0.11],
[0.11, 0.13, 0.15, 0.16, 0.16, 0.16, 0.16, 0.16, 0.15, 0.13],
[0.13, 0.15, 0.40, 0.40, 0.18, 0.18, 0.18, 0.60, 0.60, 0.15],
[0.14, 0.16, 0.40, 0.40, 0.19, 0.19, 0.19, 0.60, 0.60, 0.16],
[0.14, 0.16, 0.18, 0.19, 0.19, 0.19, 0.19, 0.19, 0.18, 0.16],
[0.15, 0.182, 0.18, 0.19, 0.204, 0.20, 0.19, 0.19, 0.18, 0.16],
[0.14, 0.16, 0.18, 0.19, 0.19, 0.19, 0.19, 0.19, 0.18, 0.16],
[0.14, 0.16, 0.80, 0.80, 0.19, 0.19, 0.19, 1.0, 1.0, 0.16],
[0.13, 0.15, 0.80, 0.80, 0.18, 0.18, 0.18, 1.0, 1.0, 0.15],
[0.11, 0.13, 0.15, 0.16, 0.16, 0.16, 0.16, 0.16, 0.15, 0.13],
],
dtype=np.float32,
)
inverted_image = 1.0 - image
expected_result = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
# Test for local maxima with automatic step calculation
result = extrema.local_maxima(image)
assert result.dtype == bool
assert_equal(result, expected_result)
# Test for local minima with automatic step calculation
result = extrema.local_minima(inverted_image)
assert result.dtype == bool
assert_equal(result, expected_result)
def test_extrema_small_float(self):
image = np.array(
[
[9.89232736e-20, 8.78543302e-20, 8.78543302e-20, 9.89232736e-20],
[8.78543302e-20, 6.38842355e-20, 6.38842355e-20, 8.78543302e-20],
[8.78543302e-20, 6.38842355e-20, 6.38842355e-20, 8.78543302e-20],
[9.89232736e-20, 8.78543302e-20, 8.78543302e-20, 9.89232736e-20],
]
)
result = extrema.local_minima(image)
expected_result = np.array(
[
[False, False, False, False],
[False, True, True, False],
[False, True, True, False],
[False, False, False, False],
]
)
assert_equal(result, expected_result)
def test_exceptions(self):
"""Test if input validation triggers correct exceptions."""
# Mismatching number of dimensions
with raises(ValueError, match="number of dimensions"):
extrema.local_maxima(self.image, footprint=np.ones((3, 3, 3), dtype=bool))
with raises(ValueError, match="number of dimensions"):
extrema.local_maxima(self.image, footprint=np.ones((3,), dtype=bool))
# All dimensions in footprint must be of size 3
with raises(ValueError, match="dimension size"):
extrema.local_maxima(self.image, footprint=np.ones((2, 3), dtype=bool))
with raises(ValueError, match="dimension size"):
extrema.local_maxima(self.image, footprint=np.ones((5, 5), dtype=bool))
with raises(TypeError, match="float16 which is not supported"):
extrema.local_maxima(np.empty(1, dtype=np.float16))
def test_small_array(self):
"""Test output for arrays with dimension smaller 3.
If any dimension of an array is smaller than 3 and `allow_borders` is
false a footprint, which has at least 3 elements in each
dimension, can't be applied. This is an implementation detail so
`local_maxima` should still return valid output (see gh-3261).
If `allow_borders` is true the array is padded internally and there is
no problem.
"""
warning_msg = "maxima can't exist .* any dimension smaller 3 .*"
x = np.array([0, 1])
extrema.local_maxima(x, allow_borders=True) # no warning
with warns(UserWarning, match=warning_msg):
result = extrema.local_maxima(x, allow_borders=False)
assert_equal(result, [0, 0])
assert result.dtype == bool
x = np.array([[1, 2], [2, 2]])
extrema.local_maxima(x, allow_borders=True, indices=True) # no warning
with warns(UserWarning, match=warning_msg):
result = extrema.local_maxima(x, allow_borders=False, indices=True)
assert_equal(result, np.zeros((2, 0), dtype=np.intp))
assert result[0].dtype == np.intp
assert result[1].dtype == np.intp
|
TestLocalMaxima
|
python
|
getsentry__sentry
|
src/sentry/uptime/migrations/0045_backfill_detector_thresholds.py
|
{
"start": 1686,
"end": 2253
}
|
class ____(CheckedMigration):
# This is a data migration that can take a while on large deployments
# and should be run manually after code deployment
is_post_deployment = True
dependencies = [
("uptime", "0044_remove_project_uptime_subscription"),
("workflow_engine", "0085_crons_link_detectors_to_all_workflows"),
]
operations = [
migrations.RunPython(
backfill_detector_thresholds,
migrations.RunPython.noop,
hints={"tables": ["workflow_engine_detector"]},
)
]
|
Migration
|
python
|
facebookresearch__faiss
|
tests/test_graph_based.py
|
{
"start": 372,
"end": 6926
}
|
class ____(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
d = 32
nt = 0
nb = 1500
nq = 500
(_, self.xb, self.xq) = get_dataset_2(d, nt, nb, nq)
index = faiss.IndexFlatL2(d)
index.add(self.xb)
Dref, Iref = index.search(self.xq, 1)
self.Iref = Iref
def test_hnsw(self):
d = self.xq.shape[1]
index = faiss.IndexHNSWFlat(d, 16)
index.add(self.xb)
Dhnsw, Ihnsw = index.search(self.xq, 1)
self.assertGreaterEqual((self.Iref == Ihnsw).sum(), 460)
self.io_and_retest(index, Dhnsw, Ihnsw)
def test_range_search(self):
index_flat = faiss.IndexFlat(self.xb.shape[1])
index_flat.add(self.xb)
D, _ = index_flat.search(self.xq, 10)
radius = np.median(D[:, -1])
lims_ref, Dref, Iref = index_flat.range_search(self.xq, radius)
index = faiss.IndexHNSWFlat(self.xb.shape[1], 16)
index.add(self.xb)
lims, D, I = index.range_search(self.xq, radius)
nmiss = 0
# check if returned results are a subset of the reference results
for i in range(len(self.xq)):
ref = Iref[lims_ref[i]: lims_ref[i + 1]]
new = I[lims[i]: lims[i + 1]]
self.assertLessEqual(set(new), set(ref))
nmiss += len(ref) - len(new)
# currently we miss 405 / 6019 neighbors
self.assertLessEqual(nmiss, lims_ref[-1] * 0.1)
def test_hnsw_unbounded_queue(self):
d = self.xq.shape[1]
index = faiss.IndexHNSWFlat(d, 16)
index.add(self.xb)
index.hnsw.search_bounded_queue = False
Dhnsw, Ihnsw = index.search(self.xq, 1)
self.assertGreaterEqual((self.Iref == Ihnsw).sum(), 460)
self.io_and_retest(index, Dhnsw, Ihnsw)
def test_hnsw_no_init_level0(self):
d = self.xq.shape[1]
index = faiss.IndexHNSWFlat(d, 16)
index.init_level0 = False
index.add(self.xb)
Dhnsw, Ihnsw = index.search(self.xq, 1)
# This is expected to be smaller because we are not initializing
# vectors into level 0.
self.assertGreaterEqual((self.Iref == Ihnsw).sum(), 25)
self.io_and_retest(index, Dhnsw, Ihnsw)
def io_and_retest(self, index, Dhnsw, Ihnsw):
index2 = faiss.deserialize_index(faiss.serialize_index(index))
Dhnsw2, Ihnsw2 = index2.search(self.xq, 1)
self.assertTrue(np.all(Dhnsw2 == Dhnsw))
self.assertTrue(np.all(Ihnsw2 == Ihnsw))
# also test clone
index3 = faiss.clone_index(index)
Dhnsw3, Ihnsw3 = index3.search(self.xq, 1)
self.assertTrue(np.all(Dhnsw3 == Dhnsw))
self.assertTrue(np.all(Ihnsw3 == Ihnsw))
def test_hnsw_2level(self):
d = self.xq.shape[1]
quant = faiss.IndexFlatL2(d)
index = faiss.IndexHNSW2Level(quant, 256, 8, 8)
index.train(self.xb)
index.add(self.xb)
Dhnsw, Ihnsw = index.search(self.xq, 1)
self.assertGreaterEqual((self.Iref == Ihnsw).sum(), 307)
self.io_and_retest(index, Dhnsw, Ihnsw)
def test_hnsw_2level_mixed_search(self):
d = self.xq.shape[1]
quant = faiss.IndexFlatL2(d)
storage = faiss.IndexIVFPQ(quant, d, 32, 8, 8)
storage.make_direct_map()
index = faiss.IndexHNSW2Level(quant, 32, 8, 8)
index.storage = storage
index.train(self.xb)
index.add(self.xb)
Dhnsw, Ihnsw = index.search(self.xq, 1)
# It is expected that the mixed search will perform worse.
self.assertGreaterEqual((self.Iref == Ihnsw).sum(), 200)
self.io_and_retest(index, Dhnsw, Ihnsw)
def test_add_0_vecs(self):
index = faiss.IndexHNSWFlat(10, 16)
zero_vecs = np.zeros((0, 10), dtype='float32')
        # adding zero vectors must not trigger an infinite loop
index.add(zero_vecs)
def test_hnsw_IP(self):
d = self.xq.shape[1]
index_IP = faiss.IndexFlatIP(d)
index_IP.add(self.xb)
Dref, Iref = index_IP.search(self.xq, 1)
index = faiss.IndexHNSWFlat(d, 16, faiss.METRIC_INNER_PRODUCT)
index.add(self.xb)
Dhnsw, Ihnsw = index.search(self.xq, 1)
self.assertGreaterEqual((Iref == Ihnsw).sum(), 470)
mask = Iref[:, 0] == Ihnsw[:, 0]
assert np.allclose(Dref[mask, 0], Dhnsw[mask, 0])
def test_ndis_stats(self):
d = self.xq.shape[1]
index = faiss.IndexHNSWFlat(d, 16)
index.add(self.xb)
stats = faiss.cvar.hnsw_stats
stats.reset()
Dhnsw, Ihnsw = index.search(self.xq, 1)
self.assertGreater(stats.ndis, len(self.xq) * index.hnsw.efSearch)
def test_io_no_storage(self):
d = self.xq.shape[1]
index = faiss.IndexHNSWFlat(d, 16)
index.add(self.xb)
Dref, Iref = index.search(self.xq, 5)
# test writing without storage
index2 = faiss.deserialize_index(
faiss.serialize_index(index, faiss.IO_FLAG_SKIP_STORAGE)
)
self.assertEqual(index2.storage, None)
self.assertRaises(
RuntimeError,
index2.search, self.xb, 1)
# make sure we can store an index with empty storage
index4 = faiss.deserialize_index(
faiss.serialize_index(index2))
# add storage afterwards
index.storage = faiss.clone_index(index.storage)
index.own_fields = True
Dnew, Inew = index.search(self.xq, 5)
np.testing.assert_array_equal(Dnew, Dref)
np.testing.assert_array_equal(Inew, Iref)
if False:
# test reading without storage
# not implemented because it is hard to skip over an index
index3 = faiss.deserialize_index(
faiss.serialize_index(index), faiss.IO_FLAG_SKIP_STORAGE
)
self.assertEqual(index3.storage, None)
def test_hnsw_reset(self):
d = self.xb.shape[1]
index_flat = faiss.IndexFlat(d)
index_flat.add(self.xb)
self.assertEqual(index_flat.ntotal, self.xb.shape[0])
index_hnsw = faiss.IndexHNSW(index_flat)
index_hnsw.add(self.xb)
# * 2 because we add to storage twice. This is just for testing
# that storage gets cleared correctly.
self.assertEqual(index_hnsw.ntotal, self.xb.shape[0] * 2)
index_hnsw.reset()
self.assertEqual(index_flat.ntotal, 0)
self.assertEqual(index_hnsw.ntotal, 0)
|
TestHNSW
|
python
|
numba__numba
|
numba/core/pythonapi.py
|
{
"start": 3084,
"end": 4971
}
|
class ____(object):
def __init__(self, pyapi, env, env_body, env_ptr):
assert isinstance(env, lowering.Environment)
self.pyapi = pyapi
self.env = env
self.env_body = env_body
self.env_ptr = env_ptr
def add_const(self, const):
"""
Add a constant to the environment, return its index.
"""
# All constants are frozen inside the environment
if isinstance(const, str):
const = sys.intern(const)
for index, val in enumerate(self.env.consts):
if val is const:
break
else:
index = len(self.env.consts)
self.env.consts.append(const)
return index
def read_const(self, index):
"""
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
The returned LLVM value may have NULL value at runtime which indicates
an error at runtime.
"""
assert index < len(self.env.consts)
builder = self.pyapi.builder
consts = self.env_body.consts
ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
with builder.if_else(cgutils.is_not_null(builder, consts)) as \
(br_not_null, br_null):
with br_not_null:
getitem = self.pyapi.list_getitem(consts, index)
builder.store(getitem, ret)
with br_null:
# This can happen when the Environment is accidentally released
# and has subsequently been garbage collected.
self.pyapi.err_set_string(
"PyExc_RuntimeError",
"`env.consts` is NULL in `read_const`",
)
return builder.load(ret)
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
|
EnvironmentManager
|
python
|
Textualize__textual
|
docs/examples/guide/dom2.py
|
{
"start": 88,
"end": 263
}
|
class ____(App):
def compose(self) -> ComposeResult:
yield Header()
yield Footer()
if __name__ == "__main__":
app = ExampleApp()
app.run()
|
ExampleApp
|
python
|
walkccc__LeetCode
|
solutions/3210. Find the Encrypted String/3210.py
|
{
"start": 0,
"end": 113
}
|
class ____:
def getEncryptedString(self, s: str, k: int) -> str:
k %= len(s)
return s[k:] + s[0:k]
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/metrics.py
|
{
"start": 98357,
"end": 99976
}
|
class ____(Mean):
"""Computes root mean squared error metric between `y_true` and `y_pred`.
Standalone usage:
>>> m = tf.keras.metrics.RootMeanSquaredError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.70710677
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.RootMeanSquaredError()])
```
"""
def __init__(self, name='root_mean_squared_error', dtype=None):
super(RootMeanSquaredError, self).__init__(name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates root mean squared error statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
error_sq = math_ops.squared_difference(y_pred, y_true)
return super(RootMeanSquaredError, self).update_state(
error_sq, sample_weight=sample_weight)
def result(self):
return math_ops.sqrt(math_ops.div_no_nan(self.total, self.count))
|
RootMeanSquaredError
|
python
|
google__pytype
|
pytype/tests/test_test_code.py
|
{
"start": 3349,
"end": 4350
}
|
class ____(test_base.BaseTest):
"""Tests for unittest.mock."""
def test_patch(self):
self.Check("""
import unittest
from unittest import mock
foo = __any_object__
bar = __any_object__
class Foo(unittest.TestCase):
def setUp(self):
super().setUp()
self.some_mock = mock.patch.object(foo, 'foo').start()
self.some_mock.return_value = True
def test_bar(self):
other_mock = mock.patch.object(bar, 'bar').start()
other_mock.return_value.__enter__ = lambda x: x
""")
def test_decorated_setup(self):
self.Check("""
from typing import Any
import unittest
from unittest import mock
random_module: Any
class FooTest(unittest.TestCase):
@mock.patch.object(random_module, 'attr')
def setUp(self):
self.x = 42
def test_something(self):
assert_type(self.x, int)
""")
if __name__ == "__main__":
test_base.main()
|
MockTest
|